Diffstat (limited to 'arch/powerpc')
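Each entry below gives the file mode, the file path, and the total number of lines changed (the trailing count); entries marked "(renamed from ...)" record file renames. As a minimal sketch, a per-file diffstat restricted to this subtree can be regenerated with git (the commit range here is only a placeholder, not taken from this page):

    # per-file change counts for arch/powerpc between two commits
    git diff --stat <old-commit>..<new-commit> -- arch/powerpc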
-rw-r--r--arch/powerpc/Kbuild3
-rw-r--r--arch/powerpc/Kconfig428
-rw-r--r--arch/powerpc/Kconfig.debug59
-rw-r--r--arch/powerpc/Makefile164
-rw-r--r--arch/powerpc/Makefile.postlink2
-rw-r--r--arch/powerpc/boot/.gitignore2
-rw-r--r--arch/powerpc/boot/44x.h5
-rw-r--r--arch/powerpc/boot/4xx.h5
-rw-r--r--arch/powerpc/boot/Makefile67
-rw-r--r--arch/powerpc/boot/crt0.S81
-rw-r--r--arch/powerpc/boot/cuboot-hotfoot.c2
-rw-r--r--arch/powerpc/boot/decompress.c5
-rw-r--r--arch/powerpc/boot/devtree.c59
-rw-r--r--arch/powerpc/boot/dts/Makefile1
-rw-r--r--arch/powerpc/boot/dts/a4m072.dts6
-rw-r--r--arch/powerpc/boot/dts/akebono.dts8
-rw-r--r--arch/powerpc/boot/dts/bluestone.dts27
-rw-r--r--arch/powerpc/boot/dts/canyonlands.dts22
-rw-r--r--arch/powerpc/boot/dts/charon.dts8
-rw-r--r--arch/powerpc/boot/dts/currituck.dts6
-rw-r--r--arch/powerpc/boot/dts/digsy_mtc.dts16
-rw-r--r--arch/powerpc/boot/dts/ep405.dts230
-rw-r--r--arch/powerpc/boot/dts/fsl/bsc9131si-post.dtsi4
-rw-r--r--arch/powerpc/boot/dts/fsl/bsc9132si-post.dtsi4
-rw-r--r--arch/powerpc/boot/dts/fsl/c293si-post.dtsi4
-rw-r--r--arch/powerpc/boot/dts/fsl/e500v1_power_isa.dtsi51
-rw-r--r--arch/powerpc/boot/dts/fsl/mpc8540ads.dts2
-rw-r--r--arch/powerpc/boot/dts/fsl/mpc8541cds.dts2
-rw-r--r--arch/powerpc/boot/dts/fsl/mpc8555cds.dts2
-rw-r--r--arch/powerpc/boot/dts/fsl/mpc8560ads.dts2
-rw-r--r--arch/powerpc/boot/dts/fsl/p1010si-post.dtsi29
-rw-r--r--arch/powerpc/boot/dts/fsl/p2020si-post.dtsi8
-rw-r--r--arch/powerpc/boot/dts/fsl/p2041si-post.dtsi16
-rw-r--r--arch/powerpc/boot/dts/fsl/p4080ds.dts43
-rw-r--r--arch/powerpc/boot/dts/fsl/qoriq-fman3l-0.dtsi2
-rw-r--r--arch/powerpc/boot/dts/fsl/sbc8641d.dts176
-rw-r--r--arch/powerpc/boot/dts/fsl/t1023rdb.dts2
-rw-r--r--arch/powerpc/boot/dts/fsl/t1024rdb.dts1
-rw-r--r--arch/powerpc/boot/dts/fsl/t1040rdb-rev-a.dts30
-rw-r--r--arch/powerpc/boot/dts/fsl/t1040rdb.dts107
-rw-r--r--arch/powerpc/boot/dts/fsl/t1040si-post.dtsi78
-rw-r--r--arch/powerpc/boot/dts/fsl/t104xrdb.dtsi4
-rw-r--r--arch/powerpc/boot/dts/fsl/t4240rdb.dts1
-rw-r--r--arch/powerpc/boot/dts/glacier.dts4
-rw-r--r--arch/powerpc/boot/dts/haleakala.dts2
-rw-r--r--arch/powerpc/boot/dts/icon.dts11
-rw-r--r--arch/powerpc/boot/dts/katmai.dts24
-rw-r--r--arch/powerpc/boot/dts/kilauea.dts32
-rw-r--r--arch/powerpc/boot/dts/ksi8560.dts2
-rw-r--r--arch/powerpc/boot/dts/lite5200.dts8
-rw-r--r--arch/powerpc/boot/dts/lite5200b.dts8
-rw-r--r--arch/powerpc/boot/dts/makalu.dts4
-rw-r--r--arch/powerpc/boot/dts/media5200.dts8
-rw-r--r--arch/powerpc/boot/dts/mgcoge.dts7
-rw-r--r--arch/powerpc/boot/dts/microwatt.dts152
-rw-r--r--arch/powerpc/boot/dts/mpc5200b.dtsi6
-rw-r--r--arch/powerpc/boot/dts/mucmc52.dts6
-rw-r--r--arch/powerpc/boot/dts/o2d.dts2
-rw-r--r--arch/powerpc/boot/dts/o2d.dtsi8
-rw-r--r--arch/powerpc/boot/dts/o2dnt2.dts2
-rw-r--r--arch/powerpc/boot/dts/o3dnt.dts2
-rw-r--r--arch/powerpc/boot/dts/pcm030.dts6
-rw-r--r--arch/powerpc/boot/dts/pcm032.dts12
-rw-r--r--arch/powerpc/boot/dts/redwood.dts25
-rw-r--r--arch/powerpc/boot/dts/sbc8548-altflash.dts111
-rw-r--r--arch/powerpc/boot/dts/sbc8548-post.dtsi289
-rw-r--r--arch/powerpc/boot/dts/sbc8548-pre.dtsi48
-rw-r--r--arch/powerpc/boot/dts/sbc8548.dts106
-rw-r--r--arch/powerpc/boot/dts/stx_gp3_8560.dts2
-rw-r--r--arch/powerpc/boot/dts/stxssa8555.dts2
-rw-r--r--arch/powerpc/boot/dts/tqm5200.dts8
-rw-r--r--arch/powerpc/boot/dts/tqm8540.dts2
-rw-r--r--arch/powerpc/boot/dts/tqm8541.dts2
-rw-r--r--arch/powerpc/boot/dts/tqm8555.dts2
-rw-r--r--arch/powerpc/boot/dts/tqm8560.dts2
-rw-r--r--arch/powerpc/boot/dts/turris1x.dts483
-rw-r--r--arch/powerpc/boot/dts/virtex440-ml507.dts406
-rw-r--r--arch/powerpc/boot/dts/virtex440-ml510.dts466
-rw-r--r--arch/powerpc/boot/dts/walnut.dts246
-rw-r--r--arch/powerpc/boot/dts/wii.dts18
-rw-r--r--arch/powerpc/boot/dts/xpedite5200.dts2
-rw-r--r--arch/powerpc/boot/dts/xpedite5200_xmon.dts2
-rw-r--r--arch/powerpc/boot/dummy.c4
-rw-r--r--arch/powerpc/boot/ep405.c71
-rwxr-xr-x[-rw-r--r--]arch/powerpc/boot/install.sh22
-rw-r--r--arch/powerpc/boot/main.c4
-rw-r--r--arch/powerpc/boot/microwatt.c24
-rw-r--r--arch/powerpc/boot/ns16550.c9
-rw-r--r--arch/powerpc/boot/opal-calls.S6
-rw-r--r--arch/powerpc/boot/ops.h13
-rw-r--r--arch/powerpc/boot/ppc_asm.h10
-rw-r--r--arch/powerpc/boot/ps3.c9
-rw-r--r--arch/powerpc/boot/serial.c15
-rw-r--r--arch/powerpc/boot/simple_alloc.c6
-rw-r--r--arch/powerpc/boot/treeboot-walnut.c81
-rw-r--r--arch/powerpc/boot/uartlite.c79
-rw-r--r--arch/powerpc/boot/util.S24
-rw-r--r--arch/powerpc/boot/virtex.c97
-rw-r--r--arch/powerpc/boot/virtex405-head.S31
-rwxr-xr-xarch/powerpc/boot/wrapper45
-rw-r--r--arch/powerpc/boot/zImage.lds.S18
-rw-r--r--arch/powerpc/boot/zImage.ps3.lds.S2
-rw-r--r--arch/powerpc/configs/32-bit.config1
-rw-r--r--arch/powerpc/configs/40x/acadia_defconfig1
-rw-r--r--arch/powerpc/configs/40x/ep405_defconfig62
-rw-r--r--arch/powerpc/configs/40x/kilauea_defconfig1
-rw-r--r--arch/powerpc/configs/40x/klondike_defconfig1
-rw-r--r--arch/powerpc/configs/40x/makalu_defconfig1
-rw-r--r--arch/powerpc/configs/40x/obs600_defconfig1
-rw-r--r--arch/powerpc/configs/40x/virtex_defconfig75
-rw-r--r--arch/powerpc/configs/44x/akebono_defconfig11
-rw-r--r--arch/powerpc/configs/44x/arches_defconfig2
-rw-r--r--arch/powerpc/configs/44x/bamboo_defconfig2
-rw-r--r--arch/powerpc/configs/44x/bluestone_defconfig2
-rw-r--r--arch/powerpc/configs/44x/canyonlands_defconfig2
-rw-r--r--arch/powerpc/configs/44x/currituck_defconfig5
-rw-r--r--arch/powerpc/configs/44x/eiger_defconfig2
-rw-r--r--arch/powerpc/configs/44x/fsp2_defconfig4
-rw-r--r--arch/powerpc/configs/44x/icon_defconfig3
-rw-r--r--arch/powerpc/configs/44x/iss476-smp_defconfig4
-rw-r--r--arch/powerpc/configs/44x/katmai_defconfig2
-rw-r--r--arch/powerpc/configs/44x/rainier_defconfig2
-rw-r--r--arch/powerpc/configs/44x/redwood_defconfig2
-rw-r--r--arch/powerpc/configs/44x/sam440ep_defconfig2
-rw-r--r--arch/powerpc/configs/44x/sequoia_defconfig2
-rw-r--r--arch/powerpc/configs/44x/taishan_defconfig2
-rw-r--r--arch/powerpc/configs/44x/virtex5_defconfig74
-rw-r--r--arch/powerpc/configs/44x/warp_defconfig3
-rw-r--r--arch/powerpc/configs/52xx/lite5200b_defconfig2
-rw-r--r--arch/powerpc/configs/52xx/motionpro_defconfig2
-rw-r--r--arch/powerpc/configs/52xx/tqm5200_defconfig2
-rw-r--r--arch/powerpc/configs/64-bit.config1
-rw-r--r--arch/powerpc/configs/83xx/mpc837x_rdb_defconfig1
-rw-r--r--arch/powerpc/configs/85xx-hw.config2
-rw-r--r--arch/powerpc/configs/85xx/ge_imp3a_defconfig3
-rw-r--r--arch/powerpc/configs/85xx/mpc85xx_cds_defconfig6
-rw-r--r--arch/powerpc/configs/85xx/ppa8548_defconfig2
-rw-r--r--arch/powerpc/configs/85xx/sbc8548_defconfig50
-rw-r--r--arch/powerpc/configs/85xx/tqm8540_defconfig6
-rw-r--r--arch/powerpc/configs/85xx/tqm8541_defconfig6
-rw-r--r--arch/powerpc/configs/85xx/tqm8555_defconfig6
-rw-r--r--arch/powerpc/configs/85xx/tqm8560_defconfig6
-rw-r--r--arch/powerpc/configs/85xx/xes_mpc85xx_defconfig3
-rw-r--r--arch/powerpc/configs/86xx-hw.config2
-rw-r--r--arch/powerpc/configs/adder875_defconfig3
-rw-r--r--arch/powerpc/configs/amigaone_defconfig1
-rw-r--r--arch/powerpc/configs/cell_defconfig3
-rw-r--r--arch/powerpc/configs/chrp32_defconfig2
-rw-r--r--arch/powerpc/configs/disable-werror.config1
-rw-r--r--arch/powerpc/configs/ep8248e_defconfig2
-rw-r--r--arch/powerpc/configs/ep88xc_defconfig3
-rw-r--r--arch/powerpc/configs/fsl-emb-nonhw.config5
-rw-r--r--arch/powerpc/configs/g5_defconfig6
-rw-r--r--arch/powerpc/configs/gamecube_defconfig2
-rw-r--r--arch/powerpc/configs/holly_defconfig1
-rw-r--r--arch/powerpc/configs/linkstation_defconfig2
-rw-r--r--arch/powerpc/configs/maple_defconfig3
-rw-r--r--arch/powerpc/configs/mgcoge_defconfig2
-rw-r--r--arch/powerpc/configs/microwatt_defconfig110
-rw-r--r--arch/powerpc/configs/mpc512x_defconfig2
-rw-r--r--arch/powerpc/configs/mpc5200_defconfig2
-rw-r--r--arch/powerpc/configs/mpc8272_ads_defconfig2
-rw-r--r--arch/powerpc/configs/mpc83xx_defconfig1
-rw-r--r--arch/powerpc/configs/mpc85xx_base.config1
-rw-r--r--arch/powerpc/configs/mpc866_ads_defconfig1
-rw-r--r--arch/powerpc/configs/mpc86xx_base.config1
-rw-r--r--arch/powerpc/configs/mpc885_ads_defconfig33
-rw-r--r--arch/powerpc/configs/mvme5100_defconfig5
-rw-r--r--arch/powerpc/configs/pasemi_defconfig6
-rw-r--r--arch/powerpc/configs/pmac32_defconfig12
-rw-r--r--arch/powerpc/configs/powernv_defconfig15
-rw-r--r--arch/powerpc/configs/ppc40x_defconfig12
-rw-r--r--arch/powerpc/configs/ppc44x_defconfig8
-rw-r--r--arch/powerpc/configs/ppc64_defconfig16
-rw-r--r--arch/powerpc/configs/ppc64e_defconfig5
-rw-r--r--arch/powerpc/configs/ppc64le.config2
-rw-r--r--arch/powerpc/configs/ppc6xx_defconfig63
-rw-r--r--arch/powerpc/configs/pq2fads_defconfig2
-rw-r--r--arch/powerpc/configs/ps3_defconfig9
-rw-r--r--arch/powerpc/configs/pseries_defconfig20
-rw-r--r--arch/powerpc/configs/security.config15
-rw-r--r--arch/powerpc/configs/skiroot_defconfig6
-rw-r--r--arch/powerpc/configs/storcenter_defconfig2
-rw-r--r--arch/powerpc/configs/tqm8xx_defconfig3
-rw-r--r--arch/powerpc/configs/wii_defconfig3
-rw-r--r--arch/powerpc/crypto/Kconfig97
-rw-r--r--arch/powerpc/crypto/aes-spe-glue.c4
-rw-r--r--arch/powerpc/crypto/crc-vpmsum_test.c8
-rw-r--r--arch/powerpc/crypto/crc32-vpmsum_core.S2
-rw-r--r--arch/powerpc/crypto/md5-asm.S10
-rw-r--r--arch/powerpc/crypto/md5-glue.c1
-rw-r--r--arch/powerpc/crypto/sha1-powerpc-asm.S6
-rw-r--r--arch/powerpc/crypto/sha1-spe-glue.c22
-rw-r--r--arch/powerpc/crypto/sha1.c45
-rw-r--r--arch/powerpc/crypto/sha256-spe-glue.c46
-rw-r--r--arch/powerpc/include/asm/Kbuild8
-rw-r--r--arch/powerpc/include/asm/agp.h4
-rw-r--r--arch/powerpc/include/asm/archrandom.h41
-rw-r--r--arch/powerpc/include/asm/asm-405.h19
-rw-r--r--arch/powerpc/include/asm/asm-compat.h6
-rw-r--r--arch/powerpc/include/asm/asm-const.h1
-rw-r--r--arch/powerpc/include/asm/asm-prototypes.h119
-rw-r--r--arch/powerpc/include/asm/atomic.h312
-rw-r--r--arch/powerpc/include/asm/barrier.h21
-rw-r--r--arch/powerpc/include/asm/bitops.h130
-rw-r--r--arch/powerpc/include/asm/book3s/32/hash.h45
-rw-r--r--arch/powerpc/include/asm/book3s/32/kup.h264
-rw-r--r--arch/powerpc/include/asm/book3s/32/mmu-hash.h128
-rw-r--r--arch/powerpc/include/asm/book3s/32/pgtable.h211
-rw-r--r--arch/powerpc/include/asm/book3s/32/tlbflush.h71
-rw-r--r--arch/powerpc/include/asm/book3s/64/hash-4k.h45
-rw-r--r--arch/powerpc/include/asm/book3s/64/hash-64k.h33
-rw-r--r--arch/powerpc/include/asm/book3s/64/hash-pkey.h45
-rw-r--r--arch/powerpc/include/asm/book3s/64/hash.h19
-rw-r--r--arch/powerpc/include/asm/book3s/64/hugetlb.h7
-rw-r--r--arch/powerpc/include/asm/book3s/64/kexec.h28
-rw-r--r--arch/powerpc/include/asm/book3s/64/kup-radix.h134
-rw-r--r--arch/powerpc/include/asm/book3s/64/kup.h427
-rw-r--r--arch/powerpc/include/asm/book3s/64/mmu-hash.h58
-rw-r--r--arch/powerpc/include/asm/book3s/64/mmu.h92
-rw-r--r--arch/powerpc/include/asm/book3s/64/pgalloc.h22
-rw-r--r--arch/powerpc/include/asm/book3s/64/pgtable-4k.h10
-rw-r--r--arch/powerpc/include/asm/book3s/64/pgtable-64k.h9
-rw-r--r--arch/powerpc/include/asm/book3s/64/pgtable.h307
-rw-r--r--arch/powerpc/include/asm/book3s/64/pkeys.h25
-rw-r--r--arch/powerpc/include/asm/book3s/64/radix-4k.h2
-rw-r--r--arch/powerpc/include/asm/book3s/64/radix.h51
-rw-r--r--arch/powerpc/include/asm/book3s/64/slice.h26
-rw-r--r--arch/powerpc/include/asm/book3s/64/tlbflush-hash.h17
-rw-r--r--arch/powerpc/include/asm/book3s/64/tlbflush-radix.h23
-rw-r--r--arch/powerpc/include/asm/book3s/64/tlbflush.h86
-rw-r--r--arch/powerpc/include/asm/book3s/pgtable.h12
-rw-r--r--arch/powerpc/include/asm/bpf_perf_event.h9
-rw-r--r--arch/powerpc/include/asm/btext.h10
-rw-r--r--arch/powerpc/include/asm/bug.h92
-rw-r--r--arch/powerpc/include/asm/cache.h4
-rw-r--r--arch/powerpc/include/asm/cacheflush.h72
-rw-r--r--arch/powerpc/include/asm/checksum.h50
-rw-r--r--arch/powerpc/include/asm/clocksource.h7
-rw-r--r--arch/powerpc/include/asm/cmpxchg.h41
-rw-r--r--arch/powerpc/include/asm/code-patching.h134
-rw-r--r--arch/powerpc/include/asm/compat.h79
-rw-r--r--arch/powerpc/include/asm/context_tracking.h2
-rw-r--r--arch/powerpc/include/asm/cpm1.h1
-rw-r--r--arch/powerpc/include/asm/cpm2.h8
-rw-r--r--arch/powerpc/include/asm/cpu_has_feature.h4
-rw-r--r--arch/powerpc/include/asm/cpu_setup.h49
-rw-r--r--arch/powerpc/include/asm/cpuidle.h2
-rw-r--r--arch/powerpc/include/asm/cputable.h160
-rw-r--r--arch/powerpc/include/asm/cputhreads.h64
-rw-r--r--arch/powerpc/include/asm/cputime.h50
-rw-r--r--arch/powerpc/include/asm/crashdump-ppc64.h19
-rw-r--r--arch/powerpc/include/asm/dbell.h64
-rw-r--r--arch/powerpc/include/asm/dcr-native.h8
-rw-r--r--arch/powerpc/include/asm/debug.h6
-rw-r--r--arch/powerpc/include/asm/debugfs.h13
-rw-r--r--arch/powerpc/include/asm/delay.h2
-rw-r--r--arch/powerpc/include/asm/device.h16
-rw-r--r--arch/powerpc/include/asm/dma-direct.h4
-rw-r--r--arch/powerpc/include/asm/dma.h9
-rw-r--r--arch/powerpc/include/asm/drmem.h59
-rw-r--r--arch/powerpc/include/asm/dtl.h44
-rw-r--r--arch/powerpc/include/asm/eeh.h61
-rw-r--r--arch/powerpc/include/asm/elf.h26
-rw-r--r--arch/powerpc/include/asm/epapr_hcalls.h6
-rw-r--r--arch/powerpc/include/asm/exception-64e.h59
-rw-r--r--arch/powerpc/include/asm/exception-64s.h56
-rw-r--r--arch/powerpc/include/asm/extable.h14
-rw-r--r--arch/powerpc/include/asm/fadump-internal.h17
-rw-r--r--arch/powerpc/include/asm/feature-fixups.h54
-rw-r--r--arch/powerpc/include/asm/firmware.h19
-rw-r--r--arch/powerpc/include/asm/fixmap.h27
-rw-r--r--arch/powerpc/include/asm/floppy.h27
-rw-r--r--arch/powerpc/include/asm/fsl_85xx_cache_sram.h35
-rw-r--r--arch/powerpc/include/asm/fsl_pamu_stash.h12
-rw-r--r--arch/powerpc/include/asm/ftrace.h90
-rw-r--r--arch/powerpc/include/asm/futex.h18
-rw-r--r--arch/powerpc/include/asm/hardirq.h1
-rw-r--r--arch/powerpc/include/asm/head-64.h16
-rw-r--r--arch/powerpc/include/asm/highmem.h37
-rw-r--r--arch/powerpc/include/asm/hugetlb.h38
-rw-r--r--arch/powerpc/include/asm/hvcall.h153
-rw-r--r--arch/powerpc/include/asm/hvconsole.h3
-rw-r--r--arch/powerpc/include/asm/hw_breakpoint.h46
-rw-r--r--arch/powerpc/include/asm/hw_irq.h311
-rw-r--r--arch/powerpc/include/asm/hydra.h4
-rw-r--r--arch/powerpc/include/asm/i8259.h2
-rw-r--r--arch/powerpc/include/asm/icswx.h26
-rw-r--r--arch/powerpc/include/asm/idle.h93
-rw-r--r--arch/powerpc/include/asm/ima.h30
-rw-r--r--arch/powerpc/include/asm/imc-pmu.h5
-rw-r--r--arch/powerpc/include/asm/inst.h170
-rw-r--r--arch/powerpc/include/asm/interrupt.h688
-rw-r--r--arch/powerpc/include/asm/io.h219
-rw-r--r--arch/powerpc/include/asm/iommu.h21
-rw-r--r--arch/powerpc/include/asm/ipic.h2
-rw-r--r--arch/powerpc/include/asm/irq.h15
-rw-r--r--arch/powerpc/include/asm/jump_label.h21
-rw-r--r--arch/powerpc/include/asm/kasan.h48
-rw-r--r--arch/powerpc/include/asm/kexec.h64
-rw-r--r--arch/powerpc/include/asm/kexec_ranges.h25
-rw-r--r--arch/powerpc/include/asm/kfence.h48
-rw-r--r--arch/powerpc/include/asm/kgdb.h2
-rw-r--r--arch/powerpc/include/asm/kmap_types.h13
-rw-r--r--arch/powerpc/include/asm/kprobes.h6
-rw-r--r--arch/powerpc/include/asm/kup.h191
-rw-r--r--arch/powerpc/include/asm/kvm_asm.h5
-rw-r--r--arch/powerpc/include/asm/kvm_book3s.h49
-rw-r--r--arch/powerpc/include/asm/kvm_book3s_64.h71
-rw-r--r--arch/powerpc/include/asm/kvm_book3s_asm.h16
-rw-r--r--arch/powerpc/include/asm/kvm_book3s_uvmem.h20
-rw-r--r--arch/powerpc/include/asm/kvm_booke.h2
-rw-r--r--arch/powerpc/include/asm/kvm_guest.h25
-rw-r--r--arch/powerpc/include/asm/kvm_host.h103
-rw-r--r--arch/powerpc/include/asm/kvm_para.h26
-rw-r--r--arch/powerpc/include/asm/kvm_ppc.h136
-rw-r--r--arch/powerpc/include/asm/linkage.h2
-rw-r--r--arch/powerpc/include/asm/livepatch.h22
-rw-r--r--arch/powerpc/include/asm/lppaca.h54
-rw-r--r--arch/powerpc/include/asm/machdep.h55
-rw-r--r--arch/powerpc/include/asm/mce.h46
-rw-r--r--arch/powerpc/include/asm/mem_encrypt.h5
-rw-r--r--arch/powerpc/include/asm/membarrier.h3
-rw-r--r--arch/powerpc/include/asm/mm-arch-hooks.h25
-rw-r--r--arch/powerpc/include/asm/mman.h26
-rw-r--r--arch/powerpc/include/asm/mmu.h148
-rw-r--r--arch/powerpc/include/asm/mmu_context.h85
-rw-r--r--arch/powerpc/include/asm/mmzone.h9
-rw-r--r--arch/powerpc/include/asm/module.h23
-rw-r--r--arch/powerpc/include/asm/module.lds.h (renamed from arch/powerpc/kernel/module.lds)0
-rw-r--r--arch/powerpc/include/asm/mpc52xx.h4
-rw-r--r--arch/powerpc/include/asm/mpc5xxx.h9
-rw-r--r--arch/powerpc/include/asm/mpic.h2
-rw-r--r--arch/powerpc/include/asm/nmi.h5
-rw-r--r--arch/powerpc/include/asm/nohash/32/hugetlb-8xx.h45
-rw-r--r--arch/powerpc/include/asm/nohash/32/kup-8xx.h85
-rw-r--r--arch/powerpc/include/asm/nohash/32/mmu-40x.h2
-rw-r--r--arch/powerpc/include/asm/nohash/32/mmu-44x.h2
-rw-r--r--arch/powerpc/include/asm/nohash/32/mmu-8xx.h165
-rw-r--r--arch/powerpc/include/asm/nohash/32/pgtable.h191
-rw-r--r--arch/powerpc/include/asm/nohash/32/pte-40x.h23
-rw-r--r--arch/powerpc/include/asm/nohash/32/pte-85xx.h (renamed from arch/powerpc/include/asm/nohash/32/pte-fsl-booke.h)6
-rw-r--r--arch/powerpc/include/asm/nohash/32/pte-8xx.h58
-rw-r--r--arch/powerpc/include/asm/nohash/32/slice.h20
-rw-r--r--arch/powerpc/include/asm/nohash/64/pgalloc.h5
-rw-r--r--arch/powerpc/include/asm/nohash/64/pgtable-4k.h36
-rw-r--r--arch/powerpc/include/asm/nohash/64/pgtable.h108
-rw-r--r--arch/powerpc/include/asm/nohash/hugetlb-e500.h (renamed from arch/powerpc/include/asm/nohash/hugetlb-book3e.h)8
-rw-r--r--arch/powerpc/include/asm/nohash/kup-booke.h110
-rw-r--r--arch/powerpc/include/asm/nohash/mmu-e500.h (renamed from arch/powerpc/include/asm/nohash/mmu-book3e.h)2
-rw-r--r--arch/powerpc/include/asm/nohash/mmu.h4
-rw-r--r--arch/powerpc/include/asm/nohash/pgalloc.h2
-rw-r--r--arch/powerpc/include/asm/nohash/pgtable.h47
-rw-r--r--arch/powerpc/include/asm/nohash/pte-e500.h (renamed from arch/powerpc/include/asm/nohash/pte-book3e.h)24
-rw-r--r--arch/powerpc/include/asm/nohash/tlbflush.h30
-rw-r--r--arch/powerpc/include/asm/opal-api.h8
-rw-r--r--arch/powerpc/include/asm/opal.h13
-rw-r--r--arch/powerpc/include/asm/oprofile_impl.h135
-rw-r--r--arch/powerpc/include/asm/paca.h47
-rw-r--r--arch/powerpc/include/asm/page.h42
-rw-r--r--arch/powerpc/include/asm/page_32.h6
-rw-r--r--arch/powerpc/include/asm/page_64.h7
-rw-r--r--arch/powerpc/include/asm/paravirt.h164
-rw-r--r--arch/powerpc/include/asm/paravirt_api_clock.h2
-rw-r--r--arch/powerpc/include/asm/parport.h2
-rw-r--r--arch/powerpc/include/asm/pci-bridge.h27
-rw-r--r--arch/powerpc/include/asm/pci.h13
-rw-r--r--arch/powerpc/include/asm/percpu.h4
-rw-r--r--arch/powerpc/include/asm/perf_event.h9
-rw-r--r--arch/powerpc/include/asm/perf_event_server.h35
-rw-r--r--arch/powerpc/include/asm/pgalloc.h5
-rw-r--r--arch/powerpc/include/asm/pgtable-be-types.h2
-rw-r--r--arch/powerpc/include/asm/pgtable-types.h20
-rw-r--r--arch/powerpc/include/asm/pgtable.h68
-rw-r--r--arch/powerpc/include/asm/pkeys.h73
-rw-r--r--arch/powerpc/include/asm/plpar_wrappers.h70
-rw-r--r--arch/powerpc/include/asm/pmac_feature.h12
-rw-r--r--arch/powerpc/include/asm/pmc.h7
-rw-r--r--arch/powerpc/include/asm/pnv-ocxl.h87
-rw-r--r--arch/powerpc/include/asm/pnv-pci.h3
-rw-r--r--arch/powerpc/include/asm/ppc-opcode.h763
-rw-r--r--arch/powerpc/include/asm/ppc-pci.h12
-rw-r--r--arch/powerpc/include/asm/ppc_asm.h210
-rw-r--r--arch/powerpc/include/asm/probes.h44
-rw-r--r--arch/powerpc/include/asm/processor.h144
-rw-r--r--arch/powerpc/include/asm/prom.h15
-rw-r--r--arch/powerpc/include/asm/ps3.h8
-rw-r--r--arch/powerpc/include/asm/ps3av.h2
-rw-r--r--arch/powerpc/include/asm/pte-walk.h29
-rw-r--r--arch/powerpc/include/asm/ptrace.h230
-rw-r--r--arch/powerpc/include/asm/qspinlock.h84
-rw-r--r--arch/powerpc/include/asm/qspinlock_paravirt.h7
-rw-r--r--arch/powerpc/include/asm/reg.h144
-rw-r--r--arch/powerpc/include/asm/reg_booke.h76
-rw-r--r--arch/powerpc/include/asm/rtas-types.h116
-rw-r--r--arch/powerpc/include/asm/rtas.h155
-rw-r--r--arch/powerpc/include/asm/runlatch.h6
-rw-r--r--arch/powerpc/include/asm/seccomp.h23
-rw-r--r--arch/powerpc/include/asm/sections.h79
-rw-r--r--arch/powerpc/include/asm/security_features.h18
-rw-r--r--arch/powerpc/include/asm/set_memory.h44
-rw-r--r--arch/powerpc/include/asm/setjmp.h6
-rw-r--r--arch/powerpc/include/asm/setup.h44
-rw-r--r--arch/powerpc/include/asm/signal.h8
-rw-r--r--arch/powerpc/include/asm/simple_spinlock.h266
-rw-r--r--arch/powerpc/include/asm/simple_spinlock_types.h21
-rw-r--r--arch/powerpc/include/asm/slice.h48
-rw-r--r--arch/powerpc/include/asm/smp.h56
-rw-r--r--arch/powerpc/include/asm/smu.h4
-rw-r--r--arch/powerpc/include/asm/sparsemem.h10
-rw-r--r--arch/powerpc/include/asm/spinlock.h313
-rw-r--r--arch/powerpc/include/asm/spinlock_types.h19
-rw-r--r--arch/powerpc/include/asm/spu.h37
-rw-r--r--arch/powerpc/include/asm/sstep.h27
-rw-r--r--arch/powerpc/include/asm/static_call.h29
-rw-r--r--arch/powerpc/include/asm/string.h4
-rw-r--r--arch/powerpc/include/asm/svm.h2
-rw-r--r--arch/powerpc/include/asm/swiotlb.h1
-rw-r--r--arch/powerpc/include/asm/switch_to.h33
-rw-r--r--arch/powerpc/include/asm/synch.h26
-rw-r--r--arch/powerpc/include/asm/syscall.h88
-rw-r--r--arch/powerpc/include/asm/syscall_wrapper.h49
-rw-r--r--arch/powerpc/include/asm/syscalls.h155
-rw-r--r--arch/powerpc/include/asm/syscalls_32.h (renamed from arch/powerpc/kernel/ppc32.h)6
-rw-r--r--arch/powerpc/include/asm/task_size_64.h14
-rw-r--r--arch/powerpc/include/asm/tce.h8
-rw-r--r--arch/powerpc/include/asm/termios.h18
-rw-r--r--arch/powerpc/include/asm/thread_info.h40
-rw-r--r--arch/powerpc/include/asm/time.h137
-rw-r--r--arch/powerpc/include/asm/timex.h6
-rw-r--r--arch/powerpc/include/asm/tlb.h27
-rw-r--r--arch/powerpc/include/asm/topology.h68
-rw-r--r--arch/powerpc/include/asm/types.h14
-rw-r--r--arch/powerpc/include/asm/uaccess.h556
-rw-r--r--arch/powerpc/include/asm/udbg.h64
-rw-r--r--arch/powerpc/include/asm/unaligned.h22
-rw-r--r--arch/powerpc/include/asm/unistd.h6
-rw-r--r--arch/powerpc/include/asm/uprobes.h8
-rw-r--r--arch/powerpc/include/asm/user.h5
-rw-r--r--arch/powerpc/include/asm/vas.h136
-rw-r--r--arch/powerpc/include/asm/vdso.h54
-rw-r--r--arch/powerpc/include/asm/vdso/clocksource.h7
-rw-r--r--arch/powerpc/include/asm/vdso/gettimeofday.h160
-rw-r--r--arch/powerpc/include/asm/vdso/processor.h38
-rw-r--r--arch/powerpc/include/asm/vdso/timebase.h73
-rw-r--r--arch/powerpc/include/asm/vdso/vsyscall.h25
-rw-r--r--arch/powerpc/include/asm/vdso_datapage.h55
-rw-r--r--arch/powerpc/include/asm/vermagic.h20
-rw-r--r--arch/powerpc/include/asm/vio.h3
-rw-r--r--arch/powerpc/include/asm/vmalloc.h20
-rw-r--r--arch/powerpc/include/asm/word-at-a-time.h2
-rw-r--r--arch/powerpc/include/asm/xics.h12
-rw-r--r--arch/powerpc/include/asm/xilinx_intc.h16
-rw-r--r--arch/powerpc/include/asm/xilinx_pci.h21
-rw-r--r--arch/powerpc/include/asm/xive-regs.h11
-rw-r--r--arch/powerpc/include/asm/xive.h21
-rw-r--r--arch/powerpc/include/asm/xmon.h6
-rw-r--r--arch/powerpc/include/asm/xor_altivec.h25
-rw-r--r--arch/powerpc/include/uapi/asm/auxvec.h4
-rw-r--r--arch/powerpc/include/uapi/asm/bpf_perf_event.h9
-rw-r--r--arch/powerpc/include/uapi/asm/cputable.h2
-rw-r--r--arch/powerpc/include/uapi/asm/elf.h8
-rw-r--r--arch/powerpc/include/uapi/asm/errno.h1
-rw-r--r--arch/powerpc/include/uapi/asm/kvm.h7
-rw-r--r--arch/powerpc/include/uapi/asm/kvm_para.h2
-rw-r--r--arch/powerpc/include/uapi/asm/papr_pdsm.h165
-rw-r--r--arch/powerpc/include/uapi/asm/perf_regs.h44
-rw-r--r--arch/powerpc/include/uapi/asm/posix_types.h5
-rw-r--r--arch/powerpc/include/uapi/asm/ptrace.h1
-rw-r--r--arch/powerpc/include/uapi/asm/shmbuf.h5
-rw-r--r--arch/powerpc/include/uapi/asm/signal.h31
-rw-r--r--arch/powerpc/include/uapi/asm/stat.h10
-rw-r--r--arch/powerpc/include/uapi/asm/termbits.h182
-rw-r--r--arch/powerpc/include/uapi/asm/vas-api.h28
-rw-r--r--arch/powerpc/kernel/.gitignore1
-rw-r--r--arch/powerpc/kernel/85xx_entry_mapping.S (renamed from arch/powerpc/kernel/fsl_booke_entry_mapping.S)8
-rw-r--r--arch/powerpc/kernel/Makefile97
-rw-r--r--arch/powerpc/kernel/align.c97
-rw-r--r--arch/powerpc/kernel/asm-offsets.c241
-rw-r--r--arch/powerpc/kernel/audit.c12
-rw-r--r--arch/powerpc/kernel/btext.c44
-rw-r--r--arch/powerpc/kernel/cacheinfo.c180
-rw-r--r--arch/powerpc/kernel/compat_audit.c13
-rw-r--r--arch/powerpc/kernel/cpu_setup_6xx.S2
-rw-r--r--arch/powerpc/kernel/cpu_setup_e500.S (renamed from arch/powerpc/kernel/cpu_setup_fsl_booke.S)15
-rw-r--r--arch/powerpc/kernel/cpu_setup_power.S219
-rw-r--r--arch/powerpc/kernel/cpu_setup_power.c280
-rw-r--r--arch/powerpc/kernel/cpu_specs.h29
-rw-r--r--arch/powerpc/kernel/cpu_specs_40x.h280
-rw-r--r--arch/powerpc/kernel/cpu_specs_44x.h304
-rw-r--r--arch/powerpc/kernel/cpu_specs_47x.h74
-rw-r--r--arch/powerpc/kernel/cpu_specs_85xx.h57
-rw-r--r--arch/powerpc/kernel/cpu_specs_8xx.h23
-rw-r--r--arch/powerpc/kernel/cpu_specs_book3s_32.h605
-rw-r--r--arch/powerpc/kernel/cpu_specs_book3s_64.h481
-rw-r--r--arch/powerpc/kernel/cpu_specs_e500mc.h75
-rw-r--r--arch/powerpc/kernel/cputable.c2144
-rw-r--r--arch/powerpc/kernel/crash_dump.c44
-rw-r--r--arch/powerpc/kernel/dawr.c33
-rw-r--r--arch/powerpc/kernel/dbell.c67
-rw-r--r--arch/powerpc/kernel/dma-iommu.c161
-rw-r--r--arch/powerpc/kernel/dma-swiotlb.c1
-rw-r--r--arch/powerpc/kernel/dt_cpu_ftrs.c157
-rw-r--r--arch/powerpc/kernel/early_32.c1
-rw-r--r--arch/powerpc/kernel/eeh.c559
-rw-r--r--arch/powerpc/kernel/eeh_cache.c11
-rw-r--r--arch/powerpc/kernel/eeh_dev.c67
-rw-r--r--arch/powerpc/kernel/eeh_driver.c189
-rw-r--r--arch/powerpc/kernel/eeh_event.c2
-rw-r--r--arch/powerpc/kernel/eeh_pe.c188
-rw-r--r--arch/powerpc/kernel/eeh_sysfs.c3
-rw-r--r--arch/powerpc/kernel/entry_32.S1281
-rw-r--r--arch/powerpc/kernel/entry_64.S1057
-rw-r--r--arch/powerpc/kernel/epapr_paravirt.c3
-rw-r--r--arch/powerpc/kernel/exceptions-64e.S412
-rw-r--r--arch/powerpc/kernel/exceptions-64s.S2808
-rw-r--r--arch/powerpc/kernel/fadump.c471
-rw-r--r--arch/powerpc/kernel/firmware.c23
-rw-r--r--arch/powerpc/kernel/fpu.S36
-rw-r--r--arch/powerpc/kernel/head_32.h361
-rw-r--r--arch/powerpc/kernel/head_40x.S617
-rw-r--r--arch/powerpc/kernel/head_44x.S86
-rw-r--r--arch/powerpc/kernel/head_64.S148
-rw-r--r--arch/powerpc/kernel/head_85xx.S (renamed from arch/powerpc/kernel/head_fsl_booke.S)207
-rw-r--r--arch/powerpc/kernel/head_8xx.S616
-rw-r--r--arch/powerpc/kernel/head_book3s_32.S (renamed from arch/powerpc/kernel/head_32.S)616
-rw-r--r--arch/powerpc/kernel/head_booke.h333
-rw-r--r--arch/powerpc/kernel/hw_breakpoint.c651
-rw-r--r--arch/powerpc/kernel/hw_breakpoint_constraints.c153
-rw-r--r--arch/powerpc/kernel/idle.c16
-rw-r--r--arch/powerpc/kernel/idle_64e.S (renamed from arch/powerpc/kernel/idle_book3e.S)10
-rw-r--r--arch/powerpc/kernel/idle_6xx.S17
-rw-r--r--arch/powerpc/kernel/idle_85xx.S (renamed from arch/powerpc/kernel/idle_e500.S)15
-rw-r--r--arch/powerpc/kernel/idle_book3s.S152
-rw-r--r--arch/powerpc/kernel/ima_arch.c6
-rw-r--r--arch/powerpc/kernel/interrupt.c505
-rw-r--r--arch/powerpc/kernel/interrupt_64.S734
-rw-r--r--arch/powerpc/kernel/io-workarounds.c18
-rw-r--r--arch/powerpc/kernel/iomap.c166
-rw-r--r--arch/powerpc/kernel/iommu.c163
-rw-r--r--arch/powerpc/kernel/irq.c550
-rw-r--r--arch/powerpc/kernel/irq_64.c495
-rw-r--r--arch/powerpc/kernel/isa-bridge.c30
-rw-r--r--arch/powerpc/kernel/jump_label.c7
-rw-r--r--arch/powerpc/kernel/kdebugfs.c14
-rw-r--r--arch/powerpc/kernel/kgdb.c47
-rw-r--r--arch/powerpc/kernel/kprobes-ftrace.c17
-rw-r--r--arch/powerpc/kernel/kprobes.c310
-rw-r--r--arch/powerpc/kernel/kvm.c11
-rw-r--r--arch/powerpc/kernel/l2cr_6xx.S10
-rw-r--r--arch/powerpc/kernel/legacy_serial.c40
-rw-r--r--arch/powerpc/kernel/mce.c181
-rw-r--r--arch/powerpc/kernel/mce_power.c267
-rw-r--r--arch/powerpc/kernel/misc.S8
-rw-r--r--arch/powerpc/kernel/misc_32.S108
-rw-r--r--arch/powerpc/kernel/misc_64.S47
-rw-r--r--arch/powerpc/kernel/module.c43
-rw-r--r--arch/powerpc/kernel/module_32.c125
-rw-r--r--arch/powerpc/kernel/module_64.c433
-rw-r--r--arch/powerpc/kernel/nvram_64.c24
-rw-r--r--arch/powerpc/kernel/of_platform.c14
-rw-r--r--arch/powerpc/kernel/optprobes.c169
-rw-r--r--arch/powerpc/kernel/optprobes_head.S64
-rw-r--r--arch/powerpc/kernel/paca.c66
-rw-r--r--arch/powerpc/kernel/pci-common.c168
-rw-r--r--arch/powerpc/kernel/pci-hotplug.c5
-rw-r--r--arch/powerpc/kernel/pci_32.c37
-rw-r--r--arch/powerpc/kernel/pci_64.c73
-rw-r--r--arch/powerpc/kernel/pci_dn.c96
-rw-r--r--arch/powerpc/kernel/pci_of_scan.c4
-rw-r--r--arch/powerpc/kernel/ppc_save_regs.S6
-rw-r--r--arch/powerpc/kernel/proc_powerpc.c6
-rw-r--r--arch/powerpc/kernel/process.c810
-rw-r--r--arch/powerpc/kernel/prom.c219
-rw-r--r--arch/powerpc/kernel/prom_init.c260
-rw-r--r--arch/powerpc/kernel/prom_init_check.sh5
-rw-r--r--arch/powerpc/kernel/ptrace.c3468
-rw-r--r--arch/powerpc/kernel/ptrace/Makefile21
-rw-r--r--arch/powerpc/kernel/ptrace/ptrace-adv.c494
-rw-r--r--arch/powerpc/kernel/ptrace/ptrace-altivec.c115
-rw-r--r--arch/powerpc/kernel/ptrace/ptrace-decl.h177
-rw-r--r--arch/powerpc/kernel/ptrace/ptrace-fpu.c58
-rw-r--r--arch/powerpc/kernel/ptrace/ptrace-noadv.c298
-rw-r--r--arch/powerpc/kernel/ptrace/ptrace-novsx.c64
-rw-r--r--arch/powerpc/kernel/ptrace/ptrace-spe.c60
-rw-r--r--arch/powerpc/kernel/ptrace/ptrace-tm.c788
-rw-r--r--arch/powerpc/kernel/ptrace/ptrace-view.c847
-rw-r--r--arch/powerpc/kernel/ptrace/ptrace-vsx.c148
-rw-r--r--arch/powerpc/kernel/ptrace/ptrace.c450
-rw-r--r--arch/powerpc/kernel/ptrace/ptrace32.c (renamed from arch/powerpc/kernel/ptrace32.c)21
-rw-r--r--arch/powerpc/kernel/reloc_32.S2
-rw-r--r--arch/powerpc/kernel/reloc_64.S67
-rw-r--r--arch/powerpc/kernel/rtas-proc.c24
-rw-r--r--arch/powerpc/kernel/rtas-rtc.c3
-rw-r--r--arch/powerpc/kernel/rtas.c693
-rw-r--r--arch/powerpc/kernel/rtas_entry.S176
-rw-r--r--arch/powerpc/kernel/rtas_flash.c2
-rw-r--r--arch/powerpc/kernel/rtas_pci.c5
-rw-r--r--arch/powerpc/kernel/rtasd.c39
-rw-r--r--arch/powerpc/kernel/secure_boot.c18
-rw-r--r--arch/powerpc/kernel/security.c505
-rw-r--r--arch/powerpc/kernel/secvar-sysfs.c9
-rw-r--r--arch/powerpc/kernel/setup-common.c184
-rw-r--r--arch/powerpc/kernel/setup.h16
-rw-r--r--arch/powerpc/kernel/setup_32.c25
-rw-r--r--arch/powerpc/kernel/setup_64.c370
-rw-r--r--arch/powerpc/kernel/signal.c229
-rw-r--r--arch/powerpc/kernel/signal.h179
-rw-r--r--arch/powerpc/kernel/signal_32.c1052
-rw-r--r--arch/powerpc/kernel/signal_64.c433
-rw-r--r--arch/powerpc/kernel/smp.c817
-rw-r--r--arch/powerpc/kernel/stacktrace.c141
-rw-r--r--arch/powerpc/kernel/static_call.c37
-rw-r--r--arch/powerpc/kernel/swsusp_32.S4
-rw-r--r--arch/powerpc/kernel/swsusp_64.c5
-rw-r--r--arch/powerpc/kernel/swsusp_85xx.S (renamed from arch/powerpc/kernel/swsusp_booke.S)0
-rw-r--r--arch/powerpc/kernel/swsusp_asm64.S19
-rw-r--r--arch/powerpc/kernel/sys_ppc32.c110
-rw-r--r--arch/powerpc/kernel/syscall.c189
-rw-r--r--arch/powerpc/kernel/syscalls.c87
-rw-r--r--arch/powerpc/kernel/syscalls/Makefile55
-rw-r--r--arch/powerpc/kernel/syscalls/syscall.tbl90
-rw-r--r--arch/powerpc/kernel/syscalls/syscallhdr.sh37
-rw-r--r--arch/powerpc/kernel/syscalls/syscalltbl.sh36
-rw-r--r--arch/powerpc/kernel/sysfs.c560
-rw-r--r--arch/powerpc/kernel/systbl.S40
-rw-r--r--arch/powerpc/kernel/systbl.c46
-rw-r--r--arch/powerpc/kernel/systbl_chk.sh30
-rw-r--r--arch/powerpc/kernel/tau_6xx.c154
-rw-r--r--arch/powerpc/kernel/time.c521
-rw-r--r--arch/powerpc/kernel/tm.S73
-rw-r--r--arch/powerpc/kernel/trace/Makefile9
-rw-r--r--arch/powerpc/kernel/trace/ftrace.c529
-rw-r--r--arch/powerpc/kernel/trace/ftrace_32.S95
-rw-r--r--arch/powerpc/kernel/trace/ftrace_low.S (renamed from arch/powerpc/kernel/trace/ftrace_64.S)16
-rw-r--r--arch/powerpc/kernel/trace/ftrace_mprofile.S (renamed from arch/powerpc/kernel/trace/ftrace_64_mprofile.S)239
-rw-r--r--arch/powerpc/kernel/traps.c510
-rw-r--r--arch/powerpc/kernel/udbg.c4
-rw-r--r--arch/powerpc/kernel/udbg_16550.c43
-rw-r--r--arch/powerpc/kernel/uprobes.c20
-rw-r--r--arch/powerpc/kernel/vdso.c803
-rw-r--r--arch/powerpc/kernel/vdso/.gitignore5
-rw-r--r--arch/powerpc/kernel/vdso/Makefile104
-rw-r--r--arch/powerpc/kernel/vdso/cacheflush.S (renamed from arch/powerpc/kernel/vdso32/cacheflush.S)24
-rw-r--r--arch/powerpc/kernel/vdso/datapage.S (renamed from arch/powerpc/kernel/vdso32/datapage.S)19
-rwxr-xr-xarch/powerpc/kernel/vdso/gen_vdso32_offsets.sh16
-rwxr-xr-xarch/powerpc/kernel/vdso/gen_vdso64_offsets.sh16
-rw-r--r--arch/powerpc/kernel/vdso/getcpu.S (renamed from arch/powerpc/kernel/vdso32/getcpu.S)4
-rw-r--r--arch/powerpc/kernel/vdso/gettimeofday.S129
-rw-r--r--arch/powerpc/kernel/vdso/note.S (renamed from arch/powerpc/kernel/vdso32/note.S)0
-rw-r--r--arch/powerpc/kernel/vdso/sigtramp32.S (renamed from arch/powerpc/kernel/vdso32/sigtramp.S)0
-rw-r--r--arch/powerpc/kernel/vdso/sigtramp64.S (renamed from arch/powerpc/kernel/vdso64/sigtramp.S)22
-rw-r--r--arch/powerpc/kernel/vdso/vdso32.lds.S (renamed from arch/powerpc/kernel/vdso32/vdso32.lds.S)70
-rw-r--r--arch/powerpc/kernel/vdso/vdso64.lds.S (renamed from arch/powerpc/kernel/vdso64/vdso64.lds.S)67
-rw-r--r--arch/powerpc/kernel/vdso/vgettimeofday.c49
-rw-r--r--arch/powerpc/kernel/vdso32/.gitignore2
-rw-r--r--arch/powerpc/kernel/vdso32/Makefile65
-rw-r--r--arch/powerpc/kernel/vdso32/gettimeofday.S339
-rw-r--r--arch/powerpc/kernel/vdso32_wrapper.S (renamed from arch/powerpc/kernel/vdso32/vdso32_wrapper.S)2
-rw-r--r--arch/powerpc/kernel/vdso64/.gitignore2
-rw-r--r--arch/powerpc/kernel/vdso64/Makefile47
-rw-r--r--arch/powerpc/kernel/vdso64/cacheflush.S80
-rw-r--r--arch/powerpc/kernel/vdso64/datapage.S84
-rw-r--r--arch/powerpc/kernel/vdso64/getcpu.S33
-rw-r--r--arch/powerpc/kernel/vdso64/gettimeofday.S289
-rw-r--r--arch/powerpc/kernel/vdso64/note.S1
-rw-r--r--arch/powerpc/kernel/vdso64_wrapper.S (renamed from arch/powerpc/kernel/vdso64/vdso64_wrapper.S)2
-rw-r--r--arch/powerpc/kernel/vecemu.c20
-rw-r--r--arch/powerpc/kernel/vector.S44
-rw-r--r--arch/powerpc/kernel/vmlinux.lds.S193
-rw-r--r--arch/powerpc/kernel/watchdog.c262
-rw-r--r--arch/powerpc/kexec/Makefile14
-rw-r--r--arch/powerpc/kexec/core.c45
-rw-r--r--arch/powerpc/kexec/core_32.c4
-rw-r--r--arch/powerpc/kexec/core_64.c29
-rw-r--r--arch/powerpc/kexec/crash.c84
-rw-r--r--arch/powerpc/kexec/elf_64.c68
-rw-r--r--arch/powerpc/kexec/file_load.c215
-rw-r--r--arch/powerpc/kexec/file_load_64.c1287
-rw-r--r--arch/powerpc/kexec/ima.c219
-rw-r--r--arch/powerpc/kexec/ranges.c412
-rw-r--r--arch/powerpc/kexec/relocate_32.S16
-rw-r--r--arch/powerpc/kvm/Kconfig77
-rw-r--r--arch/powerpc/kvm/Makefile21
-rw-r--r--arch/powerpc/kvm/book3s.c166
-rw-r--r--arch/powerpc/kvm/book3s.h16
-rw-r--r--arch/powerpc/kvm/book3s_32_mmu.c4
-rw-r--r--arch/powerpc/kvm/book3s_32_mmu_host.c5
-rw-r--r--arch/powerpc/kvm/book3s_32_sr.S26
-rw-r--r--arch/powerpc/kvm/book3s_64_entry.S429
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu.c7
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu_host.c10
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu_hv.c309
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu_radix.c239
-rw-r--r--arch/powerpc/kvm/book3s_64_vio.c116
-rw-r--r--arch/powerpc/kvm/book3s_64_vio_hv.c684
-rw-r--r--arch/powerpc/kvm/book3s_emulate.c22
-rw-r--r--arch/powerpc/kvm/book3s_hv.c2295
-rw-r--r--arch/powerpc/kvm/book3s_hv.h52
-rw-r--r--arch/powerpc/kvm/book3s_hv_builtin.c366
-rw-r--r--arch/powerpc/kvm/book3s_hv_hmi.c7
-rw-r--r--arch/powerpc/kvm/book3s_hv_interrupts.S19
-rw-r--r--arch/powerpc/kvm/book3s_hv_nested.c508
-rw-r--r--arch/powerpc/kvm/book3s_hv_p9_entry.c930
-rw-r--r--arch/powerpc/kvm/book3s_hv_p9_perf.c219
-rw-r--r--arch/powerpc/kvm/book3s_hv_ras.c73
-rw-r--r--arch/powerpc/kvm/book3s_hv_rm_mmu.c113
-rw-r--r--arch/powerpc/kvm/book3s_hv_rm_xics.c34
-rw-r--r--arch/powerpc/kvm/book3s_hv_rm_xive.c47
-rw-r--r--arch/powerpc/kvm/book3s_hv_rmhandlers.S962
-rw-r--r--arch/powerpc/kvm/book3s_hv_tm.c89
-rw-r--r--arch/powerpc/kvm/book3s_hv_tm_builtin.c16
-rw-r--r--arch/powerpc/kvm/book3s_hv_uvmem.c782
-rw-r--r--arch/powerpc/kvm/book3s_interrupts.S60
-rw-r--r--arch/powerpc/kvm/book3s_paired_singles.c72
-rw-r--r--arch/powerpc/kvm/book3s_pr.c163
-rw-r--r--arch/powerpc/kvm/book3s_pr_papr.c31
-rw-r--r--arch/powerpc/kvm/book3s_rmhandlers.S6
-rw-r--r--arch/powerpc/kvm/book3s_rtas.c27
-rw-r--r--arch/powerpc/kvm/book3s_segment.S10
-rw-r--r--arch/powerpc/kvm/book3s_xics.c117
-rw-r--r--arch/powerpc/kvm/book3s_xics.h3
-rw-r--r--arch/powerpc/kvm/book3s_xive.c1012
-rw-r--r--arch/powerpc/kvm/book3s_xive.h42
-rw-r--r--arch/powerpc/kvm/book3s_xive_native.c105
-rw-r--r--arch/powerpc/kvm/book3s_xive_template.c638
-rw-r--r--arch/powerpc/kvm/booke.c174
-rw-r--r--arch/powerpc/kvm/booke.h10
-rw-r--r--arch/powerpc/kvm/booke_emulate.c2
-rw-r--r--arch/powerpc/kvm/booke_interrupts.S13
-rw-r--r--arch/powerpc/kvm/bookehv_interrupts.S10
-rw-r--r--arch/powerpc/kvm/e500.c2
-rw-r--r--arch/powerpc/kvm/e500.h2
-rw-r--r--arch/powerpc/kvm/e500_emulate.c17
-rw-r--r--arch/powerpc/kvm/e500_mmu.c4
-rw-r--r--arch/powerpc/kvm/e500_mmu_host.c38
-rw-r--r--arch/powerpc/kvm/e500mc.c5
-rw-r--r--arch/powerpc/kvm/emulate.c10
-rw-r--r--arch/powerpc/kvm/emulate_loadstore.c44
-rw-r--r--arch/powerpc/kvm/fpu.S2
-rw-r--r--arch/powerpc/kvm/mpic.c1
-rw-r--r--arch/powerpc/kvm/powerpc.c311
-rw-r--r--arch/powerpc/kvm/timing.c30
-rw-r--r--arch/powerpc/kvm/timing.h13
-rw-r--r--arch/powerpc/kvm/tm.S2
-rw-r--r--arch/powerpc/kvm/trace_booke.h15
-rw-r--r--arch/powerpc/kvm/trace_hv.h38
-rw-r--r--arch/powerpc/lib/Makefile22
-rw-r--r--arch/powerpc/lib/checksum_32.S78
-rw-r--r--arch/powerpc/lib/checksum_64.S37
-rw-r--r--arch/powerpc/lib/checksum_wrappers.c83
-rw-r--r--arch/powerpc/lib/code-patching.c610
-rw-r--r--arch/powerpc/lib/copy_32.S3
-rw-r--r--arch/powerpc/lib/copy_mc_64.S (renamed from arch/powerpc/lib/memcpy_mcsafe_64.S)4
-rw-r--r--arch/powerpc/lib/copypage_64.S7
-rw-r--r--arch/powerpc/lib/error-inject.c2
-rw-r--r--arch/powerpc/lib/feature-fixups-test.S69
-rw-r--r--arch/powerpc/lib/feature-fixups.c532
-rw-r--r--arch/powerpc/lib/locks.c12
-rw-r--r--arch/powerpc/lib/pmem.c49
-rw-r--r--arch/powerpc/lib/restart_table.c56
-rw-r--r--arch/powerpc/lib/sstep.c1041
-rw-r--r--arch/powerpc/lib/string_64.S7
-rw-r--r--arch/powerpc/lib/test-code-patching.c362
-rw-r--r--arch/powerpc/lib/test_emulate_step.c1007
-rw-r--r--arch/powerpc/lib/test_emulate_step_exec_instr.S10
-rw-r--r--arch/powerpc/lib/vmx-helper.c13
-rw-r--r--arch/powerpc/lib/xor_vmx.c28
-rw-r--r--arch/powerpc/lib/xor_vmx.h27
-rw-r--r--arch/powerpc/lib/xor_vmx_glue.c32
-rw-r--r--arch/powerpc/math-emu/Makefile7
-rw-r--r--arch/powerpc/math-emu/math.c24
-rw-r--r--arch/powerpc/math-emu/math_efp.c62
-rw-r--r--arch/powerpc/mm/Makefile11
-rw-r--r--arch/powerpc/mm/book3s32/Makefile5
-rw-r--r--arch/powerpc/mm/book3s32/hash_low.S239
-rw-r--r--arch/powerpc/mm/book3s32/kuap.c36
-rw-r--r--arch/powerpc/mm/book3s32/mmu.c193
-rw-r--r--arch/powerpc/mm/book3s32/mmu_context.c47
-rw-r--r--arch/powerpc/mm/book3s32/nohash_low.S80
-rw-r--r--arch/powerpc/mm/book3s32/tlb.c101
-rw-r--r--arch/powerpc/mm/book3s64/Makefile30
-rw-r--r--arch/powerpc/mm/book3s64/hash_4k.c2
-rw-r--r--arch/powerpc/mm/book3s64/hash_64k.c4
-rw-r--r--arch/powerpc/mm/book3s64/hash_hugepage.c2
-rw-r--r--arch/powerpc/mm/book3s64/hash_native.c152
-rw-r--r--arch/powerpc/mm/book3s64/hash_pgtable.c163
-rw-r--r--arch/powerpc/mm/book3s64/hash_tlb.c25
-rw-r--r--arch/powerpc/mm/book3s64/hash_utils.c447
-rw-r--r--arch/powerpc/mm/book3s64/hugetlbpage.c (renamed from arch/powerpc/mm/book3s64/hash_hugetlbpage.c)12
-rw-r--r--arch/powerpc/mm/book3s64/internal.h20
-rw-r--r--arch/powerpc/mm/book3s64/iommu_api.c123
-rw-r--r--arch/powerpc/mm/book3s64/mmu_context.c60
-rw-r--r--arch/powerpc/mm/book3s64/pgtable.c140
-rw-r--r--arch/powerpc/mm/book3s64/pkeys.c444
-rw-r--r--arch/powerpc/mm/book3s64/radix_hugetlbpage.c71
-rw-r--r--arch/powerpc/mm/book3s64/radix_pgtable.c474
-rw-r--r--arch/powerpc/mm/book3s64/radix_tlb.c724
-rw-r--r--arch/powerpc/mm/book3s64/slb.c279
-rw-r--r--arch/powerpc/mm/book3s64/slice.c (renamed from arch/powerpc/mm/slice.c)53
-rw-r--r--arch/powerpc/mm/book3s64/subpage_prot.c35
-rw-r--r--arch/powerpc/mm/book3s64/trace.c7
-rw-r--r--arch/powerpc/mm/cacheflush.c234
-rw-r--r--arch/powerpc/mm/copro_fault.c18
-rw-r--r--arch/powerpc/mm/dma-noncoherent.c2
-rw-r--r--arch/powerpc/mm/drmem.c141
-rw-r--r--arch/powerpc/mm/fault.c421
-rw-r--r--arch/powerpc/mm/highmem.c83
-rw-r--r--arch/powerpc/mm/hugetlbpage.c238
-rw-r--r--arch/powerpc/mm/init-common.c25
-rw-r--r--arch/powerpc/mm/init_32.c52
-rw-r--r--arch/powerpc/mm/init_64.c135
-rw-r--r--arch/powerpc/mm/ioremap.c3
-rw-r--r--arch/powerpc/mm/ioremap_32.c4
-rw-r--r--arch/powerpc/mm/ioremap_64.c52
-rw-r--r--arch/powerpc/mm/kasan/8xx.c73
-rw-r--r--arch/powerpc/mm/kasan/Makefile6
-rw-r--r--arch/powerpc/mm/kasan/book3s_32.c60
-rw-r--r--arch/powerpc/mm/kasan/init_32.c (renamed from arch/powerpc/mm/kasan/kasan_init_32.c)129
-rw-r--r--arch/powerpc/mm/kasan/init_book3e_64.c133
-rw-r--r--arch/powerpc/mm/kasan/init_book3s_64.c104
-rw-r--r--arch/powerpc/mm/maccess.c13
-rw-r--r--arch/powerpc/mm/mem.c464
-rw-r--r--arch/powerpc/mm/mmap.c228
-rw-r--r--arch/powerpc/mm/mmu_context.c39
-rw-r--r--arch/powerpc/mm/mmu_decl.h52
-rw-r--r--arch/powerpc/mm/nohash/40x.c20
-rw-r--r--arch/powerpc/mm/nohash/44x.c5
-rw-r--r--arch/powerpc/mm/nohash/8xx.c289
-rw-r--r--arch/powerpc/mm/nohash/Makefile8
-rw-r--r--arch/powerpc/mm/nohash/book3e_pgtable.c30
-rw-r--r--arch/powerpc/mm/nohash/e500.c (renamed from arch/powerpc/mm/nohash/fsl_booke.c)108
-rw-r--r--arch/powerpc/mm/nohash/e500_hugetlbpage.c (renamed from arch/powerpc/mm/nohash/book3e_hugetlbpage.c)32
-rw-r--r--arch/powerpc/mm/nohash/kaslr_booke.c21
-rw-r--r--arch/powerpc/mm/nohash/kup.c33
-rw-r--r--arch/powerpc/mm/nohash/mmu_context.c186
-rw-r--r--arch/powerpc/mm/nohash/tlb.c110
-rw-r--r--arch/powerpc/mm/nohash/tlb_low.S74
-rw-r--r--arch/powerpc/mm/nohash/tlb_low_64e.S240
-rw-r--r--arch/powerpc/mm/numa.c1180
-rw-r--r--arch/powerpc/mm/pageattr.c99
-rw-r--r--arch/powerpc/mm/pgtable-frag.c5
-rw-r--r--arch/powerpc/mm/pgtable.c137
-rw-r--r--arch/powerpc/mm/pgtable_32.c161
-rw-r--r--arch/powerpc/mm/pgtable_64.c27
-rw-r--r--arch/powerpc/mm/ptdump/8xx.c18
-rw-r--r--arch/powerpc/mm/ptdump/Makefile11
-rw-r--r--arch/powerpc/mm/ptdump/bats.c108
-rw-r--r--arch/powerpc/mm/ptdump/book3s64.c8
-rw-r--r--arch/powerpc/mm/ptdump/hashpagetable.c49
-rw-r--r--arch/powerpc/mm/ptdump/ptdump.c241
-rw-r--r--arch/powerpc/mm/ptdump/ptdump.h3
-rw-r--r--arch/powerpc/mm/ptdump/segment_regs.c24
-rw-r--r--arch/powerpc/mm/ptdump/shared.c19
-rw-r--r--arch/powerpc/net/Makefile6
-rw-r--r--arch/powerpc/net/bpf_jit.h290
-rw-r--r--arch/powerpc/net/bpf_jit32.h139
-rw-r--r--arch/powerpc/net/bpf_jit64.h108
-rw-r--r--arch/powerpc/net/bpf_jit_asm.S226
-rw-r--r--arch/powerpc/net/bpf_jit_comp.c873
-rw-r--r--arch/powerpc/net/bpf_jit_comp32.c1257
-rw-r--r--arch/powerpc/net/bpf_jit_comp64.c1055
-rw-r--r--arch/powerpc/oprofile/Makefile19
-rw-r--r--arch/powerpc/oprofile/backtrace.c118
-rw-r--r--arch/powerpc/oprofile/cell/pr_util.h110
-rw-r--r--arch/powerpc/oprofile/cell/spu_profiler.c248
-rw-r--r--arch/powerpc/oprofile/cell/spu_task_sync.c657
-rw-r--r--arch/powerpc/oprofile/cell/vma_map.c279
-rw-r--r--arch/powerpc/oprofile/common.c243
-rw-r--r--arch/powerpc/oprofile/op_model_7450.c207
-rw-r--r--arch/powerpc/oprofile/op_model_cell.c1709
-rw-r--r--arch/powerpc/oprofile/op_model_fsl_emb.c380
-rw-r--r--arch/powerpc/oprofile/op_model_pa6t.c227
-rw-r--r--arch/powerpc/oprofile/op_model_power4.c438
-rw-r--r--arch/powerpc/perf/8xx-pmu.c29
-rw-r--r--arch/powerpc/perf/Makefile7
-rw-r--r--arch/powerpc/perf/bhrb.S2
-rw-r--r--arch/powerpc/perf/callchain.c359
-rw-r--r--arch/powerpc/perf/callchain.h35
-rw-r--r--arch/powerpc/perf/callchain_32.c178
-rw-r--r--arch/powerpc/perf/callchain_64.c120
-rw-r--r--arch/powerpc/perf/core-book3s.c513
-rw-r--r--arch/powerpc/perf/core-fsl-emb.c25
-rw-r--r--arch/powerpc/perf/e500-pmu.c9
-rw-r--r--arch/powerpc/perf/e6500-pmu.c5
-rw-r--r--arch/powerpc/perf/generic-compat-pmu.c190
-rw-r--r--arch/powerpc/perf/hv-24x7.c236
-rw-r--r--arch/powerpc/perf/hv-gpci-requests.h6
-rw-r--r--arch/powerpc/perf/hv-gpci.c81
-rw-r--r--arch/powerpc/perf/hv-gpci.h27
-rw-r--r--arch/powerpc/perf/imc-pmu.c239
-rw-r--r--arch/powerpc/perf/internal.h17
-rw-r--r--arch/powerpc/perf/isa207-common.c408
-rw-r--r--arch/powerpc/perf/isa207-common.h86
-rw-r--r--arch/powerpc/perf/mpc7450-pmu.c33
-rw-r--r--arch/powerpc/perf/perf_regs.c56
-rw-r--r--arch/powerpc/perf/power10-events-list.h79
-rw-r--r--arch/powerpc/perf/power10-pmu.c636
-rw-r--r--arch/powerpc/perf/power5+-pmu.c32
-rw-r--r--arch/powerpc/perf/power5-pmu.c31
-rw-r--r--arch/powerpc/perf/power6-pmu.c30
-rw-r--r--arch/powerpc/perf/power7-pmu.c37
-rw-r--r--arch/powerpc/perf/power8-pmu.c23
-rw-r--r--arch/powerpc/perf/power9-pmu.c55
-rw-r--r--arch/powerpc/perf/ppc970-pmu.c40
-rw-r--r--arch/powerpc/platforms/40x/Kconfig77
-rw-r--r--arch/powerpc/platforms/40x/Makefile3
-rw-r--r--arch/powerpc/platforms/40x/ep405.c123
-rw-r--r--arch/powerpc/platforms/40x/ppc40x_simple.c1
-rw-r--r--arch/powerpc/platforms/40x/virtex.c54
-rw-r--r--arch/powerpc/platforms/40x/walnut.c65
-rw-r--r--arch/powerpc/platforms/44x/Kconfig53
-rw-r--r--arch/powerpc/platforms/44x/Makefile2
-rw-r--r--arch/powerpc/platforms/44x/canyonlands.c1
-rw-r--r--arch/powerpc/platforms/44x/fsp2.c8
-rw-r--r--arch/powerpc/platforms/44x/machine_check.c5
-rw-r--r--arch/powerpc/platforms/44x/ppc44x_simple.c1
-rw-r--r--arch/powerpc/platforms/44x/ppc476.c13
-rw-r--r--arch/powerpc/platforms/44x/sam440ep.c1
-rw-r--r--arch/powerpc/platforms/44x/virtex.c60
-rw-r--r--arch/powerpc/platforms/44x/virtex_ml510.c30
-rw-r--r--arch/powerpc/platforms/44x/warp.c6
-rw-r--r--arch/powerpc/platforms/4xx/Makefile1
-rw-r--r--arch/powerpc/platforms/4xx/cpm.c8
-rw-r--r--arch/powerpc/platforms/4xx/hsta_msi.c8
-rw-r--r--arch/powerpc/platforms/4xx/machine_check.c2
-rw-r--r--arch/powerpc/platforms/4xx/msi.c281
-rw-r--r--arch/powerpc/platforms/4xx/pci.c7
-rw-r--r--arch/powerpc/platforms/4xx/uic.c7
-rw-r--r--arch/powerpc/platforms/512x/clock-commonclk.c62
-rw-r--r--arch/powerpc/platforms/512x/mpc5121_ads.c14
-rw-r--r--arch/powerpc/platforms/512x/mpc5121_ads_cpld.c26
-rw-r--r--arch/powerpc/platforms/512x/mpc512x.h4
-rw-r--r--arch/powerpc/platforms/512x/mpc512x_generic.c1
-rw-r--r--arch/powerpc/platforms/512x/mpc512x_shared.c8
-rw-r--r--arch/powerpc/platforms/52xx/Kconfig2
-rw-r--r--arch/powerpc/platforms/52xx/efika.c5
-rw-r--r--arch/powerpc/platforms/52xx/lite5200.c4
-rw-r--r--arch/powerpc/platforms/52xx/lite5200_pm.c2
-rw-r--r--arch/powerpc/platforms/52xx/lite5200_sleep.S6
-rw-r--r--arch/powerpc/platforms/52xx/media5200.c18
-rw-r--r--arch/powerpc/platforms/52xx/mpc5200_simple.c5
-rw-r--r--arch/powerpc/platforms/52xx/mpc52xx_common.c41
-rw-r--r--arch/powerpc/platforms/52xx/mpc52xx_gpt.c43
-rw-r--r--arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c13
-rw-r--r--arch/powerpc/platforms/52xx/mpc52xx_pci.c22
-rw-r--r--arch/powerpc/platforms/52xx/mpc52xx_pic.c5
-rw-r--r--arch/powerpc/platforms/52xx/mpc52xx_pm.c2
-rw-r--r--arch/powerpc/platforms/82xx/ep8248e.c1
-rw-r--r--arch/powerpc/platforms/82xx/km82xx.c1
-rw-r--r--arch/powerpc/platforms/82xx/mpc8272_ads.c2
-rw-r--r--arch/powerpc/platforms/82xx/pq2.c3
-rw-r--r--arch/powerpc/platforms/82xx/pq2ads-pci-pic.c21
-rw-r--r--arch/powerpc/platforms/82xx/pq2fads.c3
-rw-r--r--arch/powerpc/platforms/83xx/asp834x.c1
-rw-r--r--arch/powerpc/platforms/83xx/km83xx.c13
-rw-r--r--arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c30
-rw-r--r--arch/powerpc/platforms/83xx/misc.c16
-rw-r--r--arch/powerpc/platforms/83xx/mpc830x_rdb.c1
-rw-r--r--arch/powerpc/platforms/83xx/mpc831x_rdb.c1
-rw-r--r--arch/powerpc/platforms/83xx/mpc832x_mds.c2
-rw-r--r--arch/powerpc/platforms/83xx/mpc832x_rdb.c8
-rw-r--r--arch/powerpc/platforms/83xx/mpc834x_itx.c2
-rw-r--r--arch/powerpc/platforms/83xx/mpc834x_mds.c5
-rw-r--r--arch/powerpc/platforms/83xx/mpc836x_mds.c2
-rw-r--r--arch/powerpc/platforms/83xx/mpc836x_rdk.c2
-rw-r--r--arch/powerpc/platforms/83xx/mpc837x_mds.c5
-rw-r--r--arch/powerpc/platforms/83xx/mpc837x_rdb.c3
-rw-r--r--arch/powerpc/platforms/83xx/mpc83xx.h8
-rw-r--r--arch/powerpc/platforms/83xx/suspend-asm.S1
-rw-r--r--arch/powerpc/platforms/83xx/suspend.c59
-rw-r--r--arch/powerpc/platforms/83xx/usb.c8
-rw-r--r--arch/powerpc/platforms/85xx/Kconfig40
-rw-r--r--arch/powerpc/platforms/85xx/Makefile5
-rw-r--r--arch/powerpc/platforms/85xx/c293pcie.c2
-rw-r--r--arch/powerpc/platforms/85xx/corenet_generic.c12
-rw-r--r--arch/powerpc/platforms/85xx/ge_imp3a.c10
-rw-r--r--arch/powerpc/platforms/85xx/ksi8560.c3
-rw-r--r--arch/powerpc/platforms/85xx/mpc8536_ds.c1
-rw-r--r--arch/powerpc/platforms/85xx/mpc85xx_cds.c21
-rw-r--r--arch/powerpc/platforms/85xx/mpc85xx_ds.c2
-rw-r--r--arch/powerpc/platforms/85xx/mpc85xx_mds.c1
-rw-r--r--arch/powerpc/platforms/85xx/mpc85xx_pm_ops.c9
-rw-r--r--arch/powerpc/platforms/85xx/mpc85xx_rdb.c1
-rw-r--r--arch/powerpc/platforms/85xx/p1010rdb.c1
-rw-r--r--arch/powerpc/platforms/85xx/p1022_ds.c1
-rw-r--r--arch/powerpc/platforms/85xx/p1022_rdk.c1
-rw-r--r--arch/powerpc/platforms/85xx/p1023_rdb.c2
-rw-r--r--arch/powerpc/platforms/85xx/qemu_e500.c4
-rw-r--r--arch/powerpc/platforms/85xx/sbc8548.c134
-rw-r--r--arch/powerpc/platforms/85xx/sgy_cts1000.c35
-rw-r--r--arch/powerpc/platforms/85xx/smp.c28
-rw-r--r--arch/powerpc/platforms/85xx/socrates.c1
-rw-r--r--arch/powerpc/platforms/85xx/socrates_fpga_pic.c2
-rw-r--r--arch/powerpc/platforms/85xx/socrates_fpga_pic.h2
-rw-r--r--arch/powerpc/platforms/85xx/stx_gp3.c1
-rw-r--r--arch/powerpc/platforms/85xx/tqm85xx.c1
-rw-r--r--arch/powerpc/platforms/85xx/xes_mpc85xx.c6
-rw-r--r--arch/powerpc/platforms/86xx/Kconfig8
-rw-r--r--arch/powerpc/platforms/86xx/Makefile1
-rw-r--r--arch/powerpc/platforms/86xx/gef_ppc9a.c4
-rw-r--r--arch/powerpc/platforms/86xx/gef_sbc310.c4
-rw-r--r--arch/powerpc/platforms/86xx/gef_sbc610.c4
-rw-r--r--arch/powerpc/platforms/86xx/mpc8610_hpcd.c3
-rw-r--r--arch/powerpc/platforms/86xx/mpc86xx_hpcn.c7
-rw-r--r--arch/powerpc/platforms/86xx/mpc86xx_smp.c5
-rw-r--r--arch/powerpc/platforms/86xx/mvme7100.c1
-rw-r--r--arch/powerpc/platforms/86xx/sbc8641d.c87
-rw-r--r--arch/powerpc/platforms/8xx/Kconfig43
-rw-r--r--arch/powerpc/platforms/8xx/Makefile2
-rw-r--r--arch/powerpc/platforms/8xx/adder875.c4
-rw-r--r--arch/powerpc/platforms/8xx/cpm1-ic.c188
-rw-r--r--arch/powerpc/platforms/8xx/cpm1.c148
-rw-r--r--arch/powerpc/platforms/8xx/ep88xc.c3
-rw-r--r--arch/powerpc/platforms/8xx/m8xx_setup.c40
-rw-r--r--arch/powerpc/platforms/8xx/machine_check.c2
-rw-r--r--arch/powerpc/platforms/8xx/micropatch.c12
-rw-r--r--arch/powerpc/platforms/8xx/mpc86xads_setup.c3
-rw-r--r--arch/powerpc/platforms/8xx/mpc885ads_setup.c3
-rw-r--r--arch/powerpc/platforms/8xx/mpc8xx.h1
-rw-r--r--arch/powerpc/platforms/8xx/pic.c19
-rw-r--r--arch/powerpc/platforms/8xx/pic.h2
-rw-r--r--arch/powerpc/platforms/8xx/tqm8xx_setup.c6
-rw-r--r--arch/powerpc/platforms/Kconfig46
-rw-r--r--arch/powerpc/platforms/Kconfig.cputype281
-rw-r--r--arch/powerpc/platforms/Makefile2
-rw-r--r--arch/powerpc/platforms/amigaone/setup.c12
-rw-r--r--arch/powerpc/platforms/book3s/Kconfig15
-rw-r--r--arch/powerpc/platforms/book3s/Makefile2
-rw-r--r--arch/powerpc/platforms/book3s/vas-api.c636
-rw-r--r--arch/powerpc/platforms/cell/Kconfig10
-rw-r--r--arch/powerpc/platforms/cell/Makefile1
-rw-r--r--arch/powerpc/platforms/cell/axon_msi.c23
-rw-r--r--arch/powerpc/platforms/cell/cbe_powerbutton.c2
-rw-r--r--arch/powerpc/platforms/cell/cbe_regs.c45
-rw-r--r--arch/powerpc/platforms/cell/cbe_thermal.c3
-rw-r--r--arch/powerpc/platforms/cell/cpufreq_spudemand.c26
-rw-r--r--arch/powerpc/platforms/cell/interrupt.c13
-rw-r--r--arch/powerpc/platforms/cell/iommu.c32
-rw-r--r--arch/powerpc/platforms/cell/pervasive.c5
-rw-r--r--arch/powerpc/platforms/cell/pervasive.h3
-rw-r--r--arch/powerpc/platforms/cell/pmu.c1
-rw-r--r--arch/powerpc/platforms/cell/ras.c8
-rw-r--r--arch/powerpc/platforms/cell/ras.h9
-rw-r--r--arch/powerpc/platforms/cell/setup.c4
-rw-r--r--arch/powerpc/platforms/cell/smp.c6
-rw-r--r--arch/powerpc/platforms/cell/spider-pci.c6
-rw-r--r--arch/powerpc/platforms/cell/spider-pic.c16
-rw-r--r--arch/powerpc/platforms/cell/spu_base.c11
-rw-r--r--arch/powerpc/platforms/cell/spu_callbacks.c8
-rw-r--r--arch/powerpc/platforms/cell/spu_manage.c23
-rw-r--r--arch/powerpc/platforms/cell/spu_notify.c55
-rw-r--r--arch/powerpc/platforms/cell/spu_priv1_mmio.c1
-rw-r--r--arch/powerpc/platforms/cell/spufs/.gitignore1
-rw-r--r--arch/powerpc/platforms/cell/spufs/coredump.c107
-rw-r--r--arch/powerpc/platforms/cell/spufs/file.c340
-rw-r--r--arch/powerpc/platforms/cell/spufs/inode.c27
-rw-r--r--arch/powerpc/platforms/cell/spufs/run.c6
-rw-r--r--arch/powerpc/platforms/cell/spufs/sched.c14
-rw-r--r--arch/powerpc/platforms/cell/spufs/spufs.h10
-rw-r--r--arch/powerpc/platforms/cell/spufs/switch.c8
-rw-r--r--arch/powerpc/platforms/chrp/Kconfig2
-rw-r--r--arch/powerpc/platforms/chrp/chrp.h1
-rw-r--r--arch/powerpc/platforms/chrp/nvram.c2
-rw-r--r--arch/powerpc/platforms/chrp/pci.c15
-rw-r--r--arch/powerpc/platforms/chrp/pegasos_eth.c2
-rw-r--r--arch/powerpc/platforms/chrp/setup.c34
-rw-r--r--arch/powerpc/platforms/chrp/smp.c3
-rw-r--r--arch/powerpc/platforms/chrp/time.c4
-rw-r--r--arch/powerpc/platforms/embedded6xx/Kconfig7
-rw-r--r--arch/powerpc/platforms/embedded6xx/flipper-pic.c1
-rw-r--r--arch/powerpc/platforms/embedded6xx/gamecube.c1
-rw-r--r--arch/powerpc/platforms/embedded6xx/hlwd-pic.c20
-rw-r--r--arch/powerpc/platforms/embedded6xx/hlwd-pic.h2
-rw-r--r--arch/powerpc/platforms/embedded6xx/holly.c25
-rw-r--r--arch/powerpc/platforms/embedded6xx/linkstation.c11
-rw-r--r--arch/powerpc/platforms/embedded6xx/ls_uart.c4
-rw-r--r--arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c26
-rw-r--r--arch/powerpc/platforms/embedded6xx/mvme5100.c15
-rw-r--r--arch/powerpc/platforms/embedded6xx/storcenter.c12
-rw-r--r--arch/powerpc/platforms/embedded6xx/usbgecko_udbg.c7
-rw-r--r--arch/powerpc/platforms/embedded6xx/wii.c44
-rw-r--r--arch/powerpc/platforms/fsl_uli1575.c1
-rw-r--r--arch/powerpc/platforms/maple/Kconfig3
-rw-r--r--arch/powerpc/platforms/maple/pci.c7
-rw-r--r--arch/powerpc/platforms/maple/setup.c41
-rw-r--r--arch/powerpc/platforms/maple/time.c4
-rw-r--r--arch/powerpc/platforms/microwatt/Kconfig11
-rw-r--r--arch/powerpc/platforms/microwatt/Makefile1
-rw-r--r--arch/powerpc/platforms/microwatt/microwatt.h7
-rw-r--r--arch/powerpc/platforms/microwatt/rng.c44
-rw-r--r--arch/powerpc/platforms/microwatt/setup.c49
-rw-r--r--arch/powerpc/platforms/pasemi/Kconfig3
-rw-r--r--arch/powerpc/platforms/pasemi/dma_lib.c6
-rw-r--r--arch/powerpc/platforms/pasemi/idle.c5
-rw-r--r--arch/powerpc/platforms/pasemi/iommu.c5
-rw-r--r--arch/powerpc/platforms/pasemi/misc.c7
-rw-r--r--arch/powerpc/platforms/pasemi/msi.c13
-rw-r--r--arch/powerpc/platforms/pasemi/pasemi.h2
-rw-r--r--arch/powerpc/platforms/pasemi/pci.c13
-rw-r--r--arch/powerpc/platforms/pasemi/setup.c7
-rw-r--r--arch/powerpc/platforms/powermac/Kconfig4
-rw-r--r--arch/powerpc/platforms/powermac/Makefile2
-rw-r--r--arch/powerpc/platforms/powermac/backlight.c1
-rw-r--r--arch/powerpc/platforms/powermac/bootx_init.c19
-rw-r--r--arch/powerpc/platforms/powermac/cache.S6
-rw-r--r--arch/powerpc/platforms/powermac/feature.c20
-rw-r--r--arch/powerpc/platforms/powermac/low_i2c.c14
-rw-r--r--arch/powerpc/platforms/powermac/nvram.c8
-rw-r--r--arch/powerpc/platforms/powermac/pci.c7
-rw-r--r--arch/powerpc/platforms/powermac/pfunc_base.c12
-rw-r--r--arch/powerpc/platforms/powermac/pfunc_core.c4
-rw-r--r--arch/powerpc/platforms/powermac/pic.c41
-rw-r--r--arch/powerpc/platforms/powermac/pmac.h7
-rw-r--r--arch/powerpc/platforms/powermac/setup.c28
-rw-r--r--arch/powerpc/platforms/powermac/sleep.S146
-rw-r--r--arch/powerpc/platforms/powermac/smp.c47
-rw-r--r--arch/powerpc/platforms/powermac/time.c3
-rw-r--r--arch/powerpc/platforms/powermac/udbg_adb.c2
-rw-r--r--arch/powerpc/platforms/powermac/udbg_scc.c14
-rw-r--r--arch/powerpc/platforms/powernv/Kconfig27
-rw-r--r--arch/powerpc/platforms/powernv/Makefile14
-rw-r--r--arch/powerpc/platforms/powernv/eeh-powernv.c276
-rw-r--r--arch/powerpc/platforms/powernv/idle.c331
-rw-r--r--arch/powerpc/platforms/powernv/memtrace.c230
-rw-r--r--arch/powerpc/platforms/powernv/npu-dma.c630
-rw-r--r--arch/powerpc/platforms/powernv/ocxl.c137
-rw-r--r--arch/powerpc/platforms/powernv/opal-async.c2
-rw-r--r--arch/powerpc/platforms/powernv/opal-call.c6
-rw-r--r--arch/powerpc/platforms/powernv/opal-core.c73
-rw-r--r--arch/powerpc/platforms/powernv/opal-dump.c59
-rw-r--r--arch/powerpc/platforms/powernv/opal-elog.c51
-rw-r--r--arch/powerpc/platforms/powernv/opal-fadump.c106
-rw-r--r--arch/powerpc/platforms/powernv/opal-fadump.h10
-rw-r--r--arch/powerpc/platforms/powernv/opal-flash.c6
-rw-r--r--arch/powerpc/platforms/powernv/opal-hmi.c29
-rw-r--r--arch/powerpc/platforms/powernv/opal-imc.c53
-rw-r--r--arch/powerpc/platforms/powernv/opal-irqchip.c11
-rw-r--r--arch/powerpc/platforms/powernv/opal-lpc.c7
-rw-r--r--arch/powerpc/platforms/powernv/opal-memory-errors.c2
-rw-r--r--arch/powerpc/platforms/powernv/opal-msglog.c6
-rw-r--r--arch/powerpc/platforms/powernv/opal-power.c2
-rw-r--r--arch/powerpc/platforms/powernv/opal-powercap.c8
-rw-r--r--arch/powerpc/platforms/powernv/opal-prd.c19
-rw-r--r--arch/powerpc/platforms/powernv/opal-psr.c6
-rw-r--r--arch/powerpc/platforms/powernv/opal-rtc.c2
-rw-r--r--arch/powerpc/platforms/powernv/opal-sensor-groups.c10
-rw-r--r--arch/powerpc/platforms/powernv/opal-tracepoints.c1
-rw-r--r--arch/powerpc/platforms/powernv/opal-wrappers.S2
-rw-r--r--arch/powerpc/platforms/powernv/opal-xscom.c4
-rw-r--r--arch/powerpc/platforms/powernv/opal.c23
-rw-r--r--arch/powerpc/platforms/powernv/pci-cxl.c23
-rw-r--r--arch/powerpc/platforms/powernv/pci-ioda-tce.c39
-rw-r--r--arch/powerpc/platforms/powernv/pci-ioda.c1747
-rw-r--r--arch/powerpc/platforms/powernv/pci-sriov.c760
-rw-r--r--arch/powerpc/platforms/powernv/pci.c113
-rw-r--r--arch/powerpc/platforms/powernv/pci.h156
-rw-r--r--arch/powerpc/platforms/powernv/powernv.h13
-rw-r--r--arch/powerpc/platforms/powernv/rng.c138
-rw-r--r--arch/powerpc/platforms/powernv/setup.c122
-rw-r--r--arch/powerpc/platforms/powernv/smp.c12
-rw-r--r--arch/powerpc/platforms/powernv/subcore.c10
-rw-r--r--arch/powerpc/platforms/powernv/subcore.h2
-rw-r--r--arch/powerpc/platforms/powernv/ultravisor.c1
-rw-r--r--arch/powerpc/platforms/powernv/vas-debug.c64
-rw-r--r--arch/powerpc/platforms/powernv/vas-fault.c245
-rw-r--r--arch/powerpc/platforms/powernv/vas-trace.h4
-rw-r--r--arch/powerpc/platforms/powernv/vas-window.c385
-rw-r--r--arch/powerpc/platforms/powernv/vas.c90
-rw-r--r--arch/powerpc/platforms/powernv/vas.h94
-rw-r--r--arch/powerpc/platforms/ps3/Kconfig12
-rw-r--r--arch/powerpc/platforms/ps3/device-init.c18
-rw-r--r--arch/powerpc/platforms/ps3/gelic_udbg.c2
-rw-r--r--arch/powerpc/platforms/ps3/htab.c4
-rw-r--r--arch/powerpc/platforms/ps3/interrupt.c5
-rw-r--r--arch/powerpc/platforms/ps3/mm.c79
-rw-r--r--arch/powerpc/platforms/ps3/os-area.c10
-rw-r--r--arch/powerpc/platforms/ps3/platform.h14
-rw-r--r--arch/powerpc/platforms/ps3/repository.c20
-rw-r--r--arch/powerpc/platforms/ps3/setup.c47
-rw-r--r--arch/powerpc/platforms/ps3/smp.c2
-rw-r--r--arch/powerpc/platforms/ps3/spu.c6
-rw-r--r--arch/powerpc/platforms/ps3/system-bus.c29
-rw-r--r--arch/powerpc/platforms/pseries/Kconfig37
-rw-r--r--arch/powerpc/platforms/pseries/Makefile15
-rw-r--r--arch/powerpc/platforms/pseries/cc_platform.c26
-rw-r--r--arch/powerpc/platforms/pseries/cmm.c65
-rw-r--r--arch/powerpc/platforms/pseries/dlpar.c34
-rw-r--r--arch/powerpc/platforms/pseries/dtl.c93
-rw-r--r--arch/powerpc/platforms/pseries/eeh_pseries.c701
-rw-r--r--arch/powerpc/platforms/pseries/event_sources.c2
-rw-r--r--arch/powerpc/platforms/pseries/firmware.c6
-rw-r--r--arch/powerpc/platforms/pseries/hotplug-cpu.c700
-rw-r--r--arch/powerpc/platforms/pseries/hotplug-memory.c343
-rw-r--r--arch/powerpc/platforms/pseries/hvCall.S43
-rw-r--r--arch/powerpc/platforms/pseries/hvCall_inst.c25
-rw-r--r--arch/powerpc/platforms/pseries/hvcserver.c4
-rw-r--r--arch/powerpc/platforms/pseries/ibmebus.c19
-rw-r--r--arch/powerpc/platforms/pseries/iommu.c882
-rw-r--r--arch/powerpc/platforms/pseries/kexec.c10
-rw-r--r--arch/powerpc/platforms/pseries/lpar.c155
-rw-r--r--arch/powerpc/platforms/pseries/lparcfg.c141
-rw-r--r--arch/powerpc/platforms/pseries/mobility.c583
-rw-r--r--arch/powerpc/platforms/pseries/msi.c322
-rw-r--r--arch/powerpc/platforms/pseries/nvram.c2
-rw-r--r--arch/powerpc/platforms/pseries/of_helpers.c2
-rw-r--r--arch/powerpc/platforms/pseries/offline_states.h38
-rw-r--r--arch/powerpc/platforms/pseries/papr_platform_attributes.c362
-rw-r--r--arch/powerpc/platforms/pseries/papr_scm.c1136
-rw-r--r--arch/powerpc/platforms/pseries/pci.c90
-rw-r--r--arch/powerpc/platforms/pseries/pci_dlpar.c16
-rw-r--r--arch/powerpc/platforms/pseries/plpks.c461
-rw-r--r--arch/powerpc/platforms/pseries/plpks.h71
-rw-r--r--arch/powerpc/platforms/pseries/pmem.c10
-rw-r--r--arch/powerpc/platforms/pseries/power.c2
-rw-r--r--arch/powerpc/platforms/pseries/pseries.h22
-rw-r--r--arch/powerpc/platforms/pseries/ras.c260
-rw-r--r--arch/powerpc/platforms/pseries/reconfig.c6
-rw-r--r--arch/powerpc/platforms/pseries/rng.c12
-rw-r--r--arch/powerpc/platforms/pseries/rtas-fadump.c25
-rw-r--r--arch/powerpc/platforms/pseries/scanlog.c195
-rw-r--r--arch/powerpc/platforms/pseries/setup.c223
-rw-r--r--arch/powerpc/platforms/pseries/smp.c97
-rw-r--r--arch/powerpc/platforms/pseries/suspend.c102
-rw-r--r--arch/powerpc/platforms/pseries/svm.c11
-rw-r--r--arch/powerpc/platforms/pseries/vas-sysfs.c281
-rw-r--r--arch/powerpc/platforms/pseries/vas.c1071
-rw-r--r--arch/powerpc/platforms/pseries/vas.h155
-rw-r--r--arch/powerpc/platforms/pseries/vio.c53
-rw-r--r--arch/powerpc/platforms/pseries/vphn.c3
-rw-r--r--arch/powerpc/purgatory/.gitignore2
-rw-r--r--arch/powerpc/purgatory/Makefile10
-rw-r--r--arch/powerpc/purgatory/kexec-purgatory.S14
-rw-r--r--arch/powerpc/purgatory/trampoline_64.S (renamed from arch/powerpc/purgatory/trampoline.S)55
-rw-r--r--arch/powerpc/sysdev/Kconfig6
-rw-r--r--arch/powerpc/sysdev/Makefile3
-rw-r--r--arch/powerpc/sysdev/cpm2.c9
-rw-r--r--arch/powerpc/sysdev/cpm2_pic.c2
-rw-r--r--arch/powerpc/sysdev/cpm_common.c2
-rw-r--r--arch/powerpc/sysdev/dart_iommu.c13
-rw-r--r--arch/powerpc/sysdev/dcr-low.S2
-rw-r--r--arch/powerpc/sysdev/dcr.c2
-rw-r--r--arch/powerpc/sysdev/ehv_pic.c1
-rw-r--r--arch/powerpc/sysdev/fsl_85xx_cache_ctlr.h88
-rw-r--r--arch/powerpc/sysdev/fsl_85xx_cache_sram.c147
-rw-r--r--arch/powerpc/sysdev/fsl_85xx_l2ctlr.c216
-rw-r--r--arch/powerpc/sysdev/fsl_gtm.c4
-rw-r--r--arch/powerpc/sysdev/fsl_lbc.c5
-rw-r--r--arch/powerpc/sysdev/fsl_mpic_err.c16
-rw-r--r--arch/powerpc/sysdev/fsl_msi.c26
-rw-r--r--arch/powerpc/sysdev/fsl_pci.c37
-rw-r--r--arch/powerpc/sysdev/fsl_pci.h3
-rw-r--r--arch/powerpc/sysdev/fsl_rio.c20
-rw-r--r--arch/powerpc/sysdev/fsl_soc.c1
-rw-r--r--arch/powerpc/sysdev/ge/ge_pic.c6
-rw-r--r--arch/powerpc/sysdev/grackle.c2
-rw-r--r--arch/powerpc/sysdev/i8259.c7
-rw-r--r--arch/powerpc/sysdev/indirect_pci.c1
-rw-r--r--arch/powerpc/sysdev/ipic.c5
-rw-r--r--arch/powerpc/sysdev/mmio_nvram.c2
-rw-r--r--arch/powerpc/sysdev/mpc5xxx_clocks.c41
-rw-r--r--arch/powerpc/sysdev/mpic.c15
-rw-r--r--arch/powerpc/sysdev/mpic.h10
-rw-r--r--arch/powerpc/sysdev/mpic_msgr.c16
-rw-r--r--arch/powerpc/sysdev/mpic_msi.c11
-rw-r--r--arch/powerpc/sysdev/mpic_timer.c8
-rw-r--r--arch/powerpc/sysdev/mpic_u3msi.c15
-rw-r--r--arch/powerpc/sysdev/msi_bitmap.c1
-rw-r--r--arch/powerpc/sysdev/of_rtc.c2
-rw-r--r--arch/powerpc/sysdev/pmi.c3
-rw-r--r--arch/powerpc/sysdev/rtc_cmos_setup.c2
-rw-r--r--arch/powerpc/sysdev/tsi108_dev.c11
-rw-r--r--arch/powerpc/sysdev/tsi108_pci.c8
-rw-r--r--arch/powerpc/sysdev/udbg_memcons.c2
-rw-r--r--arch/powerpc/sysdev/xics/Kconfig3
-rw-r--r--arch/powerpc/sysdev/xics/Makefile1
-rw-r--r--arch/powerpc/sysdev/xics/icp-hv.c4
-rw-r--r--arch/powerpc/sysdev/xics/icp-native.c3
-rw-r--r--arch/powerpc/sysdev/xics/icp-opal.c4
-rw-r--r--arch/powerpc/sysdev/xics/ics-native.c254
-rw-r--r--arch/powerpc/sysdev/xics/ics-opal.c41
-rw-r--r--arch/powerpc/sysdev/xics/ics-rtas.c41
-rw-r--r--arch/powerpc/sysdev/xics/xics-common.c143
-rw-r--r--arch/powerpc/sysdev/xilinx_intc.c88
-rw-r--r--arch/powerpc/sysdev/xilinx_pci.c132
-rw-r--r--arch/powerpc/sysdev/xive/common.c706
-rw-r--r--arch/powerpc/sysdev/xive/native.c107
-rw-r--r--arch/powerpc/sysdev/xive/spapr.c138
-rw-r--r--arch/powerpc/sysdev/xive/xive-internal.h17
-rwxr-xr-xarch/powerpc/tools/checkpatch.sh1
-rw-r--r--arch/powerpc/tools/head_check.sh32
-rwxr-xr-xarch/powerpc/tools/relocs_check.sh7
-rwxr-xr-xarch/powerpc/tools/unrel_branch_check.sh122
-rw-r--r--arch/powerpc/xmon/Makefile8
-rw-r--r--arch/powerpc/xmon/nonstdio.c2
-rw-r--r--arch/powerpc/xmon/ppc-opc.c2
-rw-r--r--arch/powerpc/xmon/ppc.h2
-rw-r--r--arch/powerpc/xmon/spr_access.S4
-rw-r--r--arch/powerpc/xmon/xmon.c667
-rw-r--r--arch/powerpc/xmon/xmon_bpts.S11
-rw-r--r--arch/powerpc/xmon/xmon_bpts.h14
1301 files changed, 67935 insertions, 58752 deletions
diff --git a/arch/powerpc/Kbuild b/arch/powerpc/Kbuild
index 5e2f9eaa3ee7..22cd0d55a892 100644
--- a/arch/powerpc/Kbuild
+++ b/arch/powerpc/Kbuild
@@ -16,3 +16,6 @@ obj-$(CONFIG_KVM) += kvm/
obj-$(CONFIG_PERF_EVENTS) += perf/
obj-$(CONFIG_KEXEC_CORE) += kexec/
obj-$(CONFIG_KEXEC_FILE) += purgatory/
+
+# for cleaning
+subdir- += boot
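
The new "subdir- += boot" hook uses the standard kbuild convention for directories that are not linked into vmlinux but still leave generated files behind: anything listed in subdir- is descended into on "make clean". A minimal sketch of the idea (illustrative fragment only, not part of the patch):

# Directories whose objects are linked into the kernel image
obj-$(CONFIG_KEXEC_CORE)  += kexec/
obj-$(CONFIG_KEXEC_FILE)  += purgatory/

# Built by its own target (the boot wrapper), so not part of obj-y,
# but "make clean" must still descend into it to remove its artifacts
subdir-                   += boot
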
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 497b7d0b2d7e..2ca5418457ed 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -9,6 +9,10 @@ config 64BIT
bool
default y if PPC64
+config LIVEPATCH_64
+ def_bool PPC64
+ depends on LIVEPATCH
+
config MMU
bool
default y
@@ -55,15 +59,9 @@ config ARCH_MMAP_RND_COMPAT_BITS_MIN
default 9 if PPC_16K_PAGES # 9 = 23 (8MB) - 14 (16K)
default 11 # 11 = 23 (8MB) - 12 (4K)
-config HAVE_SETUP_PER_CPU_AREA
- def_bool PPC64
-
-config NEED_PER_CPU_EMBED_FIRST_CHUNK
- def_bool PPC64
-
config NR_IRQS
int "Number of virtual interrupt numbers"
- range 32 32768
+ range 32 1048576
default "512"
help
This defines the number of virtual interrupt numbers the kernel
@@ -84,17 +82,13 @@ config PPC_WATCHDOG
help
This is a placeholder when the powerpc hardlockup detector
watchdog is selected (arch/powerpc/kernel/watchdog.c). It is
- seleted via the generic lockup detector menu which is why we
+ selected via the generic lockup detector menu which is why we
have no standalone config option for it here.
config STACKTRACE_SUPPORT
bool
default y
-config TRACE_IRQFLAGS_SUPPORT
- bool
- default y
-
config LOCKDEP_SUPPORT
bool
default y
@@ -115,61 +109,95 @@ config PPC
# Please keep this list sorted alphabetically.
#
select ARCH_32BIT_OFF_T if PPC32
+ select ARCH_DISABLE_KASAN_INLINE if PPC_RADIX_MMU
+ select ARCH_ENABLE_MEMORY_HOTPLUG
+ select ARCH_ENABLE_MEMORY_HOTREMOVE
+ select ARCH_HAS_COPY_MC if PPC64
+ select ARCH_HAS_CURRENT_STACK_POINTER
select ARCH_HAS_DEBUG_VIRTUAL
+ select ARCH_HAS_DEBUG_VM_PGTABLE
+ select ARCH_HAS_DEBUG_WX if STRICT_KERNEL_RWX
select ARCH_HAS_DEVMEM_IS_ALLOWED
- select ARCH_HAS_ELF_RANDOMIZE
+ select ARCH_HAS_DMA_MAP_DIRECT if PPC_PSERIES
select ARCH_HAS_FORTIFY_SOURCE
select ARCH_HAS_GCOV_PROFILE_ALL
- select ARCH_HAS_KCOV
select ARCH_HAS_HUGEPD if HUGETLB_PAGE
+ select ARCH_HAS_KCOV
+ select ARCH_HAS_MEMBARRIER_CALLBACKS
+ select ARCH_HAS_MEMBARRIER_SYNC_CORE
+ select ARCH_HAS_MEMREMAP_COMPAT_ALIGN if PPC_64S_HASH_MMU
select ARCH_HAS_MMIOWB if PPC64
+ select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
select ARCH_HAS_PHYS_TO_DMA
select ARCH_HAS_PMEM_API
select ARCH_HAS_PTE_DEVMAP if PPC_BOOK3S_64
select ARCH_HAS_PTE_SPECIAL
- select ARCH_HAS_MEMBARRIER_CALLBACKS
select ARCH_HAS_SCALED_CPUTIME if VIRT_CPU_ACCOUNTING_NATIVE && PPC_BOOK3S_64
- select ARCH_HAS_STRICT_KERNEL_RWX if ((PPC_BOOK3S_64 || PPC32) && !HIBERNATION)
+ select ARCH_HAS_SET_MEMORY
+ select ARCH_HAS_STRICT_KERNEL_RWX if (PPC_BOOK3S || PPC_8xx || 40x) && !HIBERNATION
+ select ARCH_HAS_STRICT_KERNEL_RWX if PPC_85xx && !HIBERNATION && !RANDOMIZE_BASE
+ select ARCH_HAS_STRICT_MODULE_RWX if ARCH_HAS_STRICT_KERNEL_RWX
+ select ARCH_HAS_SYSCALL_WRAPPER if !SPU_BASE && !COMPAT
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
select ARCH_HAS_UACCESS_FLUSHCACHE
- select ARCH_HAS_UACCESS_MCSAFE if PPC64
select ARCH_HAS_UBSAN_SANITIZE_ALL
select ARCH_HAVE_NMI_SAFE_CMPXCHG
select ARCH_KEEP_MEMBLOCK
select ARCH_MIGHT_HAVE_PC_PARPORT
select ARCH_MIGHT_HAVE_PC_SERIO
select ARCH_OPTIONAL_KERNEL_RWX if ARCH_HAS_STRICT_KERNEL_RWX
+ select ARCH_OPTIONAL_KERNEL_RWX_DEFAULT
+ select ARCH_SPLIT_ARG64 if PPC32
+ select ARCH_STACKWALK
select ARCH_SUPPORTS_ATOMIC_RMW
+ select ARCH_SUPPORTS_DEBUG_PAGEALLOC if PPC_BOOK3S || PPC_8xx || 40x
select ARCH_USE_BUILTIN_BSWAP
select ARCH_USE_CMPXCHG_LOCKREF if PPC64
+ select ARCH_USE_MEMTEST
+ select ARCH_USE_QUEUED_RWLOCKS if PPC_QUEUED_SPINLOCKS
+ select ARCH_USE_QUEUED_SPINLOCKS if PPC_QUEUED_SPINLOCKS
+ select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
select ARCH_WANT_IPC_PARSE_VERSION
+ select ARCH_WANT_IRQS_OFF_ACTIVATE_MM
+ select ARCH_WANT_LD_ORPHAN_WARN
+ select ARCH_WANTS_MODULES_DATA_IN_VMALLOC if PPC_BOOK3S_32 || PPC_8xx
+ select ARCH_WANTS_NO_INSTR
select ARCH_WEAK_RELEASE_ACQUIRE
select BINFMT_ELF
select BUILDTIME_TABLE_SORT
select CLONE_BACKWARDS
+ select CPUMASK_OFFSTACK if NR_CPUS >= 8192
select DCACHE_WORD_ACCESS if PPC64 && CPU_LITTLE_ENDIAN
+ select DMA_OPS_BYPASS if PPC64
+ select DMA_OPS if PPC64
select DYNAMIC_FTRACE if FUNCTION_TRACER
select EDAC_ATOMIC_SCRUB
select EDAC_SUPPORT
select GENERIC_ATOMIC64 if PPC32
- select GENERIC_CLOCKEVENTS
select GENERIC_CLOCKEVENTS_BROADCAST if SMP
select GENERIC_CMOS_UPDATE
select GENERIC_CPU_AUTOPROBE
select GENERIC_CPU_VULNERABILITIES if PPC_BARRIER_NOSPEC
select GENERIC_EARLY_IOREMAP
+ select GENERIC_GETTIMEOFDAY
select GENERIC_IRQ_SHOW
select GENERIC_IRQ_SHOW_LEVEL
select GENERIC_PCI_IOMAP if PCI
+ select GENERIC_PTDUMP
select GENERIC_SMP_IDLE_THREAD
- select GENERIC_STRNCPY_FROM_USER
- select GENERIC_STRNLEN_USER
select GENERIC_TIME_VSYSCALL
+ select GENERIC_VDSO_TIME_NS
select HAVE_ARCH_AUDITSYSCALL
- select HAVE_ARCH_HUGE_VMAP if PPC_BOOK3S_64 && PPC_RADIX_MMU
+ select HAVE_ARCH_HUGE_VMALLOC if HAVE_ARCH_HUGE_VMAP
+ select HAVE_ARCH_HUGE_VMAP if PPC_RADIX_MMU || PPC_8xx
select HAVE_ARCH_JUMP_LABEL
- select HAVE_ARCH_KASAN if PPC32
- select HAVE_ARCH_KASAN_VMALLOC if PPC32
+ select HAVE_ARCH_JUMP_LABEL_RELATIVE
+ select HAVE_ARCH_KASAN if PPC32 && PPC_PAGE_SHIFT <= 14
+ select HAVE_ARCH_KASAN if PPC_RADIX_MMU
+ select HAVE_ARCH_KASAN if PPC_BOOK3E_64
+ select HAVE_ARCH_KASAN_VMALLOC if HAVE_ARCH_KASAN
+ select HAVE_ARCH_KFENCE if ARCH_SUPPORTS_DEBUG_PAGEALLOC
+ select HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET
select HAVE_ARCH_KGDB
select HAVE_ARCH_MMAP_RND_BITS
select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT
@@ -177,28 +205,28 @@ config PPC
select HAVE_ARCH_SECCOMP_FILTER
select HAVE_ARCH_TRACEHOOK
select HAVE_ASM_MODVERSIONS
+ select HAVE_CONTEXT_TRACKING_USER if PPC64
select HAVE_C_RECORDMCOUNT
- select HAVE_CBPF_JIT if !PPC64
- select HAVE_STACKPROTECTOR if PPC64 && $(cc-option,-mstack-protector-guard=tls -mstack-protector-guard-reg=r13)
- select HAVE_STACKPROTECTOR if PPC32 && $(cc-option,-mstack-protector-guard=tls -mstack-protector-guard-reg=r2)
- select HAVE_CONTEXT_TRACKING if PPC64
- select HAVE_COPY_THREAD_TLS
select HAVE_DEBUG_KMEMLEAK
select HAVE_DEBUG_STACKOVERFLOW
select HAVE_DYNAMIC_FTRACE
- select HAVE_DYNAMIC_FTRACE_WITH_REGS if MPROFILE_KERNEL
- select HAVE_EBPF_JIT if PPC64
- select HAVE_EFFICIENT_UNALIGNED_ACCESS if !(CPU_LITTLE_ENDIAN && POWER7_CPU)
+ select HAVE_DYNAMIC_FTRACE_WITH_ARGS if MPROFILE_KERNEL || PPC32
+ select HAVE_DYNAMIC_FTRACE_WITH_REGS if MPROFILE_KERNEL || PPC32
+ select HAVE_EBPF_JIT
+ select HAVE_EFFICIENT_UNALIGNED_ACCESS
select HAVE_FAST_GUP
select HAVE_FTRACE_MCOUNT_RECORD
+ select HAVE_FUNCTION_DESCRIPTORS if PPC64_ELF_ABI_V1
select HAVE_FUNCTION_ERROR_INJECTION
select HAVE_FUNCTION_GRAPH_TRACER
select HAVE_FUNCTION_TRACER
select HAVE_GCC_PLUGINS if GCC_VERSION >= 50200 # plugin support on gcc <= 5.1 is buggy on PPC
+ select HAVE_GENERIC_VDSO
+ select HAVE_HARDLOCKUP_DETECTOR_ARCH if PPC_BOOK3S_64 && SMP
+ select HAVE_HARDLOCKUP_DETECTOR_PERF if PERF_EVENTS && HAVE_PERF_EVENTS_NMI && !HAVE_HARDLOCKUP_DETECTOR_ARCH
select HAVE_HW_BREAKPOINT if PERF_EVENTS && (PPC_BOOK3S || PPC_8xx)
- select HAVE_IDE
select HAVE_IOREMAP_PROT
- select HAVE_IRQ_EXIT_ON_IRQ_STACK
+ select HAVE_IRQ_TIME_ACCOUNTING
select HAVE_KERNEL_GZIP
select HAVE_KERNEL_LZMA if DEFAULT_UIMAGE
select HAVE_KERNEL_LZO if DEFAULT_UIMAGE
@@ -208,30 +236,35 @@ config PPC
select HAVE_KRETPROBES
select HAVE_LD_DEAD_CODE_DATA_ELIMINATION
select HAVE_LIVEPATCH if HAVE_DYNAMIC_FTRACE_WITH_REGS
- select HAVE_MEMBLOCK_NODE_MAP
select HAVE_MOD_ARCH_SPECIFIC
select HAVE_NMI if PERF_EVENTS || (PPC64 && PPC_BOOK3S)
- select HAVE_HARDLOCKUP_DETECTOR_ARCH if (PPC64 && PPC_BOOK3S)
- select HAVE_OPROFILE
- select HAVE_OPTPROBES if PPC64
+ select HAVE_OPTPROBES
select HAVE_PERF_EVENTS
select HAVE_PERF_EVENTS_NMI if PPC64
- select HAVE_HARDLOCKUP_DETECTOR_PERF if PERF_EVENTS && HAVE_PERF_EVENTS_NMI && !HAVE_HARDLOCKUP_DETECTOR_ARCH
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
- select MMU_GATHER_RCU_TABLE_FREE
- select MMU_GATHER_PAGE_SIZE
select HAVE_REGS_AND_STACK_ACCESS_API
- select HAVE_RELIABLE_STACKTRACE if PPC_BOOK3S_64 && CPU_LITTLE_ENDIAN
+ select HAVE_RELIABLE_STACKTRACE
+ select HAVE_RSEQ
+ select HAVE_SETUP_PER_CPU_AREA if PPC64
+ select HAVE_SOFTIRQ_ON_OWN_STACK
+ select HAVE_STACKPROTECTOR if PPC32 && $(cc-option,-mstack-protector-guard=tls -mstack-protector-guard-reg=r2)
+ select HAVE_STACKPROTECTOR if PPC64 && $(cc-option,-mstack-protector-guard=tls -mstack-protector-guard-reg=r13)
+ select HAVE_STATIC_CALL if PPC32
select HAVE_SYSCALL_TRACEPOINTS
select HAVE_VIRT_CPU_ACCOUNTING
- select HAVE_IRQ_TIME_ACCOUNTING
- select HAVE_RSEQ
+ select HUGETLB_PAGE_SIZE_VARIABLE if PPC_BOOK3S_64 && HUGETLB_PAGE
select IOMMU_HELPER if PPC64
select IRQ_DOMAIN
select IRQ_FORCED_THREADING
+ select KASAN_VMALLOC if KASAN && MODULES
+ select MMU_GATHER_PAGE_SIZE
+ select MMU_GATHER_RCU_TABLE_FREE
+ select MMU_GATHER_MERGE_VMAS
select MODULES_USE_ELF_RELA
select NEED_DMA_MAP_STATE if PPC64 || NOT_COHERENT_CACHE
+ select NEED_PER_CPU_EMBED_FIRST_CHUNK if PPC64
+ select NEED_PER_CPU_PAGE_FIRST_CHUNK if PPC64
select NEED_SG_DMA_LENGTH
select OF
select OF_DMA_DEFAULT_COHERENT if !NOT_COHERENT_CACHE
@@ -239,21 +272,27 @@ config PPC
select OLD_SIGACTION if PPC32
select OLD_SIGSUSPEND
select PCI_DOMAINS if PCI
+ select PCI_MSI_ARCH_FALLBACKS if PCI_MSI
select PCI_SYSCALL if PCI
select PPC_DAWR if PPC64
select RTC_LIB
select SPARSE_IRQ
+ select STRICT_KERNEL_RWX if STRICT_MODULE_RWX
select SYSCTL_EXCEPTION_TRACE
select THREAD_INFO_IN_TASK
- select VIRT_TO_BUS if !PPC64
+ select TRACE_IRQFLAGS_SUPPORT
#
# Please keep this list sorted alphabetically.
#
+config PPC_LONG_DOUBLE_128
+ depends on PPC64 && ALTIVEC
+ def_bool $(success,test "$(shell,echo __LONG_DOUBLE_128__ | $(CC) -E -P -)" = 1)
+
config PPC_BARRIER_NOSPEC
bool
default y
- depends on PPC_BOOK3S_64 || PPC_FSL_BOOK3E
+ depends on PPC_BOOK3S_64 || PPC_E500
config EARLY_PRINTK
bool
@@ -264,17 +303,13 @@ config PANIC_TIMEOUT
default 180
config COMPAT
- bool
- default y if PPC64
- select COMPAT_BINFMT_ELF
+ bool "Enable support for 32bit binaries"
+ depends on PPC64
+ depends on !CC_IS_CLANG || CLANG_VERSION >= 120000
+ default y if !CPU_LITTLE_ENDIAN
select ARCH_WANT_OLD_COMPAT_IPC
select COMPAT_OLD_SIGACTION
-config SYSVIPC_COMPAT
- bool
- depends on COMPAT && SYSVIPC
- default y
-
config SCHED_OMIT_FRAME_POINTER
bool
default y
@@ -299,6 +334,10 @@ config GENERIC_BUG
default y
depends on BUG
+config GENERIC_BUG_RELATIVE_POINTERS
+ def_bool y
+ depends on GENERIC_BUG
+
config SYS_SUPPORTS_APM_EMULATION
default y if PMAC_APM_EMU
bool
@@ -327,6 +366,10 @@ config ARCH_SUSPEND_NONZERO_CPU
def_bool y
depends on PPC_POWERNV || PPC_PSERIES
+config ARCH_HAS_ADD_PAGES
+ def_bool y
+ depends on ARCH_ENABLE_MEMORY_HOTPLUG
+
config PPC_DCR_NATIVE
bool
@@ -338,15 +381,22 @@ config PPC_DCR
depends on PPC_DCR_NATIVE || PPC_DCR_MMIO
default y
+config PPC_PCI_BUS_NUM_DOMAIN_DEPENDENT
+ depends on PPC32
+ depends on !PPC_PMAC && !PPC_CHRP
+ bool "Assign PCI bus numbers from zero individually for each PCI domain"
+ help
+	  By default on PPC32, PCI bus numbers are unique across all PCI
+	  domains, so the system can have at most 256 PCI buses no matter how
+	  many PCI domains are available. When this option is enabled, PCI bus
+	  numbers become PCI domain dependent and each PCI controller in its
+	  own domain can have 256 PCI buses, as on other Linux architectures.
+
config PPC_OF_PLATFORM_PCI
bool
depends on PCI
depends on PPC64 # not supported on 32 bits yet
-config ARCH_SUPPORTS_DEBUG_PAGEALLOC
- depends on PPC32 || PPC_BOOK3S_64
- def_bool y
-
config ARCH_SUPPORTS_UPROBES
def_bool y
@@ -380,10 +430,6 @@ config PPC_ADV_DEBUG_DAC_RANGE
config PPC_DAWR
bool
-config ZONE_DMA
- bool
- default y if PPC_BOOK3E_64
-
config PGTABLE_LEVELS
int
default 2 if !PPC64
@@ -397,17 +443,14 @@ menu "Kernel options"
config HIGHMEM
bool "High memory support"
depends on PPC32
+ select KMAP_LOCAL
source "kernel/Kconfig.hz"
-config HUGETLB_PAGE_SIZE_VARIABLE
- bool
- depends on HUGETLB_PAGE && PPC_BOOK3S_64
- default y
-
config MATH_EMULATION
bool "Math emulation"
- depends on 4xx || PPC_8xx || PPC_MPC832x || BOOKE
+ depends on 4xx || PPC_8xx || PPC_MPC832x || BOOKE || PPC_MICROWATT
+ select PPC_FPU_REGS
help
Some PowerPC chips designed for embedded applications do not have
a floating-point unit and therefore do not implement the
@@ -425,7 +468,7 @@ choice
default MATH_EMULATION_FULL
depends on MATH_EMULATION
-config MATH_EMULATION_FULL
+config MATH_EMULATION_FULL
bool "Emulate all the floating point instructions"
help
Select this option will enable the kernel to support to emulate
@@ -473,7 +516,7 @@ config LD_HEAD_STUB_CATCH
If unsure, say "N".
config MPROFILE_KERNEL
- depends on PPC64 && CPU_LITTLE_ENDIAN
+ depends on PPC64 && CPU_LITTLE_ENDIAN && FUNCTION_TRACER
def_bool $(success,$(srctree)/arch/powerpc/tools/gcc-check-mprofile-kernel.sh $(CC) -I$(srctree)/include -D__KERNEL__)
config HOTPLUG_CPU
@@ -486,16 +529,19 @@ config HOTPLUG_CPU
Say N if you are unsure.
+config PPC_QUEUED_SPINLOCKS
+ bool "Queued spinlocks" if EXPERT
+ depends on SMP
+ default PPC_BOOK3S_64
+ help
+ Say Y here to use queued spinlocks which give better scalability and
+ fairness on large SMP and NUMA systems without harming single threaded
+ performance.
+
config ARCH_CPU_PROBE_RELEASE
def_bool y
depends on HOTPLUG_CPU
-config ARCH_ENABLE_MEMORY_HOTPLUG
- def_bool y
-
-config ARCH_ENABLE_MEMORY_HOTREMOVE
- def_bool y
-
config PPC64_SUPPORTS_MEMORY_FAILURE
bool "Add support for memory hwpoison"
depends on PPC_BOOK3S_64
@@ -504,7 +550,7 @@ config PPC64_SUPPORTS_MEMORY_FAILURE
config KEXEC
bool "kexec system call"
- depends on (PPC_BOOK3S || FSL_BOOKE || (44x && !SMP)) || PPC_BOOK3E
+ depends on PPC_BOOK3S || PPC_E500 || (44x && !SMP)
select KEXEC_CORE
help
kexec is a system call that implements the ability to shutdown your
@@ -523,8 +569,7 @@ config KEXEC
config KEXEC_FILE
bool "kexec file based system call"
select KEXEC_CORE
- select HAVE_IMA_KEXEC
- select BUILD_BIN2C
+ select HAVE_IMA_KEXEC if IMA
select KEXEC_ELF
depends on PPC64
depends on CRYPTO=y
@@ -540,9 +585,8 @@ config ARCH_HAS_KEXEC_PURGATORY
config RELOCATABLE
bool "Build a relocatable kernel"
- depends on PPC64 || (FLATMEM && (44x || FSL_BOOKE))
+ depends on PPC64 || (FLATMEM && (44x || PPC_85xx))
select NONSTATIC_KERNEL
- select MODULE_REL_CRCS if MODVERSIONS
help
This builds a kernel image that is capable of running at the
location the kernel is loaded at. For ppc32, there is no any
@@ -564,7 +608,7 @@ config RELOCATABLE
config RANDOMIZE_BASE
bool "Randomize the address of the kernel image"
- depends on (FSL_BOOKE && FLATMEM && PPC32)
+ depends on PPC_85xx && FLATMEM
depends on RELOCATABLE
help
Randomizes the virtual address at which the kernel image is
@@ -583,8 +627,8 @@ config RELOCATABLE_TEST
config CRASH_DUMP
bool "Build a dump capture kernel"
- depends on PPC64 || PPC_BOOK3S_32 || FSL_BOOKE || (44x && !SMP)
- select RELOCATABLE if PPC64 || 44x || FSL_BOOKE
+ depends on PPC64 || PPC_BOOK3S_32 || PPC_85xx || (44x && !SMP)
+ select RELOCATABLE if PPC64 || 44x || PPC_85xx
help
Build a kernel suitable for use as a dump capture kernel.
The same kernel binary can be used as production kernel and dump
@@ -634,18 +678,21 @@ config IRQ_ALL_CPUS
reported with SMP Power Macintoshes with this option enabled.
config NUMA
- bool "NUMA support"
- depends on PPC64
- default y if SMP && PPC_PSERIES
+ bool "NUMA Memory Allocation and Scheduler Support"
+ depends on PPC64 && SMP
+ default y if PPC_PSERIES || PPC_POWERNV
+ select USE_PERCPU_NUMA_NODE_ID
+ help
+ Enable NUMA (Non-Uniform Memory Access) support.
+
+ The kernel will try to allocate memory used by a CPU on the
+ local memory controller of the CPU and add some more
+ NUMA awareness to the kernel.
config NODES_SHIFT
int
default "8" if PPC64
default "4"
- depends on NEED_MULTIPLE_NODES
-
-config USE_PERCPU_NUMA_NODE_ID
- def_bool y
depends on NUMA
config HAVE_MEMORYLESS_NODES
@@ -669,9 +716,6 @@ config ARCH_SPARSEMEM_DEFAULT
def_bool y
depends on PPC_BOOK3S_64
-config SYS_SUPPORTS_HUGETLBFS
- bool
-
config ILLEGAL_POINTER_VALUE
hex
# This is roughly half way between the top of user space and the bottom
@@ -683,29 +727,9 @@ config ARCH_MEMORY_PROBE
def_bool y
depends on MEMORY_HOTPLUG
-# Some NUMA nodes have memory ranges that span
-# other nodes. Even though a pfn is valid and
-# between a node's start and end pfns, it may not
-# reside on that node. See memmap_init_zone()
-# for details.
-config NODES_SPAN_OTHER_NODES
- def_bool y
- depends on NEED_MULTIPLE_NODES
-
-config STDBINUTILS
- bool "Using standard binutils settings"
- depends on 44x
- default y
- help
- Turning this option off allows you to select 256KB PAGE_SIZE on 44x.
- Note, that kernel will be able to run only those applications,
- which had been compiled using binutils later than 2.17.50.0.3 with
- '-zmax-page-size' set to 256K (the default is 64K). Or, if using
- the older binutils, you can patch them with a trivial patch, which
- changes the ELF_MAXPAGESIZE definition from 0x10000 to 0x40000.
-
choice
prompt "Page size"
+ default PPC_64K_PAGES if PPC_BOOK3S_64
default PPC_4K_PAGES
help
Select the kernel logical page size. Increasing the page size
@@ -743,20 +767,34 @@ config PPC_64K_PAGES
select HAVE_ARCH_SOFT_DIRTY if PPC_BOOK3S_64
config PPC_256K_PAGES
- bool "256k page size"
- depends on 44x && !STDBINUTILS
+ bool "256k page size (Requires non-standard binutils settings)"
+ depends on 44x && !PPC_47x
help
Make the page size 256k.
- As the ELF standard only requires alignment to support page
- sizes up to 64k, you will need to compile all of your user
- space applications with a non-standard binutils settings
- (see the STDBINUTILS description for details).
-
- Say N unless you know what you are doing.
+ The kernel will only be able to run applications that have been
+ compiled with '-zmax-page-size' set to 256K (the default is 64K) using
+ binutils later than 2.17.50.0.3, or by patching the ELF_MAXPAGESIZE
+ definition from 0x10000 to 0x40000 in older versions.
endchoice
+config PAGE_SIZE_4KB
+ def_bool y
+ depends on PPC_4K_PAGES
+
+config PAGE_SIZE_16KB
+ def_bool y
+ depends on PPC_16K_PAGES
+
+config PAGE_SIZE_64KB
+ def_bool y
+ depends on PPC_64K_PAGES
+
+config PAGE_SIZE_256KB
+ def_bool y
+ depends on PPC_256K_PAGES
+
config PPC_PAGE_SHIFT
int
default 18 if PPC_256K_PAGES
@@ -774,36 +812,12 @@ config THREAD_SHIFT
Used to define the stack size. The default is almost always what you
want. Only change this if you know what you are doing.
-config ETEXT_SHIFT_BOOL
- bool "Set custom etext alignment" if STRICT_KERNEL_RWX && \
- (PPC_BOOK3S_32 || PPC_8xx)
- depends on ADVANCED_OPTIONS
- help
- This option allows you to set the kernel end of text alignment. When
- RAM is mapped by blocks, the alignment needs to fit the size and
- number of possible blocks. The default should be OK for most configs.
-
- Say N here unless you know what you are doing.
-
-config ETEXT_SHIFT
- int "_etext shift" if ETEXT_SHIFT_BOOL
- range 17 28 if STRICT_KERNEL_RWX && PPC_BOOK3S_32
- range 19 23 if STRICT_KERNEL_RWX && PPC_8xx
- default 17 if STRICT_KERNEL_RWX && PPC_BOOK3S_32
- default 19 if STRICT_KERNEL_RWX && PPC_8xx
- default PPC_PAGE_SHIFT
- help
- On Book3S 32 (603+), IBATs are used to map kernel text.
- Smaller is the alignment, greater is the number of necessary IBATs.
-
- On 8xx, large pages (512kb or 8M) are used to map kernel linear
- memory. Aligning to 8M reduces TLB misses as only 8M pages are used
- in that case.
-
config DATA_SHIFT_BOOL
- bool "Set custom data alignment" if STRICT_KERNEL_RWX && \
- (PPC_BOOK3S_32 || PPC_8xx)
+ bool "Set custom data alignment"
depends on ADVANCED_OPTIONS
+ depends on STRICT_KERNEL_RWX || DEBUG_PAGEALLOC || KFENCE
+ depends on PPC_BOOK3S_32 || (PPC_8xx && !PIN_TLB_DATA && !STRICT_KERNEL_RWX) || \
+ PPC_85xx
help
This option allows you to set the kernel data alignment. When
RAM is mapped by blocks, the alignment needs to fit the size and
@@ -814,10 +828,15 @@ config DATA_SHIFT_BOOL
config DATA_SHIFT
int "Data shift" if DATA_SHIFT_BOOL
default 24 if STRICT_KERNEL_RWX && PPC64
- range 17 28 if STRICT_KERNEL_RWX && PPC_BOOK3S_32
- range 19 23 if STRICT_KERNEL_RWX && PPC_8xx
+ range 17 28 if (STRICT_KERNEL_RWX || DEBUG_PAGEALLOC || KFENCE) && PPC_BOOK3S_32
+ range 19 23 if (STRICT_KERNEL_RWX || DEBUG_PAGEALLOC || KFENCE) && PPC_8xx
+ range 20 24 if (STRICT_KERNEL_RWX || DEBUG_PAGEALLOC || KFENCE) && PPC_85xx
default 22 if STRICT_KERNEL_RWX && PPC_BOOK3S_32
+ default 18 if (DEBUG_PAGEALLOC || KFENCE) && PPC_BOOK3S_32
default 23 if STRICT_KERNEL_RWX && PPC_8xx
+ default 23 if (DEBUG_PAGEALLOC || KFENCE) && PPC_8xx && PIN_TLB_DATA
+ default 19 if (DEBUG_PAGEALLOC || KFENCE) && PPC_8xx
+ default 24 if STRICT_KERNEL_RWX && PPC_85xx
default PPC_PAGE_SHIFT
help
On Book3S 32 (603+), DBATs are used to map kernel text and rodata RO.
@@ -825,9 +844,10 @@ config DATA_SHIFT
On 8xx, large pages (512kb or 8M) are used to map kernel linear
memory. Aligning to 8M reduces TLB misses as only 8M pages are used
- in that case.
+ in that case. If PIN_TLB is selected, it must be aligned to 8M as
+ 8M pages will be pinned.
-config FORCE_MAX_ZONEORDER
+config ARCH_FORCE_MAX_ORDER
int "Maximum zone order"
range 8 9 if PPC64 && PPC_64K_PAGES
default "9" if PPC64 && PPC_64K_PAGES
@@ -857,13 +877,28 @@ config FORCE_MAX_ZONEORDER
this in mind when choosing a value for this option.
config PPC_SUBPAGE_PROT
- bool "Support setting protections for 4k subpages"
- depends on PPC_BOOK3S_64 && PPC_64K_PAGES
+ bool "Support setting protections for 4k subpages (subpage_prot syscall)"
+ default n
+ depends on PPC_64S_HASH_MMU && PPC_64K_PAGES
help
- This option adds support for a system call to allow user programs
+	  This option adds support for a system call to allow user programs
to set access permissions (read/write, readonly, or no access)
on the 4k subpages of each 64k page.
+ If unsure, say N here.
+
+config PPC_PROT_SAO_LPAR
+ bool "Support PROT_SAO mappings in LPARs"
+ depends on PPC_BOOK3S_64
+ help
+ This option adds support for PROT_SAO mappings from userspace
+ inside LPARs on supported CPUs.
+
+ This may cause issues when performing guest migration from
+ a CPU that supports SAO to one that does not.
+
+ If unsure, say N here.
+
config PPC_COPRO_BASE
bool
@@ -883,12 +918,8 @@ config PPC_DENORMALISATION
Add support for handling denormalisation of single precision
values. Useful for bare metal only. If unsure say Y here.
-config CMDLINE_BOOL
- bool "Default bootloader kernel arguments"
-
config CMDLINE
- string "Initial kernel command string" if CMDLINE_BOOL
- default "console=ttyS0,9600 console=tty0 root=/dev/sda2" if CMDLINE_BOOL
+ string "Initial kernel command string"
default ""
help
On some platforms, there is currently no way for the boot loader to
@@ -942,27 +973,11 @@ config ARCH_WANTS_FREEZER_CONTROL
source "kernel/power/Kconfig"
-config SECCOMP
- bool "Enable seccomp to safely compute untrusted bytecode"
- depends on PROC_FS
- default y
- help
- This kernel feature is useful for number crunching applications
- that may need to compute untrusted bytecode during their
- execution. By using pipes or other transports made available to
- the process as file descriptors supporting the read/write
- syscalls, it's possible to isolate those applications in
- their own address space using seccomp. Once seccomp is
- enabled via /proc/<pid>/seccomp, it cannot be disabled
- and the task is only allowed to execute a few safe syscalls
- defined by each seccomp mode.
-
- If unsure, say Y. Only embedded should say N here.
-
config PPC_MEM_KEYS
prompt "PowerPC Memory Protection Keys"
def_bool y
depends on PPC_BOOK3S_64
+ depends on PPC_64S_HASH_MMU
select ARCH_USES_HIGH_VMA_FLAGS
select ARCH_HAS_PKEYS
help
@@ -977,8 +992,9 @@ config PPC_MEM_KEYS
config PPC_SECURE_BOOT
prompt "Enable secure boot support"
bool
- depends on PPC_POWERNV
+ depends on PPC_POWERNV || PPC_PSERIES
depends on IMA_ARCH_POLICY
+ imply IMA_SECURE_AND_OR_TRUSTED_BOOT
help
Systems with firmware secure boot enabled need to define security
policies to extend secure boot to the OS. This config allows a user
@@ -996,6 +1012,19 @@ config PPC_SECVAR_SYSFS
read/write operations on these variables. Say Y if you have
secure boot enabled and want to expose variables to userspace.
+config PPC_RTAS_FILTER
+ bool "Enable filtering of RTAS syscalls"
+ default y
+ depends on PPC_RTAS
+ help
+ The RTAS syscall API has security issues that could be used to
+ compromise system integrity. This option enforces restrictions on the
+ RTAS calls and arguments passed by userspace programs to mitigate
+ these issues.
+
+ Say Y unless you know what you are doing and the filter is causing
+ problems for you.
+
endmenu
config ISA_DMA_API
@@ -1123,7 +1152,7 @@ config LOWMEM_SIZE
config LOWMEM_CAM_NUM_BOOL
bool "Set number of CAMs to use to map low memory"
- depends on ADVANCED_OPTIONS && FSL_BOOKE
+ depends on ADVANCED_OPTIONS && PPC_85xx
help
This option allows you to set the maximum number of CAM slots that
will be used to map low memory. There are a limited number of slots
@@ -1134,13 +1163,16 @@ config LOWMEM_CAM_NUM_BOOL
Say N here unless you know what you are doing.
config LOWMEM_CAM_NUM
- depends on FSL_BOOKE
+ depends on PPC_85xx
int "Number of CAMs to use to map low memory" if LOWMEM_CAM_NUM_BOOL
- default 3
+ default 3 if !STRICT_KERNEL_RWX
+ default 9 if DATA_SHIFT >= 24
+ default 12 if DATA_SHIFT >= 22
+ default 15
config DYNAMIC_MEMSTART
bool "Enable page aligned dynamic load address for kernel"
- depends on ADVANCED_OPTIONS && FLATMEM && (FSL_BOOKE || 44x)
+ depends on ADVANCED_OPTIONS && FLATMEM && (PPC_85xx || 44x)
select NONSTATIC_KERNEL
help
This option enables the kernel to be loaded at any page aligned
@@ -1189,7 +1221,7 @@ config KERNEL_START
config PHYSICAL_START_BOOL
bool "Set physical address where the kernel is loaded"
- depends on ADVANCED_OPTIONS && FLATMEM && FSL_BOOKE
+ depends on ADVANCED_OPTIONS && FLATMEM && PPC_85xx
help
This gives the physical address where the kernel is loaded.
@@ -1202,7 +1234,7 @@ config PHYSICAL_START
config PHYSICAL_ALIGN
hex
- default "0x04000000" if FSL_BOOKE
+ default "0x04000000" if PPC_85xx
help
This value puts the alignment restrictions on physical address
where kernel is loaded and run from. Kernel is compiled for an
@@ -1221,27 +1253,8 @@ config TASK_SIZE_BOOL
config TASK_SIZE
hex "Size of user task space" if TASK_SIZE_BOOL
default "0x80000000" if PPC_8xx
+ default "0xb0000000" if PPC_BOOK3S_32
default "0xc0000000"
-
-config PIN_TLB
- bool "Pinned Kernel TLBs (860 ONLY)"
- depends on ADVANCED_OPTIONS && PPC_8xx && \
- !DEBUG_PAGEALLOC && !STRICT_KERNEL_RWX
-
-config PIN_TLB_DATA
- bool "Pinned TLB for DATA"
- depends on PIN_TLB
- default y
-
-config PIN_TLB_IMMR
- bool "Pinned TLB for IMMR"
- depends on PIN_TLB || PPC_EARLY_DEBUG_CPM
- default y
-
-config PIN_TLB_TEXT
- bool "Pinned TLB for TEXT"
- depends on PIN_TLB
- default y
endmenu
if PPC64
@@ -1257,9 +1270,6 @@ config PHYSICAL_START
default "0x00000000"
endif
-config ARCH_RANDOM
- def_bool n
-
config PPC_LIB_RHEAP
bool
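
Every bool symbol declared in the Kconfig changes above becomes a CONFIG_ variable once the configuration is written out, and the Makefile hunks later in this patch consume those variables to pick objects and flags. A rough sketch of that consuming side (the object file name is hypothetical, for illustration only):

# A y/n symbol decides whether an object is built at all
obj-$(CONFIG_PPC_QUEUED_SPINLOCKS) += qspinlock-para.o   # hypothetical file

# ...and the same variables gate flags inside a Makefile
ifdef CONFIG_PPC64
KBUILD_CFLAGS += $(call cc-option,-mcmodel=medium)
endif
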
diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
index 0b063830eea8..6aaf8dc60610 100644
--- a/arch/powerpc/Kconfig.debug
+++ b/arch/powerpc/Kconfig.debug
@@ -84,10 +84,16 @@ config MSI_BITMAP_SELFTEST
config PPC_IRQ_SOFT_MASK_DEBUG
bool "Include extra checks for powerpc irq soft masking"
+ depends on PPC64
+
+config PPC_RFI_SRR_DEBUG
+ bool "Include extra checks for RFI SRR register validity"
+ depends on PPC_BOOK3S_64
config XMON
bool "Include xmon kernel debugger"
depends on DEBUG_KERNEL
+ select CONSOLE_POLL if SERIAL_CPM_CONSOLE
help
Include in-kernel hooks for the xmon kernel monitor/debugger.
Unless you are intending to debug the kernel, say N here.
@@ -230,7 +236,7 @@ config PPC_EARLY_DEBUG_40x
help
Select this to enable early debugging for IBM 40x chips via the
inbuilt serial port. This works on chips with a 16550 compatible
- UART. Xilinx chips with uartlite cannot use this option.
+ UART.
config PPC_EARLY_DEBUG_CPM
bool "Early serial debugging for Freescale CPM-based serial ports"
@@ -277,6 +283,12 @@ config PPC_EARLY_DEBUG_MEMCONS
This console provides input and output buffers stored within the
kernel BSS and should be safe to select on any system. A debugger
can then be used to read kernel output or send input to the console.
+
+config PPC_EARLY_DEBUG_16550
+ bool "Serial 16550"
+ depends on PPC_UDBG_16550
+ help
+	  Select this to enable early debugging via a Serial 16550 console.
endchoice
config PPC_MEMCONS_OUTPUT_SIZE
@@ -299,7 +311,6 @@ config PPC_EARLY_DEBUG_OPAL
def_bool y
depends on PPC_EARLY_DEBUG_OPAL_RAW || PPC_EARLY_DEBUG_OPAL_HVSI
-
config PPC_EARLY_DEBUG_HVSI_VTERMNO
hex "vterm number to use with early debug HVSI"
depends on PPC_EARLY_DEBUG_LPAR_HVSI
@@ -349,45 +360,25 @@ config PPC_EARLY_DEBUG_CPM_ADDR
platform probing is done, all platforms selected must
share the same address.
+config PPC_EARLY_DEBUG_16550_PHYSADDR
+ hex "Early debug Serial 16550 physical address"
+ depends on PPC_EARLY_DEBUG_16550
+
+config PPC_EARLY_DEBUG_16550_STRIDE
+ int "Early debug Serial 16550 stride"
+ depends on PPC_EARLY_DEBUG_16550
+ default 1
+
config FAIL_IOMMU
bool "Fault-injection capability for IOMMU"
depends on FAULT_INJECTION
+ depends on PCI || IBMVIO
help
Provide fault-injection capability for IOMMU. Each device can
be selectively enabled via the fail_iommu property.
If you are unsure, say N.
-config PPC_PTDUMP
- bool "Export kernel pagetable layout to userspace via debugfs"
- depends on DEBUG_KERNEL && DEBUG_FS
- help
- This option exports the state of the kernel pagetables to a
- debugfs file. This is only useful for kernel developers who are
- working in architecture specific areas of the kernel - probably
- not a good idea to enable this feature in a production kernel.
-
- If you are unsure, say N.
-
-config PPC_DEBUG_WX
- bool "Warn on W+X mappings at boot"
- depends on PPC_PTDUMP && STRICT_KERNEL_RWX
- help
- Generate a warning if any W+X mappings are found at boot.
-
- This is useful for discovering cases where the kernel is leaving
- W+X mappings after applying NX, as such mappings are a security risk.
-
- Note that even if the check fails, your kernel is possibly
- still fine, as W+X mappings are not a security hole in
- themselves, what they do is that they make the exploitation
- of other unfixed kernel bugs easier.
-
- There is no runtime or memory usage effect of this option
- once the kernel has booted up - it's a one time check.
-
- If in doubt, say "Y".
-
config PPC_FAST_ENDIAN_SWITCH
bool "Deprecated fast endian-switch syscall"
depends on DEBUG_KERNEL && PPC_BOOK3S_64
@@ -397,4 +388,6 @@ config PPC_FAST_ENDIAN_SWITCH
config KASAN_SHADOW_OFFSET
hex
depends on KASAN
- default 0xe0000000
+ default 0xe0000000 if PPC32
+ default 0xa80e000000000000 if PPC_BOOK3S_64
+ default 0xa8001c0000000000 if PPC_BOOK3E_64
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index f35730548e42..dc4cbf0a5ca9 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -1,7 +1,5 @@
# This file is included by the global makefile so that you can add your own
-# architecture-specific flags and dependencies. Remember to do have actions
-# for "archclean" and "archdep" for cleaning up and making dependencies for
-# this architecture.
+# architecture-specific flags and dependencies.
#
# This file is subject to the terms and conditions of the GNU General Public
# License. See the file "COPYING" in the main directory of this archive
@@ -17,23 +15,6 @@ HAS_BIARCH := $(call cc-option-yn, -m32)
# Set default 32 bits cross compilers for vdso and boot wrapper
CROSS32_COMPILE ?=
-ifeq ($(HAS_BIARCH),y)
-ifeq ($(CROSS32_COMPILE),)
-ifdef CONFIG_PPC32
-# These options will be overridden by any -mcpu option that the CPU
-# or platform code sets later on the command line, but they are needed
-# to set a sane 32-bit cpu target for the 64-bit cross compiler which
-# may default to the wrong ISA.
-KBUILD_CFLAGS += -mcpu=powerpc
-KBUILD_AFLAGS += -mcpu=powerpc
-endif
-endif
-endif
-
-ifdef CONFIG_PPC_BOOK3S_32
-KBUILD_CFLAGS += -mcpu=powerpc
-endif
-
# If we're on a ppc/ppc64/ppc64le machine use that defconfig, otherwise just use
# ppc64_defconfig because we have nothing better to go on.
uname := $(shell uname -m)
@@ -65,8 +46,7 @@ UTS_MACHINE := $(subst $(space),,$(machine-y))
ifdef CONFIG_PPC32
KBUILD_LDFLAGS_MODULE += arch/powerpc/lib/crtsavres.o
else
-KBUILD_LDS_MODULE += $(srctree)/arch/powerpc/kernel/module.lds
-ifeq ($(call ld-ifversion, -ge, 225000000, y),y)
+ifeq ($(call ld-ifversion, -ge, 22500, y),y)
# Have the linker provide sfpr if possible.
# There is a corresponding test in arch/powerpc/lib/Makefile
KBUILD_LDFLAGS_MODULE += --save-restore-funcs
@@ -92,10 +72,10 @@ endif
ifdef CONFIG_PPC64
ifndef CONFIG_CC_IS_CLANG
-cflags-$(CONFIG_CPU_BIG_ENDIAN) += $(call cc-option,-mabi=elfv1)
-cflags-$(CONFIG_CPU_BIG_ENDIAN) += $(call cc-option,-mcall-aixdesc)
-aflags-$(CONFIG_CPU_BIG_ENDIAN) += $(call cc-option,-mabi=elfv1)
-aflags-$(CONFIG_CPU_LITTLE_ENDIAN) += -mabi=elfv2
+cflags-$(CONFIG_PPC64_ELF_ABI_V1) += $(call cc-option,-mabi=elfv1)
+cflags-$(CONFIG_PPC64_ELF_ABI_V1) += $(call cc-option,-mcall-aixdesc)
+aflags-$(CONFIG_PPC64_ELF_ABI_V1) += $(call cc-option,-mabi=elfv1)
+aflags-$(CONFIG_PPC64_ELF_ABI_V2) += -mabi=elfv2
endif
endif
@@ -123,8 +103,8 @@ endif
LDFLAGS_vmlinux-y := -Bstatic
LDFLAGS_vmlinux-$(CONFIG_RELOCATABLE) := -pie
+LDFLAGS_vmlinux-$(CONFIG_RELOCATABLE) += -z notext
LDFLAGS_vmlinux := $(LDFLAGS_vmlinux-y)
-LDFLAGS_vmlinux += $(call ld-option,--orphan-handling=warn)
ifdef CONFIG_PPC64
ifeq ($(call cc-option-yn,-mcmodel=medium),y)
@@ -144,7 +124,7 @@ endif
CFLAGS-$(CONFIG_PPC64) := $(call cc-option,-mtraceback=no)
ifndef CONFIG_CC_IS_CLANG
-ifdef CONFIG_CPU_LITTLE_ENDIAN
+ifdef CONFIG_PPC64_ELF_ABI_V2
CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mabi=elfv2,$(call cc-option,-mcall-aixdesc))
AFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mabi=elfv2)
else
@@ -169,12 +149,13 @@ CFLAGS-$(CONFIG_PPC32) += $(call cc-option,-mno-readonly-in-sdata)
ifdef CONFIG_PPC_BOOK3S_64
ifdef CONFIG_CPU_LITTLE_ENDIAN
CFLAGS-$(CONFIG_GENERIC_CPU) += -mcpu=power8
-CFLAGS-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mtune=power9,-mtune=power8)
else
-CFLAGS-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mtune=power7,$(call cc-option,-mtune=power5))
-CFLAGS-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mcpu=power5,-mcpu=power4)
+CFLAGS-$(CONFIG_GENERIC_CPU) += -mcpu=power4
endif
-else
+CFLAGS-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mtune=power10, \
+ $(call cc-option,-mtune=power9, \
+ $(call cc-option,-mtune=power8)))
+else ifdef CONFIG_PPC_BOOK3E_64
CFLAGS-$(CONFIG_GENERIC_CPU) += -mcpu=powerpc64
endif
@@ -183,33 +164,14 @@ CC_FLAGS_FTRACE := -pg
ifdef CONFIG_MPROFILE_KERNEL
CC_FLAGS_FTRACE += -mprofile-kernel
endif
-# Work around gcc code-gen bugs with -pg / -fno-omit-frame-pointer in gcc <= 4.8
-# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=44199
-# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=52828
-ifndef CONFIG_CC_IS_CLANG
-CC_FLAGS_FTRACE += $(call cc-ifversion, -lt, 0409, -mno-sched-epilog)
-endif
endif
CFLAGS-$(CONFIG_TARGET_CPU_BOOL) += $(call cc-option,-mcpu=$(CONFIG_TARGET_CPU))
+AFLAGS-$(CONFIG_TARGET_CPU_BOOL) += $(call cc-option,-mcpu=$(CONFIG_TARGET_CPU))
-# Altivec option not allowed with e500mc64 in GCC.
-ifdef CONFIG_ALTIVEC
-E5500_CPU := -mcpu=powerpc64
-else
-E5500_CPU := $(call cc-option,-mcpu=e500mc64,-mcpu=powerpc64)
-endif
-CFLAGS-$(CONFIG_E5500_CPU) += $(E5500_CPU)
+CFLAGS-$(CONFIG_E5500_CPU) += $(call cc-option,-mcpu=e500mc64,-mcpu=powerpc64)
CFLAGS-$(CONFIG_E6500_CPU) += $(call cc-option,-mcpu=e6500,$(E5500_CPU))
-ifdef CONFIG_PPC32
-ifdef CONFIG_PPC_E500MC
-CFLAGS-y += $(call cc-option,-mcpu=e500mc,-mcpu=powerpc)
-else
-CFLAGS-$(CONFIG_E500) += $(call cc-option,-mcpu=8540 -msoft-float,-mcpu=powerpc)
-endif
-endif
-
asinstr := $(call as-instr,lis 9$(comma)foo@high,-DHAVE_AS_ATHIGH=1)
KBUILD_CPPFLAGS += -I $(srctree)/arch/$(ARCH) $(asinstr)
@@ -230,33 +192,38 @@ ifdef CONFIG_476FPE_ERR46
-T $(srctree)/arch/powerpc/platforms/44x/ppc476_modules.lds
endif
-# No AltiVec or VSX instructions when building kernel
+# No prefix or pcrel
+KBUILD_CFLAGS += $(call cc-option,-mno-prefixed)
+KBUILD_CFLAGS += $(call cc-option,-mno-pcrel)
+
+# No AltiVec or VSX or MMA instructions when building kernel
KBUILD_CFLAGS += $(call cc-option,-mno-altivec)
KBUILD_CFLAGS += $(call cc-option,-mno-vsx)
+KBUILD_CFLAGS += $(call cc-option,-mno-mma)
# No SPE instruction when building kernel
# (We use all available options to help semi-broken compilers)
KBUILD_CFLAGS += $(call cc-option,-mno-spe)
KBUILD_CFLAGS += $(call cc-option,-mspe=no)
-# FIXME: the module load should be taught about the additional relocs
-# generated by this.
-# revert to pre-gcc-4.4 behaviour of .eh_frame
-KBUILD_CFLAGS += $(call cc-option,-fno-dwarf2-cfi-asm)
+# Don't emit .eh_frame since we have no use for it
+KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
# Never use string load/store instructions as they are
# often slow when they are implemented at all
KBUILD_CFLAGS += $(call cc-option,-mno-string)
-cpu-as-$(CONFIG_4xx) += -Wa,-m405
+cpu-as-$(CONFIG_40x) += -Wa,-m405
+cpu-as-$(CONFIG_44x) += -Wa,-m440
cpu-as-$(CONFIG_ALTIVEC) += $(call as-option,-Wa$(comma)-maltivec)
-cpu-as-$(CONFIG_E200) += -Wa,-me200
-cpu-as-$(CONFIG_E500) += -Wa,-me500
+cpu-as-$(CONFIG_PPC_E500) += -Wa,-me500
# When using '-many -mpower4' gas will first try and find a matching power4
# mnemonic and failing that it will allow any valid mnemonic that GAS knows
# about. GCC will pass -many to GAS when assembling, clang does not.
-cpu-as-$(CONFIG_PPC_BOOK3S_64) += -Wa,-mpower4 -Wa,-many
+# LLVM IAS doesn't understand either flag: https://github.com/ClangBuiltLinux/linux/issues/675
+# but LLVM IAS only supports ISA >= 2.06 for Book3S 64 anyway...
+cpu-as-$(CONFIG_PPC_BOOK3S_64) += $(call as-option,-Wa$(comma)-mpower4) $(call as-option,-Wa$(comma)-many)
cpu-as-$(CONFIG_PPC_E500MC) += $(call as-option,-Wa$(comma)-me500mc)
KBUILD_AFLAGS += $(cpu-as-y)
@@ -265,22 +232,6 @@ KBUILD_CFLAGS += $(cpu-as-y)
KBUILD_AFLAGS += $(aflags-y)
KBUILD_CFLAGS += $(cflags-y)
-head-y := arch/powerpc/kernel/head_$(BITS).o
-head-$(CONFIG_PPC_8xx) := arch/powerpc/kernel/head_8xx.o
-head-$(CONFIG_40x) := arch/powerpc/kernel/head_40x.o
-head-$(CONFIG_44x) := arch/powerpc/kernel/head_44x.o
-head-$(CONFIG_FSL_BOOKE) := arch/powerpc/kernel/head_fsl_booke.o
-
-head-$(CONFIG_PPC64) += arch/powerpc/kernel/entry_64.o
-head-$(CONFIG_PPC_FPU) += arch/powerpc/kernel/fpu.o
-head-$(CONFIG_ALTIVEC) += arch/powerpc/kernel/vector.o
-head-$(CONFIG_PPC_OF_BOOT_TRAMPOLINE) += arch/powerpc/kernel/prom_init.o
-
-# See arch/powerpc/Kbuild for content of core part of the kernel
-core-y += arch/powerpc/
-
-drivers-$(CONFIG_OPROFILE) += arch/powerpc/oprofile/
-
# Default to zImage, override when needed
all: zImage
@@ -298,6 +249,7 @@ $(BOOT_TARGETS2): vmlinux
$(Q)$(MAKE) $(build)=$(boot) $(patsubst %,$(boot)/%,$@)
+PHONY += bootwrapper_install
bootwrapper_install:
$(Q)$(MAKE) $(build)=$(boot) $(patsubst %,$(boot)/%,$@)
@@ -376,11 +328,26 @@ ppc64le_allmodconfig:
$(Q)$(MAKE) KCONFIG_ALLCONFIG=$(srctree)/arch/powerpc/configs/le.config \
-f $(srctree)/Makefile allmodconfig
+PHONY += ppc64le_allnoconfig
+ppc64le_allnoconfig:
+ $(Q)$(MAKE) KCONFIG_ALLCONFIG=$(srctree)/arch/powerpc/configs/ppc64le.config \
+ -f $(srctree)/Makefile allnoconfig
+
PHONY += ppc64_book3e_allmodconfig
ppc64_book3e_allmodconfig:
$(Q)$(MAKE) KCONFIG_ALLCONFIG=$(srctree)/arch/powerpc/configs/85xx-64bit.config \
-f $(srctree)/Makefile allmodconfig
+PHONY += ppc32_randconfig
+ppc32_randconfig:
+ $(Q)$(MAKE) KCONFIG_ALLCONFIG=$(srctree)/arch/powerpc/configs/32-bit.config \
+ -f $(srctree)/Makefile randconfig
+
+PHONY += ppc64_randconfig
+ppc64_randconfig:
+ $(Q)$(MAKE) KCONFIG_ALLCONFIG=$(srctree)/arch/powerpc/configs/64-bit.config \
+ -f $(srctree)/Makefile randconfig
+
define archhelp
@echo '* zImage - Build default images selected by kernel config'
@echo ' zImage.* - Compressed kernel image (arch/$(ARCH)/boot/zImage.*)'
@@ -403,20 +370,25 @@ define archhelp
@echo ' (minus the .dts extension).'
endef
+PHONY += install
install:
- $(Q)$(MAKE) $(build)=$(boot) install
-
-vdso_install:
-ifdef CONFIG_PPC64
- $(Q)$(MAKE) $(build)=arch/$(ARCH)/kernel/vdso64 $@
-endif
-ifdef CONFIG_VDSO32
- $(Q)$(MAKE) $(build)=arch/$(ARCH)/kernel/vdso32 $@
+ $(call cmd,install)
+
+ifeq ($(KBUILD_EXTMOD),)
+# We need to generate vdso-offsets.h before compiling certain files in kernel/.
+# In order to do that, we should use the archprepare target, but we can't since
+# asm-offsets.h is included in some files used to generate vdso-offsets.h, and
+# asm-offsets.h is built in prepare0, for which archprepare is a dependency.
+# Therefore we need to generate the header after prepare0 has been made, hence
+# this hack.
+prepare: vdso_prepare
+vdso_prepare: prepare0
+ $(if $(CONFIG_VDSO32),$(Q)$(MAKE) \
+ $(build)=arch/powerpc/kernel/vdso include/generated/vdso32-offsets.h)
+ $(if $(CONFIG_PPC64),$(Q)$(MAKE) \
+ $(build)=arch/powerpc/kernel/vdso include/generated/vdso64-offsets.h)
endif
-archclean:
- $(Q)$(MAKE) $(clean)=$(boot)
-
archprepare: checkbin
archheaders:
@@ -425,6 +397,7 @@ archheaders:
ifdef CONFIG_STACKPROTECTOR
prepare: stack_protector_prepare
+PHONY += stack_protector_prepare
stack_protector_prepare: prepare0
ifdef CONFIG_PPC64
$(eval KBUILD_CFLAGS += -mstack-protector-guard-offset=$(shell awk '{if ($$2 == "PACA_CANARY") print $$3;}' include/generated/asm-offsets.h))
@@ -433,20 +406,15 @@ else
endif
endif
-ifdef CONFIG_SMP
-prepare: task_cpu_prepare
-
-task_cpu_prepare: prepare0
- $(eval KBUILD_CFLAGS += -D_TASK_CPU=$(shell awk '{if ($$2 == "TASK_CPU") print $$3;}' include/generated/asm-offsets.h))
-endif
-
+PHONY += checkbin
# Check toolchain versions:
# - gcc-4.6 is the minimum kernel-wide version so nothing required.
checkbin:
- @if test "x${CONFIG_CPU_LITTLE_ENDIAN}" = "xy" \
- && $(LD) --version | head -1 | grep ' 2\.24$$' >/dev/null ; then \
+ @if test "x${CONFIG_LD_IS_LLD}" != "xy" -a \
+ "x$(call ld-ifversion, -le, 22400, y)" = "xy" ; then \
echo -n '*** binutils 2.24 miscompiles weak symbols ' ; \
echo 'in some circumstances.' ; \
+		echo '*** binutils 2.23 does not define the TOC symbol ' ; \
echo -n '*** Please use a different binutils version.' ; \
false ; \
fi
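
Most of the flag changes in this Makefile rely on kbuild's probing helpers rather than hard-coding what the toolchain supports: cc-option tries a flag against $(CC) and either drops it or substitutes an optional fallback, and as-option does the same for assembler flags. A short sketch of the pattern, reusing flags that appear in the hunks above:

# Silently dropped when the compiler does not understand the flag
KBUILD_CFLAGS += $(call cc-option,-mno-prefixed)
KBUILD_CFLAGS += $(call cc-option,-mno-mma)

# First choice with an explicit fallback flag
CFLAGS-$(CONFIG_E5500_CPU) += $(call cc-option,-mcpu=e500mc64,-mcpu=powerpc64)

# Equivalent probing for the assembler
cpu-as-$(CONFIG_ALTIVEC) += $(call as-option,-Wa$(comma)-maltivec)
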
diff --git a/arch/powerpc/Makefile.postlink b/arch/powerpc/Makefile.postlink
index 2268396ff4bb..a6c77f4d32b2 100644
--- a/arch/powerpc/Makefile.postlink
+++ b/arch/powerpc/Makefile.postlink
@@ -18,7 +18,7 @@ quiet_cmd_relocs_check = CHKREL $@
ifdef CONFIG_PPC_BOOK3S_64
cmd_relocs_check = \
$(CONFIG_SHELL) $(srctree)/arch/powerpc/tools/relocs_check.sh "$(OBJDUMP)" "$(NM)" "$@" ; \
- $(BASH) $(srctree)/arch/powerpc/tools/unrel_branch_check.sh "$(OBJDUMP)" "$@"
+ $(BASH) $(srctree)/arch/powerpc/tools/unrel_branch_check.sh "$(OBJDUMP)" "$(NM)" "$@"
else
cmd_relocs_check = \
$(CONFIG_SHELL) $(srctree)/arch/powerpc/tools/relocs_check.sh "$(OBJDUMP)" "$(NM)" "$@"
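
The relocation check above follows the usual kbuild command convention: quiet_cmd_<name> is the short line echoed during the build, cmd_<name> is the command actually executed, and a rule triggers it with $(call cmd,<name>). A stripped-down sketch of how such a postlink check is wired up (simplified; the argument list is taken from the hunk above):

quiet_cmd_relocs_check = CHKREL  $@
      cmd_relocs_check = $(CONFIG_SHELL) $(srctree)/arch/powerpc/tools/relocs_check.sh "$(OBJDUMP)" "$(NM)" "$@"

vmlinux: FORCE
	$(call cmd,relocs_check)
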
diff --git a/arch/powerpc/boot/.gitignore b/arch/powerpc/boot/.gitignore
index 6610665fcf5e..a4716d138cfc 100644
--- a/arch/powerpc/boot/.gitignore
+++ b/arch/powerpc/boot/.gitignore
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
addnote
decompress_inflate.c
empty.c
@@ -15,6 +16,7 @@ kernel-vmlinux.strip.c
kernel-vmlinux.strip.gz
mktree
otheros.bld
+otheros-too-big.bld
uImage
cuImage.*
dtbImage.*
diff --git a/arch/powerpc/boot/44x.h b/arch/powerpc/boot/44x.h
index 02563443788a..9b15e59522d6 100644
--- a/arch/powerpc/boot/44x.h
+++ b/arch/powerpc/boot/44x.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* PowerPC 44x related functions
*
* Copyright 2007 David Gibson, IBM Corporation.
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#ifndef _PPC_BOOT_44X_H_
#define _PPC_BOOT_44X_H_
diff --git a/arch/powerpc/boot/4xx.h b/arch/powerpc/boot/4xx.h
index 7dc5d45361bc..77f15d124c81 100644
--- a/arch/powerpc/boot/4xx.h
+++ b/arch/powerpc/boot/4xx.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* PowerPC 4xx related functions
*
* Copyright 2007 IBM Corporation.
* Josh Boyer <jwboyer@linux.vnet.ibm.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#ifndef _POWERPC_BOOT_4XX_H_
#define _POWERPC_BOOT_4XX_H_
diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
index 0556bf4fc9e9..d32d95aea5d6 100644
--- a/arch/powerpc/boot/Makefile
+++ b/arch/powerpc/boot/Makefile
@@ -7,7 +7,7 @@
# Based on coffboot by Paul Mackerras
# Simplified for ppc64 by Todd Inglett
#
-# NOTE: this code is built for 32 bit in ELF32 format even though
+# NOTE: this code may be built for 32 bit in ELF32 format even though
# it packages a 64 bit kernel. We do this to simplify the
# bootloader and increase compatibility with OpenFirmware.
#
@@ -21,7 +21,11 @@
all: $(obj)/zImage
ifdef CROSS32_COMPILE
+ifdef CONFIG_CC_IS_CLANG
+ BOOTCC := $(CROSS32_COMPILE)clang
+else
BOOTCC := $(CROSS32_COMPILE)gcc
+endif
BOOTAR := $(CROSS32_COMPILE)ar
else
BOOTCC := $(CC)
@@ -30,13 +34,18 @@ endif
BOOTCFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
-fno-strict-aliasing -O2 -msoft-float -mno-altivec -mno-vsx \
+ $(call cc-option,-mno-spe) $(call cc-option,-mspe=no) \
-pipe -fomit-frame-pointer -fno-builtin -fPIC -nostdinc \
$(LINUXINCLUDE)
ifdef CONFIG_PPC64_BOOT_WRAPPER
-BOOTCFLAGS += -m64
+ifdef CONFIG_CPU_LITTLE_ENDIAN
+BOOTCFLAGS += -m64 -mcpu=powerpc64le
+else
+BOOTCFLAGS += -m64 -mcpu=powerpc64
+endif
else
-BOOTCFLAGS += -m32
+BOOTCFLAGS += -m32 -mcpu=powerpc
endif
BOOTCFLAGS += -isystem $(shell $(BOOTCC) -print-file-name=include)
@@ -45,6 +54,8 @@ ifdef CONFIG_CPU_BIG_ENDIAN
BOOTCFLAGS += -mbig-endian
else
BOOTCFLAGS += -mlittle-endian
+endif
+ifdef CONFIG_PPC64_ELF_ABI_V2
BOOTCFLAGS += $(call cc-option,-mabi=elfv2)
endif
@@ -65,21 +76,20 @@ ifeq ($(call cc-option-yn, -fstack-protector),y)
BOOTCFLAGS += -fno-stack-protector
endif
+BOOTCFLAGS += -include $(srctree)/include/linux/compiler_attributes.h
BOOTCFLAGS += -I$(objtree)/$(obj) -I$(srctree)/$(obj)
DTC_FLAGS ?= -p 1024
$(obj)/4xx.o: BOOTCFLAGS += -mcpu=405
-$(obj)/ebony.o: BOOTCFLAGS += -mcpu=405
+$(obj)/ebony.o: BOOTCFLAGS += -mcpu=440
$(obj)/cuboot-hotfoot.o: BOOTCFLAGS += -mcpu=405
-$(obj)/cuboot-taishan.o: BOOTCFLAGS += -mcpu=405
-$(obj)/cuboot-katmai.o: BOOTCFLAGS += -mcpu=405
+$(obj)/cuboot-taishan.o: BOOTCFLAGS += -mcpu=440
+$(obj)/cuboot-katmai.o: BOOTCFLAGS += -mcpu=440
$(obj)/cuboot-acadia.o: BOOTCFLAGS += -mcpu=405
-$(obj)/treeboot-walnut.o: BOOTCFLAGS += -mcpu=405
$(obj)/treeboot-iss4xx.o: BOOTCFLAGS += -mcpu=405
$(obj)/treeboot-currituck.o: BOOTCFLAGS += -mcpu=405
$(obj)/treeboot-akebono.o: BOOTCFLAGS += -mcpu=405
-$(obj)/virtex405-head.o: BOOTAFLAGS += -mcpu=405
# The pre-boot decompressors pull in a lot of kernel headers and other source
# files. This creates a bit of a dependency headache since we need to copy
@@ -119,8 +129,8 @@ src-wlib-y := string.S crt0.S stdio.c decompress.c main.c \
elf_util.c $(zlib-y) devtree.c stdlib.c \
oflib.c ofconsole.c cuboot.c
-src-wlib-$(CONFIG_PPC_MPC52XX) += mpc52xx-psc.c
-src-wlib-$(CONFIG_PPC64_BOOT_WRAPPER) += opal-calls.S opal.c
+src-wlib-$(CONFIG_PPC_MPC52xx) += mpc52xx-psc.c
+src-wlib-$(CONFIG_PPC_POWERNV) += opal-calls.S opal.c
ifndef CONFIG_PPC64_BOOT_WRAPPER
src-wlib-y += crtsavres.S
endif
@@ -129,14 +139,12 @@ src-wlib-$(CONFIG_44x) += 4xx.c ebony.c bamboo.c
src-wlib-$(CONFIG_PPC_8xx) += mpc8xx.c planetcore.c fsl-soc.c
src-wlib-$(CONFIG_PPC_82xx) += pq2.c fsl-soc.c planetcore.c
src-wlib-$(CONFIG_EMBEDDED6xx) += ugecon.c fsl-soc.c
-src-wlib-$(CONFIG_XILINX_VIRTEX) += uartlite.c
src-wlib-$(CONFIG_CPM) += cpm-serial.c
src-plat-y := of.c epapr.c
-src-plat-$(CONFIG_40x) += fixed-head.S ep405.c cuboot-hotfoot.c \
- treeboot-walnut.c cuboot-acadia.c \
- cuboot-kilauea.c simpleboot.c \
- virtex405-head.S virtex.c
+src-plat-$(CONFIG_40x) += fixed-head.S cuboot-hotfoot.c \
+ cuboot-acadia.c \
+ cuboot-kilauea.c simpleboot.c
src-plat-$(CONFIG_44x) += treeboot-ebony.c cuboot-ebony.c treeboot-bamboo.c \
cuboot-bamboo.c cuboot-sam440ep.c \
cuboot-sequoia.c cuboot-rainier.c \
@@ -144,7 +152,7 @@ src-plat-$(CONFIG_44x) += treeboot-ebony.c cuboot-ebony.c treeboot-bamboo.c \
cuboot-warp.c cuboot-yosemite.c \
treeboot-iss4xx.c treeboot-currituck.c \
treeboot-akebono.c \
- simpleboot.c fixed-head.S virtex.c
+ simpleboot.c fixed-head.S
src-plat-$(CONFIG_PPC_8xx) += cuboot-8xx.c fixed-head.S ep88xc.c redboot-8xx.c
src-plat-$(CONFIG_PPC_MPC52xx) += cuboot-52xx.c
src-plat-$(CONFIG_PPC_82xx) += cuboot-pq2.c fixed-head.S ep8248e.c cuboot-824x.c
@@ -162,6 +170,8 @@ src-plat-$(CONFIG_PPC_POWERNV) += pseries-head.S
src-plat-$(CONFIG_PPC_IBM_CELL_BLADE) += pseries-head.S
src-plat-$(CONFIG_MVME7100) += motload-head.S mvme7100.c
+src-plat-$(CONFIG_PPC_MICROWATT) += fixed-head.S microwatt.c
+
src-wlib := $(sort $(src-wlib-y))
src-plat := $(sort $(src-plat-y))
src-boot := $(src-wlib) $(src-plat) empty.c
@@ -226,7 +236,7 @@ $(obj)/wrapper.a: $(obj-wlib) FORCE
hostprogs := addnote hack-coff mktree
-targets += $(patsubst $(obj)/%,%,$(obj-boot) wrapper.a)
+targets += $(patsubst $(obj)/%,%,$(obj-boot) wrapper.a) zImage.lds
extra-y := $(obj)/wrapper.a $(obj-plat) $(obj)/empty.o \
$(obj)/zImage.lds $(obj)/zImage.coff.lds $(obj)/zImage.ps3.lds
@@ -279,9 +289,7 @@ image-$(CONFIG_EPAPR_BOOT) += zImage.epapr
#
# Board ports in arch/powerpc/platform/40x/Kconfig
-image-$(CONFIG_EP405) += dtbImage.ep405
image-$(CONFIG_HOTFOOT) += cuImage.hotfoot
-image-$(CONFIG_WALNUT) += treeImage.walnut
image-$(CONFIG_ACADIA) += cuImage.acadia
image-$(CONFIG_OBS600) += uImage.obs600
@@ -340,7 +348,6 @@ image-$(CONFIG_TQM8541) += cuImage.tqm8541
image-$(CONFIG_TQM8548) += cuImage.tqm8548
image-$(CONFIG_TQM8555) += cuImage.tqm8555
image-$(CONFIG_TQM8560) += cuImage.tqm8560
-image-$(CONFIG_SBC8548) += cuImage.sbc8548
image-$(CONFIG_KSI8560) += cuImage.ksi8560
# Board ports in arch/powerpc/platform/86xx/Kconfig
@@ -356,6 +363,8 @@ image-$(CONFIG_MVME5100) += dtbImage.mvme5100
# Board port in arch/powerpc/platform/amigaone/Kconfig
image-$(CONFIG_AMIGAONE) += cuImage.amigaone
+image-$(CONFIG_PPC_MICROWATT) += dtbImage.microwatt
+
# For 32-bit powermacs, build the COFF and miboot images
# as well as the ELF images.
ifdef CONFIG_PPC32
@@ -363,7 +372,7 @@ image-$(CONFIG_PPC_PMAC) += zImage.coff zImage.miboot
endif
# Allow extra targets to be added to the defconfig
-image-y += $(subst ",,$(CONFIG_EXTRA_TARGETS))
+image-y += $(CONFIG_EXTRA_TARGETS)
initrd- := $(patsubst zImage%, zImage.initrd%, $(image-))
initrd-y := $(patsubst zImage%, zImage.initrd%, \
@@ -372,6 +381,10 @@ initrd-y := $(patsubst zImage%, zImage.initrd%, \
$(patsubst treeImage%, treeImage.initrd%, $(image-y)))))
initrd-y := $(filter-out $(image-y), $(initrd-y))
targets += $(image-y) $(initrd-y)
+targets += $(foreach x, dtbImage uImage cuImage simpleImage treeImage, \
+ $(patsubst $(x).%, dts/%.dtb, $(filter $(x).%, $(image-y))))
+targets += $(foreach x, dtbImage uImage cuImage simpleImage treeImage, \
+ $(patsubst $(x).%, dts/fsl/%.dtb, $(filter $(x).%, $(image-y))))
$(addprefix $(obj)/, $(initrd-y)): $(obj)/ramdisk.image.gz
@@ -437,14 +450,6 @@ $(obj)/zImage: $(addprefix $(obj)/, $(image-y))
$(obj)/zImage.initrd: $(addprefix $(obj)/, $(initrd-y))
$(Q)rm -f $@; ln $< $@
-# Only install the vmlinux
-install: $(CONFIGURE) $(addprefix $(obj)/, $(image-y))
- sh -x $(srctree)/$(src)/install.sh "$(KERNELRELEASE)" vmlinux System.map "$(INSTALL_PATH)"
-
-# Install the vmlinux and other built boot targets.
-zInstall: $(CONFIGURE) $(addprefix $(obj)/, $(image-y))
- sh -x $(srctree)/$(src)/install.sh "$(KERNELRELEASE)" vmlinux System.map "$(INSTALL_PATH)" $^
-
# anything not in $(targets)
clean-files += $(image-) $(initrd-) cuImage.* dtbImage.* treeImage.* \
zImage zImage.initrd zImage.chrp zImage.coff zImage.holly \
@@ -455,8 +460,8 @@ clean-files += $(image-) $(initrd-) cuImage.* dtbImage.* treeImage.* \
clean-kernel-base := vmlinux.strip vmlinux.bin
clean-kernel := $(addsuffix .gz,$(clean-kernel-base))
clean-kernel += $(addsuffix .xz,$(clean-kernel-base))
-# If not absolute clean-files are relative to $(obj).
-clean-files += $(addprefix $(objtree)/, $(clean-kernel))
+# clean-files are relative to $(obj).
+clean-files += $(addprefix ../../../, $(clean-kernel))
WRAPPER_OBJDIR := /usr/lib/kernel-wrapper
WRAPPER_DTSDIR := /usr/lib/kernel-wrapper/dts
diff --git a/arch/powerpc/boot/crt0.S b/arch/powerpc/boot/crt0.S
index 92608f34d312..44544720daae 100644
--- a/arch/powerpc/boot/crt0.S
+++ b/arch/powerpc/boot/crt0.S
@@ -8,7 +8,8 @@
#include "ppc_asm.h"
RELA = 7
-RELACOUNT = 0x6ffffff9
+RELASZ = 8
+RELAENT = 9
.data
/* A procedure descriptor used when booting this as a COFF file.
@@ -28,7 +29,7 @@ p_etext: .8byte _etext
p_bss_start: .8byte __bss_start
p_end: .8byte _end
-p_toc: .8byte __toc_start + 0x8000 - p_base
+p_toc: .8byte .TOC. - p_base
p_dyn: .8byte __dynamic_start - p_base
p_rela: .8byte __rela_dyn_start - p_base
p_prom: .8byte 0
@@ -44,9 +45,6 @@ p_end: .long _end
p_pstack: .long _platform_stack_top
#endif
- .globl _zimage_start
- /* Clang appears to require the .weak directive to be after the symbol
- * is defined. See https://bugs.llvm.org/show_bug.cgi?id=38921 */
.weak _zimage_start
_zimage_start:
.globl _zimage_start_lib
@@ -78,34 +76,39 @@ p_base: mflr r10 /* r10 now points to runtime addr of p_base */
bne 11f
lwz r9,4(r12) /* get RELA pointer in r9 */
b 12f
-11: addis r8,r8,(-RELACOUNT)@ha
- cmpwi r8,RELACOUNT@l
+11: cmpwi r8,RELASZ
+ bne .Lcheck_for_relaent
+ lwz r0,4(r12) /* get RELASZ value in r0 */
+ b 12f
+.Lcheck_for_relaent:
+ cmpwi r8,RELAENT
bne 12f
- lwz r0,4(r12) /* get RELACOUNT value in r0 */
+ lwz r14,4(r12) /* get RELAENT value in r14 */
12: addi r12,r12,8
b 9b
/* The relocation section contains a list of relocations.
* We now do the R_PPC_RELATIVE ones, which point to words
- * which need to be initialized with addend + offset.
- * The R_PPC_RELATIVE ones come first and there are RELACOUNT
- * of them. */
+ * which need to be initialized with addend + offset */
10: /* skip relocation if we don't have both */
cmpwi r0,0
beq 3f
cmpwi r9,0
beq 3f
+ cmpwi r14,0
+ beq 3f
add r9,r9,r11 /* Relocate RELA pointer */
+ divwu r0,r0,r14 /* RELASZ / RELAENT */
mtctr r0
2: lbz r0,4+3(r9) /* ELF32_R_INFO(reloc->r_info) */
cmpwi r0,22 /* R_PPC_RELATIVE */
- bne 3f
+ bne .Lnext
lwz r12,0(r9) /* reloc->r_offset */
lwz r0,8(r9) /* reloc->r_addend */
add r0,r0,r11
stwx r0,r11,r12
- addi r9,r9,12
+.Lnext: add r9,r9,r14
bdnz 2b
/* Do a cache flush for our text, in case the loader didn't */
@@ -163,32 +166,39 @@ p_base: mflr r10 /* r10 now points to runtime addr of p_base */
bne 10f
ld r13,8(r11) /* get RELA pointer in r13 */
b 11f
-10: addis r12,r12,(-RELACOUNT)@ha
- cmpdi r12,RELACOUNT@l
- bne 11f
- ld r8,8(r11) /* get RELACOUNT value in r8 */
+10: cmpwi r12,RELASZ
+ bne .Lcheck_for_relaent
+ lwz r8,8(r11) /* get RELASZ pointer in r8 */
+ b 11f
+.Lcheck_for_relaent:
+ cmpwi r12,RELAENT
+ bne 11f
+ lwz r14,8(r11) /* get RELAENT pointer in r14 */
11: addi r11,r11,16
b 9b
12:
- cmpdi r13,0 /* check we have both RELA and RELACOUNT */
+ cmpdi r13,0 /* check we have both RELA, RELASZ, RELAENT*/
cmpdi cr1,r8,0
beq 3f
beq cr1,3f
+ cmpdi r14,0
+ beq 3f
 /* Calculate the runtime offset. */
subf r13,r13,r9
/* Run through the list of relocations and process the
* R_PPC64_RELATIVE ones. */
+ divdu r8,r8,r14 /* RELASZ / RELAENT */
mtctr r8
13: ld r0,8(r9) /* ELF64_R_TYPE(reloc->r_info) */
cmpdi r0,22 /* R_PPC64_RELATIVE */
- bne 3f
+ bne .Lnext
ld r12,0(r9) /* reloc->r_offset */
ld r0,16(r9) /* reloc->r_addend */
add r0,r0,r13
stdx r0,r13,r12
- addi r9,r9,24
+.Lnext: add r9,r9,r14
bdnz 13b
/* Do a cache flush for our text, in case the loader didn't */
@@ -229,16 +239,19 @@ p_base: mflr r10 /* r10 now points to runtime addr of p_base */
#ifdef __powerpc64__
#define PROM_FRAME_SIZE 512
-#define SAVE_GPR(n, base) std n,8*(n)(base)
-#define REST_GPR(n, base) ld n,8*(n)(base)
-#define SAVE_2GPRS(n, base) SAVE_GPR(n, base); SAVE_GPR(n+1, base)
-#define SAVE_4GPRS(n, base) SAVE_2GPRS(n, base); SAVE_2GPRS(n+2, base)
-#define SAVE_8GPRS(n, base) SAVE_4GPRS(n, base); SAVE_4GPRS(n+4, base)
-#define SAVE_10GPRS(n, base) SAVE_8GPRS(n, base); SAVE_2GPRS(n+8, base)
-#define REST_2GPRS(n, base) REST_GPR(n, base); REST_GPR(n+1, base)
-#define REST_4GPRS(n, base) REST_2GPRS(n, base); REST_2GPRS(n+2, base)
-#define REST_8GPRS(n, base) REST_4GPRS(n, base); REST_4GPRS(n+4, base)
-#define REST_10GPRS(n, base) REST_8GPRS(n, base); REST_2GPRS(n+8, base)
+
+.macro OP_REGS op, width, start, end, base, offset
+ .Lreg=\start
+ .rept (\end - \start + 1)
+ \op .Lreg,\offset+\width*.Lreg(\base)
+ .Lreg=.Lreg+1
+ .endr
+.endm
+
+#define SAVE_GPRS(start, end, base) OP_REGS std, 8, start, end, base, 0
+#define REST_GPRS(start, end, base) OP_REGS ld, 8, start, end, base, 0
+#define SAVE_GPR(n, base) SAVE_GPRS(n, n, base)
+#define REST_GPR(n, base) REST_GPRS(n, n, base)
/* prom handles the jump into and return from firmware. The prom args pointer
is loaded in r3. */
@@ -249,9 +262,7 @@ prom:
stdu r1,-PROM_FRAME_SIZE(r1) /* Save SP and create stack space */
SAVE_GPR(2, r1)
- SAVE_GPR(13, r1)
- SAVE_8GPRS(14, r1)
- SAVE_10GPRS(22, r1)
+ SAVE_GPRS(13, 31, r1)
mfcr r10
std r10,8*32(r1)
mfmsr r10
@@ -286,9 +297,7 @@ prom:
/* Restore other registers */
REST_GPR(2, r1)
- REST_GPR(13, r1)
- REST_8GPRS(14, r1)
- REST_10GPRS(22, r1)
+ REST_GPRS(13, 31, r1)
ld r10,8*32(r1)
mtcr r10
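
For reference, a rough C sketch of what the reworked 64-bit relocation loop above computes; this is illustrative only and not part of the patch. The structure layout and the R_PPC64_RELATIVE value follow the ELF64 specification, while the function and parameter names are invented for the example. The change replaces the obsolete DT_RELACOUNT dynamic tag with DT_RELASZ and DT_RELAENT, deriving the entry count by division (the divdu/divwu above) and skipping, rather than stopping at, entries that are not RELATIVE relocations. The OP_REGS macro further down likewise replaces the hand-unrolled SAVE_/REST_ GPR macros with a .rept loop over a register range.

/* Illustrative C equivalent of the new 64-bit relocation walk (not part of the patch). */
#include <stdint.h>

struct elf64_rela {
	uint64_t r_offset;
	uint64_t r_info;
	int64_t  r_addend;
};

#define R_PPC64_RELATIVE 22

static void apply_relative_relocs(void *rela, uint64_t relasz,
				  uint64_t relaent, uint64_t load_offset)
{
	/* RELASZ is the section size in bytes and RELAENT the entry size,
	 * so the entry count is their quotient (what divdu computes). */
	uint64_t i, count = relasz / relaent;

	for (i = 0; i < count; i++) {
		struct elf64_rela *r = (struct elf64_rela *)((char *)rela + i * relaent);

		/* Unlike the RELACOUNT scheme, other relocation types may be
		 * interleaved, so skip them instead of stopping at the first one. */
		if ((r->r_info & 0xffffffff) != R_PPC64_RELATIVE)
			continue;

		/* Patch the word at r_offset with addend plus the load offset. */
		*(uint64_t *)(uintptr_t)(load_offset + r->r_offset) =
			load_offset + r->r_addend;
	}
}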
diff --git a/arch/powerpc/boot/cuboot-hotfoot.c b/arch/powerpc/boot/cuboot-hotfoot.c
index 888a6b9bfead..0e5532f855d6 100644
--- a/arch/powerpc/boot/cuboot-hotfoot.c
+++ b/arch/powerpc/boot/cuboot-hotfoot.c
@@ -70,7 +70,7 @@ static void hotfoot_fixups(void)
printf("Fixing devtree for 4M Flash\n");
- /* First fix up the base addresse */
+ /* First fix up the base address */
getprop(devp, "reg", regs, sizeof(regs));
regs[0] = 0;
regs[1] = 0xffc00000;
diff --git a/arch/powerpc/boot/decompress.c b/arch/powerpc/boot/decompress.c
index 8bf39ef7d2df..977eb15a6d17 100644
--- a/arch/powerpc/boot/decompress.c
+++ b/arch/powerpc/boot/decompress.c
@@ -21,7 +21,6 @@
#define STATIC static
#define INIT
-#define __always_inline inline
/*
* The build process will copy the required zlib source files and headers
@@ -100,8 +99,8 @@ static void print_err(char *s)
* partial_decompress - decompresses part or all of a compressed buffer
* @inbuf: input buffer
* @input_size: length of the input buffer
- * @outbuf: input buffer
- * @output_size: length of the input buffer
+ * @outbuf: output buffer
+ * @output_size: length of the output buffer
* @skip number of output bytes to ignore
*
* This function takes compressed data from inbuf, decompresses and write it to
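
The corrected kernel-doc above clarifies that @outbuf/@output_size describe the destination buffer, not the input. A hypothetical caller could look like the sketch below; the prototype is assumed to match the declaration in the wrapper's ops.h, and the buffer size and function name are made up for the example.

/* Assumed prototype from the boot wrapper's ops.h: */
long partial_decompress(void *inbuf, unsigned long input_size,
			void *outbuf, unsigned long output_size,
			unsigned long skip);

static unsigned char vmlinux_buf[64 << 20];	/* size chosen arbitrarily for the sketch */

static long extract_kernel(void *payload, unsigned long payload_size)
{
	/* Decompress the whole payload (skip = 0) into vmlinux_buf;
	 * presumably returns the number of bytes written, or < 0 on error. */
	return partial_decompress(payload, payload_size,
				  vmlinux_buf, sizeof(vmlinux_buf), 0);
}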
diff --git a/arch/powerpc/boot/devtree.c b/arch/powerpc/boot/devtree.c
index 5d91036ad626..58fbcfcc98c9 100644
--- a/arch/powerpc/boot/devtree.c
+++ b/arch/powerpc/boot/devtree.c
@@ -13,6 +13,7 @@
#include "string.h"
#include "stdio.h"
#include "ops.h"
+#include "of.h"
void dt_fixup_memory(u64 start, u64 size)
{
@@ -23,21 +24,25 @@ void dt_fixup_memory(u64 start, u64 size)
root = finddevice("/");
if (getprop(root, "#address-cells", &naddr, sizeof(naddr)) < 0)
naddr = 2;
+ else
+ naddr = be32_to_cpu(naddr);
if (naddr < 1 || naddr > 2)
fatal("Can't cope with #address-cells == %d in /\n\r", naddr);
if (getprop(root, "#size-cells", &nsize, sizeof(nsize)) < 0)
nsize = 1;
+ else
+ nsize = be32_to_cpu(nsize);
if (nsize < 1 || nsize > 2)
fatal("Can't cope with #size-cells == %d in /\n\r", nsize);
i = 0;
if (naddr == 2)
- memreg[i++] = start >> 32;
- memreg[i++] = start & 0xffffffff;
+ memreg[i++] = cpu_to_be32(start >> 32);
+ memreg[i++] = cpu_to_be32(start & 0xffffffff);
if (nsize == 2)
- memreg[i++] = size >> 32;
- memreg[i++] = size & 0xffffffff;
+ memreg[i++] = cpu_to_be32(size >> 32);
+ memreg[i++] = cpu_to_be32(size & 0xffffffff);
memory = finddevice("/memory");
if (! memory) {
@@ -45,9 +50,9 @@ void dt_fixup_memory(u64 start, u64 size)
setprop_str(memory, "device_type", "memory");
}
- printf("Memory <- <0x%x", memreg[0]);
+ printf("Memory <- <0x%x", be32_to_cpu(memreg[0]));
for (i = 1; i < (naddr + nsize); i++)
- printf(" 0x%x", memreg[i]);
+ printf(" 0x%x", be32_to_cpu(memreg[i]));
printf("> (%ldMB)\n\r", (unsigned long)(size >> 20));
setprop(memory, "reg", memreg, (naddr + nsize)*sizeof(u32));
@@ -65,10 +70,10 @@ void dt_fixup_cpu_clocks(u32 cpu, u32 tb, u32 bus)
printf("CPU bus-frequency <- 0x%x (%dMHz)\n\r", bus, MHZ(bus));
while ((devp = find_node_by_devtype(devp, "cpu"))) {
- setprop_val(devp, "clock-frequency", cpu);
- setprop_val(devp, "timebase-frequency", tb);
+ setprop_val(devp, "clock-frequency", cpu_to_be32(cpu));
+ setprop_val(devp, "timebase-frequency", cpu_to_be32(tb));
if (bus > 0)
- setprop_val(devp, "bus-frequency", bus);
+ setprop_val(devp, "bus-frequency", cpu_to_be32(bus));
}
timebase_period_ns = 1000000000 / tb;
@@ -80,7 +85,7 @@ void dt_fixup_clock(const char *path, u32 freq)
if (devp) {
printf("%s: clock-frequency <- %x (%dMHz)\n\r", path, freq, MHZ(freq));
- setprop_val(devp, "clock-frequency", freq);
+ setprop_val(devp, "clock-frequency", cpu_to_be32(freq));
}
}
@@ -133,8 +138,12 @@ void dt_get_reg_format(void *node, u32 *naddr, u32 *nsize)
{
if (getprop(node, "#address-cells", naddr, 4) != 4)
*naddr = 2;
+ else
+ *naddr = be32_to_cpu(*naddr);
if (getprop(node, "#size-cells", nsize, 4) != 4)
*nsize = 1;
+ else
+ *nsize = be32_to_cpu(*nsize);
}
static void copy_val(u32 *dest, u32 *src, int naddr)
@@ -163,9 +172,9 @@ static int add_reg(u32 *reg, u32 *add, int naddr)
int i, carry = 0;
for (i = MAX_ADDR_CELLS - 1; i >= MAX_ADDR_CELLS - naddr; i--) {
- u64 tmp = (u64)reg[i] + add[i] + carry;
+ u64 tmp = (u64)be32_to_cpu(reg[i]) + be32_to_cpu(add[i]) + carry;
carry = tmp >> 32;
- reg[i] = (u32)tmp;
+ reg[i] = cpu_to_be32((u32)tmp);
}
return !carry;
@@ -180,18 +189,18 @@ static int compare_reg(u32 *reg, u32 *range, u32 *rangesize)
u32 end;
for (i = 0; i < MAX_ADDR_CELLS; i++) {
- if (reg[i] < range[i])
+ if (be32_to_cpu(reg[i]) < be32_to_cpu(range[i]))
return 0;
- if (reg[i] > range[i])
+ if (be32_to_cpu(reg[i]) > be32_to_cpu(range[i]))
break;
}
for (i = 0; i < MAX_ADDR_CELLS; i++) {
- end = range[i] + rangesize[i];
+ end = be32_to_cpu(range[i]) + be32_to_cpu(rangesize[i]);
- if (reg[i] < end)
+ if (be32_to_cpu(reg[i]) < end)
break;
- if (reg[i] > end)
+ if (be32_to_cpu(reg[i]) > end)
return 0;
}
@@ -240,7 +249,6 @@ static int dt_xlate(void *node, int res, int reglen, unsigned long *addr,
return 0;
dt_get_reg_format(parent, &naddr, &nsize);
-
if (nsize > 2)
return 0;
@@ -252,10 +260,10 @@ static int dt_xlate(void *node, int res, int reglen, unsigned long *addr,
copy_val(last_addr, prop_buf + offset, naddr);
- ret_size = prop_buf[offset + naddr];
+ ret_size = be32_to_cpu(prop_buf[offset + naddr]);
if (nsize == 2) {
ret_size <<= 32;
- ret_size |= prop_buf[offset + naddr + 1];
+ ret_size |= be32_to_cpu(prop_buf[offset + naddr + 1]);
}
for (;;) {
@@ -278,7 +286,6 @@ static int dt_xlate(void *node, int res, int reglen, unsigned long *addr,
offset = find_range(last_addr, prop_buf, prev_naddr,
naddr, prev_nsize, buflen / 4);
-
if (offset < 0)
return 0;
@@ -296,8 +303,7 @@ static int dt_xlate(void *node, int res, int reglen, unsigned long *addr,
if (naddr > 2)
return 0;
- ret_addr = ((u64)last_addr[2] << 32) | last_addr[3];
-
+ ret_addr = ((u64)be32_to_cpu(last_addr[2]) << 32) | be32_to_cpu(last_addr[3]);
if (sizeof(void *) == 4 &&
(ret_addr >= 0x100000000ULL || ret_size > 0x100000000ULL ||
ret_addr + ret_size > 0x100000000ULL))
@@ -350,11 +356,14 @@ int dt_is_compatible(void *node, const char *compat)
int dt_get_virtual_reg(void *node, void **addr, int nres)
{
unsigned long xaddr;
- int n;
+ int n, i;
n = getprop(node, "virtual-reg", addr, nres * 4);
- if (n > 0)
+ if (n > 0) {
+ for (i = 0; i < n/4; i ++)
+ ((u32 *)addr)[i] = be32_to_cpu(((u32 *)addr)[i]);
return n / 4;
+ }
for (n = 0; n < nres; n++) {
if (!dt_xlate_reg(node, n, &xaddr, NULL))
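
The devtree.c changes above make the wrapper endian-safe: flattened device-tree cells are always stored big-endian, so a little-endian build must byte-swap on every property read and write. A minimal sketch of the pattern follows, using the wrapper's own getprop()/setprop_val() helpers and the be32 conversions from the newly included of.h; the function itself is invented for illustration.

/* Sketch only: byte-swap pattern for FDT cell properties in the wrapper. */
static void example_fix_clock(void *devp, u32 new_hz)
{
	u32 freq;

	/* Values read out of the device tree are big-endian ... */
	if (getprop(devp, "clock-frequency", &freq, sizeof(freq)) == sizeof(freq))
		printf("old clock-frequency: %u\n\r", be32_to_cpu(freq));

	/* ... and values written back must be converted to big-endian. */
	setprop_val(devp, "clock-frequency", cpu_to_be32(new_hz));
}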
diff --git a/arch/powerpc/boot/dts/Makefile b/arch/powerpc/boot/dts/Makefile
index 1cbc0e4ce857..fb335d05aae8 100644
--- a/arch/powerpc/boot/dts/Makefile
+++ b/arch/powerpc/boot/dts/Makefile
@@ -4,4 +4,3 @@ subdir-y += fsl
dtstree := $(srctree)/$(src)
dtb-$(CONFIG_OF_ALL_DTBS) := $(patsubst $(dtstree)/%.dts,%.dtb, $(wildcard $(dtstree)/*.dts))
-dtb-$(CONFIG_XILINX_VIRTEX440_GENERIC_BOARD) += virtex440-ml507.dtb virtex440-ml510.dtb
diff --git a/arch/powerpc/boot/dts/a4m072.dts b/arch/powerpc/boot/dts/a4m072.dts
index a9cef5726422..d4270a2ec6c7 100644
--- a/arch/powerpc/boot/dts/a4m072.dts
+++ b/arch/powerpc/boot/dts/a4m072.dts
@@ -140,8 +140,8 @@
clock-frequency = <0>; /* From boot loader */
interrupts = <2 8 0 2 9 0 2 10 0>;
bus-range = <0 0>;
- ranges = <0x42000000 0 0x80000000 0x80000000 0 0x10000000
- 0x02000000 0 0x90000000 0x90000000 0 0x10000000
- 0x01000000 0 0x00000000 0xa0000000 0 0x01000000>;
+ ranges = <0x42000000 0 0x80000000 0x80000000 0 0x10000000>,
+ <0x02000000 0 0x90000000 0x90000000 0 0x10000000>,
+ <0x01000000 0 0x00000000 0xa0000000 0 0x01000000>;
};
};
diff --git a/arch/powerpc/boot/dts/akebono.dts b/arch/powerpc/boot/dts/akebono.dts
index cd9d66041a3f..df18f8dc4642 100644
--- a/arch/powerpc/boot/dts/akebono.dts
+++ b/arch/powerpc/boot/dts/akebono.dts
@@ -248,7 +248,7 @@
};
};
- PCIE0: pciex@10100000000 {
+ PCIE0: pcie@10100000000 {
device_type = "pci";
#interrupt-cells = <1>;
#size-cells = <2>;
@@ -288,7 +288,7 @@
0x0 0x0 0x0 0x4 &MPIC 48 0x2 /* int D */>;
};
- PCIE1: pciex@20100000000 {
+ PCIE1: pcie@20100000000 {
device_type = "pci";
#interrupt-cells = <1>;
#size-cells = <2>;
@@ -328,7 +328,7 @@
0x0 0x0 0x0 0x4 &MPIC 56 0x2 /* int D */>;
};
- PCIE2: pciex@18100000000 {
+ PCIE2: pcie@18100000000 {
device_type = "pci";
#interrupt-cells = <1>;
#size-cells = <2>;
@@ -368,7 +368,7 @@
0x0 0x0 0x0 0x4 &MPIC 64 0x2 /* int D */>;
};
- PCIE3: pciex@28100000000 {
+ PCIE3: pcie@28100000000 {
device_type = "pci";
#interrupt-cells = <1>;
#size-cells = <2>;
diff --git a/arch/powerpc/boot/dts/bluestone.dts b/arch/powerpc/boot/dts/bluestone.dts
index cc965a1816b6..6971595319c1 100644
--- a/arch/powerpc/boot/dts/bluestone.dts
+++ b/arch/powerpc/boot/dts/bluestone.dts
@@ -325,7 +325,7 @@
};
};
- PCIE0: pciex@d00000000 {
+ PCIE0: pcie@d00000000 {
device_type = "pci";
#interrupt-cells = <1>;
#size-cells = <2>;
@@ -366,30 +366,5 @@
0x0 0x0 0x0 0x3 &UIC3 0xe 0x4 /* swizzled int C */
0x0 0x0 0x0 0x4 &UIC3 0xf 0x4 /* swizzled int D */>;
};
-
- MSI: ppc4xx-msi@C10000000 {
- compatible = "amcc,ppc4xx-msi", "ppc4xx-msi";
- reg = < 0xC 0x10000000 0x100
- 0xC 0x10000000 0x100>;
- sdr-base = <0x36C>;
- msi-data = <0x00004440>;
- msi-mask = <0x0000ffe0>;
- interrupts =<0 1 2 3 4 5 6 7>;
- interrupt-parent = <&MSI>;
- #interrupt-cells = <1>;
- #address-cells = <0>;
- #size-cells = <0>;
- msi-available-ranges = <0x0 0x100>;
- interrupt-map = <
- 0 &UIC3 0x18 1
- 1 &UIC3 0x19 1
- 2 &UIC3 0x1A 1
- 3 &UIC3 0x1B 1
- 4 &UIC3 0x1C 1
- 5 &UIC3 0x1D 1
- 6 &UIC3 0x1E 1
- 7 &UIC3 0x1F 1
- >;
- };
};
};
diff --git a/arch/powerpc/boot/dts/canyonlands.dts b/arch/powerpc/boot/dts/canyonlands.dts
index 0d6ac92d0f5e..5db1bff6b23d 100644
--- a/arch/powerpc/boot/dts/canyonlands.dts
+++ b/arch/powerpc/boot/dts/canyonlands.dts
@@ -461,7 +461,7 @@
interrupt-map = < 0x0 0x0 0x0 0x0 &UIC1 0x0 0x8 >;
};
- PCIE0: pciex@d00000000 {
+ PCIE0: pcie@d00000000 {
device_type = "pci";
#interrupt-cells = <1>;
#size-cells = <2>;
@@ -503,7 +503,7 @@
0x0 0x0 0x0 0x4 &UIC3 0xf 0x4 /* swizzled int D */>;
};
- PCIE1: pciex@d20000000 {
+ PCIE1: pcie@d20000000 {
device_type = "pci";
#interrupt-cells = <1>;
#size-cells = <2>;
@@ -544,23 +544,5 @@
0x0 0x0 0x0 0x3 &UIC3 0x12 0x4 /* swizzled int C */
0x0 0x0 0x0 0x4 &UIC3 0x13 0x4 /* swizzled int D */>;
};
-
- MSI: ppc4xx-msi@C10000000 {
- compatible = "amcc,ppc4xx-msi", "ppc4xx-msi";
- reg = < 0xC 0x10000000 0x100>;
- sdr-base = <0x36C>;
- msi-data = <0x00000000>;
- msi-mask = <0x44440000>;
- interrupt-count = <3>;
- interrupts = <0 1 2 3>;
- interrupt-parent = <&UIC3>;
- #interrupt-cells = <1>;
- #address-cells = <0>;
- #size-cells = <0>;
- interrupt-map = <0 &UIC3 0x18 1
- 1 &UIC3 0x19 1
- 2 &UIC3 0x1A 1
- 3 &UIC3 0x1B 1>;
- };
};
};
diff --git a/arch/powerpc/boot/dts/charon.dts b/arch/powerpc/boot/dts/charon.dts
index 408b486b13df..ea6e76ae2545 100644
--- a/arch/powerpc/boot/dts/charon.dts
+++ b/arch/powerpc/boot/dts/charon.dts
@@ -35,7 +35,7 @@
};
};
- memory {
+ memory@0 {
device_type = "memory";
reg = <0x00000000 0x08000000>; // 128MB
};
@@ -225,8 +225,8 @@
clock-frequency = <0>; // From boot loader
interrupts = <2 8 0 2 9 0 2 10 0>;
bus-range = <0 0>;
- ranges = <0x42000000 0 0x80000000 0x80000000 0 0x10000000
- 0x02000000 0 0x90000000 0x90000000 0 0x10000000
- 0x01000000 0 0x00000000 0xa0000000 0 0x01000000>;
+ ranges = <0x42000000 0 0x80000000 0x80000000 0 0x10000000>,
+ <0x02000000 0 0x90000000 0x90000000 0 0x10000000>,
+ <0x01000000 0 0x00000000 0xa0000000 0 0x01000000>;
};
};
diff --git a/arch/powerpc/boot/dts/currituck.dts b/arch/powerpc/boot/dts/currituck.dts
index b6d87b9c2cef..aea8af810106 100644
--- a/arch/powerpc/boot/dts/currituck.dts
+++ b/arch/powerpc/boot/dts/currituck.dts
@@ -122,7 +122,7 @@
};
};
- PCIE0: pciex@10100000000 { // 4xGBIF1
+ PCIE0: pcie@10100000000 { // 4xGBIF1
device_type = "pci";
#interrupt-cells = <1>;
#size-cells = <2>;
@@ -160,7 +160,7 @@
0x0 0x0 0x0 0x4 &MPIC 49 0x2 /* int D */>;
};
- PCIE1: pciex@30100000000 { // 4xGBIF0
+ PCIE1: pcie@30100000000 { // 4xGBIF0
device_type = "pci";
#interrupt-cells = <1>;
#size-cells = <2>;
@@ -197,7 +197,7 @@
0x0 0x0 0x0 0x4 &MPIC 41 0x2 /* int D */>;
};
- PCIE2: pciex@38100000000 { // 2xGBIF0
+ PCIE2: pcie@38100000000 { // 2xGBIF0
device_type = "pci";
#interrupt-cells = <1>;
#size-cells = <2>;
diff --git a/arch/powerpc/boot/dts/digsy_mtc.dts b/arch/powerpc/boot/dts/digsy_mtc.dts
index 0e5e9d3acf79..dfaf974c0ce6 100644
--- a/arch/powerpc/boot/dts/digsy_mtc.dts
+++ b/arch/powerpc/boot/dts/digsy_mtc.dts
@@ -16,7 +16,7 @@
model = "intercontrol,digsy-mtc";
compatible = "intercontrol,digsy-mtc";
- memory {
+ memory@0 {
reg = <0x00000000 0x02000000>; // 32MB
};
@@ -25,14 +25,6 @@
status = "disabled";
};
- spi@f00 {
- msp430@0 {
- compatible = "spidev";
- spi-max-frequency = <32000>;
- reg = <0>;
- };
- };
-
psc@2000 { // PSC1
status = "disabled";
};
@@ -98,9 +90,9 @@
clock-frequency = <0>; // From boot loader
interrupts = <2 8 0 2 9 0 2 10 0>;
bus-range = <0 0>;
- ranges = <0x42000000 0 0x80000000 0x80000000 0 0x10000000
- 0x02000000 0 0x90000000 0x90000000 0 0x10000000
- 0x01000000 0 0x00000000 0xa0000000 0 0x01000000>;
+ ranges = <0x42000000 0 0x80000000 0x80000000 0 0x10000000>,
+ <0x02000000 0 0x90000000 0x90000000 0 0x10000000>,
+ <0x01000000 0 0x00000000 0xa0000000 0 0x01000000>;
};
localbus {
diff --git a/arch/powerpc/boot/dts/ep405.dts b/arch/powerpc/boot/dts/ep405.dts
deleted file mode 100644
index 4ac9c5ab6e6b..000000000000
--- a/arch/powerpc/boot/dts/ep405.dts
+++ /dev/null
@@ -1,230 +0,0 @@
-/*
- * Device Tree Source for EP405
- *
- * Copyright 2007 IBM Corp.
- * Benjamin Herrenschmidt <benh@kernel.crashing.org>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without
- * any warranty of any kind, whether express or implied.
- */
-
-/dts-v1/;
-
-/ {
- #address-cells = <1>;
- #size-cells = <1>;
- model = "ep405";
- compatible = "ep405";
- dcr-parent = <&{/cpus/cpu@0}>;
-
- aliases {
- ethernet0 = &EMAC;
- serial0 = &UART0;
- serial1 = &UART1;
- };
-
- cpus {
- #address-cells = <1>;
- #size-cells = <0>;
-
- cpu@0 {
- device_type = "cpu";
- model = "PowerPC,405GP";
- reg = <0x00000000>;
- clock-frequency = <200000000>; /* Filled in by zImage */
- timebase-frequency = <0>; /* Filled in by zImage */
- i-cache-line-size = <32>;
- d-cache-line-size = <32>;
- i-cache-size = <16384>;
- d-cache-size = <16384>;
- dcr-controller;
- dcr-access-method = "native";
- };
- };
-
- memory {
- device_type = "memory";
- reg = <0x00000000 0x00000000>; /* Filled in by zImage */
- };
-
- UIC0: interrupt-controller {
- compatible = "ibm,uic";
- interrupt-controller;
- cell-index = <0>;
- dcr-reg = <0x0c0 0x009>;
- #address-cells = <0>;
- #size-cells = <0>;
- #interrupt-cells = <2>;
- };
-
- plb {
- compatible = "ibm,plb3";
- #address-cells = <1>;
- #size-cells = <1>;
- ranges;
- clock-frequency = <0>; /* Filled in by zImage */
-
- SDRAM0: memory-controller {
- compatible = "ibm,sdram-405gp";
- dcr-reg = <0x010 0x002>;
- };
-
- MAL: mcmal {
- compatible = "ibm,mcmal-405gp", "ibm,mcmal";
- dcr-reg = <0x180 0x062>;
- num-tx-chans = <1>;
- num-rx-chans = <1>;
- interrupt-parent = <&UIC0>;
- interrupts = <
- 0xb 0x4 /* TXEOB */
- 0xc 0x4 /* RXEOB */
- 0xa 0x4 /* SERR */
- 0xd 0x4 /* TXDE */
- 0xe 0x4 /* RXDE */>;
- };
-
- POB0: opb {
- compatible = "ibm,opb-405gp", "ibm,opb";
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <0xef600000 0xef600000 0x00a00000>;
- dcr-reg = <0x0a0 0x005>;
- clock-frequency = <0>; /* Filled in by zImage */
-
- UART0: serial@ef600300 {
- device_type = "serial";
- compatible = "ns16550";
- reg = <0xef600300 0x00000008>;
- virtual-reg = <0xef600300>;
- clock-frequency = <0>; /* Filled in by zImage */
- current-speed = <9600>;
- interrupt-parent = <&UIC0>;
- interrupts = <0x0 0x4>;
- };
-
- UART1: serial@ef600400 {
- device_type = "serial";
- compatible = "ns16550";
- reg = <0xef600400 0x00000008>;
- virtual-reg = <0xef600400>;
- clock-frequency = <0>; /* Filled in by zImage */
- current-speed = <9600>;
- interrupt-parent = <&UIC0>;
- interrupts = <0x1 0x4>;
- };
-
- IIC: i2c@ef600500 {
- compatible = "ibm,iic-405gp", "ibm,iic";
- reg = <0xef600500 0x00000011>;
- interrupt-parent = <&UIC0>;
- interrupts = <0x2 0x4>;
- };
-
- GPIO: gpio@ef600700 {
- compatible = "ibm,gpio-405gp";
- reg = <0xef600700 0x00000020>;
- };
-
- EMAC: ethernet@ef600800 {
- linux,network-index = <0x0>;
- device_type = "network";
- compatible = "ibm,emac-405gp", "ibm,emac";
- interrupt-parent = <&UIC0>;
- interrupts = <
- 0xf 0x4 /* Ethernet */
- 0x9 0x4 /* Ethernet Wake Up */>;
- local-mac-address = [000000000000]; /* Filled in by zImage */
- reg = <0xef600800 0x00000070>;
- mal-device = <&MAL>;
- mal-tx-channel = <0>;
- mal-rx-channel = <0>;
- cell-index = <0>;
- max-frame-size = <1500>;
- rx-fifo-size = <4096>;
- tx-fifo-size = <2048>;
- phy-mode = "rmii";
- phy-map = <0x00000000>;
- };
-
- };
-
- EBC0: ebc {
- compatible = "ibm,ebc-405gp", "ibm,ebc";
- dcr-reg = <0x012 0x002>;
- #address-cells = <2>;
- #size-cells = <1>;
-
-
- /* The ranges property is supplied by the bootwrapper
- * and is based on the firmware's configuration of the
- * EBC bridge
- */
- clock-frequency = <0>; /* Filled in by zImage */
-
- /* NVRAM and RTC */
- nvrtc@4,200000 {
- compatible = "ds1742";
- reg = <0x00000004 0x00200000 0x00000000>; /* size fixed up by zImage */
- };
-
- /* "BCSR" CPLD contains a PCI irq controller */
- bcsr@4,0 {
- compatible = "ep405-bcsr";
- reg = <0x00000004 0x00000000 0x00000010>;
- interrupt-controller;
- /* Routing table */
- irq-routing = [ 00 /* SYSERR */
- 01 /* STTM */
- 01 /* RTC */
- 01 /* FENET */
- 02 /* NB PCIIRQ mux ? */
- 03 /* SB Winbond 8259 ? */
- 04 /* Serial Ring */
- 05 /* USB (ep405pc) */
- 06 /* XIRQ 0 */
- 06 /* XIRQ 1 */
- 06 /* XIRQ 2 */
- 06 /* XIRQ 3 */
- 06 /* XIRQ 4 */
- 06 /* XIRQ 5 */
- 06 /* XIRQ 6 */
- 07]; /* Reserved */
- };
- };
-
- PCI0: pci@ec000000 {
- device_type = "pci";
- #interrupt-cells = <1>;
- #size-cells = <2>;
- #address-cells = <3>;
- compatible = "ibm,plb405gp-pci", "ibm,plb-pci";
- primary;
- reg = <0xeec00000 0x00000008 /* Config space access */
- 0xeed80000 0x00000004 /* IACK */
- 0xeed80000 0x00000004 /* Special cycle */
- 0xef480000 0x00000040>; /* Internal registers */
-
- /* Outbound ranges, one memory and one IO,
- * later cannot be changed. Chip supports a second
- * IO range but we don't use it for now
- */
- ranges = <0x02000000 0x00000000 0x80000000 0x80000000 0x00000000 0x20000000
- 0x01000000 0x00000000 0x00000000 0xe8000000 0x00000000 0x00010000>;
-
- /* Inbound 2GB range starting at 0 */
- dma-ranges = <0x42000000 0x0 0x0 0x0 0x0 0x80000000>;
-
- /* That's all I know about IRQs on that thing ... */
- interrupt-map-mask = <0xf800 0x0 0x0 0x0>;
- interrupt-map = <
- /* USB */
- 0x7000 0x0 0x0 0x0 &UIC0 0x1e 0x8 /* IRQ5 */
- >;
- };
- };
-
- chosen {
- stdout-path = "/plb/opb/serial@ef600300";
- };
-};
diff --git a/arch/powerpc/boot/dts/fsl/bsc9131si-post.dtsi b/arch/powerpc/boot/dts/fsl/bsc9131si-post.dtsi
index 0c0efa94cfb4..2a677fd323eb 100644
--- a/arch/powerpc/boot/dts/fsl/bsc9131si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/bsc9131si-post.dtsi
@@ -170,8 +170,6 @@ timer@41100 {
/include/ "pq3-etsec2-0.dtsi"
enet0: ethernet@b0000 {
queue-group@b0000 {
- fsl,rx-bit-map = <0xff>;
- fsl,tx-bit-map = <0xff>;
interrupts = <26 2 0 0 27 2 0 0 28 2 0 0>;
};
};
@@ -179,8 +177,6 @@ enet0: ethernet@b0000 {
/include/ "pq3-etsec2-1.dtsi"
enet1: ethernet@b1000 {
queue-group@b1000 {
- fsl,rx-bit-map = <0xff>;
- fsl,tx-bit-map = <0xff>;
interrupts = <33 2 0 0 34 2 0 0 35 2 0 0>;
};
};
diff --git a/arch/powerpc/boot/dts/fsl/bsc9132si-post.dtsi b/arch/powerpc/boot/dts/fsl/bsc9132si-post.dtsi
index b5f071574e83..b8e0edd1ac69 100644
--- a/arch/powerpc/boot/dts/fsl/bsc9132si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/bsc9132si-post.dtsi
@@ -190,8 +190,6 @@ crypto@30000 {
/include/ "pq3-etsec2-0.dtsi"
enet0: ethernet@b0000 {
queue-group@b0000 {
- fsl,rx-bit-map = <0xff>;
- fsl,tx-bit-map = <0xff>;
interrupts = <26 2 0 0 27 2 0 0 28 2 0 0>;
};
};
@@ -199,8 +197,6 @@ enet0: ethernet@b0000 {
/include/ "pq3-etsec2-1.dtsi"
enet1: ethernet@b1000 {
queue-group@b1000 {
- fsl,rx-bit-map = <0xff>;
- fsl,tx-bit-map = <0xff>;
interrupts = <33 2 0 0 34 2 0 0 35 2 0 0>;
};
};
diff --git a/arch/powerpc/boot/dts/fsl/c293si-post.dtsi b/arch/powerpc/boot/dts/fsl/c293si-post.dtsi
index bd208320bff5..bec0fc36849d 100644
--- a/arch/powerpc/boot/dts/fsl/c293si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/c293si-post.dtsi
@@ -171,8 +171,6 @@
enet0: ethernet@b0000 {
queue-group@b0000 {
reg = <0x10000 0x1000>;
- fsl,rx-bit-map = <0xff>;
- fsl,tx-bit-map = <0xff>;
};
};
@@ -180,8 +178,6 @@
enet1: ethernet@b1000 {
queue-group@b1000 {
reg = <0x11000 0x1000>;
- fsl,rx-bit-map = <0xff>;
- fsl,tx-bit-map = <0xff>;
};
};
diff --git a/arch/powerpc/boot/dts/fsl/e500v1_power_isa.dtsi b/arch/powerpc/boot/dts/fsl/e500v1_power_isa.dtsi
new file mode 100644
index 000000000000..7e2a90cde72e
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/e500v1_power_isa.dtsi
@@ -0,0 +1,51 @@
+/*
+ * e500v1 Power ISA Device Tree Source (include)
+ *
+ * Copyright 2012 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/ {
+ cpus {
+ power-isa-version = "2.03";
+ power-isa-b; // Base
+ power-isa-e; // Embedded
+ power-isa-atb; // Alternate Time Base
+ power-isa-cs; // Cache Specification
+ power-isa-e.le; // Embedded.Little-Endian
+ power-isa-e.pm; // Embedded.Performance Monitor
+ power-isa-ecl; // Embedded Cache Locking
+ power-isa-mmc; // Memory Coherence
+ power-isa-sp; // Signal Processing Engine
+ power-isa-sp.fs; // SPE.Embedded Float Scalar Single
+ power-isa-sp.fv; // SPE.Embedded Float Vector
+ mmu-type = "power-embedded";
+ };
+};
diff --git a/arch/powerpc/boot/dts/fsl/mpc8540ads.dts b/arch/powerpc/boot/dts/fsl/mpc8540ads.dts
index 18a885130538..e03ae130162b 100644
--- a/arch/powerpc/boot/dts/fsl/mpc8540ads.dts
+++ b/arch/powerpc/boot/dts/fsl/mpc8540ads.dts
@@ -7,7 +7,7 @@
/dts-v1/;
-/include/ "e500v2_power_isa.dtsi"
+/include/ "e500v1_power_isa.dtsi"
/ {
model = "MPC8540ADS";
diff --git a/arch/powerpc/boot/dts/fsl/mpc8541cds.dts b/arch/powerpc/boot/dts/fsl/mpc8541cds.dts
index ac381e7b1c60..a2a6c5cf852e 100644
--- a/arch/powerpc/boot/dts/fsl/mpc8541cds.dts
+++ b/arch/powerpc/boot/dts/fsl/mpc8541cds.dts
@@ -7,7 +7,7 @@
/dts-v1/;
-/include/ "e500v2_power_isa.dtsi"
+/include/ "e500v1_power_isa.dtsi"
/ {
model = "MPC8541CDS";
diff --git a/arch/powerpc/boot/dts/fsl/mpc8555cds.dts b/arch/powerpc/boot/dts/fsl/mpc8555cds.dts
index 9f58db2a7e66..901b6ff06dfb 100644
--- a/arch/powerpc/boot/dts/fsl/mpc8555cds.dts
+++ b/arch/powerpc/boot/dts/fsl/mpc8555cds.dts
@@ -7,7 +7,7 @@
/dts-v1/;
-/include/ "e500v2_power_isa.dtsi"
+/include/ "e500v1_power_isa.dtsi"
/ {
model = "MPC8555CDS";
diff --git a/arch/powerpc/boot/dts/fsl/mpc8560ads.dts b/arch/powerpc/boot/dts/fsl/mpc8560ads.dts
index a24722ccaebf..c2f9aea78b29 100644
--- a/arch/powerpc/boot/dts/fsl/mpc8560ads.dts
+++ b/arch/powerpc/boot/dts/fsl/mpc8560ads.dts
@@ -7,7 +7,7 @@
/dts-v1/;
-/include/ "e500v2_power_isa.dtsi"
+/include/ "e500v1_power_isa.dtsi"
/ {
model = "MPC8560ADS";
diff --git a/arch/powerpc/boot/dts/fsl/p1010si-post.dtsi b/arch/powerpc/boot/dts/fsl/p1010si-post.dtsi
index 1b4aafc1f6a2..ccda0a91abf0 100644
--- a/arch/powerpc/boot/dts/fsl/p1010si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p1010si-post.dtsi
@@ -122,7 +122,15 @@
};
/include/ "pq3-i2c-0.dtsi"
+ i2c@3000 {
+ fsl,i2c-erratum-a004447;
+ };
+
/include/ "pq3-i2c-1.dtsi"
+ i2c@3100 {
+ fsl,i2c-erratum-a004447;
+ };
+
/include/ "pq3-duart-0.dtsi"
/include/ "pq3-espi-0.dtsi"
spi0: spi@7000 {
@@ -172,29 +180,8 @@
/include/ "pq3-mpic-timer-B.dtsi"
/include/ "pq3-etsec2-0.dtsi"
- enet0: ethernet@b0000 {
- queue-group@b0000 {
- fsl,rx-bit-map = <0xff>;
- fsl,tx-bit-map = <0xff>;
- };
- };
-
/include/ "pq3-etsec2-1.dtsi"
- enet1: ethernet@b1000 {
- queue-group@b1000 {
- fsl,rx-bit-map = <0xff>;
- fsl,tx-bit-map = <0xff>;
- };
- };
-
/include/ "pq3-etsec2-2.dtsi"
- enet2: ethernet@b2000 {
- queue-group@b2000 {
- fsl,rx-bit-map = <0xff>;
- fsl,tx-bit-map = <0xff>;
- };
-
- };
global-utilities@e0000 {
compatible = "fsl,p1010-guts";
diff --git a/arch/powerpc/boot/dts/fsl/p2020si-post.dtsi b/arch/powerpc/boot/dts/fsl/p2020si-post.dtsi
index 884e01bcb243..81b9ab2119be 100644
--- a/arch/powerpc/boot/dts/fsl/p2020si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p2020si-post.dtsi
@@ -48,6 +48,7 @@
bus-range = <0 255>;
clock-frequency = <33333333>;
interrupts = <26 2 0 0>;
+ law_trgt_if = <2>;
pcie@0 {
reg = <0 0 0 0 0>;
@@ -76,6 +77,7 @@
bus-range = <0 255>;
clock-frequency = <33333333>;
interrupts = <25 2 0 0>;
+ law_trgt_if = <1>;
pcie@0 {
reg = <0 0 0 0 0>;
@@ -105,6 +107,7 @@
bus-range = <0 255>;
clock-frequency = <33333333>;
interrupts = <24 2 0 0>;
+ law_trgt_if = <0>;
pcie@0 {
reg = <0 0 0 0 0>;
@@ -198,4 +201,9 @@
reg = <0xe0000 0x1000>;
fsl,has-rstcr;
};
+
+ pmc: power@e0070 {
+ compatible = "fsl,mpc8548-pmc";
+ reg = <0xe0070 0x20>;
+ };
};
diff --git a/arch/powerpc/boot/dts/fsl/p2041si-post.dtsi b/arch/powerpc/boot/dts/fsl/p2041si-post.dtsi
index 872e4485dc3f..ddc018d42252 100644
--- a/arch/powerpc/boot/dts/fsl/p2041si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p2041si-post.dtsi
@@ -371,7 +371,23 @@
};
/include/ "qoriq-i2c-0.dtsi"
+ i2c@118000 {
+ fsl,i2c-erratum-a004447;
+ };
+
+ i2c@118100 {
+ fsl,i2c-erratum-a004447;
+ };
+
/include/ "qoriq-i2c-1.dtsi"
+ i2c@119000 {
+ fsl,i2c-erratum-a004447;
+ };
+
+ i2c@119100 {
+ fsl,i2c-erratum-a004447;
+ };
+
/include/ "qoriq-duart-0.dtsi"
/include/ "qoriq-duart-1.dtsi"
/include/ "qoriq-gpio-0.dtsi"
diff --git a/arch/powerpc/boot/dts/fsl/p4080ds.dts b/arch/powerpc/boot/dts/fsl/p4080ds.dts
index 65e20152e22f..969b32c4f2d5 100644
--- a/arch/powerpc/boot/dts/fsl/p4080ds.dts
+++ b/arch/powerpc/boot/dts/fsl/p4080ds.dts
@@ -125,11 +125,11 @@
i2c@118100 {
eeprom@51 {
- compatible = "atmel,24c256";
+ compatible = "atmel,spd";
reg = <0x51>;
};
eeprom@52 {
- compatible = "atmel,24c256";
+ compatible = "atmel,spd";
reg = <0x52>;
};
rtc@68 {
@@ -143,6 +143,45 @@
};
};
+ i2c@118000 {
+ zl2006@21 {
+ compatible = "zl2006";
+ reg = <0x21>;
+ };
+ zl2006@22 {
+ compatible = "zl2006";
+ reg = <0x22>;
+ };
+ zl2006@23 {
+ compatible = "zl2006";
+ reg = <0x23>;
+ };
+ zl2006@24 {
+ compatible = "zl2006";
+ reg = <0x24>;
+ };
+ eeprom@50 {
+ compatible = "atmel,24c64";
+ reg = <0x50>;
+ };
+ eeprom@55 {
+ compatible = "atmel,24c64";
+ reg = <0x55>;
+ };
+ eeprom@56 {
+ compatible = "atmel,24c64";
+ reg = <0x56>;
+ };
+ eeprom@57 {
+ compatible = "atmel,24c02";
+ reg = <0x57>;
+ };
+ };
+
+ i2c@119100 {
+ /* 0x6E: ICS9FG108 */
+ };
+
usb0: usb@210000 {
phy_type = "ulpi";
};
diff --git a/arch/powerpc/boot/dts/fsl/qoriq-fman3l-0.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-fman3l-0.dtsi
index c90702b04a53..48e5cd61599c 100644
--- a/arch/powerpc/boot/dts/fsl/qoriq-fman3l-0.dtsi
+++ b/arch/powerpc/boot/dts/fsl/qoriq-fman3l-0.dtsi
@@ -79,6 +79,7 @@ fman0: fman@400000 {
#size-cells = <0>;
compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
reg = <0xfc000 0x1000>;
+ fsl,erratum-a009885;
};
xmdio0: mdio@fd000 {
@@ -86,6 +87,7 @@ fman0: fman@400000 {
#size-cells = <0>;
compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
reg = <0xfd000 0x1000>;
+ fsl,erratum-a009885;
};
};
diff --git a/arch/powerpc/boot/dts/fsl/sbc8641d.dts b/arch/powerpc/boot/dts/fsl/sbc8641d.dts
deleted file mode 100644
index 3dca10acc161..000000000000
--- a/arch/powerpc/boot/dts/fsl/sbc8641d.dts
+++ /dev/null
@@ -1,176 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * SBC8641D Device Tree Source
- *
- * Copyright 2008 Wind River Systems Inc.
- *
- * Paul Gortmaker (see MAINTAINERS for contact information)
- *
- * Based largely on the mpc8641_hpcn.dts by Freescale Semiconductor Inc.
- */
-
-/include/ "mpc8641si-pre.dtsi"
-
-/ {
- model = "SBC8641D";
- compatible = "wind,sbc8641";
-
- memory {
- device_type = "memory";
- reg = <0x00000000 0x20000000>; // 512M at 0x0
- };
-
- lbc: localbus@f8005000 {
- reg = <0xf8005000 0x1000>;
-
- ranges = <0 0 0xff000000 0x01000000 // 16MB Boot flash
- 1 0 0xf0000000 0x00010000 // 64KB EEPROM
- 2 0 0xf1000000 0x00100000 // EPLD (1MB)
- 3 0 0xe0000000 0x04000000 // 64MB LB SDRAM (CS3)
- 4 0 0xe4000000 0x04000000 // 64MB LB SDRAM (CS4)
- 6 0 0xf4000000 0x00100000 // LCD display (1MB)
- 7 0 0xe8000000 0x04000000>; // 64MB OneNAND
-
- flash@0,0 {
- compatible = "cfi-flash";
- reg = <0 0 0x01000000>;
- bank-width = <2>;
- device-width = <2>;
- #address-cells = <1>;
- #size-cells = <1>;
- partition@0 {
- label = "dtb";
- reg = <0x00000000 0x00100000>;
- read-only;
- };
- partition@300000 {
- label = "kernel";
- reg = <0x00100000 0x00400000>;
- read-only;
- };
- partition@400000 {
- label = "fs";
- reg = <0x00500000 0x00a00000>;
- };
- partition@700000 {
- label = "firmware";
- reg = <0x00f00000 0x00100000>;
- read-only;
- };
- };
-
- epld@2,0 {
- compatible = "wrs,epld-localbus";
- #address-cells = <2>;
- #size-cells = <1>;
- reg = <2 0 0x100000>;
- ranges = <0 0 5 0 1 // User switches
- 1 0 5 1 1 // Board ID/Rev
- 3 0 5 3 1>; // LEDs
- };
- };
-
- soc: soc@f8000000 {
- ranges = <0x00000000 0xf8000000 0x00100000>;
-
- enet0: ethernet@24000 {
- tbi-handle = <&tbi0>;
- phy-handle = <&phy0>;
- phy-connection-type = "rgmii-id";
- };
-
- mdio@24520 {
- phy0: ethernet-phy@1f {
- reg = <0x1f>;
- };
- phy1: ethernet-phy@0 {
- reg = <0>;
- };
- phy2: ethernet-phy@1 {
- reg = <1>;
- };
- phy3: ethernet-phy@2 {
- reg = <2>;
- };
- tbi0: tbi-phy@11 {
- reg = <0x11>;
- device_type = "tbi-phy";
- };
- };
-
- enet1: ethernet@25000 {
- tbi-handle = <&tbi1>;
- phy-handle = <&phy1>;
- phy-connection-type = "rgmii-id";
- };
-
- mdio@25520 {
- tbi1: tbi-phy@11 {
- reg = <0x11>;
- device_type = "tbi-phy";
- };
- };
-
- enet2: ethernet@26000 {
- tbi-handle = <&tbi2>;
- phy-handle = <&phy2>;
- phy-connection-type = "rgmii-id";
- };
-
- mdio@26520 {
- tbi2: tbi-phy@11 {
- reg = <0x11>;
- device_type = "tbi-phy";
- };
- };
-
- enet3: ethernet@27000 {
- tbi-handle = <&tbi3>;
- phy-handle = <&phy3>;
- phy-connection-type = "rgmii-id";
- };
-
- mdio@27520 {
- tbi3: tbi-phy@11 {
- reg = <0x11>;
- device_type = "tbi-phy";
- };
- };
- };
-
- pci0: pcie@f8008000 {
- reg = <0xf8008000 0x1000>;
- ranges = <0x02000000 0x0 0x80000000 0x80000000 0x0 0x20000000
- 0x01000000 0x0 0x00000000 0xe2000000 0x0 0x00100000>;
- interrupt-map-mask = <0xff00 0 0 7>;
-
- pcie@0 {
- ranges = <0x02000000 0x0 0x80000000
- 0x02000000 0x0 0x80000000
- 0x0 0x20000000
-
- 0x01000000 0x0 0x00000000
- 0x01000000 0x0 0x00000000
- 0x0 0x00100000>;
- };
-
- };
-
- pci1: pcie@f8009000 {
- reg = <0xf8009000 0x1000>;
- ranges = <0x02000000 0x0 0xa0000000 0xa0000000 0x0 0x20000000
- 0x01000000 0x0 0x00000000 0xe3000000 0x0 0x00100000>;
-
- pcie@0 {
- ranges = <0x02000000 0x0 0xa0000000
- 0x02000000 0x0 0xa0000000
- 0x0 0x20000000
-
- 0x01000000 0x0 0x00000000
- 0x01000000 0x0 0x00000000
- 0x0 0x00100000>;
- };
- };
-};
-
-/include/ "mpc8641si-post.dtsi"
diff --git a/arch/powerpc/boot/dts/fsl/t1023rdb.dts b/arch/powerpc/boot/dts/fsl/t1023rdb.dts
index 5ba6fbfca274..f82f85c65964 100644
--- a/arch/powerpc/boot/dts/fsl/t1023rdb.dts
+++ b/arch/powerpc/boot/dts/fsl/t1023rdb.dts
@@ -154,7 +154,7 @@
fm1mac3: ethernet@e4000 {
phy-handle = <&sgmii_aqr_phy3>;
- phy-connection-type = "sgmii-2500";
+ phy-connection-type = "2500base-x";
sleep = <&rcpm 0x20000000>;
};
diff --git a/arch/powerpc/boot/dts/fsl/t1024rdb.dts b/arch/powerpc/boot/dts/fsl/t1024rdb.dts
index 73a645324bc1..dbcd31cc35dc 100644
--- a/arch/powerpc/boot/dts/fsl/t1024rdb.dts
+++ b/arch/powerpc/boot/dts/fsl/t1024rdb.dts
@@ -161,7 +161,6 @@
rtc@68 {
compatible = "dallas,ds1339";
reg = <0x68>;
- interrupts = <0x1 0x1 0 0>;
};
};
diff --git a/arch/powerpc/boot/dts/fsl/t1040rdb-rev-a.dts b/arch/powerpc/boot/dts/fsl/t1040rdb-rev-a.dts
new file mode 100644
index 000000000000..73f8c998c64d
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/t1040rdb-rev-a.dts
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * T1040RDB-REV-A Device Tree Source
+ *
+ * Copyright 2014 - 2015 Freescale Semiconductor Inc.
+ *
+ */
+
+#include "t1040rdb.dts"
+
+/ {
+ model = "fsl,T1040RDB-REV-A";
+ compatible = "fsl,T1040RDB-REV-A";
+};
+
+&seville_port0 {
+ label = "ETH5";
+};
+
+&seville_port2 {
+ label = "ETH7";
+};
+
+&seville_port4 {
+ label = "ETH9";
+};
+
+&seville_port6 {
+ label = "ETH11";
+};
diff --git a/arch/powerpc/boot/dts/fsl/t1040rdb.dts b/arch/powerpc/boot/dts/fsl/t1040rdb.dts
index 65ff34c49025..b6733e7e6580 100644
--- a/arch/powerpc/boot/dts/fsl/t1040rdb.dts
+++ b/arch/powerpc/boot/dts/fsl/t1040rdb.dts
@@ -64,6 +64,40 @@
phy_sgmii_2: ethernet-phy@3 {
reg = <0x03>;
};
+
+ /* VSC8514 QSGMII PHY */
+ phy_qsgmii_0: ethernet-phy@4 {
+ reg = <0x4>;
+ };
+
+ phy_qsgmii_1: ethernet-phy@5 {
+ reg = <0x5>;
+ };
+
+ phy_qsgmii_2: ethernet-phy@6 {
+ reg = <0x6>;
+ };
+
+ phy_qsgmii_3: ethernet-phy@7 {
+ reg = <0x7>;
+ };
+
+ /* VSC8514 QSGMII PHY */
+ phy_qsgmii_4: ethernet-phy@8 {
+ reg = <0x8>;
+ };
+
+ phy_qsgmii_5: ethernet-phy@9 {
+ reg = <0x9>;
+ };
+
+ phy_qsgmii_6: ethernet-phy@a {
+ reg = <0xa>;
+ };
+
+ phy_qsgmii_7: ethernet-phy@b {
+ reg = <0xb>;
+ };
};
};
};
@@ -76,3 +110,76 @@
};
#include "t1040si-post.dtsi"
+
+&seville_switch {
+ status = "okay";
+};
+
+&seville_port0 {
+ managed = "in-band-status";
+ phy-handle = <&phy_qsgmii_0>;
+ phy-mode = "qsgmii";
+ label = "ETH3";
+ status = "okay";
+};
+
+&seville_port1 {
+ managed = "in-band-status";
+ phy-handle = <&phy_qsgmii_1>;
+ phy-mode = "qsgmii";
+ label = "ETH4";
+ status = "okay";
+};
+
+&seville_port2 {
+ managed = "in-band-status";
+ phy-handle = <&phy_qsgmii_2>;
+ phy-mode = "qsgmii";
+ label = "ETH5";
+ status = "okay";
+};
+
+&seville_port3 {
+ managed = "in-band-status";
+ phy-handle = <&phy_qsgmii_3>;
+ phy-mode = "qsgmii";
+ label = "ETH6";
+ status = "okay";
+};
+
+&seville_port4 {
+ managed = "in-band-status";
+ phy-handle = <&phy_qsgmii_4>;
+ phy-mode = "qsgmii";
+ label = "ETH7";
+ status = "okay";
+};
+
+&seville_port5 {
+ managed = "in-band-status";
+ phy-handle = <&phy_qsgmii_5>;
+ phy-mode = "qsgmii";
+ label = "ETH8";
+ status = "okay";
+};
+
+&seville_port6 {
+ managed = "in-band-status";
+ phy-handle = <&phy_qsgmii_6>;
+ phy-mode = "qsgmii";
+ label = "ETH9";
+ status = "okay";
+};
+
+&seville_port7 {
+ managed = "in-band-status";
+ phy-handle = <&phy_qsgmii_7>;
+ phy-mode = "qsgmii";
+ label = "ETH10";
+ status = "okay";
+};
+
+&seville_port8 {
+ ethernet = <&enet0>;
+ status = "okay";
+};
diff --git a/arch/powerpc/boot/dts/fsl/t1040si-post.dtsi b/arch/powerpc/boot/dts/fsl/t1040si-post.dtsi
index 315d0557eefc..f58eb820eb5e 100644
--- a/arch/powerpc/boot/dts/fsl/t1040si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/t1040si-post.dtsi
@@ -628,6 +628,84 @@
status = "disabled";
};
};
+
+ seville_switch: ethernet-switch@800000 {
+ compatible = "mscc,vsc9953-switch";
+ reg = <0x800000 0x290000>;
+ interrupts = <26 2 0 0>;
+ interrupt-names = "xtr";
+ little-endian;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ seville_port0: port@0 {
+ reg = <0>;
+ status = "disabled";
+ };
+
+ seville_port1: port@1 {
+ reg = <1>;
+ status = "disabled";
+ };
+
+ seville_port2: port@2 {
+ reg = <2>;
+ status = "disabled";
+ };
+
+ seville_port3: port@3 {
+ reg = <3>;
+ status = "disabled";
+ };
+
+ seville_port4: port@4 {
+ reg = <4>;
+ status = "disabled";
+ };
+
+ seville_port5: port@5 {
+ reg = <5>;
+ status = "disabled";
+ };
+
+ seville_port6: port@6 {
+ reg = <6>;
+ status = "disabled";
+ };
+
+ seville_port7: port@7 {
+ reg = <7>;
+ status = "disabled";
+ };
+
+ seville_port8: port@8 {
+ reg = <8>;
+ phy-mode = "internal";
+ status = "disabled";
+
+ fixed-link {
+ speed = <2500>;
+ full-duplex;
+ };
+ };
+
+ seville_port9: port@9 {
+ reg = <9>;
+ phy-mode = "internal";
+ status = "disabled";
+
+ fixed-link {
+ speed = <2500>;
+ full-duplex;
+ };
+ };
+ };
+ };
};
&qe {
diff --git a/arch/powerpc/boot/dts/fsl/t104xrdb.dtsi b/arch/powerpc/boot/dts/fsl/t104xrdb.dtsi
index 099a598c74c0..bfe1ed5be337 100644
--- a/arch/powerpc/boot/dts/fsl/t104xrdb.dtsi
+++ b/arch/powerpc/boot/dts/fsl/t104xrdb.dtsi
@@ -139,12 +139,12 @@
fman@400000 {
ethernet@e6000 {
phy-handle = <&phy_rgmii_0>;
- phy-connection-type = "rgmii";
+ phy-connection-type = "rgmii-id";
};
ethernet@e8000 {
phy-handle = <&phy_rgmii_1>;
- phy-connection-type = "rgmii";
+ phy-connection-type = "rgmii-id";
};
mdio0: mdio@fc000 {
diff --git a/arch/powerpc/boot/dts/fsl/t4240rdb.dts b/arch/powerpc/boot/dts/fsl/t4240rdb.dts
index a56a705d41f7..145896f2eef6 100644
--- a/arch/powerpc/boot/dts/fsl/t4240rdb.dts
+++ b/arch/powerpc/boot/dts/fsl/t4240rdb.dts
@@ -144,7 +144,6 @@
rtc@68 {
compatible = "dallas,ds1374";
reg = <0x68>;
- interrupts = <0x1 0x1 0 0>;
};
};
diff --git a/arch/powerpc/boot/dts/glacier.dts b/arch/powerpc/boot/dts/glacier.dts
index a7a802f4ffdd..e84ff1afb58c 100644
--- a/arch/powerpc/boot/dts/glacier.dts
+++ b/arch/powerpc/boot/dts/glacier.dts
@@ -489,7 +489,7 @@
interrupt-map = < 0x0 0x0 0x0 0x0 &UIC1 0x0 0x8 >;
};
- PCIE0: pciex@d00000000 {
+ PCIE0: pcie@d00000000 {
device_type = "pci";
#interrupt-cells = <1>;
#size-cells = <2>;
@@ -531,7 +531,7 @@
0x0 0x0 0x0 0x4 &UIC3 0xf 0x4 /* swizzled int D */>;
};
- PCIE1: pciex@d20000000 {
+ PCIE1: pcie@d20000000 {
device_type = "pci";
#interrupt-cells = <1>;
#size-cells = <2>;
diff --git a/arch/powerpc/boot/dts/haleakala.dts b/arch/powerpc/boot/dts/haleakala.dts
index cb16dad43c92..f81ce8786d59 100644
--- a/arch/powerpc/boot/dts/haleakala.dts
+++ b/arch/powerpc/boot/dts/haleakala.dts
@@ -237,7 +237,7 @@
};
};
- PCIE0: pciex@a0000000 {
+ PCIE0: pcie@a0000000 {
device_type = "pci";
#interrupt-cells = <1>;
#size-cells = <2>;
diff --git a/arch/powerpc/boot/dts/icon.dts b/arch/powerpc/boot/dts/icon.dts
index 2e6e3a7b2604..4fd7a4fbb4fb 100644
--- a/arch/powerpc/boot/dts/icon.dts
+++ b/arch/powerpc/boot/dts/icon.dts
@@ -197,13 +197,6 @@
reg = <0x00fa0000 0x00060000>;
};
};
-
- SysACE_CompactFlash: sysace@1,0 {
- compatible = "xlnx,sysace";
- interrupt-parent = <&UIC2>;
- interrupts = <24 0x4>;
- reg = <0x00000001 0x00000000 0x10000>;
- };
};
UART0: serial@f0000200 {
@@ -315,7 +308,7 @@
interrupt-map = <0x0 0x0 0x0 0x0 &UIC1 19 0x8>;
};
- PCIE0: pciex@d00000000 {
+ PCIE0: pcie@d00000000 {
device_type = "pci";
#interrupt-cells = <1>;
#size-cells = <2>;
@@ -356,7 +349,7 @@
0x0 0x0 0x0 0x4 &UIC3 0x3 0x4 /* swizzled int D */>;
};
- PCIE1: pciex@d20000000 {
+ PCIE1: pcie@d20000000 {
device_type = "pci";
#interrupt-cells = <1>;
#size-cells = <2>;
diff --git a/arch/powerpc/boot/dts/katmai.dts b/arch/powerpc/boot/dts/katmai.dts
index 02629e119b87..4262b2bbd6de 100644
--- a/arch/powerpc/boot/dts/katmai.dts
+++ b/arch/powerpc/boot/dts/katmai.dts
@@ -319,7 +319,7 @@
>;
};
- PCIE0: pciex@d00000000 {
+ PCIE0: pcie@d00000000 {
device_type = "pci";
#interrupt-cells = <1>;
#size-cells = <2>;
@@ -360,7 +360,7 @@
0x0 0x0 0x0 0x4 &UIC3 0x3 0x4 /* swizzled int D */>;
};
- PCIE1: pciex@d20000000 {
+ PCIE1: pcie@d20000000 {
device_type = "pci";
#interrupt-cells = <1>;
#size-cells = <2>;
@@ -401,7 +401,7 @@
0x0 0x0 0x0 0x4 &UIC3 0x7 0x4 /* swizzled int D */>;
};
- PCIE2: pciex@d40000000 {
+ PCIE2: pcie@d40000000 {
device_type = "pci";
#interrupt-cells = <1>;
#size-cells = <2>;
@@ -442,24 +442,6 @@
0x0 0x0 0x0 0x4 &UIC3 0xb 0x4 /* swizzled int D */>;
};
- MSI: ppc4xx-msi@400300000 {
- compatible = "amcc,ppc4xx-msi", "ppc4xx-msi";
- reg = < 0x4 0x00300000 0x100>;
- sdr-base = <0x3B0>;
- msi-data = <0x00000000>;
- msi-mask = <0x44440000>;
- interrupt-count = <3>;
- interrupts =<0 1 2 3>;
- interrupt-parent = <&UIC0>;
- #interrupt-cells = <1>;
- #address-cells = <0>;
- #size-cells = <0>;
- interrupt-map = <0 &UIC0 0xC 1
- 1 &UIC0 0x0D 1
- 2 &UIC0 0x0E 1
- 3 &UIC0 0x0F 1>;
- };
-
I2O: i2o@400100000 {
compatible = "ibm,i2o-440spe";
reg = <0x00000004 0x00100000 0x100>;
diff --git a/arch/powerpc/boot/dts/kilauea.dts b/arch/powerpc/boot/dts/kilauea.dts
index 2a3413221cc1..c07a7525a72c 100644
--- a/arch/powerpc/boot/dts/kilauea.dts
+++ b/arch/powerpc/boot/dts/kilauea.dts
@@ -322,7 +322,7 @@
};
};
- PCIE0: pciex@a0000000 {
+ PCIE0: pcie@a0000000 {
device_type = "pci";
#interrupt-cells = <1>;
#size-cells = <2>;
@@ -363,7 +363,7 @@
0x0 0x0 0x0 0x4 &UIC2 0x3 0x4 /* swizzled int D */>;
};
- PCIE1: pciex@c0000000 {
+ PCIE1: pcie@c0000000 {
device_type = "pci";
#interrupt-cells = <1>;
#size-cells = <2>;
@@ -403,33 +403,5 @@
0x0 0x0 0x0 0x3 &UIC2 0xd 0x4 /* swizzled int C */
0x0 0x0 0x0 0x4 &UIC2 0xe 0x4 /* swizzled int D */>;
};
-
- MSI: ppc4xx-msi@C10000000 {
- compatible = "amcc,ppc4xx-msi", "ppc4xx-msi";
- reg = <0xEF620000 0x100>;
- sdr-base = <0x4B0>;
- msi-data = <0x00000000>;
- msi-mask = <0x44440000>;
- interrupt-count = <12>;
- interrupts = <0 1 2 3 4 5 6 7 8 9 0xA 0xB 0xC 0xD>;
- interrupt-parent = <&UIC2>;
- #interrupt-cells = <1>;
- #address-cells = <0>;
- #size-cells = <0>;
- interrupt-map = <0 &UIC2 0x10 1
- 1 &UIC2 0x11 1
- 2 &UIC2 0x12 1
- 2 &UIC2 0x13 1
- 2 &UIC2 0x14 1
- 2 &UIC2 0x15 1
- 2 &UIC2 0x16 1
- 2 &UIC2 0x17 1
- 2 &UIC2 0x18 1
- 2 &UIC2 0x19 1
- 2 &UIC2 0x1A 1
- 2 &UIC2 0x1B 1
- 2 &UIC2 0x1C 1
- 3 &UIC2 0x1D 1>;
- };
};
};
diff --git a/arch/powerpc/boot/dts/ksi8560.dts b/arch/powerpc/boot/dts/ksi8560.dts
index fe6c17c8812a..37a7eb576d02 100644
--- a/arch/powerpc/boot/dts/ksi8560.dts
+++ b/arch/powerpc/boot/dts/ksi8560.dts
@@ -14,6 +14,8 @@
/dts-v1/;
+/include/ "fsl/e500v1_power_isa.dtsi"
+
/ {
model = "KSI8560";
compatible = "emerson,KSI8560";
diff --git a/arch/powerpc/boot/dts/lite5200.dts b/arch/powerpc/boot/dts/lite5200.dts
index cb2782dd6132..b9d8487813b4 100644
--- a/arch/powerpc/boot/dts/lite5200.dts
+++ b/arch/powerpc/boot/dts/lite5200.dts
@@ -32,7 +32,7 @@
};
};
- memory {
+ memory@0 {
device_type = "memory";
reg = <0x00000000 0x04000000>; // 64MB
};
@@ -283,9 +283,9 @@
clock-frequency = <0>; // From boot loader
interrupts = <2 8 0 2 9 0 2 10 0>;
bus-range = <0 0>;
- ranges = <0x42000000 0 0x80000000 0x80000000 0 0x20000000
- 0x02000000 0 0xa0000000 0xa0000000 0 0x10000000
- 0x01000000 0 0x00000000 0xb0000000 0 0x01000000>;
+ ranges = <0x42000000 0 0x80000000 0x80000000 0 0x20000000>,
+ <0x02000000 0 0xa0000000 0xa0000000 0 0x10000000>,
+ <0x01000000 0 0x00000000 0xb0000000 0 0x01000000>;
};
localbus {
diff --git a/arch/powerpc/boot/dts/lite5200b.dts b/arch/powerpc/boot/dts/lite5200b.dts
index 2b86c81f9048..7e2d91c7cb66 100644
--- a/arch/powerpc/boot/dts/lite5200b.dts
+++ b/arch/powerpc/boot/dts/lite5200b.dts
@@ -31,7 +31,7 @@
led4 { gpios = <&gpio_simple 2 1>; };
};
- memory {
+ memory@0 {
reg = <0x00000000 0x10000000>; // 256MB
};
@@ -116,9 +116,9 @@
clock-frequency = <0>; // From boot loader
interrupts = <2 8 0 2 9 0 2 10 0>;
bus-range = <0 0>;
- ranges = <0x42000000 0 0x80000000 0x80000000 0 0x20000000
- 0x02000000 0 0xa0000000 0xa0000000 0 0x10000000
- 0x01000000 0 0x00000000 0xb0000000 0 0x01000000>;
+ ranges = <0x42000000 0 0x80000000 0x80000000 0 0x20000000>,
+ <0x02000000 0 0xa0000000 0xa0000000 0 0x10000000>,
+ <0x01000000 0 0x00000000 0xb0000000 0 0x01000000>;
};
localbus {
diff --git a/arch/powerpc/boot/dts/makalu.dts b/arch/powerpc/boot/dts/makalu.dts
index bf8fe1629392..c473cd911bca 100644
--- a/arch/powerpc/boot/dts/makalu.dts
+++ b/arch/powerpc/boot/dts/makalu.dts
@@ -268,7 +268,7 @@
};
};
- PCIE0: pciex@a0000000 {
+ PCIE0: pcie@a0000000 {
device_type = "pci";
#interrupt-cells = <1>;
#size-cells = <2>;
@@ -309,7 +309,7 @@
0x0 0x0 0x0 0x4 &UIC2 0x3 0x4 /* swizzled int D */>;
};
- PCIE1: pciex@c0000000 {
+ PCIE1: pcie@c0000000 {
device_type = "pci";
#interrupt-cells = <1>;
#size-cells = <2>;
diff --git a/arch/powerpc/boot/dts/media5200.dts b/arch/powerpc/boot/dts/media5200.dts
index 61cae9dcddef..96524ede16cd 100644
--- a/arch/powerpc/boot/dts/media5200.dts
+++ b/arch/powerpc/boot/dts/media5200.dts
@@ -32,7 +32,7 @@
};
};
- memory {
+ memory@0 {
reg = <0x00000000 0x08000000>; // 128MB RAM
};
@@ -96,9 +96,9 @@
0xe000 0 0 1 &media5200_fpga 0 5 // CoralIP
>;
- ranges = <0x42000000 0 0x80000000 0x80000000 0 0x20000000
- 0x02000000 0 0xa0000000 0xa0000000 0 0x10000000
- 0x01000000 0 0x00000000 0xb0000000 0 0x01000000>;
+ ranges = <0x42000000 0 0x80000000 0x80000000 0 0x20000000>,
+ <0x02000000 0 0xa0000000 0xa0000000 0 0x10000000>,
+ <0x01000000 0 0x00000000 0xb0000000 0 0x01000000>;
interrupt-parent = <&mpc5200_pic>;
};
diff --git a/arch/powerpc/boot/dts/mgcoge.dts b/arch/powerpc/boot/dts/mgcoge.dts
index 7de068991bde..9cefed207234 100644
--- a/arch/powerpc/boot/dts/mgcoge.dts
+++ b/arch/powerpc/boot/dts/mgcoge.dts
@@ -225,13 +225,6 @@
interrupts = <2 8>;
interrupt-parent = <&PIC>;
cs-gpios = < &cpm2_pio_d 19 0>;
- #address-cells = <1>;
- #size-cells = <0>;
- ds3106@1 {
- compatible = "gen,spidev";
- reg = <0>;
- spi-max-frequency = <8000000>;
- };
};
};
diff --git a/arch/powerpc/boot/dts/microwatt.dts b/arch/powerpc/boot/dts/microwatt.dts
new file mode 100644
index 000000000000..b69db1d275cd
--- /dev/null
+++ b/arch/powerpc/boot/dts/microwatt.dts
@@ -0,0 +1,152 @@
+/dts-v1/;
+
+/ {
+ #size-cells = <0x02>;
+ #address-cells = <0x02>;
+ model-name = "microwatt";
+ compatible = "microwatt-soc";
+
+ aliases {
+ serial0 = &UART0;
+ };
+
+ reserved-memory {
+ #size-cells = <0x02>;
+ #address-cells = <0x02>;
+ ranges;
+ };
+
+ memory@0 {
+ device_type = "memory";
+ reg = <0x00000000 0x00000000 0x00000000 0x10000000>;
+ };
+
+ cpus {
+ #size-cells = <0x00>;
+ #address-cells = <0x01>;
+
+ ibm,powerpc-cpu-features {
+ display-name = "Microwatt";
+ isa = <3000>;
+ device_type = "cpu-features";
+ compatible = "ibm,powerpc-cpu-features";
+
+ mmu-radix {
+ isa = <3000>;
+ usable-privilege = <2>;
+ };
+
+ little-endian {
+ isa = <2050>;
+ usable-privilege = <3>;
+ hwcap-bit-nr = <1>;
+ };
+
+ cache-inhibited-large-page {
+ isa = <2040>;
+ usable-privilege = <2>;
+ };
+
+ fixed-point-v3 {
+ isa = <3000>;
+ usable-privilege = <3>;
+ };
+
+ no-execute {
+ isa = <2010>;
+ usable-privilege = <2>;
+ };
+
+ floating-point {
+ hwcap-bit-nr = <27>;
+ isa = <0>;
+ usable-privilege = <3>;
+ };
+ };
+
+ PowerPC,Microwatt@0 {
+ i-cache-sets = <2>;
+ ibm,dec-bits = <64>;
+ reservation-granule-size = <64>;
+ clock-frequency = <100000000>;
+ timebase-frequency = <100000000>;
+ i-tlb-sets = <1>;
+ ibm,ppc-interrupt-server#s = <0>;
+ i-cache-block-size = <64>;
+ d-cache-block-size = <64>;
+ d-cache-sets = <2>;
+ i-tlb-size = <64>;
+ cpu-version = <0x990000>;
+ status = "okay";
+ i-cache-size = <0x1000>;
+ ibm,processor-radix-AP-encodings = <0x0c 0xa0000010 0x20000015 0x4000001e>;
+ tlb-size = <0>;
+ tlb-sets = <0>;
+ device_type = "cpu";
+ d-tlb-size = <128>;
+ d-tlb-sets = <2>;
+ reg = <0>;
+ general-purpose;
+ 64-bit;
+ d-cache-size = <0x1000>;
+ ibm,chip-id = <0>;
+ ibm,mmu-lpid-bits = <12>;
+ ibm,mmu-pid-bits = <20>;
+ };
+ };
+
+ soc@c0000000 {
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ interrupt-parent = <&ICS>;
+
+ ranges = <0 0 0xc0000000 0x40000000>;
+
+ interrupt-controller@4000 {
+ compatible = "openpower,xics-presentation", "ibm,ppc-xicp";
+ ibm,interrupt-server-ranges = <0x0 0x1>;
+ reg = <0x4000 0x100>;
+ };
+
+ ICS: interrupt-controller@5000 {
+ compatible = "openpower,xics-sources";
+ interrupt-controller;
+ interrupt-ranges = <0x10 0x10>;
+ reg = <0x5000 0x100>;
+ #address-cells = <0>;
+ #size-cells = <0>;
+ #interrupt-cells = <2>;
+ };
+
+ UART0: serial@2000 {
+ device_type = "serial";
+ compatible = "ns16550";
+ reg = <0x2000 0x8>;
+ clock-frequency = <100000000>;
+ current-speed = <115200>;
+ reg-shift = <2>;
+ fifo-size = <16>;
+ interrupts = <0x10 0x1>;
+ };
+
+ ethernet@8020000 {
+ compatible = "litex,liteeth";
+ reg = <0x8021000 0x100
+ 0x8020800 0x100
+ 0x8030000 0x2000>;
+ reg-names = "mac", "mido", "buffer";
+ litex,rx-slots = <2>;
+ litex,tx-slots = <2>;
+ litex,slot-size = <0x800>;
+ interrupts = <0x11 0x1>;
+ };
+ };
+
+ chosen {
+ bootargs = "";
+ ibm,architecture-vec-5 = [19 00 10 00 00 00 00 00 00 00 00 00 00 00 00 00
+ 00 00 00 00 00 00 00 00 40 00 40];
+ stdout-path = &UART0;
+ };
+};
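As a sanity check on the new Microwatt serial@2000 node (arithmetic only, not part of the patch itself): an ns16550 programs its baud divisor as clock / (16 x baud), so with clock-frequency = <100000000> and current-speed = <115200> the driver ends up with roughly 100000000 / (16 x 115200) ≈ 54, well within 16550 tolerance; reg-shift = <2> simply means the byte-wide 16550 registers sit on a 4-byte stride on the SoC bus.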
diff --git a/arch/powerpc/boot/dts/mpc5200b.dtsi b/arch/powerpc/boot/dts/mpc5200b.dtsi
index 648fe31795f4..ffa82c7e1055 100644
--- a/arch/powerpc/boot/dts/mpc5200b.dtsi
+++ b/arch/powerpc/boot/dts/mpc5200b.dtsi
@@ -33,7 +33,7 @@
};
};
- memory: memory {
+ memory: memory@0 {
device_type = "memory";
reg = <0x00000000 0x04000000>; // 64MB
};
@@ -276,7 +276,9 @@
clock-frequency = <0>; // From boot loader
interrupts = <2 8 0 2 9 0 2 10 0>;
bus-range = <0 0>;
- // ranges = need to add
+ ranges = <0x42000000 0 0x80000000 0x80000000 0 0x10000000>,
+ <0x02000000 0 0x90000000 0x90000000 0 0x10000000>,
+ <0x01000000 0 0x00000000 0xa0000000 0 0x01000000>;
};
localbus: localbus {
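The ranges rewrites above, and the one just added to mpc5200b.dtsi, all use the standard 6-cell PCI encoding: a 3-cell PCI address whose high cell carries the space type and flags, a 1-cell CPU address, and a 2-cell size. The fragment below is a hypothetical, self-contained sketch (made-up unit address and window sizes, not taken from the patch) that spells the layout out:

/dts-v1/;
/ {
	#address-cells = <1>;
	#size-cells = <1>;

	/* Hypothetical PCI host bridge, shown only to illustrate the
	 * <PCI phys.hi phys.mid phys.lo>  <CPU addr>  <size hi> <size lo>
	 * layout used by the ranges rewrites in this patch.
	 */
	pci@f0000d00 {
		device_type = "pci";
		#address-cells = <3>;	/* PCI address: phys.hi/mid/lo */
		#size-cells = <2>;	/* 64-bit size */
		reg = <0xf0000d00 0x100>;
		ranges = <0x42000000 0 0x80000000  0x80000000  0 0x20000000>, /* prefetchable MEM, 512 MB */
			 <0x02000000 0 0xa0000000  0xa0000000  0 0x10000000>, /* 32-bit MEM, 256 MB */
			 <0x01000000 0 0x00000000  0xb0000000  0 0x01000000>; /* PCI I/O, 16 MB */
	};
};

In the high cell, bits 25:24 select the space (01 = I/O, 10 = 32-bit memory) and bit 30 marks a memory window prefetchable, which is why the values 0x42000000, 0x02000000 and 0x01000000 recur across these boards.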
diff --git a/arch/powerpc/boot/dts/mucmc52.dts b/arch/powerpc/boot/dts/mucmc52.dts
index c6c66306308d..e88a7bd4034d 100644
--- a/arch/powerpc/boot/dts/mucmc52.dts
+++ b/arch/powerpc/boot/dts/mucmc52.dts
@@ -106,9 +106,9 @@
0x8000 0 0 3 &mpc5200_pic 0 2 3
0x8000 0 0 4 &mpc5200_pic 0 1 3
>;
- ranges = <0x42000000 0 0x60000000 0x60000000 0 0x10000000
- 0x02000000 0 0x90000000 0x90000000 0 0x10000000
- 0x01000000 0 0x00000000 0xa0000000 0 0x01000000>;
+ ranges = <0x42000000 0 0x60000000 0x60000000 0 0x10000000>,
+ <0x02000000 0 0x90000000 0x90000000 0 0x10000000>,
+ <0x01000000 0 0x00000000 0xa0000000 0 0x01000000>;
};
localbus {
diff --git a/arch/powerpc/boot/dts/o2d.dts b/arch/powerpc/boot/dts/o2d.dts
index 24a46f65e529..e0a8d3034417 100644
--- a/arch/powerpc/boot/dts/o2d.dts
+++ b/arch/powerpc/boot/dts/o2d.dts
@@ -12,7 +12,7 @@
model = "ifm,o2d";
compatible = "ifm,o2d";
- memory {
+ memory@0 {
reg = <0x00000000 0x08000000>; // 128MB
};
diff --git a/arch/powerpc/boot/dts/o2d.dtsi b/arch/powerpc/boot/dts/o2d.dtsi
index 6661955a2be4..7e52509fa506 100644
--- a/arch/powerpc/boot/dts/o2d.dtsi
+++ b/arch/powerpc/boot/dts/o2d.dtsi
@@ -19,7 +19,7 @@
model = "ifm,o2d";
compatible = "ifm,o2d";
- memory {
+ memory@0 {
reg = <0x00000000 0x04000000>; // 64MB
};
@@ -34,12 +34,6 @@
#address-cells = <1>;
#size-cells = <0>;
cell-index = <0>;
-
- spidev@0 {
- compatible = "spidev";
- spi-max-frequency = <250000>;
- reg = <0>;
- };
};
psc@2200 { // PSC2
diff --git a/arch/powerpc/boot/dts/o2dnt2.dts b/arch/powerpc/boot/dts/o2dnt2.dts
index eeba7f5507d5..c2eedbd1f5fc 100644
--- a/arch/powerpc/boot/dts/o2dnt2.dts
+++ b/arch/powerpc/boot/dts/o2dnt2.dts
@@ -12,7 +12,7 @@
model = "ifm,o2dnt2";
compatible = "ifm,o2d";
- memory {
+ memory@0 {
reg = <0x00000000 0x08000000>; // 128MB
};
diff --git a/arch/powerpc/boot/dts/o3dnt.dts b/arch/powerpc/boot/dts/o3dnt.dts
index fd00396b0593..e4c1bdd41271 100644
--- a/arch/powerpc/boot/dts/o3dnt.dts
+++ b/arch/powerpc/boot/dts/o3dnt.dts
@@ -12,7 +12,7 @@
model = "ifm,o3dnt";
compatible = "ifm,o2d";
- memory {
+ memory@0 {
reg = <0x00000000 0x04000000>; // 64MB
};
diff --git a/arch/powerpc/boot/dts/pcm030.dts b/arch/powerpc/boot/dts/pcm030.dts
index b1bc731f7afd..5cee474dcc4c 100644
--- a/arch/powerpc/boot/dts/pcm030.dts
+++ b/arch/powerpc/boot/dts/pcm030.dts
@@ -90,9 +90,9 @@
0xc800 0 0 2 &mpc5200_pic 1 2 3
0xc800 0 0 3 &mpc5200_pic 1 3 3
0xc800 0 0 4 &mpc5200_pic 0 0 3>;
- ranges = <0x42000000 0 0x80000000 0x80000000 0 0x20000000
- 0x02000000 0 0xa0000000 0xa0000000 0 0x10000000
- 0x01000000 0 0x00000000 0xb0000000 0 0x01000000>;
+ ranges = <0x42000000 0 0x80000000 0x80000000 0 0x20000000>,
+ <0x02000000 0 0xa0000000 0xa0000000 0 0x10000000>,
+ <0x01000000 0 0x00000000 0xb0000000 0 0x01000000>;
};
localbus {
diff --git a/arch/powerpc/boot/dts/pcm032.dts b/arch/powerpc/boot/dts/pcm032.dts
index c259c6b3ac5a..d00f13b62510 100644
--- a/arch/powerpc/boot/dts/pcm032.dts
+++ b/arch/powerpc/boot/dts/pcm032.dts
@@ -3,9 +3,7 @@
* phyCORE-MPC5200B-IO (pcm032) board Device Tree Source
*
* Copyright (C) 2006-2009 Pengutronix
- * Sascha Hauer <s.hauer@pengutronix.de>
- * Juergen Beisert <j.beisert@pengutronix.de>
- * Wolfram Sang <w.sang@pengutronix.de>
+ * Sascha Hauer, Juergen Beisert, Wolfram Sang <kernel@pengutronix.de>
*/
/include/ "mpc5200b.dtsi"
@@ -22,7 +20,7 @@
model = "phytec,pcm032";
compatible = "phytec,pcm032";
- memory {
+ memory@0 {
reg = <0x00000000 0x08000000>; // 128MB
};
@@ -89,9 +87,9 @@
0xc800 0 0 2 &mpc5200_pic 1 2 3
0xc800 0 0 3 &mpc5200_pic 1 3 3
0xc800 0 0 4 &mpc5200_pic 0 0 3>;
- ranges = <0x42000000 0 0x80000000 0x80000000 0 0x20000000
- 0x02000000 0 0xa0000000 0xa0000000 0 0x10000000
- 0x01000000 0 0x00000000 0xb0000000 0 0x01000000>;
+ ranges = <0x42000000 0 0x80000000 0x80000000 0 0x20000000>,
+ <0x02000000 0 0xa0000000 0xa0000000 0 0x10000000>,
+ <0x01000000 0 0x00000000 0xb0000000 0 0x01000000>;
};
localbus {
diff --git a/arch/powerpc/boot/dts/redwood.dts b/arch/powerpc/boot/dts/redwood.dts
index f3e046fb49e2..3c849e23e5f3 100644
--- a/arch/powerpc/boot/dts/redwood.dts
+++ b/arch/powerpc/boot/dts/redwood.dts
@@ -235,7 +235,7 @@
has-new-stacr-staopc;
};
};
- PCIE0: pciex@d00000000 {
+ PCIE0: pcie@d00000000 {
device_type = "pci";
#interrupt-cells = <1>;
#size-cells = <2>;
@@ -276,7 +276,7 @@
0x0 0x0 0x0 0x4 &UIC3 0x3 0x4 /* swizzled int D */>;
};
- PCIE1: pciex@d20000000 {
+ PCIE1: pcie@d20000000 {
device_type = "pci";
#interrupt-cells = <1>;
#size-cells = <2>;
@@ -317,7 +317,7 @@
0x0 0x0 0x0 0x4 &UIC3 0x7 0x4 /* swizzled int D */>;
};
- PCIE2: pciex@d40000000 {
+ PCIE2: pcie@d40000000 {
device_type = "pci";
#interrupt-cells = <1>;
#size-cells = <2>;
@@ -358,25 +358,6 @@
0x0 0x0 0x0 0x4 &UIC3 0xb 0x4 /* swizzled int D */>;
};
- MSI: ppc4xx-msi@400300000 {
- compatible = "amcc,ppc4xx-msi", "ppc4xx-msi";
- reg = < 0x4 0x00300000 0x100
- 0x4 0x00300000 0x100>;
- sdr-base = <0x3B0>;
- msi-data = <0x00000000>;
- msi-mask = <0x44440000>;
- interrupt-count = <3>;
- interrupts =<0 1 2 3>;
- interrupt-parent = <&UIC0>;
- #interrupt-cells = <1>;
- #address-cells = <0>;
- #size-cells = <0>;
- interrupt-map = <0 &UIC0 0xC 1
- 1 &UIC0 0x0D 1
- 2 &UIC0 0x0E 1
- 3 &UIC0 0x0F 1>;
- };
-
};
diff --git a/arch/powerpc/boot/dts/sbc8548-altflash.dts b/arch/powerpc/boot/dts/sbc8548-altflash.dts
deleted file mode 100644
index bb7a1e712bb7..000000000000
--- a/arch/powerpc/boot/dts/sbc8548-altflash.dts
+++ /dev/null
@@ -1,111 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * SBC8548 Device Tree Source
- *
- * Configured for booting off the alternate (64MB SODIMM) flash.
- * Requires switching JP12 jumpers and changing SW2.8 setting.
- *
- * Copyright 2013 Wind River Systems Inc.
- *
- * Paul Gortmaker (see MAINTAINERS for contact information)
- */
-
-
-/dts-v1/;
-
-/include/ "sbc8548-pre.dtsi"
-
-/{
- localbus@e0000000 {
- #address-cells = <2>;
- #size-cells = <1>;
- compatible = "simple-bus";
- reg = <0xe0000000 0x5000>;
- interrupt-parent = <&mpic>;
-
- ranges = <0x0 0x0 0xfc000000 0x04000000 /*64MB Flash*/
- 0x3 0x0 0xf0000000 0x04000000 /*64MB SDRAM*/
- 0x4 0x0 0xf4000000 0x04000000 /*64MB SDRAM*/
- 0x5 0x0 0xf8000000 0x00b10000 /* EPLD */
- 0x6 0x0 0xef800000 0x00800000>; /*8MB Flash*/
-
- flash@0,0 {
- #address-cells = <1>;
- #size-cells = <1>;
- reg = <0x0 0x0 0x04000000>;
- compatible = "intel,JS28F128", "cfi-flash";
- bank-width = <4>;
- device-width = <1>;
- partition@0 {
- label = "space";
- /* FC000000 -> FFEFFFFF */
- reg = <0x00000000 0x03f00000>;
- };
- partition@3f00000 {
- label = "bootloader";
- /* FFF00000 -> FFFFFFFF */
- reg = <0x03f00000 0x00100000>;
- read-only;
- };
- };
-
-
- epld@5,0 {
- compatible = "wrs,epld-localbus";
- #address-cells = <2>;
- #size-cells = <1>;
- reg = <0x5 0x0 0x00b10000>;
- ranges = <
- 0x0 0x0 0x5 0x000000 0x1fff /* LED */
- 0x1 0x0 0x5 0x100000 0x1fff /* Switches */
- 0x3 0x0 0x5 0x300000 0x1fff /* HW Rev. */
- 0xb 0x0 0x5 0xb00000 0x1fff /* EEPROM */
- >;
-
- led@0,0 {
- compatible = "led";
- reg = <0x0 0x0 0x1fff>;
- };
-
- switches@1,0 {
- compatible = "switches";
- reg = <0x1 0x0 0x1fff>;
- };
-
- hw-rev@3,0 {
- compatible = "hw-rev";
- reg = <0x3 0x0 0x1fff>;
- };
-
- eeprom@b,0 {
- compatible = "eeprom";
- reg = <0xb 0 0x1fff>;
- };
-
- };
-
- alt-flash@6,0 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "intel,JS28F640", "cfi-flash";
- reg = <0x6 0x0 0x800000>;
- bank-width = <1>;
- device-width = <1>;
- partition@0 {
- label = "space";
- /* EF800000 -> EFF9FFFF */
- reg = <0x00000000 0x007a0000>;
- };
- partition@7a0000 {
- label = "bootloader";
- /* EFFA0000 -> EFFFFFFF */
- reg = <0x007a0000 0x00060000>;
- read-only;
- };
- };
-
-
- };
-};
-
-/include/ "sbc8548-post.dtsi"
diff --git a/arch/powerpc/boot/dts/sbc8548-post.dtsi b/arch/powerpc/boot/dts/sbc8548-post.dtsi
deleted file mode 100644
index 9d848d409408..000000000000
--- a/arch/powerpc/boot/dts/sbc8548-post.dtsi
+++ /dev/null
@@ -1,289 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * SBC8548 Device Tree Source
- *
- * Copyright 2007 Wind River Systems Inc.
- *
- * Paul Gortmaker (see MAINTAINERS for contact information)
- */
-
-/{
- soc8548@e0000000 {
- #address-cells = <1>;
- #size-cells = <1>;
- device_type = "soc";
- ranges = <0x00000000 0xe0000000 0x00100000>;
- bus-frequency = <0>;
- compatible = "simple-bus";
-
- ecm-law@0 {
- compatible = "fsl,ecm-law";
- reg = <0x0 0x1000>;
- fsl,num-laws = <10>;
- };
-
- ecm@1000 {
- compatible = "fsl,mpc8548-ecm", "fsl,ecm";
- reg = <0x1000 0x1000>;
- interrupts = <17 2>;
- interrupt-parent = <&mpic>;
- };
-
- memory-controller@2000 {
- compatible = "fsl,mpc8548-memory-controller";
- reg = <0x2000 0x1000>;
- interrupt-parent = <&mpic>;
- interrupts = <0x12 0x2>;
- };
-
- L2: l2-cache-controller@20000 {
- compatible = "fsl,mpc8548-l2-cache-controller";
- reg = <0x20000 0x1000>;
- cache-line-size = <0x20>; // 32 bytes
- cache-size = <0x80000>; // L2, 512K
- interrupt-parent = <&mpic>;
- interrupts = <0x10 0x2>;
- };
-
- i2c@3000 {
- #address-cells = <1>;
- #size-cells = <0>;
- cell-index = <0>;
- compatible = "fsl-i2c";
- reg = <0x3000 0x100>;
- interrupts = <0x2b 0x2>;
- interrupt-parent = <&mpic>;
- dfsrr;
- };
-
- i2c@3100 {
- #address-cells = <1>;
- #size-cells = <0>;
- cell-index = <1>;
- compatible = "fsl-i2c";
- reg = <0x3100 0x100>;
- interrupts = <0x2b 0x2>;
- interrupt-parent = <&mpic>;
- dfsrr;
- };
-
- dma@21300 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "fsl,mpc8548-dma", "fsl,eloplus-dma";
- reg = <0x21300 0x4>;
- ranges = <0x0 0x21100 0x200>;
- cell-index = <0>;
- dma-channel@0 {
- compatible = "fsl,mpc8548-dma-channel",
- "fsl,eloplus-dma-channel";
- reg = <0x0 0x80>;
- cell-index = <0>;
- interrupt-parent = <&mpic>;
- interrupts = <20 2>;
- };
- dma-channel@80 {
- compatible = "fsl,mpc8548-dma-channel",
- "fsl,eloplus-dma-channel";
- reg = <0x80 0x80>;
- cell-index = <1>;
- interrupt-parent = <&mpic>;
- interrupts = <21 2>;
- };
- dma-channel@100 {
- compatible = "fsl,mpc8548-dma-channel",
- "fsl,eloplus-dma-channel";
- reg = <0x100 0x80>;
- cell-index = <2>;
- interrupt-parent = <&mpic>;
- interrupts = <22 2>;
- };
- dma-channel@180 {
- compatible = "fsl,mpc8548-dma-channel",
- "fsl,eloplus-dma-channel";
- reg = <0x180 0x80>;
- cell-index = <3>;
- interrupt-parent = <&mpic>;
- interrupts = <23 2>;
- };
- };
-
- enet0: ethernet@24000 {
- #address-cells = <1>;
- #size-cells = <1>;
- cell-index = <0>;
- device_type = "network";
- model = "eTSEC";
- compatible = "gianfar";
- reg = <0x24000 0x1000>;
- ranges = <0x0 0x24000 0x1000>;
- local-mac-address = [ 00 00 00 00 00 00 ];
- interrupts = <0x1d 0x2 0x1e 0x2 0x22 0x2>;
- interrupt-parent = <&mpic>;
- tbi-handle = <&tbi0>;
- phy-handle = <&phy0>;
-
- mdio@520 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "fsl,gianfar-mdio";
- reg = <0x520 0x20>;
-
- phy0: ethernet-phy@19 {
- interrupt-parent = <&mpic>;
- interrupts = <0x6 0x1>;
- reg = <0x19>;
- };
- phy1: ethernet-phy@1a {
- interrupt-parent = <&mpic>;
- interrupts = <0x7 0x1>;
- reg = <0x1a>;
- };
- tbi0: tbi-phy@11 {
- reg = <0x11>;
- device_type = "tbi-phy";
- };
- };
- };
-
- enet1: ethernet@25000 {
- #address-cells = <1>;
- #size-cells = <1>;
- cell-index = <1>;
- device_type = "network";
- model = "eTSEC";
- compatible = "gianfar";
- reg = <0x25000 0x1000>;
- ranges = <0x0 0x25000 0x1000>;
- local-mac-address = [ 00 00 00 00 00 00 ];
- interrupts = <0x23 0x2 0x24 0x2 0x28 0x2>;
- interrupt-parent = <&mpic>;
- tbi-handle = <&tbi1>;
- phy-handle = <&phy1>;
-
- mdio@520 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "fsl,gianfar-tbi";
- reg = <0x520 0x20>;
-
- tbi1: tbi-phy@11 {
- reg = <0x11>;
- device_type = "tbi-phy";
- };
- };
- };
-
- serial0: serial@4500 {
- cell-index = <0>;
- device_type = "serial";
- compatible = "fsl,ns16550", "ns16550";
- reg = <0x4500 0x100>; // reg base, size
- clock-frequency = <0>; // should we fill in in uboot?
- interrupts = <0x2a 0x2>;
- interrupt-parent = <&mpic>;
- };
-
- serial1: serial@4600 {
- cell-index = <1>;
- device_type = "serial";
- compatible = "fsl,ns16550", "ns16550";
- reg = <0x4600 0x100>; // reg base, size
- clock-frequency = <0>; // should we fill in in uboot?
- interrupts = <0x2a 0x2>;
- interrupt-parent = <&mpic>;
- };
-
- global-utilities@e0000 { //global utilities reg
- compatible = "fsl,mpc8548-guts";
- reg = <0xe0000 0x1000>;
- fsl,has-rstcr;
- };
-
- crypto@30000 {
- compatible = "fsl,sec2.1", "fsl,sec2.0";
- reg = <0x30000 0x10000>;
- interrupts = <45 2>;
- interrupt-parent = <&mpic>;
- fsl,num-channels = <4>;
- fsl,channel-fifo-len = <24>;
- fsl,exec-units-mask = <0xfe>;
- fsl,descriptor-types-mask = <0x12b0ebf>;
- };
-
- mpic: pic@40000 {
- interrupt-controller;
- #address-cells = <0>;
- #interrupt-cells = <2>;
- reg = <0x40000 0x40000>;
- compatible = "chrp,open-pic";
- device_type = "open-pic";
- };
- };
-
- pci0: pci@e0008000 {
- interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
- interrupt-map = <
- /* IDSEL 0x01 (PCI-X slot) @66MHz */
- 0x0800 0x0 0x0 0x1 &mpic 0x2 0x1
- 0x0800 0x0 0x0 0x2 &mpic 0x3 0x1
- 0x0800 0x0 0x0 0x3 &mpic 0x4 0x1
- 0x0800 0x0 0x0 0x4 &mpic 0x1 0x1
-
- /* IDSEL 0x11 (PCI, 3.3V 32bit) @33MHz */
- 0x8800 0x0 0x0 0x1 &mpic 0x2 0x1
- 0x8800 0x0 0x0 0x2 &mpic 0x3 0x1
- 0x8800 0x0 0x0 0x3 &mpic 0x4 0x1
- 0x8800 0x0 0x0 0x4 &mpic 0x1 0x1>;
-
- interrupt-parent = <&mpic>;
- interrupts = <0x18 0x2>;
- bus-range = <0 0>;
- ranges = <0x02000000 0x0 0x80000000 0x80000000 0x0 0x10000000
- 0x01000000 0x0 0x00000000 0xe2000000 0x0 0x00800000>;
- clock-frequency = <66000000>;
- #interrupt-cells = <1>;
- #size-cells = <2>;
- #address-cells = <3>;
- reg = <0xe0008000 0x1000>;
- compatible = "fsl,mpc8540-pcix", "fsl,mpc8540-pci";
- device_type = "pci";
- };
-
- pci1: pcie@e000a000 {
- interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
- interrupt-map = <
-
- /* IDSEL 0x0 (PEX) */
- 0x0000 0x0 0x0 0x1 &mpic 0x0 0x1
- 0x0000 0x0 0x0 0x2 &mpic 0x1 0x1
- 0x0000 0x0 0x0 0x3 &mpic 0x2 0x1
- 0x0000 0x0 0x0 0x4 &mpic 0x3 0x1>;
-
- interrupt-parent = <&mpic>;
- interrupts = <0x1a 0x2>;
- bus-range = <0x0 0xff>;
- ranges = <0x02000000 0x0 0xa0000000 0xa0000000 0x0 0x10000000
- 0x01000000 0x0 0x00000000 0xe2800000 0x0 0x08000000>;
- clock-frequency = <33000000>;
- #interrupt-cells = <1>;
- #size-cells = <2>;
- #address-cells = <3>;
- reg = <0xe000a000 0x1000>;
- compatible = "fsl,mpc8548-pcie";
- device_type = "pci";
- pcie@0 {
- reg = <0x0 0x0 0x0 0x0 0x0>;
- #size-cells = <2>;
- #address-cells = <3>;
- device_type = "pci";
- ranges = <0x02000000 0x0 0xa0000000
- 0x02000000 0x0 0xa0000000
- 0x0 0x10000000
-
- 0x01000000 0x0 0x00000000
- 0x01000000 0x0 0x00000000
- 0x0 0x00800000>;
- };
- };
-};
diff --git a/arch/powerpc/boot/dts/sbc8548-pre.dtsi b/arch/powerpc/boot/dts/sbc8548-pre.dtsi
deleted file mode 100644
index 0e3665fd15d0..000000000000
--- a/arch/powerpc/boot/dts/sbc8548-pre.dtsi
+++ /dev/null
@@ -1,48 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * SBC8548 Device Tree Source
- *
- * Copyright 2007 Wind River Systems Inc.
- *
- * Paul Gortmaker (see MAINTAINERS for contact information)
- */
-
-/{
- model = "SBC8548";
- compatible = "SBC8548";
- #address-cells = <1>;
- #size-cells = <1>;
-
- aliases {
- ethernet0 = &enet0;
- ethernet1 = &enet1;
- serial0 = &serial0;
- serial1 = &serial1;
- pci0 = &pci0;
- pci1 = &pci1;
- };
-
- cpus {
- #address-cells = <1>;
- #size-cells = <0>;
-
- PowerPC,8548@0 {
- device_type = "cpu";
- reg = <0>;
- d-cache-line-size = <0x20>; // 32 bytes
- i-cache-line-size = <0x20>; // 32 bytes
- d-cache-size = <0x8000>; // L1, 32K
- i-cache-size = <0x8000>; // L1, 32K
- timebase-frequency = <0>; // From uboot
- bus-frequency = <0>;
- clock-frequency = <0>;
- next-level-cache = <&L2>;
- };
- };
-
- memory {
- device_type = "memory";
- reg = <0x00000000 0x10000000>;
- };
-
-};
diff --git a/arch/powerpc/boot/dts/sbc8548.dts b/arch/powerpc/boot/dts/sbc8548.dts
deleted file mode 100644
index ce0a119f496e..000000000000
--- a/arch/powerpc/boot/dts/sbc8548.dts
+++ /dev/null
@@ -1,106 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * SBC8548 Device Tree Source
- *
- * Copyright 2007 Wind River Systems Inc.
- *
- * Paul Gortmaker (see MAINTAINERS for contact information)
- */
-
-
-/dts-v1/;
-
-/include/ "sbc8548-pre.dtsi"
-
-/{
- localbus@e0000000 {
- #address-cells = <2>;
- #size-cells = <1>;
- compatible = "simple-bus";
- reg = <0xe0000000 0x5000>;
- interrupt-parent = <&mpic>;
-
- ranges = <0x0 0x0 0xff800000 0x00800000 /*8MB Flash*/
- 0x3 0x0 0xf0000000 0x04000000 /*64MB SDRAM*/
- 0x4 0x0 0xf4000000 0x04000000 /*64MB SDRAM*/
- 0x5 0x0 0xf8000000 0x00b10000 /* EPLD */
- 0x6 0x0 0xec000000 0x04000000>; /*64MB Flash*/
-
-
- flash@0,0 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "intel,JS28F640", "cfi-flash";
- reg = <0x0 0x0 0x800000>;
- bank-width = <1>;
- device-width = <1>;
- partition@0 {
- label = "space";
- /* FF800000 -> FFF9FFFF */
- reg = <0x00000000 0x007a0000>;
- };
- partition@7a0000 {
- label = "bootloader";
- /* FFFA0000 -> FFFFFFFF */
- reg = <0x007a0000 0x00060000>;
- read-only;
- };
- };
-
- epld@5,0 {
- compatible = "wrs,epld-localbus";
- #address-cells = <2>;
- #size-cells = <1>;
- reg = <0x5 0x0 0x00b10000>;
- ranges = <
- 0x0 0x0 0x5 0x000000 0x1fff /* LED */
- 0x1 0x0 0x5 0x100000 0x1fff /* Switches */
- 0x3 0x0 0x5 0x300000 0x1fff /* HW Rev. */
- 0xb 0x0 0x5 0xb00000 0x1fff /* EEPROM */
- >;
-
- led@0,0 {
- compatible = "led";
- reg = <0x0 0x0 0x1fff>;
- };
-
- switches@1,0 {
- compatible = "switches";
- reg = <0x1 0x0 0x1fff>;
- };
-
- hw-rev@3,0 {
- compatible = "hw-rev";
- reg = <0x3 0x0 0x1fff>;
- };
-
- eeprom@b,0 {
- compatible = "eeprom";
- reg = <0xb 0 0x1fff>;
- };
-
- };
-
- alt-flash@6,0 {
- #address-cells = <1>;
- #size-cells = <1>;
- reg = <0x6 0x0 0x04000000>;
- compatible = "intel,JS28F128", "cfi-flash";
- bank-width = <4>;
- device-width = <1>;
- partition@0 {
- label = "space";
- /* EC000000 -> EFEFFFFF */
- reg = <0x00000000 0x03f00000>;
- };
- partition@3f00000 {
- label = "bootloader";
- /* EFF00000 -> EFFFFFFF */
- reg = <0x03f00000 0x00100000>;
- read-only;
- };
- };
- };
-};
-
-/include/ "sbc8548-post.dtsi"
diff --git a/arch/powerpc/boot/dts/stx_gp3_8560.dts b/arch/powerpc/boot/dts/stx_gp3_8560.dts
index d1ab698eef36..e73f7e75b0b4 100644
--- a/arch/powerpc/boot/dts/stx_gp3_8560.dts
+++ b/arch/powerpc/boot/dts/stx_gp3_8560.dts
@@ -7,6 +7,8 @@
/dts-v1/;
+/include/ "fsl/e500v1_power_isa.dtsi"
+
/ {
model = "stx,gp3";
compatible = "stx,gp3-8560", "stx,gp3";
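This board, and the other e500v1 boards further down, gain only an /include/ line; dtc textually includes the file and merges duplicate nodes, with the board's own later definitions taking precedence. A contrived two-file sketch of that merge behaviour (file and property names invented for illustration, not taken from the patch):

	/* common.dtsi (hypothetical) */
	/ {
		cpus {
			example,from-dtsi;		/* contributed by the include */
		};
	};

	/* board.dts (hypothetical) */
	/dts-v1/;
	/include/ "common.dtsi"
	/ {
		model = "example,board";
		cpus {
			example,from-board = <1>;	/* merged into the same cpus node */
		};
	};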
diff --git a/arch/powerpc/boot/dts/stxssa8555.dts b/arch/powerpc/boot/dts/stxssa8555.dts
index 5dca2a91c41f..96add25c904b 100644
--- a/arch/powerpc/boot/dts/stxssa8555.dts
+++ b/arch/powerpc/boot/dts/stxssa8555.dts
@@ -9,6 +9,8 @@
/dts-v1/;
+/include/ "fsl/e500v1_power_isa.dtsi"
+
/ {
model = "stx,gp3";
compatible = "stx,gp3-8560", "stx,gp3";
diff --git a/arch/powerpc/boot/dts/tqm5200.dts b/arch/powerpc/boot/dts/tqm5200.dts
index 9ed0bc78967e..372177b19e60 100644
--- a/arch/powerpc/boot/dts/tqm5200.dts
+++ b/arch/powerpc/boot/dts/tqm5200.dts
@@ -32,7 +32,7 @@
};
};
- memory {
+ memory@0 {
device_type = "memory";
reg = <0x00000000 0x04000000>; // 64MB
};
@@ -200,8 +200,8 @@
clock-frequency = <0>; // From boot loader
interrupts = <2 8 0 2 9 0 2 10 0>;
bus-range = <0 0>;
- ranges = <0x42000000 0 0x80000000 0x80000000 0 0x10000000
- 0x02000000 0 0x90000000 0x90000000 0 0x10000000
- 0x01000000 0 0x00000000 0xa0000000 0 0x01000000>;
+ ranges = <0x42000000 0 0x80000000 0x80000000 0 0x10000000>,
+ <0x02000000 0 0x90000000 0x90000000 0 0x10000000>,
+ <0x01000000 0 0x00000000 0xa0000000 0 0x01000000>;
};
};
diff --git a/arch/powerpc/boot/dts/tqm8540.dts b/arch/powerpc/boot/dts/tqm8540.dts
index 9c1eb9779108..eb4d8fd3f7aa 100644
--- a/arch/powerpc/boot/dts/tqm8540.dts
+++ b/arch/powerpc/boot/dts/tqm8540.dts
@@ -7,6 +7,8 @@
/dts-v1/;
+/include/ "fsl/e500v1_power_isa.dtsi"
+
/ {
model = "tqc,tqm8540";
compatible = "tqc,tqm8540";
diff --git a/arch/powerpc/boot/dts/tqm8541.dts b/arch/powerpc/boot/dts/tqm8541.dts
index 44595cf675d0..fe5d3d873ec9 100644
--- a/arch/powerpc/boot/dts/tqm8541.dts
+++ b/arch/powerpc/boot/dts/tqm8541.dts
@@ -7,6 +7,8 @@
/dts-v1/;
+/include/ "fsl/e500v1_power_isa.dtsi"
+
/ {
model = "tqc,tqm8541";
compatible = "tqc,tqm8541";
diff --git a/arch/powerpc/boot/dts/tqm8555.dts b/arch/powerpc/boot/dts/tqm8555.dts
index 54f3e82907d6..4be05b7d225d 100644
--- a/arch/powerpc/boot/dts/tqm8555.dts
+++ b/arch/powerpc/boot/dts/tqm8555.dts
@@ -7,6 +7,8 @@
/dts-v1/;
+/include/ "fsl/e500v1_power_isa.dtsi"
+
/ {
model = "tqc,tqm8555";
compatible = "tqc,tqm8555";
diff --git a/arch/powerpc/boot/dts/tqm8560.dts b/arch/powerpc/boot/dts/tqm8560.dts
index 7415cb69f60d..8ea48502420b 100644
--- a/arch/powerpc/boot/dts/tqm8560.dts
+++ b/arch/powerpc/boot/dts/tqm8560.dts
@@ -8,6 +8,8 @@
/dts-v1/;
+/include/ "fsl/e500v1_power_isa.dtsi"
+
/ {
model = "tqc,tqm8560";
compatible = "tqc,tqm8560";
diff --git a/arch/powerpc/boot/dts/turris1x.dts b/arch/powerpc/boot/dts/turris1x.dts
new file mode 100644
index 000000000000..045af668e928
--- /dev/null
+++ b/arch/powerpc/boot/dts/turris1x.dts
@@ -0,0 +1,483 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Turris 1.x Device Tree Source
+ *
+ * Copyright 2013 - 2022 CZ.NIC z.s.p.o. (http://www.nic.cz/)
+ *
+ * Pinout, Schematics and Altium hardware design files are open source
+ * and available at: https://docs.turris.cz/hw/turris-1x/turris-1x/
+ */
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/leds/common.h>
+/include/ "fsl/p2020si-pre.dtsi"
+
+/ {
+ model = "Turris 1.x";
+ compatible = "cznic,turris1x", "fsl,P2020RDB-PC"; /* fsl,P2020RDB-PC is required for booting Linux */
+
+ aliases {
+ ethernet0 = &enet0;
+ ethernet1 = &enet1;
+ ethernet2 = &enet2;
+ serial0 = &serial0;
+ serial1 = &serial1;
+ pci0 = &pci0;
+ pci1 = &pci1;
+ pci2 = &pci2;
+ spi0 = &spi0;
+ };
+
+ memory {
+ device_type = "memory";
+ };
+
+ soc: soc@ffe00000 {
+ ranges = <0x0 0x0 0xffe00000 0x00100000>;
+
+ i2c@3000 {
+ /* PCA9557PW GPIO controller for boot config */
+ gpio-controller@18 {
+ compatible = "nxp,pca9557";
+ label = "bootcfg";
+ reg = <0x18>;
+ #gpio-cells = <2>;
+ gpio-controller;
+ polarity = <0x00>;
+ };
+
+ /* STM32F030R8T6 MCU for power control */
+ power-control@2a {
+ /*
+ * Turris Power Control firmware runs on STM32F0 MCU.
+ * This firmware is open source and available at:
+ * https://gitlab.nic.cz/turris/hw/turris_power_control
+ */
+ reg = <0x2a>;
+ };
+
+ /* DDR3 SPD/EEPROM PSWP instruction */
+ eeprom@32 {
+ reg = <0x32>;
+ };
+
+ /* SA56004ED temperature control */
+ temperature-sensor@4c {
+ compatible = "nxp,sa56004";
+ reg = <0x4c>;
+ interrupt-parent = <&gpio>;
+ interrupts = <12 IRQ_TYPE_LEVEL_LOW>, /* GPIO12 - ALERT pin */
+ <13 IRQ_TYPE_LEVEL_LOW>; /* GPIO13 - CRIT pin */
+ };
+
+ /* DDR3 SPD/EEPROM */
+ eeprom@52 {
+ compatible = "atmel,spd";
+ reg = <0x52>;
+ };
+
+ /* MCP79402-I/ST Protected EEPROM */
+ eeprom@57 {
+ reg = <0x57>;
+ };
+
+ /* ATSHA204-TH-DA-T crypto module */
+ crypto@64 {
+ compatible = "atmel,atsha204";
+ reg = <0x64>;
+ };
+
+ /* IDT6V49205BNLGI clock generator */
+ clock-generator@69 {
+ compatible = "idt,6v49205b";
+ reg = <0x69>;
+ };
+
+ /* MCP79402-I/ST RTC */
+ rtc@6f {
+ compatible = "microchip,mcp7940x";
+ reg = <0x6f>;
+ interrupt-parent = <&gpio>;
+ interrupts = <14 0>; /* GPIO14 - MFP pin */
+ };
+ };
+
+ /* SPI on connector P1 */
+ spi0: spi@7000 {
+ };
+
+ gpio: gpio-controller@fc00 {
+ #interrupt-cells = <2>;
+ interrupt-controller;
+ };
+
+ /* Connected to SMSC USB2412-DZK 2-Port USB 2.0 Hub Controller */
+ usb@22000 {
+ phy_type = "ulpi";
+ dr_mode = "host";
+ };
+
+ enet0: ethernet@24000 {
+ /* Connected to port 6 of QCA8337N-AL3C switch */
+ phy-connection-type = "rgmii-id";
+
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ };
+ };
+
+ mdio@24520 {
+ /* KSZ9031RNXCA ethernet phy for WAN port */
+ phy: ethernet-phy@7 {
+ interrupts = <3 1 0 0>;
+ reg = <0x7>;
+ };
+
+ /* QCA8337N-AL3C switch with integrated ethernet PHYs for LAN ports */
+ switch@10 {
+ compatible = "qca,qca8337";
+ interrupts = <2 1 0 0>;
+ reg = <0x10>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ label = "cpu";
+ ethernet = <&enet1>;
+ phy-mode = "rgmii-id";
+
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ label = "lan5";
+ };
+
+ port@2 {
+ reg = <2>;
+ label = "lan4";
+ };
+
+ port@3 {
+ reg = <3>;
+ label = "lan3";
+ };
+
+ port@4 {
+ reg = <4>;
+ label = "lan2";
+ };
+
+ port@5 {
+ reg = <5>;
+ label = "lan1";
+ };
+
+ port@6 {
+ reg = <6>;
+ label = "cpu";
+ ethernet = <&enet0>;
+ phy-mode = "rgmii-id";
+
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ };
+ };
+ };
+ };
+ };
+
+ ptp_clock@24e00 {
+ fsl,tclk-period = <5>;
+ fsl,tmr-prsc = <200>;
+ fsl,tmr-add = <0xcccccccd>;
+ fsl,tmr-fiper1 = <0x3b9ac9fb>;
+ fsl,tmr-fiper2 = <0x0001869b>;
+ fsl,max-adj = <249999999>;
+ };
+
+ enet1: ethernet@25000 {
+ /* Connected to port 0 of QCA8337N-AL3C switch */
+ phy-connection-type = "rgmii-id";
+
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ };
+ };
+
+ mdio@25520 {
+ status = "disabled";
+ };
+
+ enet2: ethernet@26000 {
+ /* Connected to KSZ9031RNXCA ethernet phy (WAN port) */
+ label = "wan";
+ phy-handle = <&phy>;
+ phy-connection-type = "rgmii-id";
+ };
+
+ mdio@26520 {
+ status = "disabled";
+ };
+
+ sdhc@2e000 {
+ bus-width = <4>;
+ cd-gpios = <&gpio 8 GPIO_ACTIVE_LOW>;
+ };
+ };
+
+ lbc: localbus@ffe05000 {
+ reg = <0 0xffe05000 0 0x1000>;
+
+ ranges = <0x0 0x0 0x0 0xef000000 0x01000000>, /* NOR */
+ <0x1 0x0 0x0 0xff800000 0x00040000>, /* NAND */
+ <0x3 0x0 0x0 0xffa00000 0x00020000>; /* CPLD */
+
+ /* S29GL128P90TFIR10 NOR */
+ nor@0,0 {
+ compatible = "cfi-flash";
+ reg = <0x0 0x0 0x01000000>;
+ bank-width = <2>;
+ device-width = <1>;
+
+ partitions {
+ compatible = "fixed-partitions";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ partition@0 {
+ /* 128 kB for Device Tree Blob */
+ reg = <0x00000000 0x00020000>;
+ label = "dtb";
+ };
+
+ partition@20000 {
+ /* 1.7 MB for Linux Kernel Image */
+ reg = <0x00020000 0x001a0000>;
+ label = "kernel";
+ };
+
+ partition@1c0000 {
+ /* 1.5 MB for Rescue JFFS2 Root File System */
+ reg = <0x001c0000 0x00180000>;
+ label = "rescue";
+ };
+
+ partition@340000 {
+ /* 11 MB for TAR.XZ Archive with Factory content of NAND Root File System */
+ reg = <0x00340000 0x00b00000>;
+ label = "factory";
+ };
+
+ partition@e40000 {
+ /* 768 kB for Certificates JFFS2 File System */
+ reg = <0x00e40000 0x000c0000>;
+ label = "certificates";
+ };
+
+ /* free unused space 0x00f00000-0x00f20000 */
+
+ partition@f20000 {
+ /* 128 kB for U-Boot Environment Variables */
+ reg = <0x00f20000 0x00020000>;
+ label = "u-boot-env";
+ };
+
+ partition@f40000 {
+ /* 768 kB for U-Boot Bootloader Image */
+ reg = <0x00f40000 0x000c0000>;
+ label = "u-boot";
+ };
+ };
+ };
+
+ /* MT29F2G08ABAEAWP:E NAND */
+ nand@1,0 {
+ compatible = "fsl,p2020-fcm-nand", "fsl,elbc-fcm-nand";
+ reg = <0x1 0x0 0x00040000>;
+ nand-ecc-mode = "soft";
+ nand-ecc-algo = "bch";
+
+ partitions {
+ compatible = "fixed-partitions";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ partition@0 {
+ /* 256 MB for UBI with one volume: UBIFS Root File System */
+ reg = <0x00000000 0x10000000>;
+ label = "rootfs";
+ };
+ };
+ };
+
+ /* LCMXO1200C-3FTN256C FPGA */
+ cpld@3,0 {
+ /*
+ * The Turris CPLD firmware, which runs on this Lattice FPGA,
+ * is an extended version of the P1021RDB-PC CPLD v4.1 firmware.
+ * It is backward compatible with its original version
+ * and the only extension is support for Turris LEDs.
+ * Turris CPLD firmware is open source and available at:
+ * https://gitlab.nic.cz/turris/hw/turris_cpld/-/blob/master/CZ_NIC_Router_CPLD.v
+ */
+ compatible = "cznic,turris1x-cpld", "fsl,p1021rdb-pc-cpld", "simple-bus", "syscon";
+ reg = <0x3 0x0 0x30>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0x0 0x3 0x0 0x00020000>;
+
+ /* MAX6370KA+T watchdog */
+ watchdog@2 {
+ /*
+ * CPLD firmware maps SET0, SET1 and SET2
+ * input logic of MAX6370KA+T chip to CPLD
+ * memory space at byte offset 0x2. WDI
+ * input logic is outside of the CPLD and
+ * connected via external GPIO.
+ */
+ compatible = "maxim,max6370";
+ reg = <0x02 0x01>;
+ gpios = <&gpio 11 GPIO_ACTIVE_LOW>;
+ };
+
+ reboot@d {
+ compatible = "syscon-reboot";
+ reg = <0x0d 0x01>;
+ offset = <0x0d>;
+ mask = <0x01>;
+ value = <0x01>;
+ };
+
+ led-controller@13 {
+ /*
+ * LEDs are controlled by CPLD firmware.
+ * All five LAN LEDs share common RGB settings
+ * and so it is not possible to set different
+ * colors on different LAN ports.
+ */
+ compatible = "cznic,turris1x-leds";
+ reg = <0x13 0x1d>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ multi-led@0 {
+ reg = <0x0>;
+ color = <LED_COLOR_ID_RGB>;
+ function = LED_FUNCTION_WAN;
+ };
+
+ multi-led@1 {
+ reg = <0x1>;
+ color = <LED_COLOR_ID_RGB>;
+ function = LED_FUNCTION_LAN;
+ function-enumerator = <5>;
+ };
+
+ multi-led@2 {
+ reg = <0x2>;
+ color = <LED_COLOR_ID_RGB>;
+ function = LED_FUNCTION_LAN;
+ function-enumerator = <4>;
+ };
+
+ multi-led@3 {
+ reg = <0x3>;
+ color = <LED_COLOR_ID_RGB>;
+ function = LED_FUNCTION_LAN;
+ function-enumerator = <3>;
+ };
+
+ multi-led@4 {
+ reg = <0x4>;
+ color = <LED_COLOR_ID_RGB>;
+ function = LED_FUNCTION_LAN;
+ function-enumerator = <2>;
+ };
+
+ multi-led@5 {
+ reg = <0x5>;
+ color = <LED_COLOR_ID_RGB>;
+ function = LED_FUNCTION_LAN;
+ function-enumerator = <1>;
+ };
+
+ multi-led@6 {
+ reg = <0x6>;
+ color = <LED_COLOR_ID_RGB>;
+ function = LED_FUNCTION_WLAN;
+ };
+
+ multi-led@7 {
+ reg = <0x7>;
+ color = <LED_COLOR_ID_RGB>;
+ function = LED_FUNCTION_POWER;
+ };
+ };
+ };
+ };
+
+ pci2: pcie@ffe08000 {
+ /*
+ * PCIe bus for on-board TUSB7340RKM USB 3.0 xHCI controller.
+ * This xHCI controller is available only on Turris 1.1 boards.
+ * Turris 1.0 boards have nothing connected to this PCIe bus,
+ * so the system would see only the PCIe Root Port of this PCIe Root
+ * Complex. The TUSB7340RKM xHCI controller has four SuperSpeed
+ * channels. Channel 0 is connected to the front USB 3.0 port,
+ * channel 1 (but only USB 2.0 subset) to USB 2.0 pins on mPCIe
+ * slot 1 (CN5), channels 2 and 3 to connector P600.
+ *
+ * P2020 PCIe Root Port uses 1MB of PCIe MEM and xHCI controller
+ * uses 64kB + 8kB of PCIe MEM. No PCIe IO is used or required.
+ * So allocate 2MB of PCIe MEM for this PCIe bus.
+ */
+ reg = <0 0xffe08000 0 0x1000>;
+ ranges = <0x02000000 0x0 0xc0000000 0 0xc0000000 0x0 0x00200000>, /* MEM */
+ <0x01000000 0x0 0x00000000 0 0xffc20000 0x0 0x00010000>; /* IO */
+
+ pcie@0 {
+ ranges;
+ };
+ };
+
+ pci1: pcie@ffe09000 {
+ /* PCIe bus on mPCIe slot 2 (CN6) for expansion mPCIe card */
+ reg = <0 0xffe09000 0 0x1000>;
+ ranges = <0x02000000 0x0 0xa0000000 0 0xa0000000 0x0 0x20000000>, /* MEM */
+ <0x01000000 0x0 0x00000000 0 0xffc10000 0x0 0x00010000>; /* IO */
+
+ pcie@0 {
+ ranges;
+ };
+ };
+
+ pci0: pcie@ffe0a000 {
+ /*
+ * PCIe bus on mPCIe slot 1 (CN5) for expansion mPCIe card.
+ * Turris 1.1 boards additionally route USB 2.0 pins to this mPCIe
+ * slot via channel 1 of the TUSB7340RKM xHCI controller, and also
+ * provide a SIM card slot, both for USB-based WWAN cards.
+ */
+ reg = <0 0xffe0a000 0 0x1000>;
+ ranges = <0x02000000 0x0 0x80000000 0 0x80000000 0x0 0x20000000>, /* MEM */
+ <0x01000000 0x0 0x00000000 0 0xffc00000 0x0 0x00010000>; /* IO */
+
+ pcie@0 {
+ ranges;
+ };
+ };
+};
+
+/include/ "fsl/p2020si-post.dtsi"
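A quick check (arithmetic only, not part of the patch) that the fixed-partitions entries in the new turris1x.dts NOR node tile the flash exactly, matching both the S29GL128P's 16 MiB and the 0x01000000 localbus window:

	0x000000 + 0x020000 (dtb)          = 0x020000
	0x020000 + 0x1a0000 (kernel)       = 0x1c0000
	0x1c0000 + 0x180000 (rescue)       = 0x340000
	0x340000 + 0xb00000 (factory)      = 0xe40000
	0xe40000 + 0x0c0000 (certificates) = 0xf00000
	0xf00000 .. 0xf20000                 128 kB left free, as commented
	0xf20000 + 0x020000 (u-boot-env)   = 0xf40000
	0xf40000 + 0x0c0000 (u-boot)       = 0x1000000 (16 MiB)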
diff --git a/arch/powerpc/boot/dts/virtex440-ml507.dts b/arch/powerpc/boot/dts/virtex440-ml507.dts
deleted file mode 100644
index 66f1c6312de6..000000000000
--- a/arch/powerpc/boot/dts/virtex440-ml507.dts
+++ /dev/null
@@ -1,406 +0,0 @@
-/*
- * This file supports the Xilinx ML507 board with the 440 processor.
- * A reference design for the FPGA is provided at http://git.xilinx.com.
- *
- * (C) Copyright 2008 Xilinx, Inc.
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
- *
- * ---
- *
- * Device Tree Generator version: 1.1
- *
- * CAUTION: This file is automatically generated by libgen.
- * Version: Xilinx EDK 10.1.03 EDK_K_SP3.6
- *
- * XPS project directory: ml507_ppc440_emb_ref
- */
-
-/dts-v1/;
-
-/ {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "xlnx,virtex440";
- dcr-parent = <&ppc440_0>;
- model = "testing";
- DDR2_SDRAM: memory@0 {
- device_type = "memory";
- reg = < 0 0x10000000 >;
- } ;
- chosen {
- bootargs = "console=ttyS0 root=/dev/ram";
- stdout-path = &RS232_Uart_1;
- } ;
- cpus {
- #address-cells = <1>;
- #cpus = <1>;
- #size-cells = <0>;
- ppc440_0: cpu@0 {
- clock-frequency = <400000000>;
- compatible = "PowerPC,440", "ibm,ppc440";
- d-cache-line-size = <0x20>;
- d-cache-size = <0x8000>;
- dcr-access-method = "native";
- dcr-controller ;
- device_type = "cpu";
- i-cache-line-size = <0x20>;
- i-cache-size = <0x8000>;
- model = "PowerPC,440";
- reg = <0>;
- timebase-frequency = <400000000>;
- xlnx,apu-control = <1>;
- xlnx,apu-udi-0 = <0>;
- xlnx,apu-udi-1 = <0>;
- xlnx,apu-udi-10 = <0>;
- xlnx,apu-udi-11 = <0>;
- xlnx,apu-udi-12 = <0>;
- xlnx,apu-udi-13 = <0>;
- xlnx,apu-udi-14 = <0>;
- xlnx,apu-udi-15 = <0>;
- xlnx,apu-udi-2 = <0>;
- xlnx,apu-udi-3 = <0>;
- xlnx,apu-udi-4 = <0>;
- xlnx,apu-udi-5 = <0>;
- xlnx,apu-udi-6 = <0>;
- xlnx,apu-udi-7 = <0>;
- xlnx,apu-udi-8 = <0>;
- xlnx,apu-udi-9 = <0>;
- xlnx,dcr-autolock-enable = <1>;
- xlnx,dcu-rd-ld-cache-plb-prio = <0>;
- xlnx,dcu-rd-noncache-plb-prio = <0>;
- xlnx,dcu-rd-touch-plb-prio = <0>;
- xlnx,dcu-rd-urgent-plb-prio = <0>;
- xlnx,dcu-wr-flush-plb-prio = <0>;
- xlnx,dcu-wr-store-plb-prio = <0>;
- xlnx,dcu-wr-urgent-plb-prio = <0>;
- xlnx,dma0-control = <0>;
- xlnx,dma0-plb-prio = <0>;
- xlnx,dma0-rxchannelctrl = <0x1010000>;
- xlnx,dma0-rxirqtimer = <0x3ff>;
- xlnx,dma0-txchannelctrl = <0x1010000>;
- xlnx,dma0-txirqtimer = <0x3ff>;
- xlnx,dma1-control = <0>;
- xlnx,dma1-plb-prio = <0>;
- xlnx,dma1-rxchannelctrl = <0x1010000>;
- xlnx,dma1-rxirqtimer = <0x3ff>;
- xlnx,dma1-txchannelctrl = <0x1010000>;
- xlnx,dma1-txirqtimer = <0x3ff>;
- xlnx,dma2-control = <0>;
- xlnx,dma2-plb-prio = <0>;
- xlnx,dma2-rxchannelctrl = <0x1010000>;
- xlnx,dma2-rxirqtimer = <0x3ff>;
- xlnx,dma2-txchannelctrl = <0x1010000>;
- xlnx,dma2-txirqtimer = <0x3ff>;
- xlnx,dma3-control = <0>;
- xlnx,dma3-plb-prio = <0>;
- xlnx,dma3-rxchannelctrl = <0x1010000>;
- xlnx,dma3-rxirqtimer = <0x3ff>;
- xlnx,dma3-txchannelctrl = <0x1010000>;
- xlnx,dma3-txirqtimer = <0x3ff>;
- xlnx,endian-reset = <0>;
- xlnx,generate-plb-timespecs = <1>;
- xlnx,icu-rd-fetch-plb-prio = <0>;
- xlnx,icu-rd-spec-plb-prio = <0>;
- xlnx,icu-rd-touch-plb-prio = <0>;
- xlnx,interconnect-imask = <0xffffffff>;
- xlnx,mplb-allow-lock-xfer = <1>;
- xlnx,mplb-arb-mode = <0>;
- xlnx,mplb-awidth = <0x20>;
- xlnx,mplb-counter = <0x500>;
- xlnx,mplb-dwidth = <0x80>;
- xlnx,mplb-max-burst = <8>;
- xlnx,mplb-native-dwidth = <0x80>;
- xlnx,mplb-p2p = <0>;
- xlnx,mplb-prio-dcur = <2>;
- xlnx,mplb-prio-dcuw = <3>;
- xlnx,mplb-prio-icu = <4>;
- xlnx,mplb-prio-splb0 = <1>;
- xlnx,mplb-prio-splb1 = <0>;
- xlnx,mplb-read-pipe-enable = <1>;
- xlnx,mplb-sync-tattribute = <0>;
- xlnx,mplb-wdog-enable = <1>;
- xlnx,mplb-write-pipe-enable = <1>;
- xlnx,mplb-write-post-enable = <1>;
- xlnx,num-dma = <1>;
- xlnx,pir = <0xf>;
- xlnx,ppc440mc-addr-base = <0>;
- xlnx,ppc440mc-addr-high = <0xfffffff>;
- xlnx,ppc440mc-arb-mode = <0>;
- xlnx,ppc440mc-bank-conflict-mask = <0xc00000>;
- xlnx,ppc440mc-control = <0xf810008f>;
- xlnx,ppc440mc-max-burst = <8>;
- xlnx,ppc440mc-prio-dcur = <2>;
- xlnx,ppc440mc-prio-dcuw = <3>;
- xlnx,ppc440mc-prio-icu = <4>;
- xlnx,ppc440mc-prio-splb0 = <1>;
- xlnx,ppc440mc-prio-splb1 = <0>;
- xlnx,ppc440mc-row-conflict-mask = <0x3ffe00>;
- xlnx,ppcdm-asyncmode = <0>;
- xlnx,ppcds-asyncmode = <0>;
- xlnx,user-reset = <0>;
- DMA0: sdma@80 {
- compatible = "xlnx,ll-dma-1.00.a";
- dcr-reg = < 0x80 0x11 >;
- interrupt-parent = <&xps_intc_0>;
- interrupts = < 10 2 11 2 >;
- } ;
- } ;
- } ;
- plb_v46_0: plb@0 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "xlnx,plb-v46-1.03.a", "simple-bus";
- ranges ;
- DIP_Switches_8Bit: gpio@81460000 {
- compatible = "xlnx,xps-gpio-1.00.a";
- interrupt-parent = <&xps_intc_0>;
- interrupts = < 7 2 >;
- reg = < 0x81460000 0x10000 >;
- xlnx,all-inputs = <1>;
- xlnx,all-inputs-2 = <0>;
- xlnx,dout-default = <0>;
- xlnx,dout-default-2 = <0>;
- xlnx,family = "virtex5";
- xlnx,gpio-width = <8>;
- xlnx,interrupt-present = <1>;
- xlnx,is-bidir = <1>;
- xlnx,is-bidir-2 = <1>;
- xlnx,is-dual = <0>;
- xlnx,tri-default = <0xffffffff>;
- xlnx,tri-default-2 = <0xffffffff>;
- } ;
- FLASH: flash@fc000000 {
- bank-width = <2>;
- compatible = "xlnx,xps-mch-emc-2.00.a", "cfi-flash";
- reg = < 0xfc000000 0x2000000 >;
- xlnx,family = "virtex5";
- xlnx,include-datawidth-matching-0 = <0x1>;
- xlnx,include-datawidth-matching-1 = <0x0>;
- xlnx,include-datawidth-matching-2 = <0x0>;
- xlnx,include-datawidth-matching-3 = <0x0>;
- xlnx,include-negedge-ioregs = <0x0>;
- xlnx,include-plb-ipif = <0x1>;
- xlnx,include-wrbuf = <0x1>;
- xlnx,max-mem-width = <0x10>;
- xlnx,mch-native-dwidth = <0x20>;
- xlnx,mch-plb-clk-period-ps = <0x2710>;
- xlnx,mch-splb-awidth = <0x20>;
- xlnx,mch0-accessbuf-depth = <0x10>;
- xlnx,mch0-protocol = <0x0>;
- xlnx,mch0-rddatabuf-depth = <0x10>;
- xlnx,mch1-accessbuf-depth = <0x10>;
- xlnx,mch1-protocol = <0x0>;
- xlnx,mch1-rddatabuf-depth = <0x10>;
- xlnx,mch2-accessbuf-depth = <0x10>;
- xlnx,mch2-protocol = <0x0>;
- xlnx,mch2-rddatabuf-depth = <0x10>;
- xlnx,mch3-accessbuf-depth = <0x10>;
- xlnx,mch3-protocol = <0x0>;
- xlnx,mch3-rddatabuf-depth = <0x10>;
- xlnx,mem0-width = <0x10>;
- xlnx,mem1-width = <0x20>;
- xlnx,mem2-width = <0x20>;
- xlnx,mem3-width = <0x20>;
- xlnx,num-banks-mem = <0x1>;
- xlnx,num-channels = <0x2>;
- xlnx,priority-mode = <0x0>;
- xlnx,synch-mem-0 = <0x0>;
- xlnx,synch-mem-1 = <0x0>;
- xlnx,synch-mem-2 = <0x0>;
- xlnx,synch-mem-3 = <0x0>;
- xlnx,synch-pipedelay-0 = <0x2>;
- xlnx,synch-pipedelay-1 = <0x2>;
- xlnx,synch-pipedelay-2 = <0x2>;
- xlnx,synch-pipedelay-3 = <0x2>;
- xlnx,tavdv-ps-mem-0 = <0x1adb0>;
- xlnx,tavdv-ps-mem-1 = <0x3a98>;
- xlnx,tavdv-ps-mem-2 = <0x3a98>;
- xlnx,tavdv-ps-mem-3 = <0x3a98>;
- xlnx,tcedv-ps-mem-0 = <0x1adb0>;
- xlnx,tcedv-ps-mem-1 = <0x3a98>;
- xlnx,tcedv-ps-mem-2 = <0x3a98>;
- xlnx,tcedv-ps-mem-3 = <0x3a98>;
- xlnx,thzce-ps-mem-0 = <0x88b8>;
- xlnx,thzce-ps-mem-1 = <0x1b58>;
- xlnx,thzce-ps-mem-2 = <0x1b58>;
- xlnx,thzce-ps-mem-3 = <0x1b58>;
- xlnx,thzoe-ps-mem-0 = <0x1b58>;
- xlnx,thzoe-ps-mem-1 = <0x1b58>;
- xlnx,thzoe-ps-mem-2 = <0x1b58>;
- xlnx,thzoe-ps-mem-3 = <0x1b58>;
- xlnx,tlzwe-ps-mem-0 = <0x88b8>;
- xlnx,tlzwe-ps-mem-1 = <0x0>;
- xlnx,tlzwe-ps-mem-2 = <0x0>;
- xlnx,tlzwe-ps-mem-3 = <0x0>;
- xlnx,twc-ps-mem-0 = <0x2af8>;
- xlnx,twc-ps-mem-1 = <0x3a98>;
- xlnx,twc-ps-mem-2 = <0x3a98>;
- xlnx,twc-ps-mem-3 = <0x3a98>;
- xlnx,twp-ps-mem-0 = <0x11170>;
- xlnx,twp-ps-mem-1 = <0x2ee0>;
- xlnx,twp-ps-mem-2 = <0x2ee0>;
- xlnx,twp-ps-mem-3 = <0x2ee0>;
- xlnx,xcl0-linesize = <0x4>;
- xlnx,xcl0-writexfer = <0x1>;
- xlnx,xcl1-linesize = <0x4>;
- xlnx,xcl1-writexfer = <0x1>;
- xlnx,xcl2-linesize = <0x4>;
- xlnx,xcl2-writexfer = <0x1>;
- xlnx,xcl3-linesize = <0x4>;
- xlnx,xcl3-writexfer = <0x1>;
- } ;
- Hard_Ethernet_MAC: xps-ll-temac@81c00000 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "xlnx,compound";
- ethernet@81c00000 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "xlnx,xps-ll-temac-1.01.b";
- device_type = "network";
- interrupt-parent = <&xps_intc_0>;
- interrupts = < 5 2 >;
- llink-connected = <&DMA0>;
- local-mac-address = [ 02 00 00 00 00 00 ];
- reg = < 0x81c00000 0x40 >;
- xlnx,bus2core-clk-ratio = <1>;
- xlnx,phy-type = <1>;
- xlnx,phyaddr = <1>;
- xlnx,rxcsum = <1>;
- xlnx,rxfifo = <0x1000>;
- xlnx,temac-type = <0>;
- xlnx,txcsum = <1>;
- xlnx,txfifo = <0x1000>;
- phy-handle = <&phy7>;
- clock-frequency = <100000000>;
- phy7: phy@7 {
- compatible = "marvell,88e1111";
- reg = <7>;
- } ;
- } ;
- } ;
- IIC_EEPROM: i2c@81600000 {
- compatible = "xlnx,xps-iic-2.00.a";
- interrupt-parent = <&xps_intc_0>;
- interrupts = < 6 2 >;
- reg = < 0x81600000 0x10000 >;
- xlnx,clk-freq = <0x5f5e100>;
- xlnx,family = "virtex5";
- xlnx,gpo-width = <0x1>;
- xlnx,iic-freq = <0x186a0>;
- xlnx,scl-inertial-delay = <0x0>;
- xlnx,sda-inertial-delay = <0x0>;
- xlnx,ten-bit-adr = <0x0>;
- } ;
- LEDs_8Bit: gpio@81400000 {
- compatible = "xlnx,xps-gpio-1.00.a";
- reg = < 0x81400000 0x10000 >;
- xlnx,all-inputs = <0>;
- xlnx,all-inputs-2 = <0>;
- xlnx,dout-default = <0>;
- xlnx,dout-default-2 = <0>;
- xlnx,family = "virtex5";
- xlnx,gpio-width = <8>;
- xlnx,interrupt-present = <0>;
- xlnx,is-bidir = <1>;
- xlnx,is-bidir-2 = <1>;
- xlnx,is-dual = <0>;
- xlnx,tri-default = <0xffffffff>;
- xlnx,tri-default-2 = <0xffffffff>;
- } ;
- LEDs_Positions: gpio@81420000 {
- compatible = "xlnx,xps-gpio-1.00.a";
- reg = < 0x81420000 0x10000 >;
- xlnx,all-inputs = <0>;
- xlnx,all-inputs-2 = <0>;
- xlnx,dout-default = <0>;
- xlnx,dout-default-2 = <0>;
- xlnx,family = "virtex5";
- xlnx,gpio-width = <5>;
- xlnx,interrupt-present = <0>;
- xlnx,is-bidir = <1>;
- xlnx,is-bidir-2 = <1>;
- xlnx,is-dual = <0>;
- xlnx,tri-default = <0xffffffff>;
- xlnx,tri-default-2 = <0xffffffff>;
- } ;
- Push_Buttons_5Bit: gpio@81440000 {
- compatible = "xlnx,xps-gpio-1.00.a";
- interrupt-parent = <&xps_intc_0>;
- interrupts = < 8 2 >;
- reg = < 0x81440000 0x10000 >;
- xlnx,all-inputs = <1>;
- xlnx,all-inputs-2 = <0>;
- xlnx,dout-default = <0>;
- xlnx,dout-default-2 = <0>;
- xlnx,family = "virtex5";
- xlnx,gpio-width = <5>;
- xlnx,interrupt-present = <1>;
- xlnx,is-bidir = <1>;
- xlnx,is-bidir-2 = <1>;
- xlnx,is-dual = <0>;
- xlnx,tri-default = <0xffffffff>;
- xlnx,tri-default-2 = <0xffffffff>;
- } ;
- RS232_Uart_1: serial@83e00000 {
- clock-frequency = <100000000>;
- compatible = "xlnx,xps-uart16550-2.00.b", "ns16550";
- current-speed = <9600>;
- device_type = "serial";
- interrupt-parent = <&xps_intc_0>;
- interrupts = < 9 2 >;
- reg = < 0x83e00000 0x10000 >;
- reg-offset = <0x1003>;
- reg-shift = <2>;
- xlnx,family = "virtex5";
- xlnx,has-external-rclk = <0>;
- xlnx,has-external-xin = <0>;
- xlnx,is-a-16550 = <1>;
- } ;
- SysACE_CompactFlash: sysace@83600000 {
- compatible = "xlnx,xps-sysace-1.00.a";
- interrupt-parent = <&xps_intc_0>;
- interrupts = < 4 2 >;
- reg = < 0x83600000 0x10000 >;
- xlnx,family = "virtex5";
- xlnx,mem-width = <0x10>;
- } ;
- xps_bram_if_cntlr_1: xps-bram-if-cntlr@ffff0000 {
- compatible = "xlnx,xps-bram-if-cntlr-1.00.a";
- reg = < 0xffff0000 0x10000 >;
- xlnx,family = "virtex5";
- } ;
- xps_intc_0: interrupt-controller@81800000 {
- #interrupt-cells = <2>;
- compatible = "xlnx,xps-intc-1.00.a";
- interrupt-controller ;
- reg = < 0x81800000 0x10000 >;
- xlnx,num-intr-inputs = <0xc>;
- } ;
- xps_timebase_wdt_1: xps-timebase-wdt@83a00000 {
- compatible = "xlnx,xps-timebase-wdt-1.00.b";
- interrupt-parent = <&xps_intc_0>;
- interrupts = < 2 0 1 2 >;
- reg = < 0x83a00000 0x10000 >;
- xlnx,family = "virtex5";
- xlnx,wdt-enable-once = <0>;
- xlnx,wdt-interval = <0x1e>;
- } ;
- xps_timer_1: timer@83c00000 {
- compatible = "xlnx,xps-timer-1.00.a";
- interrupt-parent = <&xps_intc_0>;
- interrupts = < 3 2 >;
- reg = < 0x83c00000 0x10000 >;
- xlnx,count-width = <0x20>;
- xlnx,family = "virtex5";
- xlnx,gen0-assert = <1>;
- xlnx,gen1-assert = <1>;
- xlnx,one-timer-only = <1>;
- xlnx,trig0-assert = <1>;
- xlnx,trig1-assert = <1>;
- } ;
- } ;
-} ;
diff --git a/arch/powerpc/boot/dts/virtex440-ml510.dts b/arch/powerpc/boot/dts/virtex440-ml510.dts
deleted file mode 100644
index 3b736ca26ddc..000000000000
--- a/arch/powerpc/boot/dts/virtex440-ml510.dts
+++ /dev/null
@@ -1,466 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Xilinx ML510 Reference Design support
- *
- * This DTS file was created for the ml510_bsb1_pcores_ppc440 reference design.
- * The reference design contains a bug which prevent PCI DMA from working
- * properly. A description of the bug is given in the plbv46_pci section. It
- * needs to be fixed by the user until Xilinx updates their reference design.
- *
- * Copyright 2009, Roderick Colenbrander
- */
-
-/dts-v1/;
-/ {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "xlnx,ml510-ref-design", "xlnx,virtex440";
- dcr-parent = <&ppc440_0>;
- DDR2_SDRAM_DIMM0: memory@0 {
- device_type = "memory";
- reg = < 0x0 0x20000000 >;
- } ;
- alias {
- ethernet0 = &Hard_Ethernet_MAC;
- serial0 = &RS232_Uart_1;
- } ;
- chosen {
- bootargs = "console=ttyS0 root=/dev/ram";
- stdout-path = "/plb@0/serial@83e00000";
- } ;
- cpus {
- #address-cells = <1>;
- #cpus = <0x1>;
- #size-cells = <0>;
- ppc440_0: cpu@0 {
- #address-cells = <1>;
- #size-cells = <1>;
- clock-frequency = <300000000>;
- compatible = "PowerPC,440", "ibm,ppc440";
- d-cache-line-size = <0x20>;
- d-cache-size = <0x8000>;
- dcr-access-method = "native";
- dcr-controller ;
- device_type = "cpu";
- i-cache-line-size = <0x20>;
- i-cache-size = <0x8000>;
- model = "PowerPC,440";
- reg = <0>;
- timebase-frequency = <300000000>;
- xlnx,apu-control = <0x2000>;
- xlnx,apu-udi-0 = <0x0>;
- xlnx,apu-udi-1 = <0x0>;
- xlnx,apu-udi-10 = <0x0>;
- xlnx,apu-udi-11 = <0x0>;
- xlnx,apu-udi-12 = <0x0>;
- xlnx,apu-udi-13 = <0x0>;
- xlnx,apu-udi-14 = <0x0>;
- xlnx,apu-udi-15 = <0x0>;
- xlnx,apu-udi-2 = <0x0>;
- xlnx,apu-udi-3 = <0x0>;
- xlnx,apu-udi-4 = <0x0>;
- xlnx,apu-udi-5 = <0x0>;
- xlnx,apu-udi-6 = <0x0>;
- xlnx,apu-udi-7 = <0x0>;
- xlnx,apu-udi-8 = <0x0>;
- xlnx,apu-udi-9 = <0x0>;
- xlnx,dcr-autolock-enable = <0x1>;
- xlnx,dcu-rd-ld-cache-plb-prio = <0x0>;
- xlnx,dcu-rd-noncache-plb-prio = <0x0>;
- xlnx,dcu-rd-touch-plb-prio = <0x0>;
- xlnx,dcu-rd-urgent-plb-prio = <0x0>;
- xlnx,dcu-wr-flush-plb-prio = <0x0>;
- xlnx,dcu-wr-store-plb-prio = <0x0>;
- xlnx,dcu-wr-urgent-plb-prio = <0x0>;
- xlnx,dma0-control = <0x0>;
- xlnx,dma0-plb-prio = <0x0>;
- xlnx,dma0-rxchannelctrl = <0x1010000>;
- xlnx,dma0-rxirqtimer = <0x3ff>;
- xlnx,dma0-txchannelctrl = <0x1010000>;
- xlnx,dma0-txirqtimer = <0x3ff>;
- xlnx,dma1-control = <0x0>;
- xlnx,dma1-plb-prio = <0x0>;
- xlnx,dma1-rxchannelctrl = <0x1010000>;
- xlnx,dma1-rxirqtimer = <0x3ff>;
- xlnx,dma1-txchannelctrl = <0x1010000>;
- xlnx,dma1-txirqtimer = <0x3ff>;
- xlnx,dma2-control = <0x0>;
- xlnx,dma2-plb-prio = <0x0>;
- xlnx,dma2-rxchannelctrl = <0x1010000>;
- xlnx,dma2-rxirqtimer = <0x3ff>;
- xlnx,dma2-txchannelctrl = <0x1010000>;
- xlnx,dma2-txirqtimer = <0x3ff>;
- xlnx,dma3-control = <0x0>;
- xlnx,dma3-plb-prio = <0x0>;
- xlnx,dma3-rxchannelctrl = <0x1010000>;
- xlnx,dma3-rxirqtimer = <0x3ff>;
- xlnx,dma3-txchannelctrl = <0x1010000>;
- xlnx,dma3-txirqtimer = <0x3ff>;
- xlnx,endian-reset = <0x0>;
- xlnx,generate-plb-timespecs = <0x1>;
- xlnx,icu-rd-fetch-plb-prio = <0x0>;
- xlnx,icu-rd-spec-plb-prio = <0x0>;
- xlnx,icu-rd-touch-plb-prio = <0x0>;
- xlnx,interconnect-imask = <0xffffffff>;
- xlnx,mplb-allow-lock-xfer = <0x1>;
- xlnx,mplb-arb-mode = <0x0>;
- xlnx,mplb-awidth = <0x20>;
- xlnx,mplb-counter = <0x500>;
- xlnx,mplb-dwidth = <0x80>;
- xlnx,mplb-max-burst = <0x8>;
- xlnx,mplb-native-dwidth = <0x80>;
- xlnx,mplb-p2p = <0x0>;
- xlnx,mplb-prio-dcur = <0x2>;
- xlnx,mplb-prio-dcuw = <0x3>;
- xlnx,mplb-prio-icu = <0x4>;
- xlnx,mplb-prio-splb0 = <0x1>;
- xlnx,mplb-prio-splb1 = <0x0>;
- xlnx,mplb-read-pipe-enable = <0x1>;
- xlnx,mplb-sync-tattribute = <0x0>;
- xlnx,mplb-wdog-enable = <0x1>;
- xlnx,mplb-write-pipe-enable = <0x1>;
- xlnx,mplb-write-post-enable = <0x1>;
- xlnx,num-dma = <0x0>;
- xlnx,pir = <0xf>;
- xlnx,ppc440mc-addr-base = <0x0>;
- xlnx,ppc440mc-addr-high = <0x1fffffff>;
- xlnx,ppc440mc-arb-mode = <0x0>;
- xlnx,ppc440mc-bank-conflict-mask = <0x1800000>;
- xlnx,ppc440mc-control = <0xf810008f>;
- xlnx,ppc440mc-max-burst = <0x8>;
- xlnx,ppc440mc-prio-dcur = <0x2>;
- xlnx,ppc440mc-prio-dcuw = <0x3>;
- xlnx,ppc440mc-prio-icu = <0x4>;
- xlnx,ppc440mc-prio-splb0 = <0x1>;
- xlnx,ppc440mc-prio-splb1 = <0x0>;
- xlnx,ppc440mc-row-conflict-mask = <0x7ffe00>;
- xlnx,ppcdm-asyncmode = <0x0>;
- xlnx,ppcds-asyncmode = <0x0>;
- xlnx,user-reset = <0x0>;
- } ;
- } ;
- plb_v46_0: plb@0 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "xlnx,plb-v46-1.03.a", "simple-bus";
- ranges ;
- FLASH: flash@fc000000 {
- bank-width = <2>;
- compatible = "xlnx,xps-mch-emc-2.00.a", "cfi-flash";
- reg = < 0xfc000000 0x2000000 >;
- xlnx,family = "virtex5";
- xlnx,include-datawidth-matching-0 = <0x1>;
- xlnx,include-datawidth-matching-1 = <0x0>;
- xlnx,include-datawidth-matching-2 = <0x0>;
- xlnx,include-datawidth-matching-3 = <0x0>;
- xlnx,include-negedge-ioregs = <0x0>;
- xlnx,include-plb-ipif = <0x1>;
- xlnx,include-wrbuf = <0x1>;
- xlnx,max-mem-width = <0x10>;
- xlnx,mch-native-dwidth = <0x20>;
- xlnx,mch-plb-clk-period-ps = <0x2710>;
- xlnx,mch-splb-awidth = <0x20>;
- xlnx,mch0-accessbuf-depth = <0x10>;
- xlnx,mch0-protocol = <0x0>;
- xlnx,mch0-rddatabuf-depth = <0x10>;
- xlnx,mch1-accessbuf-depth = <0x10>;
- xlnx,mch1-protocol = <0x0>;
- xlnx,mch1-rddatabuf-depth = <0x10>;
- xlnx,mch2-accessbuf-depth = <0x10>;
- xlnx,mch2-protocol = <0x0>;
- xlnx,mch2-rddatabuf-depth = <0x10>;
- xlnx,mch3-accessbuf-depth = <0x10>;
- xlnx,mch3-protocol = <0x0>;
- xlnx,mch3-rddatabuf-depth = <0x10>;
- xlnx,mem0-width = <0x10>;
- xlnx,mem1-width = <0x20>;
- xlnx,mem2-width = <0x20>;
- xlnx,mem3-width = <0x20>;
- xlnx,num-banks-mem = <0x1>;
- xlnx,num-channels = <0x2>;
- xlnx,priority-mode = <0x0>;
- xlnx,synch-mem-0 = <0x0>;
- xlnx,synch-mem-1 = <0x0>;
- xlnx,synch-mem-2 = <0x0>;
- xlnx,synch-mem-3 = <0x0>;
- xlnx,synch-pipedelay-0 = <0x2>;
- xlnx,synch-pipedelay-1 = <0x2>;
- xlnx,synch-pipedelay-2 = <0x2>;
- xlnx,synch-pipedelay-3 = <0x2>;
- xlnx,tavdv-ps-mem-0 = <0x1adb0>;
- xlnx,tavdv-ps-mem-1 = <0x3a98>;
- xlnx,tavdv-ps-mem-2 = <0x3a98>;
- xlnx,tavdv-ps-mem-3 = <0x3a98>;
- xlnx,tcedv-ps-mem-0 = <0x1adb0>;
- xlnx,tcedv-ps-mem-1 = <0x3a98>;
- xlnx,tcedv-ps-mem-2 = <0x3a98>;
- xlnx,tcedv-ps-mem-3 = <0x3a98>;
- xlnx,thzce-ps-mem-0 = <0x88b8>;
- xlnx,thzce-ps-mem-1 = <0x1b58>;
- xlnx,thzce-ps-mem-2 = <0x1b58>;
- xlnx,thzce-ps-mem-3 = <0x1b58>;
- xlnx,thzoe-ps-mem-0 = <0x1b58>;
- xlnx,thzoe-ps-mem-1 = <0x1b58>;
- xlnx,thzoe-ps-mem-2 = <0x1b58>;
- xlnx,thzoe-ps-mem-3 = <0x1b58>;
- xlnx,tlzwe-ps-mem-0 = <0x88b8>;
- xlnx,tlzwe-ps-mem-1 = <0x0>;
- xlnx,tlzwe-ps-mem-2 = <0x0>;
- xlnx,tlzwe-ps-mem-3 = <0x0>;
- xlnx,twc-ps-mem-0 = <0x1adb0>;
- xlnx,twc-ps-mem-1 = <0x3a98>;
- xlnx,twc-ps-mem-2 = <0x3a98>;
- xlnx,twc-ps-mem-3 = <0x3a98>;
- xlnx,twp-ps-mem-0 = <0x11170>;
- xlnx,twp-ps-mem-1 = <0x2ee0>;
- xlnx,twp-ps-mem-2 = <0x2ee0>;
- xlnx,twp-ps-mem-3 = <0x2ee0>;
- xlnx,xcl0-linesize = <0x4>;
- xlnx,xcl0-writexfer = <0x1>;
- xlnx,xcl1-linesize = <0x4>;
- xlnx,xcl1-writexfer = <0x1>;
- xlnx,xcl2-linesize = <0x4>;
- xlnx,xcl2-writexfer = <0x1>;
- xlnx,xcl3-linesize = <0x4>;
- xlnx,xcl3-writexfer = <0x1>;
- } ;
- Hard_Ethernet_MAC: xps-ll-temac@81c00000 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "xlnx,compound";
- ethernet@81c00000 {
- compatible = "xlnx,xps-ll-temac-1.01.b";
- device_type = "network";
- interrupt-parent = <&xps_intc_0>;
- interrupts = < 8 2 >;
- llink-connected = <&Hard_Ethernet_MAC_fifo>;
- local-mac-address = [ 02 00 00 00 00 00 ];
- reg = < 0x81c00000 0x40 >;
- xlnx,bus2core-clk-ratio = <0x1>;
- xlnx,phy-type = <0x3>;
- xlnx,phyaddr = <0x1>;
- xlnx,rxcsum = <0x0>;
- xlnx,rxfifo = <0x8000>;
- xlnx,temac-type = <0x0>;
- xlnx,txcsum = <0x0>;
- xlnx,txfifo = <0x8000>;
- } ;
- } ;
- Hard_Ethernet_MAC_fifo: xps-ll-fifo@81a00000 {
- compatible = "xlnx,xps-ll-fifo-1.01.a";
- interrupt-parent = <&xps_intc_0>;
- interrupts = < 6 2 >;
- reg = < 0x81a00000 0x10000 >;
- xlnx,family = "virtex5";
- } ;
- IIC_EEPROM: i2c@81600000 {
- compatible = "xlnx,xps-iic-2.00.a";
- interrupt-parent = <&xps_intc_0>;
- interrupts = < 9 2 >;
- reg = < 0x81600000 0x10000 >;
- xlnx,clk-freq = <0x5f5e100>;
- xlnx,family = "virtex5";
- xlnx,gpo-width = <0x1>;
- xlnx,iic-freq = <0x186a0>;
- xlnx,scl-inertial-delay = <0x5>;
- xlnx,sda-inertial-delay = <0x5>;
- xlnx,ten-bit-adr = <0x0>;
- } ;
- LCD_OPTIONAL: gpio@81420000 {
- compatible = "xlnx,xps-gpio-1.00.a";
- reg = < 0x81420000 0x10000 >;
- xlnx,all-inputs = <0x0>;
- xlnx,all-inputs-2 = <0x0>;
- xlnx,dout-default = <0x0>;
- xlnx,dout-default-2 = <0x0>;
- xlnx,family = "virtex5";
- xlnx,gpio-width = <0xb>;
- xlnx,interrupt-present = <0x0>;
- xlnx,is-bidir = <0x1>;
- xlnx,is-bidir-2 = <0x1>;
- xlnx,is-dual = <0x0>;
- xlnx,tri-default = <0xffffffff>;
- xlnx,tri-default-2 = <0xffffffff>;
- } ;
- LEDs_4Bit: gpio@81400000 {
- compatible = "xlnx,xps-gpio-1.00.a";
- reg = < 0x81400000 0x10000 >;
- xlnx,all-inputs = <0x0>;
- xlnx,all-inputs-2 = <0x0>;
- xlnx,dout-default = <0x0>;
- xlnx,dout-default-2 = <0x0>;
- xlnx,family = "virtex5";
- xlnx,gpio-width = <0x4>;
- xlnx,interrupt-present = <0x0>;
- xlnx,is-bidir = <0x1>;
- xlnx,is-bidir-2 = <0x1>;
- xlnx,is-dual = <0x0>;
- xlnx,tri-default = <0xffffffff>;
- xlnx,tri-default-2 = <0xffffffff>;
- } ;
- RS232_Uart_1: serial@83e00000 {
- clock-frequency = <100000000>;
- compatible = "xlnx,xps-uart16550-2.00.b", "ns16550";
- current-speed = <9600>;
- device_type = "serial";
- interrupt-parent = <&xps_intc_0>;
- interrupts = < 11 2 >;
- reg = < 0x83e00000 0x10000 >;
- reg-offset = <0x1003>;
- reg-shift = <2>;
- xlnx,family = "virtex5";
- xlnx,has-external-rclk = <0x0>;
- xlnx,has-external-xin = <0x0>;
- xlnx,is-a-16550 = <0x1>;
- } ;
- SPI_EEPROM: xps-spi@feff8000 {
- compatible = "xlnx,xps-spi-2.00.b";
- interrupt-parent = <&xps_intc_0>;
- interrupts = < 10 2 >;
- reg = < 0xfeff8000 0x80 >;
- xlnx,family = "virtex5";
- xlnx,fifo-exist = <0x1>;
- xlnx,num-ss-bits = <0x1>;
- xlnx,num-transfer-bits = <0x8>;
- xlnx,sck-ratio = <0x80>;
- } ;
- SysACE_CompactFlash: sysace@83600000 {
- compatible = "xlnx,xps-sysace-1.00.a";
- interrupt-parent = <&xps_intc_0>;
- interrupts = < 7 2 >;
- reg = < 0x83600000 0x10000 >;
- xlnx,family = "virtex5";
- xlnx,mem-width = <0x10>;
- } ;
- plbv46_pci_0: plbv46-pci@85e00000 {
- #size-cells = <2>;
- #address-cells = <3>;
- compatible = "xlnx,plbv46-pci-1.03.a";
- device_type = "pci";
- reg = < 0x85e00000 0x10000 >;
-
- /*
- * The default ML510 BSB has C_IPIFBAR2PCIBAR_0 set to
- * 0 which means that a read/write to the memory mapped
- * i/o region (which starts at 0xa0000000) for pci
- * bar 0 on the plb side translates to 0.
- * It is important to set this value to 0xa0000000, so
- * that inbound and outbound pci transactions work
- * properly including DMA.
- */
- ranges = <0x02000000 0 0xa0000000 0xa0000000 0 0x20000000
- 0x01000000 0 0x00000000 0xf0000000 0 0x00010000>;
-
- #interrupt-cells = <1>;
- interrupt-parent = <&xps_intc_0>;
- interrupt-map-mask = <0xff00 0x0 0x0 0x7>;
- interrupt-map = <
- /* IRQ mapping for pci slots and ALI M1533
- * periperhals. In total there are 5 interrupt
- * lines connected to a xps_intc controller.
- * Four of them are PCI IRQ A, B, C, D and
- * which correspond to respectively xpx_intc
- * 5, 4, 3 and 2. The fifth interrupt line is
- * connected to the south bridge and this one
- * uses irq 1 and is active high instead of
- * active low.
- *
- * The M1533 contains various peripherals
- * including AC97 audio, a modem, USB, IDE and
- * some power management stuff. The modem
- * isn't connected on the ML510 and the power
- * management core also isn't used.
- */
-
- /* IDSEL 0x16 / dev=6, bus=0 / PCI slot 3 */
- 0x3000 0 0 1 &xps_intc_0 3 2
- 0x3000 0 0 2 &xps_intc_0 2 2
- 0x3000 0 0 3 &xps_intc_0 5 2
- 0x3000 0 0 4 &xps_intc_0 4 2
-
- /* IDSEL 0x13 / dev=3, bus=1 / PCI slot 4 */
- /*
- 0x11800 0 0 1 &xps_intc_0 5 0 2
- 0x11800 0 0 2 &xps_intc_0 4 0 2
- 0x11800 0 0 3 &xps_intc_0 3 0 2
- 0x11800 0 0 4 &xps_intc_0 2 0 2
- */
-
- /* According to the datasheet + schematic
- * ABCD [FPGA] of slot 5 is mapped to DABC.
- * Testing showed that at least A maps to B,
- * the mapping of the other pins is a guess
- * and for that reason the lines have been
- * commented out.
- */
- /* IDSEL 0x15 / dev=5, bus=0 / PCI slot 5 */
- 0x2800 0 0 1 &xps_intc_0 4 2
- /*
- 0x2800 0 0 2 &xps_intc_0 3 2
- 0x2800 0 0 3 &xps_intc_0 2 2
- 0x2800 0 0 4 &xps_intc_0 5 2
- */
-
- /* IDSEL 0x12 / dev=2, bus=1 / PCI slot 6 */
- /*
- 0x11000 0 0 1 &xps_intc_0 4 0 2
- 0x11000 0 0 2 &xps_intc_0 3 0 2
- 0x11000 0 0 3 &xps_intc_0 2 0 2
- 0x11000 0 0 4 &xps_intc_0 5 0 2
- */
-
- /* IDSEL 0x11 / dev=1, bus=0 / AC97 audio */
- 0x0800 0 0 1 &i8259 7 2
-
- /* IDSEL 0x1b / dev=11, bus=0 / IDE */
- 0x5800 0 0 1 &i8259 14 2
-
- /* IDSEL 0x1f / dev 15, bus=0 / 2x USB 1.1 */
- 0x7800 0 0 1 &i8259 7 2
- >;
- ali_m1533 {
- #size-cells = <1>;
- #address-cells = <2>;
- i8259: interrupt-controller@20 {
- reg = <1 0x20 2
- 1 0xa0 2
- 1 0x4d0 2>;
- interrupt-controller;
- device_type = "interrupt-controller";
- #address-cells = <0>;
- #interrupt-cells = <2>;
- compatible = "chrp,iic";
-
- /* south bridge irq is active high */
- interrupts = <1 3>;
- interrupt-parent = <&xps_intc_0>;
- };
- };
- } ;
- xps_bram_if_cntlr_1: xps-bram-if-cntlr@ffff0000 {
- compatible = "xlnx,xps-bram-if-cntlr-1.00.a";
- reg = < 0xffff0000 0x10000 >;
- xlnx,family = "virtex5";
- } ;
- xps_intc_0: interrupt-controller@81800000 {
- #interrupt-cells = <0x2>;
- compatible = "xlnx,xps-intc-1.00.a";
- interrupt-controller ;
- reg = < 0x81800000 0x10000 >;
- xlnx,num-intr-inputs = <0xc>;
- } ;
- xps_tft_0: tft@86e00000 {
- compatible = "xlnx,xps-tft-1.00.a";
- reg = < 0x86e00000 0x10000 >;
- xlnx,dcr-splb-slave-if = <0x1>;
- xlnx,default-tft-base-addr = <0x0>;
- xlnx,family = "virtex5";
- xlnx,i2c-slave-addr = <0x76>;
- xlnx,mplb-awidth = <0x20>;
- xlnx,mplb-dwidth = <0x80>;
- xlnx,mplb-native-dwidth = <0x40>;
- xlnx,mplb-smallest-slave = <0x20>;
- xlnx,tft-interface = <0x1>;
- } ;
- } ;
-} ;
diff --git a/arch/powerpc/boot/dts/walnut.dts b/arch/powerpc/boot/dts/walnut.dts
deleted file mode 100644
index 0872862c9363..000000000000
--- a/arch/powerpc/boot/dts/walnut.dts
+++ /dev/null
@@ -1,246 +0,0 @@
-/*
- * Device Tree Source for IBM Walnut
- *
- * Copyright 2007 IBM Corp.
- * Josh Boyer <jwboyer@linux.vnet.ibm.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without
- * any warranty of any kind, whether express or implied.
- */
-
-/dts-v1/;
-
-/ {
- #address-cells = <1>;
- #size-cells = <1>;
- model = "ibm,walnut";
- compatible = "ibm,walnut";
- dcr-parent = <&{/cpus/cpu@0}>;
-
- aliases {
- ethernet0 = &EMAC;
- serial0 = &UART0;
- serial1 = &UART1;
- };
-
- cpus {
- #address-cells = <1>;
- #size-cells = <0>;
-
- cpu@0 {
- device_type = "cpu";
- model = "PowerPC,405GP";
- reg = <0x00000000>;
- clock-frequency = <200000000>; /* Filled in by zImage */
- timebase-frequency = <0>; /* Filled in by zImage */
- i-cache-line-size = <32>;
- d-cache-line-size = <32>;
- i-cache-size = <16384>;
- d-cache-size = <16384>;
- dcr-controller;
- dcr-access-method = "native";
- };
- };
-
- memory {
- device_type = "memory";
- reg = <0x00000000 0x00000000>; /* Filled in by zImage */
- };
-
- UIC0: interrupt-controller {
- compatible = "ibm,uic";
- interrupt-controller;
- cell-index = <0>;
- dcr-reg = <0x0c0 0x009>;
- #address-cells = <0>;
- #size-cells = <0>;
- #interrupt-cells = <2>;
- };
-
- plb {
- compatible = "ibm,plb3";
- #address-cells = <1>;
- #size-cells = <1>;
- ranges;
- clock-frequency = <0>; /* Filled in by zImage */
-
- SDRAM0: memory-controller {
- compatible = "ibm,sdram-405gp";
- dcr-reg = <0x010 0x002>;
- };
-
- MAL: mcmal {
- compatible = "ibm,mcmal-405gp", "ibm,mcmal";
- dcr-reg = <0x180 0x062>;
- num-tx-chans = <1>;
- num-rx-chans = <1>;
- interrupt-parent = <&UIC0>;
- interrupts = <
- 0xb 0x4 /* TXEOB */
- 0xc 0x4 /* RXEOB */
- 0xa 0x4 /* SERR */
- 0xd 0x4 /* TXDE */
- 0xe 0x4 /* RXDE */>;
- };
-
- POB0: opb {
- compatible = "ibm,opb-405gp", "ibm,opb";
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <0xef600000 0xef600000 0x00a00000>;
- dcr-reg = <0x0a0 0x005>;
- clock-frequency = <0>; /* Filled in by zImage */
-
- UART0: serial@ef600300 {
- device_type = "serial";
- compatible = "ns16550";
- reg = <0xef600300 0x00000008>;
- virtual-reg = <0xef600300>;
- clock-frequency = <0>; /* Filled in by zImage */
- current-speed = <9600>;
- interrupt-parent = <&UIC0>;
- interrupts = <0x0 0x4>;
- };
-
- UART1: serial@ef600400 {
- device_type = "serial";
- compatible = "ns16550";
- reg = <0xef600400 0x00000008>;
- virtual-reg = <0xef600400>;
- clock-frequency = <0>; /* Filled in by zImage */
- current-speed = <9600>;
- interrupt-parent = <&UIC0>;
- interrupts = <0x1 0x4>;
- };
-
- IIC: i2c@ef600500 {
- compatible = "ibm,iic-405gp", "ibm,iic";
- reg = <0xef600500 0x00000011>;
- interrupt-parent = <&UIC0>;
- interrupts = <0x2 0x4>;
- };
-
- GPIO: gpio@ef600700 {
- compatible = "ibm,gpio-405gp";
- reg = <0xef600700 0x00000020>;
- };
-
- EMAC: ethernet@ef600800 {
- device_type = "network";
- compatible = "ibm,emac-405gp", "ibm,emac";
- interrupt-parent = <&UIC0>;
- interrupts = <
- 0xf 0x4 /* Ethernet */
- 0x9 0x4 /* Ethernet Wake Up */>;
- local-mac-address = [000000000000]; /* Filled in by zImage */
- reg = <0xef600800 0x00000070>;
- mal-device = <&MAL>;
- mal-tx-channel = <0>;
- mal-rx-channel = <0>;
- cell-index = <0>;
- max-frame-size = <1500>;
- rx-fifo-size = <4096>;
- tx-fifo-size = <2048>;
- phy-mode = "rmii";
- phy-map = <0x00000001>;
- };
-
- };
-
- EBC0: ebc {
- compatible = "ibm,ebc-405gp", "ibm,ebc";
- dcr-reg = <0x012 0x002>;
- #address-cells = <2>;
- #size-cells = <1>;
- /* The ranges property is supplied by the bootwrapper
- * and is based on the firmware's configuration of the
- * EBC bridge
- */
- clock-frequency = <0>; /* Filled in by zImage */
-
- sram@0,0 {
- reg = <0x00000000 0x00000000 0x00080000>;
- };
-
- flash@0,80000 {
- compatible = "jedec-flash";
- bank-width = <1>;
- reg = <0x00000000 0x00080000 0x00080000>;
- #address-cells = <1>;
- #size-cells = <1>;
- partition@0 {
- label = "OpenBIOS";
- reg = <0x00000000 0x00080000>;
- read-only;
- };
- };
-
- nvram@1,0 {
- /* NVRAM and RTC */
- compatible = "ds1743-nvram";
- #bytes = <0x2000>;
- reg = <0x00000001 0x00000000 0x00002000>;
- };
-
- keyboard@2,0 {
- compatible = "intel,82C42PC";
- reg = <0x00000002 0x00000000 0x00000002>;
- };
-
- ir@3,0 {
- compatible = "ti,TIR2000PAG";
- reg = <0x00000003 0x00000000 0x00000010>;
- };
-
- fpga@7,0 {
- compatible = "Walnut-FPGA";
- reg = <0x00000007 0x00000000 0x00000010>;
- virtual-reg = <0xf0300005>;
- };
- };
-
- PCI0: pci@ec000000 {
- device_type = "pci";
- #interrupt-cells = <1>;
- #size-cells = <2>;
- #address-cells = <3>;
- compatible = "ibm,plb405gp-pci", "ibm,plb-pci";
- primary;
- reg = <0xeec00000 0x00000008 /* Config space access */
- 0xeed80000 0x00000004 /* IACK */
- 0xeed80000 0x00000004 /* Special cycle */
- 0xef480000 0x00000040>; /* Internal registers */
-
- /* Outbound ranges, one memory and one IO,
- * later cannot be changed. Chip supports a second
- * IO range but we don't use it for now
- */
- ranges = <0x02000000 0x00000000 0x80000000 0x80000000 0x00000000 0x20000000
- 0x01000000 0x00000000 0x00000000 0xe8000000 0x00000000 0x00010000>;
-
- /* Inbound 2GB range starting at 0 */
- dma-ranges = <0x42000000 0x0 0x0 0x0 0x0 0x80000000>;
-
- /* Walnut has all 4 IRQ pins tied together per slot */
- interrupt-map-mask = <0xf800 0x0 0x0 0x0>;
- interrupt-map = <
- /* IDSEL 1 */
- 0x800 0x0 0x0 0x0 &UIC0 0x1c 0x8
-
- /* IDSEL 2 */
- 0x1000 0x0 0x0 0x0 &UIC0 0x1d 0x8
-
- /* IDSEL 3 */
- 0x1800 0x0 0x0 0x0 &UIC0 0x1e 0x8
-
- /* IDSEL 4 */
- 0x2000 0x0 0x0 0x0 &UIC0 0x1f 0x8
- >;
- };
- };
-
- chosen {
- stdout-path = "/plb/opb/serial@ef600300";
- };
-};
diff --git a/arch/powerpc/boot/dts/wii.dts b/arch/powerpc/boot/dts/wii.dts
index aaa381da1906..e46143c32308 100644
--- a/arch/powerpc/boot/dts/wii.dts
+++ b/arch/powerpc/boot/dts/wii.dts
@@ -168,6 +168,11 @@
interrupts = <14>;
};
+ srnprot@d800060 {
+ compatible = "nintendo,hollywood-srnprot";
+ reg = <0x0d800060 0x4>;
+ };
+
GPIO: gpio@d8000c0 {
#gpio-cells = <2>;
compatible = "nintendo,hollywood-gpio";
@@ -216,7 +221,18 @@
control@d800100 {
compatible = "nintendo,hollywood-control";
- reg = <0x0d800100 0x300>;
+ /*
+ * Both the address and length are wrong, according to
+ * Wiibrew this should be <0x0d800000 0x400>, but it
+ * requires refactoring the PIC1, GPIO and OTP nodes
+ * before changing that.
+ */
+ reg = <0x0d800100 0xa0>;
+ };
+
+ otp@d8001ec {
+ compatible = "nintendo,hollywood-otp";
+ reg = <0x0d8001ec 0x8>;
};
disk@d806000 {
diff --git a/arch/powerpc/boot/dts/xpedite5200.dts b/arch/powerpc/boot/dts/xpedite5200.dts
index 840ea84bbb59..74b346f2d43c 100644
--- a/arch/powerpc/boot/dts/xpedite5200.dts
+++ b/arch/powerpc/boot/dts/xpedite5200.dts
@@ -132,7 +132,7 @@
reg = <0x68>;
};
- dtt@48 {
+ dtt@34 {
compatible = "maxim,max1237";
reg = <0x34>;
};
diff --git a/arch/powerpc/boot/dts/xpedite5200_xmon.dts b/arch/powerpc/boot/dts/xpedite5200_xmon.dts
index 449fc1b5dc23..d491c7a8f979 100644
--- a/arch/powerpc/boot/dts/xpedite5200_xmon.dts
+++ b/arch/powerpc/boot/dts/xpedite5200_xmon.dts
@@ -136,7 +136,7 @@
reg = <0x68>;
};
- dtt@48 {
+ dtt@34 {
compatible = "maxim,max1237";
reg = <0x34>;
};
diff --git a/arch/powerpc/boot/dummy.c b/arch/powerpc/boot/dummy.c
deleted file mode 100644
index 31dbf45bf99c..000000000000
--- a/arch/powerpc/boot/dummy.c
+++ /dev/null
@@ -1,4 +0,0 @@
-int main(void)
-{
- return 0;
-}
diff --git a/arch/powerpc/boot/ep405.c b/arch/powerpc/boot/ep405.c
deleted file mode 100644
index f9ad1e6a844e..000000000000
--- a/arch/powerpc/boot/ep405.c
+++ /dev/null
@@ -1,71 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Embedded Planet EP405 with PlanetCore firmware
- *
- * (c) Benjamin Herrenschmidt <benh@kernel.crashing.org>, IBM Corp,\
- *
- * Based on ep88xc.c by
- *
- * Scott Wood <scottwood@freescale.com>
- *
- * Copyright (c) 2007 Freescale Semiconductor, Inc.
- */
-
-#include "ops.h"
-#include "stdio.h"
-#include "planetcore.h"
-#include "dcr.h"
-#include "4xx.h"
-#include "io.h"
-
-static char *table;
-static u64 mem_size;
-
-static void platform_fixups(void)
-{
- u64 val;
- void *nvrtc;
-
- dt_fixup_memory(0, mem_size);
- planetcore_set_mac_addrs(table);
-
- if (!planetcore_get_decimal(table, PLANETCORE_KEY_CRYSTAL_HZ, &val)) {
- printf("No PlanetCore crystal frequency key.\r\n");
- return;
- }
- ibm405gp_fixup_clocks(val, 0xa8c000);
- ibm4xx_quiesce_eth((u32 *)0xef600800, NULL);
- ibm4xx_fixup_ebc_ranges("/plb/ebc");
-
- if (!planetcore_get_decimal(table, PLANETCORE_KEY_KB_NVRAM, &val)) {
- printf("No PlanetCore NVRAM size key.\r\n");
- return;
- }
- nvrtc = finddevice("/plb/ebc/nvrtc@4,200000");
- if (nvrtc != NULL) {
- u32 reg[3] = { 4, 0x200000, 0};
- getprop(nvrtc, "reg", reg, 3);
- reg[2] = (val << 10) & 0xffffffff;
- setprop(nvrtc, "reg", reg, 3);
- }
-}
-
-void platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
- unsigned long r6, unsigned long r7)
-{
- table = (char *)r3;
- planetcore_prepare_table(table);
-
- if (!planetcore_get_decimal(table, PLANETCORE_KEY_MB_RAM, &mem_size))
- return;
-
- mem_size *= 1024 * 1024;
- simple_alloc_init(_end, mem_size - (unsigned long)_end, 32, 64);
-
- fdt_init(_dtb_start);
-
- planetcore_set_stdout_path(table);
-
- serial_console_init();
- platform_ops.fixups = platform_fixups;
-}
diff --git a/arch/powerpc/boot/install.sh b/arch/powerpc/boot/install.sh
index b6a256bc96ee..461902c8a46d 100644..100755
--- a/arch/powerpc/boot/install.sh
+++ b/arch/powerpc/boot/install.sh
@@ -15,19 +15,9 @@
# $2 - kernel image file
# $3 - kernel map file
# $4 - default install path (blank if root directory)
-# $5 and more - kernel boot files; zImage*, uImage, cuImage.*, etc.
-#
-# Bail with error code if anything goes wrong
set -e
-# User may have a custom install script
-
-if [ -x ~/bin/${INSTALLKERNEL} ]; then exec ~/bin/${INSTALLKERNEL} "$@"; fi
-if [ -x /sbin/${INSTALLKERNEL} ]; then exec /sbin/${INSTALLKERNEL} "$@"; fi
-
-# Default install
-
# this should work for both the pSeries zImage and the iSeries vmlinux.sm
image_name=`basename $2`
@@ -41,15 +31,3 @@ fi
cat $2 > $4/$image_name
cp $3 $4/System.map
-
-# Copy all the bootable image files
-path=$4
-shift 4
-while [ $# -ne 0 ]; do
- image_name=`basename $1`
- if [ -f $path/$image_name ]; then
- mv $path/$image_name $path/$image_name.old
- fi
- cat $1 > $path/$image_name
- shift
-done;
diff --git a/arch/powerpc/boot/main.c b/arch/powerpc/boot/main.c
index a9d209135975..cae31a6e8f02 100644
--- a/arch/powerpc/boot/main.c
+++ b/arch/powerpc/boot/main.c
@@ -104,7 +104,7 @@ static struct addr_range prep_initrd(struct addr_range vmlinux, void *chosen,
{
/* If we have an image attached to us, it overrides anything
* supplied by the loader. */
- if (_initrd_end > _initrd_start) {
+ if (&_initrd_end > &_initrd_start) {
printf("Attached initrd image at 0x%p-0x%p\n\r",
_initrd_start, _initrd_end);
initrd_addr = (unsigned long)_initrd_start;
@@ -152,7 +152,7 @@ static void prep_esm_blob(struct addr_range vmlinux, void *chosen)
unsigned long esm_blob_addr, esm_blob_size;
/* Do we have an ESM (Enter Secure Mode) blob? */
- if (_esm_blob_end <= _esm_blob_start)
+ if (&_esm_blob_end <= &_esm_blob_start)
return;
printf("Attached ESM blob at 0x%p-0x%p\n\r",
diff --git a/arch/powerpc/boot/microwatt.c b/arch/powerpc/boot/microwatt.c
new file mode 100644
index 000000000000..ca9d83617fc1
--- /dev/null
+++ b/arch/powerpc/boot/microwatt.c
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <stddef.h>
+#include "stdio.h"
+#include "types.h"
+#include "io.h"
+#include "ops.h"
+
+BSS_STACK(8192);
+
+void platform_init(unsigned long r3, unsigned long r4, unsigned long r5)
+{
+ unsigned long heapsize = 16*1024*1024 - (unsigned long)_end;
+
+ /*
+ * Disable interrupts and turn off MSR_RI, since we'll
+ * shortly be overwriting the interrupt vectors.
+ */
+ __asm__ volatile("mtmsrd %0,1" : : "r" (0));
+
+ simple_alloc_init(_end, heapsize, 32, 64);
+ fdt_init(_dtb_start);
+ serial_console_init();
+}
diff --git a/arch/powerpc/boot/ns16550.c b/arch/powerpc/boot/ns16550.c
index b0da4466d419..f16d2be1d0f3 100644
--- a/arch/powerpc/boot/ns16550.c
+++ b/arch/powerpc/boot/ns16550.c
@@ -15,6 +15,7 @@
#include "stdio.h"
#include "io.h"
#include "ops.h"
+#include "of.h"
#define UART_DLL 0 /* Out: Divisor Latch Low */
#define UART_DLM 1 /* Out: Divisor Latch High */
@@ -58,16 +59,20 @@ int ns16550_console_init(void *devp, struct serial_console_data *scdp)
int n;
u32 reg_offset;
- if (dt_get_virtual_reg(devp, (void **)&reg_base, 1) < 1)
+ if (dt_get_virtual_reg(devp, (void **)&reg_base, 1) < 1) {
+ printf("virt reg parse fail...\r\n");
return -1;
+ }
n = getprop(devp, "reg-offset", &reg_offset, sizeof(reg_offset));
if (n == sizeof(reg_offset))
- reg_base += reg_offset;
+ reg_base += be32_to_cpu(reg_offset);
n = getprop(devp, "reg-shift", &reg_shift, sizeof(reg_shift));
if (n != sizeof(reg_shift))
reg_shift = 0;
+ else
+ reg_shift = be32_to_cpu(reg_shift);
scdp->open = ns16550_open;
scdp->putc = ns16550_putc;
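The reg-offset and reg-shift conversions above reflect that flattened device tree cells are stored big-endian, so the wrapper must byte-swap any u32 property it reads before using it. A minimal sketch of that pattern using the same bootwrapper helpers (getprop() and be32_to_cpu()); the dt_read_u32() name is invented for illustration and is not part of the patch:

static u32 dt_read_u32(void *devp, const char *name, u32 defval)
{
	u32 val;

	if (getprop(devp, name, &val, sizeof(val)) != sizeof(val))
		return defval;		/* property absent or wrong size */
	return be32_to_cpu(val);	/* FDT cells are stored big-endian */
}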
diff --git a/arch/powerpc/boot/opal-calls.S b/arch/powerpc/boot/opal-calls.S
index ad0e15d930c4..1f2f330a459e 100644
--- a/arch/powerpc/boot/opal-calls.S
+++ b/arch/powerpc/boot/opal-calls.S
@@ -16,7 +16,7 @@ opal_kentry:
li r5, 0
li r6, 0
li r7, 0
- ld r11,opal@got(r2)
+ LOAD_REG_ADDR(r11, opal)
ld r8,0(r11)
ld r9,8(r11)
bctr
@@ -35,7 +35,7 @@ opal_call:
mr r13,r2
/* Set opal return address */
- ld r11,opal_return@got(r2)
+ LOAD_REG_ADDR(r11, opal_return)
mtlr r11
mfmsr r12
@@ -45,7 +45,7 @@ opal_call:
mtspr SPRN_HSRR1,r12
/* load the opal call entry point and base */
- ld r11,opal@got(r2)
+ LOAD_REG_ADDR(r11, opal)
ld r12,8(r11)
ld r2,0(r11)
mtspr SPRN_HSRR0,r12
diff --git a/arch/powerpc/boot/ops.h b/arch/powerpc/boot/ops.h
index e0606766480f..a40c2162a4e9 100644
--- a/arch/powerpc/boot/ops.h
+++ b/arch/powerpc/boot/ops.h
@@ -1,12 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Global definition of all the bootwrapper operations.
*
* Author: Mark A. Greer <mgreer@mvista.com>
*
- * 2006 (c) MontaVista Software, Inc. This file is licensed under
- * the terms of the GNU General Public License version 2. This program
- * is licensed "as is" without any warranty of any kind, whether express
- * or implied.
+ * 2006 (c) MontaVista Software, Inc.
*/
#ifndef _PPC_BOOT_OPS_H_
#define _PPC_BOOT_OPS_H_
@@ -88,7 +86,6 @@ int serial_console_init(void);
int ns16550_console_init(void *devp, struct serial_console_data *scdp);
int cpm_console_init(void *devp, struct serial_console_data *scdp);
int mpc5200_psc_console_init(void *devp, struct serial_console_data *scdp);
-int uartlite_console_init(void *devp, struct serial_console_data *scdp);
int opal_console_init(void *devp, struct serial_console_data *scdp);
void *simple_alloc_init(char *base, unsigned long heap_size,
unsigned long granularity, unsigned long max_allocs);
@@ -201,12 +198,6 @@ void __dt_fixup_mac_addresses(u32 startindex, ...);
__dt_fixup_mac_addresses(0, __VA_ARGS__, NULL)
-static inline void *find_node_by_linuxphandle(const u32 linuxphandle)
-{
- return find_node_by_prop_value(NULL, "linux,phandle",
- (char *)&linuxphandle, sizeof(u32));
-}
-
static inline char *get_path(const void *phandle, char *buf, int len)
{
if (dt_ops.get_path)
diff --git a/arch/powerpc/boot/ppc_asm.h b/arch/powerpc/boot/ppc_asm.h
index 192b97523b05..a66cfd76fa4d 100644
--- a/arch/powerpc/boot/ppc_asm.h
+++ b/arch/powerpc/boot/ppc_asm.h
@@ -84,4 +84,14 @@
#define MFTBU(dest) mfspr dest, SPRN_TBRU
#endif
+#ifdef CONFIG_PPC64_BOOT_WRAPPER
+#define LOAD_REG_ADDR(reg,name) \
+ addis reg,r2,name@toc@ha; \
+ addi reg,reg,name@toc@l
+#else
+#define LOAD_REG_ADDR(reg,name) \
+ lis reg,name@ha; \
+ addi reg,reg,name@l
+#endif
+
#endif /* _PPC64_PPC_ASM_H */
diff --git a/arch/powerpc/boot/ps3.c b/arch/powerpc/boot/ps3.c
index c52552a681c5..f157717ae814 100644
--- a/arch/powerpc/boot/ps3.c
+++ b/arch/powerpc/boot/ps3.c
@@ -21,13 +21,6 @@ extern int lv1_get_logical_ppe_id(u64 *out_1);
extern int lv1_get_repository_node_value(u64 in_1, u64 in_2, u64 in_3,
u64 in_4, u64 in_5, u64 *out_1, u64 *out_2);
-#ifdef DEBUG
-#define DBG(fmt...) printf(fmt)
-#else
-static inline int __attribute__ ((format (printf, 1, 2))) DBG(
- const char *fmt, ...) {return 0;}
-#endif
-
BSS_STACK(4096);
/* A buffer that may be edited by tools operating on a zImage binary so as to
@@ -127,7 +120,7 @@ void platform_init(void)
ps3_repository_read_rm_size(&rm_size);
dt_fixup_memory(0, rm_size);
- if (_initrd_end > _initrd_start) {
+ if (&_initrd_end > &_initrd_start) {
setprop_val(chosen, "linux,initrd-start", (u32)(_initrd_start));
setprop_val(chosen, "linux,initrd-end", (u32)(_initrd_end));
}
diff --git a/arch/powerpc/boot/serial.c b/arch/powerpc/boot/serial.c
index 9457863147f9..c6d32a8c3612 100644
--- a/arch/powerpc/boot/serial.c
+++ b/arch/powerpc/boot/serial.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Generic serial console support
*
@@ -6,10 +7,7 @@
* Code in serial_edit_cmdline() copied from <file:arch/ppc/boot/simple/misc.c>
* and was written by Matt Porter <mporter@kernel.crashing.org>.
*
- * 2001,2006 (c) MontaVista Software, Inc. This file is licensed under
- * the terms of the GNU General Public License version 2. This program
- * is licensed "as is" without any warranty of any kind, whether express
- * or implied.
+ * 2001,2006 (c) MontaVista Software, Inc.
*/
#include <stdarg.h>
#include <stddef.h>
@@ -128,16 +126,11 @@ int serial_console_init(void)
dt_is_compatible(devp, "fsl,cpm2-smc-uart"))
rc = cpm_console_init(devp, &serial_cd);
#endif
-#ifdef CONFIG_PPC_MPC52XX
+#ifdef CONFIG_PPC_MPC52xx
else if (dt_is_compatible(devp, "fsl,mpc5200-psc-uart"))
rc = mpc5200_psc_console_init(devp, &serial_cd);
#endif
-#ifdef CONFIG_XILINX_VIRTEX
- else if (dt_is_compatible(devp, "xlnx,opb-uartlite-1.00.b") ||
- dt_is_compatible(devp, "xlnx,xps-uartlite-1.00.a"))
- rc = uartlite_console_init(devp, &serial_cd);
-#endif
-#ifdef CONFIG_PPC64_BOOT_WRAPPER
+#ifdef CONFIG_PPC_POWERNV
else if (dt_is_compatible(devp, "ibm,opal-console-raw"))
rc = opal_console_init(devp, &serial_cd);
#endif
diff --git a/arch/powerpc/boot/simple_alloc.c b/arch/powerpc/boot/simple_alloc.c
index 65ec135d0157..267d6524caac 100644
--- a/arch/powerpc/boot/simple_alloc.c
+++ b/arch/powerpc/boot/simple_alloc.c
@@ -1,12 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Implement primitive realloc(3) functionality.
*
* Author: Mark A. Greer <mgreer@mvista.com>
*
- * 2006 (c) MontaVista, Software, Inc. This file is licensed under
- * the terms of the GNU General Public License version 2. This program
- * is licensed "as is" without any warranty of any kind, whether express
- * or implied.
+ * 2006 (c) MontaVista, Software, Inc.
*/
#include <stddef.h>
diff --git a/arch/powerpc/boot/treeboot-walnut.c b/arch/powerpc/boot/treeboot-walnut.c
deleted file mode 100644
index 623f58e7f7c9..000000000000
--- a/arch/powerpc/boot/treeboot-walnut.c
+++ /dev/null
@@ -1,81 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Old U-boot compatibility for Walnut
- *
- * Author: Josh Boyer <jwboyer@linux.vnet.ibm.com>
- *
- * Copyright 2007 IBM Corporation
- * Based on cuboot-83xx.c, which is:
- * Copyright (c) 2007 Freescale Semiconductor, Inc.
- */
-
-#include "ops.h"
-#include "stdio.h"
-#include "dcr.h"
-#include "4xx.h"
-#include "io.h"
-
-BSS_STACK(4096);
-
-static void walnut_flashsel_fixup(void)
-{
- void *devp, *sram;
- u32 reg_flash[3] = {0x0, 0x0, 0x80000};
- u32 reg_sram[3] = {0x0, 0x0, 0x80000};
- u8 *fpga;
- u8 fpga_brds1 = 0x0;
-
- devp = finddevice("/plb/ebc/fpga");
- if (!devp)
- fatal("Couldn't locate FPGA node\n\r");
-
- if (getprop(devp, "virtual-reg", &fpga, sizeof(fpga)) != sizeof(fpga))
- fatal("no virtual-reg property\n\r");
-
- fpga_brds1 = in_8(fpga);
-
- devp = finddevice("/plb/ebc/flash");
- if (!devp)
- fatal("Couldn't locate flash node\n\r");
-
- if (getprop(devp, "reg", reg_flash, sizeof(reg_flash)) != sizeof(reg_flash))
- fatal("flash reg property has unexpected size\n\r");
-
- sram = finddevice("/plb/ebc/sram");
- if (!sram)
- fatal("Couldn't locate sram node\n\r");
-
- if (getprop(sram, "reg", reg_sram, sizeof(reg_sram)) != sizeof(reg_sram))
- fatal("sram reg property has unexpected size\n\r");
-
- if (fpga_brds1 & 0x1) {
- reg_flash[1] ^= 0x80000;
- reg_sram[1] ^= 0x80000;
- }
-
- setprop(devp, "reg", reg_flash, sizeof(reg_flash));
- setprop(sram, "reg", reg_sram, sizeof(reg_sram));
-}
-
-#define WALNUT_OPENBIOS_MAC_OFF 0xfffffe0b
-static void walnut_fixups(void)
-{
- ibm4xx_sdram_fixup_memsize();
- ibm405gp_fixup_clocks(33330000, 0xa8c000);
- ibm4xx_quiesce_eth((u32 *)0xef600800, NULL);
- ibm4xx_fixup_ebc_ranges("/plb/ebc");
- walnut_flashsel_fixup();
- dt_fixup_mac_address_by_alias("ethernet0", (u8 *) WALNUT_OPENBIOS_MAC_OFF);
-}
-
-void platform_init(void)
-{
- unsigned long end_of_ram = 0x2000000;
- unsigned long avail_ram = end_of_ram - (unsigned long) _end;
-
- simple_alloc_init(_end, avail_ram, 32, 32);
- platform_ops.fixups = walnut_fixups;
- platform_ops.exit = ibm40x_dbcr_reset;
- fdt_init(_dtb_start);
- serial_console_init();
-}
diff --git a/arch/powerpc/boot/uartlite.c b/arch/powerpc/boot/uartlite.c
deleted file mode 100644
index 46bed69b4169..000000000000
--- a/arch/powerpc/boot/uartlite.c
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- * Xilinx UARTLITE bootloader driver
- *
- * Copyright (C) 2007 Secret Lab Technologies Ltd.
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
- */
-
-#include <stdarg.h>
-#include <stddef.h>
-#include "types.h"
-#include "string.h"
-#include "stdio.h"
-#include "io.h"
-#include "ops.h"
-
-#define ULITE_RX 0x00
-#define ULITE_TX 0x04
-#define ULITE_STATUS 0x08
-#define ULITE_CONTROL 0x0c
-
-#define ULITE_STATUS_RXVALID 0x01
-#define ULITE_STATUS_TXFULL 0x08
-
-#define ULITE_CONTROL_RST_RX 0x02
-
-static void * reg_base;
-
-static int uartlite_open(void)
-{
- /* Clear the RX FIFO */
- out_be32(reg_base + ULITE_CONTROL, ULITE_CONTROL_RST_RX);
- return 0;
-}
-
-static void uartlite_putc(unsigned char c)
-{
- u32 reg = ULITE_STATUS_TXFULL;
- while (reg & ULITE_STATUS_TXFULL) /* spin on TXFULL bit */
- reg = in_be32(reg_base + ULITE_STATUS);
- out_be32(reg_base + ULITE_TX, c);
-}
-
-static unsigned char uartlite_getc(void)
-{
- u32 reg = 0;
- while (!(reg & ULITE_STATUS_RXVALID)) /* spin waiting for RXVALID bit */
- reg = in_be32(reg_base + ULITE_STATUS);
- return in_be32(reg_base + ULITE_RX);
-}
-
-static u8 uartlite_tstc(void)
-{
- u32 reg = in_be32(reg_base + ULITE_STATUS);
- return reg & ULITE_STATUS_RXVALID;
-}
-
-int uartlite_console_init(void *devp, struct serial_console_data *scdp)
-{
- int n;
- unsigned long reg_phys;
-
- n = getprop(devp, "virtual-reg", &reg_base, sizeof(reg_base));
- if (n != sizeof(reg_base)) {
- if (!dt_xlate_reg(devp, 0, &reg_phys, NULL))
- return -1;
-
- reg_base = (void *)reg_phys;
- }
-
- scdp->open = uartlite_open;
- scdp->putc = uartlite_putc;
- scdp->getc = uartlite_getc;
- scdp->tstc = uartlite_tstc;
- scdp->close = NULL;
- return 0;
-}
diff --git a/arch/powerpc/boot/util.S b/arch/powerpc/boot/util.S
index f11f0589a669..6a92376daf3f 100644
--- a/arch/powerpc/boot/util.S
+++ b/arch/powerpc/boot/util.S
@@ -18,7 +18,7 @@
.text
-/* udelay (on non-601 processors) needs to know the period of the
+/* udelay needs to know the period of the
* timebase in nanoseconds. This used to be hardcoded to be 60ns
* (period of 66MHz/4). Now a variable is used that is initialized to
* 60 for backward compatibility, but it can be overridden as necessary
@@ -37,32 +37,16 @@ timebase_period_ns:
*/
.globl udelay
udelay:
- mfspr r4,SPRN_PVR
- srwi r4,r4,16
- cmpwi 0,r4,1 /* 601 ? */
- bne .Ludelay_not_601
-00: li r0,86 /* Instructions / microsecond? */
- mtctr r0
-10: addi r0,r0,0 /* NOP */
- bdnz 10b
- subic. r3,r3,1
- bne 00b
- blr
-
-.Ludelay_not_601:
mulli r4,r3,1000 /* nanoseconds */
/* Change r4 to be the number of ticks using:
* (nanoseconds + (timebase_period_ns - 1 )) / timebase_period_ns
* timebase_period_ns defaults to 60 (16.6MHz) */
mflr r5
- bl 0f
+ bcl 20,31,0f
0: mflr r6
mtlr r5
- lis r5,0b@ha
- addi r5,r5,0b@l
- subf r5,r5,r6 /* In case we're relocated */
- addis r5,r5,timebase_period_ns@ha
- lwz r5,timebase_period_ns@l(r5)
+ addis r5,r6,(timebase_period_ns-0b)@ha
+ lwz r5,(timebase_period_ns-0b)@l(r5)
add r4,r4,r5
addi r4,r4,-1
divw r4,r4,r5 /* BUS ticks */
diff --git a/arch/powerpc/boot/virtex.c b/arch/powerpc/boot/virtex.c
deleted file mode 100644
index f731cbb4bff0..000000000000
--- a/arch/powerpc/boot/virtex.c
+++ /dev/null
@@ -1,97 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * The platform specific code for virtex devices since a boot loader is not
- * always used.
- *
- * (C) Copyright 2008 Xilinx, Inc.
- */
-
-#include "ops.h"
-#include "io.h"
-#include "stdio.h"
-
-#define UART_DLL 0 /* Out: Divisor Latch Low */
-#define UART_DLM 1 /* Out: Divisor Latch High */
-#define UART_FCR 2 /* Out: FIFO Control Register */
-#define UART_FCR_CLEAR_RCVR 0x02 /* Clear the RCVR FIFO */
-#define UART_FCR_CLEAR_XMIT 0x04 /* Clear the XMIT FIFO */
-#define UART_LCR 3 /* Out: Line Control Register */
-#define UART_MCR 4 /* Out: Modem Control Register */
-#define UART_MCR_RTS 0x02 /* RTS complement */
-#define UART_MCR_DTR 0x01 /* DTR complement */
-#define UART_LCR_DLAB 0x80 /* Divisor latch access bit */
-#define UART_LCR_WLEN8 0x03 /* Wordlength: 8 bits */
-
-static int virtex_ns16550_console_init(void *devp)
-{
- unsigned char *reg_base;
- u32 reg_shift, reg_offset, clk, spd;
- u16 divisor;
- int n;
-
- if (dt_get_virtual_reg(devp, (void **)&reg_base, 1) < 1)
- return -1;
-
- n = getprop(devp, "reg-offset", &reg_offset, sizeof(reg_offset));
- if (n == sizeof(reg_offset))
- reg_base += reg_offset;
-
- n = getprop(devp, "reg-shift", &reg_shift, sizeof(reg_shift));
- if (n != sizeof(reg_shift))
- reg_shift = 0;
-
- n = getprop(devp, "current-speed", (void *)&spd, sizeof(spd));
- if (n != sizeof(spd))
- spd = 9600;
-
- /* should there be a default clock rate?*/
- n = getprop(devp, "clock-frequency", (void *)&clk, sizeof(clk));
- if (n != sizeof(clk))
- return -1;
-
- divisor = clk / (16 * spd);
-
- /* Access baud rate */
- out_8(reg_base + (UART_LCR << reg_shift), UART_LCR_DLAB);
-
- /* Baud rate based on input clock */
- out_8(reg_base + (UART_DLL << reg_shift), divisor & 0xFF);
- out_8(reg_base + (UART_DLM << reg_shift), divisor >> 8);
-
- /* 8 data, 1 stop, no parity */
- out_8(reg_base + (UART_LCR << reg_shift), UART_LCR_WLEN8);
-
- /* RTS/DTR */
- out_8(reg_base + (UART_MCR << reg_shift), UART_MCR_RTS | UART_MCR_DTR);
-
- /* Clear transmitter and receiver */
- out_8(reg_base + (UART_FCR << reg_shift),
- UART_FCR_CLEAR_XMIT | UART_FCR_CLEAR_RCVR);
- return 0;
-}
-
-/* For virtex, the kernel may be loaded without using a bootloader and if so
- some UARTs need more setup than is provided in the normal console init
-*/
-int platform_specific_init(void)
-{
- void *devp;
- char devtype[MAX_PROP_LEN];
- char path[MAX_PATH_LEN];
-
- devp = finddevice("/chosen");
- if (devp == NULL)
- return -1;
-
- if (getprop(devp, "linux,stdout-path", path, MAX_PATH_LEN) > 0) {
- devp = finddevice(path);
- if (devp == NULL)
- return -1;
-
- if ((getprop(devp, "device_type", devtype, sizeof(devtype)) > 0)
- && !strcmp(devtype, "serial")
- && (dt_is_compatible(devp, "ns16550")))
- virtex_ns16550_console_init(devp);
- }
- return 0;
-}
diff --git a/arch/powerpc/boot/virtex405-head.S b/arch/powerpc/boot/virtex405-head.S
deleted file mode 100644
index 00bab7d7c48c..000000000000
--- a/arch/powerpc/boot/virtex405-head.S
+++ /dev/null
@@ -1,31 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#include "ppc_asm.h"
-
- .text
- .global _zimage_start
-_zimage_start:
-
- /* PPC errata 213: needed by Virtex-4 FX */
- mfccr0 0
- oris 0,0,0x50000000@h
- mtccr0 0
-
- /*
- * Invalidate the data cache if the data cache is turned off.
- * - The 405 core does not invalidate the data cache on power-up
- * or reset but does turn off the data cache. We cannot assume
- * that the cache contents are valid.
- * - If the data cache is turned on this must have been done by
- * a bootloader and we assume that the cache contents are
- * valid.
- */
- mfdccr r9
- cmplwi r9,0
- bne 2f
- lis r9,0
- li r8,256
- mtctr r8
-1: dccci r0,r9
- addi r9,r9,0x20
- bdnz 1b
-2: b _zimage_start_lib
diff --git a/arch/powerpc/boot/wrapper b/arch/powerpc/boot/wrapper
index ed6266367bc0..5bdd4dd20bbb 100755
--- a/arch/powerpc/boot/wrapper
+++ b/arch/powerpc/boot/wrapper
@@ -26,9 +26,12 @@
# Stop execution if any command fails
set -e
+export LC_ALL=C
+
# Allow for verbose output
if [ "$V" = 1 ]; then
set -x
+ map="-Map wrapper.map"
fi
# defaults
@@ -45,6 +48,8 @@ compression=.gz
uboot_comp=gzip
pie=
format=
+notext=
+rodynamic=
# cross-compilation prefix
CROSS=
@@ -157,7 +162,7 @@ while [ "$#" -gt 0 ]; do
fi
;;
--no-gzip)
- # a "feature" of the the wrapper script is that it can be used outside
+ # a "feature" of the wrapper script is that it can be used outside
# the kernel tree. So keeping this around for backwards compatibility.
compression=
uboot_comp=none
@@ -188,7 +193,7 @@ if [ -z "$kernel" ]; then
kernel=vmlinux
fi
-LANG=C elfformat="`${CROSS}objdump -p "$kernel" | grep 'file format' | awk '{print $4}'`"
+LC_ALL=C elfformat="`${CROSS}objdump -p "$kernel" | grep 'file format' | awk '{print $4}'`"
case "$elfformat" in
elf64-powerpcle) format=elf64lppc ;;
elf64-powerpc) format=elf32ppc ;;
@@ -295,7 +300,7 @@ cuboot*)
*-tqm8541|*-mpc8560*|*-tqm8560|*-tqm8555|*-ksi8560*)
platformo=$object/cuboot-85xx-cpm2.o
;;
- *-mpc85*|*-tqm85*|*-sbc85*)
+ *-mpc85*|*-tqm85*)
platformo=$object/cuboot-85xx.o
;;
*-amigaone)
@@ -323,14 +328,6 @@ adder875-redboot)
platformo="$object/fixed-head.o $object/redboot-8xx.o"
binary=y
;;
-simpleboot-virtex405-*)
- platformo="$object/virtex405-head.o $object/simpleboot.o $object/virtex.o"
- binary=y
- ;;
-simpleboot-virtex440-*)
- platformo="$object/fixed-head.o $object/simpleboot.o $object/virtex.o"
- binary=y
- ;;
simpleboot-*)
platformo="$object/fixed-head.o $object/simpleboot.o"
binary=y
@@ -347,6 +344,11 @@ gamecube|wii)
link_address='0x600000'
platformo="$object/$platform-head.o $object/$platform.o"
;;
+microwatt)
+ link_address='0x500000'
+ platformo="$object/fixed-head.o $object/$platform.o"
+ binary=y
+ ;;
treeboot-currituck)
link_address='0x1000000'
;;
@@ -360,6 +362,8 @@ epapr)
platformo="$object/pseries-head.o $object/epapr.o $object/epapr-wrapper.o"
link_address='0x20000000'
pie=-pie
+ notext='-z notext'
+ rodynamic=$(if ${CROSS}ld -V 2>&1 | grep -q LLD ; then echo "-z rodynamic"; fi)
;;
mvme5100)
platformo="$object/fixed-head.o $object/mvme5100.o"
@@ -429,7 +433,7 @@ fi
# Extract kernel version information, some platforms want to include
# it in the image header
version=`${CROSS}strings "$kernel" | grep '^Linux version [-0-9.]' | \
- cut -d' ' -f3`
+ head -n1 | cut -d' ' -f3`
if [ -n "$version" ]; then
uboot_version="-n Linux-$version"
fi
@@ -500,7 +504,7 @@ if [ "$platform" != "miboot" ]; then
text_start="-Ttext $link_address"
fi
#link everything
- ${CROSS}ld -m $format -T $lds $text_start $pie $nodl -o "$ofile" \
+ ${CROSS}ld -m $format -T $lds $text_start $pie $nodl $rodynamic $notext -o "$ofile" $map \
$platformo $tmp $object/wrapper.a
rm $tmp
fi
@@ -570,7 +574,18 @@ ps3)
count=$overlay_size bs=1
odir="$(dirname "$ofile.bin")"
- rm -f "$odir/otheros.bld"
- gzip -n --force -9 --stdout "$ofile.bin" > "$odir/otheros.bld"
+
+ # The ps3's flash loader has a size limit of 16 MiB for the uncompressed
+ # image. If a compressed image that exceeded this limit is written to
+ # flash the loader will decompress that image until the 16 MiB limit is
+ # reached, then enter the system reset vector of the partially decompressed
+ # image. No warning is issued.
+ rm -f "$odir"/{otheros,otheros-too-big}.bld
+ size=$(${CROSS}nm --no-sort --radix=d "$ofile" | egrep ' _end$' | cut -d' ' -f1)
+ bld="otheros.bld"
+ if [ $size -gt $((0x1000000)) ]; then
+ bld="otheros-too-big.bld"
+ fi
+ gzip -n --force -9 --stdout "$ofile.bin" > "$odir/$bld"
;;
esac
diff --git a/arch/powerpc/boot/zImage.lds.S b/arch/powerpc/boot/zImage.lds.S
index a21f3a76e06f..d65cd55a6f38 100644
--- a/arch/powerpc/boot/zImage.lds.S
+++ b/arch/powerpc/boot/zImage.lds.S
@@ -34,6 +34,14 @@ SECTIONS
__dynamic_start = .;
*(.dynamic)
}
+
+#ifdef CONFIG_PPC64_BOOT_WRAPPER
+ .got : ALIGN(256)
+ {
+ *(.got .toc)
+ }
+#endif
+
.hash : { *(.hash) }
.interp : { *(.interp) }
.rela.dyn :
@@ -76,16 +84,6 @@ SECTIONS
_esm_blob_end = .;
}
-#ifdef CONFIG_PPC64_BOOT_WRAPPER
- . = ALIGN(256);
- .got :
- {
- __toc_start = .;
- *(.got)
- *(.toc)
- }
-#endif
-
. = ALIGN(4096);
.bss :
{
diff --git a/arch/powerpc/boot/zImage.ps3.lds.S b/arch/powerpc/boot/zImage.ps3.lds.S
index 7b2ff2eaa73a..d0ffb493614d 100644
--- a/arch/powerpc/boot/zImage.ps3.lds.S
+++ b/arch/powerpc/boot/zImage.ps3.lds.S
@@ -8,7 +8,7 @@ SECTIONS
.kernel:vmlinux.bin : { *(.kernel:vmlinux.bin) }
_vmlinux_end = .;
- . = ALIGN(4096);
+ . = ALIGN(8);
_dtb_start = .;
.kernel:dtb : { *(.kernel:dtb) }
_dtb_end = .;
diff --git a/arch/powerpc/configs/32-bit.config b/arch/powerpc/configs/32-bit.config
new file mode 100644
index 000000000000..ad6546850c68
--- /dev/null
+++ b/arch/powerpc/configs/32-bit.config
@@ -0,0 +1 @@
+# CONFIG_PPC64 is not set
diff --git a/arch/powerpc/configs/40x/acadia_defconfig b/arch/powerpc/configs/40x/acadia_defconfig
index db93c117be36..25eed86ec528 100644
--- a/arch/powerpc/configs/40x/acadia_defconfig
+++ b/arch/powerpc/configs/40x/acadia_defconfig
@@ -9,7 +9,6 @@ CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
CONFIG_ACADIA=y
-# CONFIG_WALNUT is not set
CONFIG_PCI=y
CONFIG_NET=y
CONFIG_PACKET=y
diff --git a/arch/powerpc/configs/40x/ep405_defconfig b/arch/powerpc/configs/40x/ep405_defconfig
deleted file mode 100644
index a3854cf65f8d..000000000000
--- a/arch/powerpc/configs/40x/ep405_defconfig
+++ /dev/null
@@ -1,62 +0,0 @@
-CONFIG_40x=y
-CONFIG_SYSVIPC=y
-CONFIG_POSIX_MQUEUE=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_BLK_DEV_INITRD=y
-CONFIG_EXPERT=y
-CONFIG_KALLSYMS_ALL=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-# CONFIG_BLK_DEV_BSG is not set
-CONFIG_EP405=y
-# CONFIG_WALNUT is not set
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_INET=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_DHCP=y
-CONFIG_IP_PNP_BOOTP=y
-# CONFIG_IPV6 is not set
-CONFIG_CONNECTOR=y
-CONFIG_MTD=y
-CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_BLOCK=m
-CONFIG_MTD_CFI=y
-CONFIG_MTD_JEDECPROBE=y
-CONFIG_MTD_CFI_AMDSTD=y
-CONFIG_MTD_PHYSMAP_OF=y
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_SIZE=35000
-CONFIG_NETDEVICES=y
-CONFIG_IBM_EMAC=y
-# CONFIG_INPUT is not set
-# CONFIG_SERIO is not set
-# CONFIG_VT is not set
-CONFIG_SERIAL_8250=y
-CONFIG_SERIAL_8250_CONSOLE=y
-CONFIG_SERIAL_8250_EXTENDED=y
-CONFIG_SERIAL_8250_SHARE_IRQ=y
-CONFIG_SERIAL_OF_PLATFORM=y
-# CONFIG_HW_RANDOM is not set
-# CONFIG_HWMON is not set
-CONFIG_THERMAL=y
-CONFIG_USB=y
-CONFIG_USB_MON=y
-CONFIG_USB_OHCI_HCD=y
-CONFIG_USB_OHCI_HCD_PPC_OF_BE=y
-CONFIG_USB_OHCI_HCD_PPC_OF_LE=y
-CONFIG_EXT2_FS=y
-CONFIG_PROC_KCORE=y
-CONFIG_TMPFS=y
-CONFIG_CRAMFS=y
-CONFIG_NFS_FS=y
-CONFIG_ROOT_NFS=y
-CONFIG_DEBUG_FS=y
-CONFIG_MAGIC_SYSRQ=y
-CONFIG_DETECT_HUNG_TASK=y
-CONFIG_CRYPTO_CBC=y
-CONFIG_CRYPTO_ECB=y
-CONFIG_CRYPTO_PCBC=y
-CONFIG_CRYPTO_MD5=y
-CONFIG_CRYPTO_DES=y
diff --git a/arch/powerpc/configs/40x/kilauea_defconfig b/arch/powerpc/configs/40x/kilauea_defconfig
index edc22464dfb5..3549c9e950e8 100644
--- a/arch/powerpc/configs/40x/kilauea_defconfig
+++ b/arch/powerpc/configs/40x/kilauea_defconfig
@@ -11,7 +11,6 @@ CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
CONFIG_KILAUEA=y
-# CONFIG_WALNUT is not set
CONFIG_PCI=y
CONFIG_NET=y
CONFIG_PACKET=y
diff --git a/arch/powerpc/configs/40x/klondike_defconfig b/arch/powerpc/configs/40x/klondike_defconfig
index 579fa846839c..6a735ee75715 100644
--- a/arch/powerpc/configs/40x/klondike_defconfig
+++ b/arch/powerpc/configs/40x/klondike_defconfig
@@ -8,7 +8,6 @@ CONFIG_EMBEDDED=y
CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
-# CONFIG_WALNUT is not set
CONFIG_APM8018X=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_MATH_EMULATION=y
diff --git a/arch/powerpc/configs/40x/makalu_defconfig b/arch/powerpc/configs/40x/makalu_defconfig
index 188789b9aa4c..4563f88acf0c 100644
--- a/arch/powerpc/configs/40x/makalu_defconfig
+++ b/arch/powerpc/configs/40x/makalu_defconfig
@@ -9,7 +9,6 @@ CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
CONFIG_MAKALU=y
-# CONFIG_WALNUT is not set
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
diff --git a/arch/powerpc/configs/40x/obs600_defconfig b/arch/powerpc/configs/40x/obs600_defconfig
index 5bf6af7ef093..2a2bb3f46847 100644
--- a/arch/powerpc/configs/40x/obs600_defconfig
+++ b/arch/powerpc/configs/40x/obs600_defconfig
@@ -10,7 +10,6 @@ CONFIG_KALLSYMS_ALL=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
-# CONFIG_WALNUT is not set
CONFIG_OBS600=y
CONFIG_MATH_EMULATION=y
CONFIG_NET=y
diff --git a/arch/powerpc/configs/40x/virtex_defconfig b/arch/powerpc/configs/40x/virtex_defconfig
deleted file mode 100644
index 5e7c61d1d7d0..000000000000
--- a/arch/powerpc/configs/40x/virtex_defconfig
+++ /dev/null
@@ -1,75 +0,0 @@
-CONFIG_40x=y
-# CONFIG_LOCALVERSION_AUTO is not set
-CONFIG_SYSVIPC=y
-CONFIG_POSIX_MQUEUE=y
-CONFIG_IKCONFIG=y
-CONFIG_IKCONFIG_PROC=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_BLK_DEV_INITRD=y
-CONFIG_SLAB=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-CONFIG_MODULE_FORCE_UNLOAD=y
-CONFIG_MODVERSIONS=y
-# CONFIG_BLK_DEV_BSG is not set
-# CONFIG_WALNUT is not set
-CONFIG_XILINX_VIRTEX_GENERIC_BOARD=y
-CONFIG_PREEMPT=y
-CONFIG_MATH_EMULATION=y
-CONFIG_CMDLINE_BOOL=y
-CONFIG_CMDLINE=""
-CONFIG_PCI=y
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_INET=y
-CONFIG_IP_MULTICAST=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_DHCP=y
-CONFIG_IP_PNP_BOOTP=y
-CONFIG_NETFILTER=y
-CONFIG_IP_NF_IPTABLES=m
-CONFIG_IP_NF_FILTER=m
-CONFIG_IP_NF_MANGLE=m
-CONFIG_BLK_DEV_LOOP=y
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_SIZE=8192
-CONFIG_XILINX_SYSACE=y
-CONFIG_NETDEVICES=y
-# CONFIG_SERIO_SERPORT is not set
-CONFIG_SERIO_XILINX_XPS_PS2=y
-CONFIG_SERIAL_8250=y
-CONFIG_SERIAL_8250_CONSOLE=y
-CONFIG_SERIAL_OF_PLATFORM=y
-CONFIG_SERIAL_UARTLITE=y
-CONFIG_SERIAL_UARTLITE_CONSOLE=y
-CONFIG_XILINX_HWICAP=y
-CONFIG_GPIOLIB=y
-CONFIG_GPIO_SYSFS=y
-CONFIG_GPIO_XILINX=y
-# CONFIG_HWMON is not set
-CONFIG_FB=y
-CONFIG_FB_XILINX=y
-CONFIG_FRAMEBUFFER_CONSOLE=y
-CONFIG_LOGO=y
-# CONFIG_USB_SUPPORT is not set
-CONFIG_EXT2_FS=y
-CONFIG_AUTOFS4_FS=y
-CONFIG_MSDOS_FS=y
-CONFIG_VFAT_FS=y
-CONFIG_TMPFS=y
-CONFIG_CRAMFS=y
-CONFIG_ROMFS_FS=y
-CONFIG_NFS_FS=y
-CONFIG_ROOT_NFS=y
-CONFIG_NLS_CODEPAGE_437=y
-CONFIG_NLS_ASCII=m
-CONFIG_NLS_ISO8859_1=m
-CONFIG_NLS_UTF8=m
-CONFIG_CRC_CCITT=y
-CONFIG_FONTS=y
-CONFIG_FONT_8x8=y
-CONFIG_FONT_8x16=y
-CONFIG_PRINTK_TIME=y
-CONFIG_DEBUG_INFO=y
-CONFIG_DEBUG_KERNEL=y
diff --git a/arch/powerpc/configs/44x/akebono_defconfig b/arch/powerpc/configs/44x/akebono_defconfig
index 7705a5c3f4ea..fde4824f235e 100644
--- a/arch/powerpc/configs/44x/akebono_defconfig
+++ b/arch/powerpc/configs/44x/akebono_defconfig
@@ -8,7 +8,6 @@ CONFIG_EXPERT=y
CONFIG_KALLSYMS_ALL=y
# CONFIG_SLUB_CPU_PARTIAL is not set
CONFIG_PROFILING=y
-CONFIG_OPROFILE=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
@@ -19,10 +18,9 @@ CONFIG_HIGHMEM=y
CONFIG_HZ_100=y
CONFIG_IRQ_ALL_CPUS=y
# CONFIG_COMPACTION is not set
-CONFIG_CMDLINE_BOOL=y
-CONFIG_CMDLINE=""
# CONFIG_SUSPEND is not set
CONFIG_NET=y
+CONFIG_NETDEVICES=y
CONFIG_PACKET=y
CONFIG_UNIX=y
CONFIG_INET=y
@@ -43,7 +41,9 @@ CONFIG_BLK_DEV_RAM_SIZE=35000
# CONFIG_SCSI_PROC_FS is not set
CONFIG_BLK_DEV_SD=y
# CONFIG_SCSI_LOWLEVEL is not set
+CONFIG_ATA=y
# CONFIG_SATA_PMP is not set
+CONFIG_SATA_AHCI_PLATFORM=y
# CONFIG_ATA_SFF is not set
# CONFIG_NET_VENDOR_3COM is not set
# CONFIG_NET_VENDOR_ADAPTEC is not set
@@ -58,7 +58,6 @@ CONFIG_BLK_DEV_SD=y
# CONFIG_NET_VENDOR_DEC is not set
# CONFIG_NET_VENDOR_DLINK is not set
# CONFIG_NET_VENDOR_EMULEX is not set
-# CONFIG_NET_VENDOR_EXAR is not set
CONFIG_IBM_EMAC=y
# CONFIG_NET_VENDOR_MARVELL is not set
# CONFIG_NET_VENDOR_MELLANOX is not set
@@ -101,6 +100,8 @@ CONFIG_USB_OHCI_HCD=y
# CONFIG_USB_OHCI_HCD_PCI is not set
CONFIG_USB_STORAGE=y
CONFIG_MMC=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_M41T80=y
CONFIG_EXT2_FS=y
@@ -117,7 +118,7 @@ CONFIG_CRAMFS=y
CONFIG_NLS_DEFAULT="n"
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
-CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DETECT_HUNG_TASK=y
CONFIG_XMON=y
diff --git a/arch/powerpc/configs/44x/arches_defconfig b/arch/powerpc/configs/44x/arches_defconfig
index 82c6f49b8dcb..41d04e70d4fb 100644
--- a/arch/powerpc/configs/44x/arches_defconfig
+++ b/arch/powerpc/configs/44x/arches_defconfig
@@ -11,8 +11,6 @@ CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_EBONY is not set
CONFIG_ARCHES=y
-CONFIG_CMDLINE_BOOL=y
-CONFIG_CMDLINE=""
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
diff --git a/arch/powerpc/configs/44x/bamboo_defconfig b/arch/powerpc/configs/44x/bamboo_defconfig
index 679213214a75..acbce718eaa8 100644
--- a/arch/powerpc/configs/44x/bamboo_defconfig
+++ b/arch/powerpc/configs/44x/bamboo_defconfig
@@ -9,8 +9,6 @@ CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
CONFIG_BAMBOO=y
# CONFIG_EBONY is not set
-CONFIG_CMDLINE_BOOL=y
-CONFIG_CMDLINE=""
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
diff --git a/arch/powerpc/configs/44x/bluestone_defconfig b/arch/powerpc/configs/44x/bluestone_defconfig
index 8006a5728afd..37088f250c9e 100644
--- a/arch/powerpc/configs/44x/bluestone_defconfig
+++ b/arch/powerpc/configs/44x/bluestone_defconfig
@@ -11,8 +11,6 @@ CONFIG_EXPERT=y
# CONFIG_COMPAT_BRK is not set
CONFIG_BLUESTONE=y
# CONFIG_EBONY is not set
-CONFIG_CMDLINE_BOOL=y
-CONFIG_CMDLINE=""
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
diff --git a/arch/powerpc/configs/44x/canyonlands_defconfig b/arch/powerpc/configs/44x/canyonlands_defconfig
index ccc14eb7a2f1..61776ade572b 100644
--- a/arch/powerpc/configs/44x/canyonlands_defconfig
+++ b/arch/powerpc/configs/44x/canyonlands_defconfig
@@ -11,8 +11,6 @@ CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_EBONY is not set
CONFIG_CANYONLANDS=y
-CONFIG_CMDLINE_BOOL=y
-CONFIG_CMDLINE=""
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
diff --git a/arch/powerpc/configs/44x/currituck_defconfig b/arch/powerpc/configs/44x/currituck_defconfig
index be76e066df01..7283b7d4a1a5 100644
--- a/arch/powerpc/configs/44x/currituck_defconfig
+++ b/arch/powerpc/configs/44x/currituck_defconfig
@@ -6,7 +6,6 @@ CONFIG_LOG_BUF_SHIFT=14
CONFIG_EXPERT=y
CONFIG_KALLSYMS_ALL=y
CONFIG_PROFILING=y
-CONFIG_OPROFILE=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
@@ -17,8 +16,6 @@ CONFIG_HIGHMEM=y
CONFIG_HZ_100=y
CONFIG_MATH_EMULATION=y
CONFIG_IRQ_ALL_CPUS=y
-CONFIG_CMDLINE_BOOL=y
-CONFIG_CMDLINE=""
# CONFIG_SUSPEND is not set
CONFIG_NET=y
CONFIG_PACKET=y
@@ -76,7 +73,7 @@ CONFIG_NFS_FS=y
CONFIG_NFS_V3_ACL=y
CONFIG_NFS_V4=y
CONFIG_NLS_DEFAULT="n"
-CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DETECT_HUNG_TASK=y
CONFIG_XMON=y
diff --git a/arch/powerpc/configs/44x/eiger_defconfig b/arch/powerpc/configs/44x/eiger_defconfig
index 1abaa63e067f..509300f400e2 100644
--- a/arch/powerpc/configs/44x/eiger_defconfig
+++ b/arch/powerpc/configs/44x/eiger_defconfig
@@ -10,8 +10,6 @@ CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_EBONY is not set
CONFIG_EIGER=y
-CONFIG_CMDLINE_BOOL=y
-CONFIG_CMDLINE=""
CONFIG_PCIEPORTBUS=y
# CONFIG_PCIEASPM is not set
CONFIG_NET=y
diff --git a/arch/powerpc/configs/44x/fsp2_defconfig b/arch/powerpc/configs/44x/fsp2_defconfig
index e67fc041ca3e..3fdfbb29b854 100644
--- a/arch/powerpc/configs/44x/fsp2_defconfig
+++ b/arch/powerpc/configs/44x/fsp2_defconfig
@@ -17,7 +17,6 @@ CONFIG_KALLSYMS_ALL=y
CONFIG_BPF_SYSCALL=y
CONFIG_EMBEDDED=y
CONFIG_PROFILING=y
-CONFIG_OPROFILE=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
@@ -28,7 +27,6 @@ CONFIG_476FPE_ERR46=y
CONFIG_SWIOTLB=y
CONFIG_KEXEC=y
CONFIG_CRASH_DUMP=y
-CONFIG_CMDLINE_BOOL=y
CONFIG_CMDLINE="ip=on rw"
# CONFIG_SUSPEND is not set
# CONFIG_PCI is not set
@@ -112,7 +110,7 @@ CONFIG_XZ_DEC=y
CONFIG_PRINTK_TIME=y
CONFIG_MESSAGE_LOGLEVEL_DEFAULT=3
CONFIG_DYNAMIC_DEBUG=y
-CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DETECT_HUNG_TASK=y
CONFIG_CRYPTO_CBC=y
diff --git a/arch/powerpc/configs/44x/icon_defconfig b/arch/powerpc/configs/44x/icon_defconfig
index 7d7ff84c8200..fb9a15573546 100644
--- a/arch/powerpc/configs/44x/icon_defconfig
+++ b/arch/powerpc/configs/44x/icon_defconfig
@@ -9,8 +9,6 @@ CONFIG_MODULE_UNLOAD=y
# CONFIG_EBONY is not set
CONFIG_ICON=y
CONFIG_HIGHMEM=y
-CONFIG_CMDLINE_BOOL=y
-CONFIG_CMDLINE=""
CONFIG_PCIEPORTBUS=y
# CONFIG_PCIEASPM is not set
CONFIG_NET=y
@@ -30,7 +28,6 @@ CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_PHYSMAP_OF=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=35000
-CONFIG_XILINX_SYSACE=y
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_SCSI_CONSTANTS=y
diff --git a/arch/powerpc/configs/44x/iss476-smp_defconfig b/arch/powerpc/configs/44x/iss476-smp_defconfig
index fb5c73a29bf4..0f6380e1e612 100644
--- a/arch/powerpc/configs/44x/iss476-smp_defconfig
+++ b/arch/powerpc/configs/44x/iss476-smp_defconfig
@@ -7,7 +7,6 @@ CONFIG_BLK_DEV_INITRD=y
CONFIG_EXPERT=y
CONFIG_KALLSYMS_ALL=y
CONFIG_PROFILING=y
-CONFIG_OPROFILE=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
@@ -17,7 +16,6 @@ CONFIG_ISS4xx=y
CONFIG_HZ_100=y
CONFIG_MATH_EMULATION=y
CONFIG_IRQ_ALL_CPUS=y
-CONFIG_CMDLINE_BOOL=y
CONFIG_CMDLINE="root=/dev/issblk0"
# CONFIG_PCI is not set
CONFIG_ADVANCED_OPTIONS=y
@@ -58,7 +56,7 @@ CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_CRAMFS=y
# CONFIG_NETWORK_FILESYSTEMS is not set
-CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DETECT_HUNG_TASK=y
CONFIG_PPC_EARLY_DEBUG=y
diff --git a/arch/powerpc/configs/44x/katmai_defconfig b/arch/powerpc/configs/44x/katmai_defconfig
index c6dc1445fc04..1a0f1c3e0ee9 100644
--- a/arch/powerpc/configs/44x/katmai_defconfig
+++ b/arch/powerpc/configs/44x/katmai_defconfig
@@ -9,8 +9,6 @@ CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_EBONY is not set
CONFIG_KATMAI=y
-CONFIG_CMDLINE_BOOL=y
-CONFIG_CMDLINE=""
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
diff --git a/arch/powerpc/configs/44x/rainier_defconfig b/arch/powerpc/configs/44x/rainier_defconfig
index c83ad03182df..6dd67de06a0b 100644
--- a/arch/powerpc/configs/44x/rainier_defconfig
+++ b/arch/powerpc/configs/44x/rainier_defconfig
@@ -10,8 +10,6 @@ CONFIG_MODULE_UNLOAD=y
# CONFIG_EBONY is not set
CONFIG_RAINIER=y
CONFIG_MATH_EMULATION=y
-CONFIG_CMDLINE_BOOL=y
-CONFIG_CMDLINE=""
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
diff --git a/arch/powerpc/configs/44x/redwood_defconfig b/arch/powerpc/configs/44x/redwood_defconfig
index 640fe1d5af28..e28d76416537 100644
--- a/arch/powerpc/configs/44x/redwood_defconfig
+++ b/arch/powerpc/configs/44x/redwood_defconfig
@@ -10,8 +10,6 @@ CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_EBONY is not set
CONFIG_REDWOOD=y
-CONFIG_CMDLINE_BOOL=y
-CONFIG_CMDLINE=""
CONFIG_PCIEPORTBUS=y
# CONFIG_PCIEASPM is not set
CONFIG_NET=y
diff --git a/arch/powerpc/configs/44x/sam440ep_defconfig b/arch/powerpc/configs/44x/sam440ep_defconfig
index 22dc0dadf576..ef09786d49b9 100644
--- a/arch/powerpc/configs/44x/sam440ep_defconfig
+++ b/arch/powerpc/configs/44x/sam440ep_defconfig
@@ -12,8 +12,6 @@ CONFIG_PARTITION_ADVANCED=y
CONFIG_AMIGA_PARTITION=y
# CONFIG_EBONY is not set
CONFIG_SAM440EP=y
-CONFIG_CMDLINE_BOOL=y
-CONFIG_CMDLINE=""
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
diff --git a/arch/powerpc/configs/44x/sequoia_defconfig b/arch/powerpc/configs/44x/sequoia_defconfig
index 2c0973db8837..b4984eab43eb 100644
--- a/arch/powerpc/configs/44x/sequoia_defconfig
+++ b/arch/powerpc/configs/44x/sequoia_defconfig
@@ -11,8 +11,6 @@ CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_EBONY is not set
CONFIG_SEQUOIA=y
-CONFIG_CMDLINE_BOOL=y
-CONFIG_CMDLINE=""
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
diff --git a/arch/powerpc/configs/44x/taishan_defconfig b/arch/powerpc/configs/44x/taishan_defconfig
index a2d355ca62b2..3ea5932ab852 100644
--- a/arch/powerpc/configs/44x/taishan_defconfig
+++ b/arch/powerpc/configs/44x/taishan_defconfig
@@ -9,8 +9,6 @@ CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_EBONY is not set
CONFIG_TAISHAN=y
-CONFIG_CMDLINE_BOOL=y
-CONFIG_CMDLINE=""
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
diff --git a/arch/powerpc/configs/44x/virtex5_defconfig b/arch/powerpc/configs/44x/virtex5_defconfig
deleted file mode 100644
index 1f74079e1703..000000000000
--- a/arch/powerpc/configs/44x/virtex5_defconfig
+++ /dev/null
@@ -1,74 +0,0 @@
-CONFIG_44x=y
-# CONFIG_LOCALVERSION_AUTO is not set
-CONFIG_SYSVIPC=y
-CONFIG_POSIX_MQUEUE=y
-CONFIG_IKCONFIG=y
-CONFIG_IKCONFIG_PROC=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_BLK_DEV_INITRD=y
-CONFIG_SLAB=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-CONFIG_MODULE_FORCE_UNLOAD=y
-CONFIG_MODVERSIONS=y
-# CONFIG_BLK_DEV_BSG is not set
-# CONFIG_EBONY is not set
-CONFIG_XILINX_VIRTEX440_GENERIC_BOARD=y
-CONFIG_PREEMPT=y
-CONFIG_MATH_EMULATION=y
-CONFIG_CMDLINE_BOOL=y
-CONFIG_CMDLINE=""
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_INET=y
-CONFIG_IP_MULTICAST=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_DHCP=y
-CONFIG_IP_PNP_BOOTP=y
-CONFIG_NETFILTER=y
-CONFIG_IP_NF_IPTABLES=m
-CONFIG_IP_NF_FILTER=m
-CONFIG_IP_NF_MANGLE=m
-CONFIG_BLK_DEV_LOOP=y
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_SIZE=8192
-CONFIG_XILINX_SYSACE=y
-CONFIG_NETDEVICES=y
-# CONFIG_SERIO_SERPORT is not set
-CONFIG_SERIO_XILINX_XPS_PS2=y
-CONFIG_SERIAL_8250=y
-CONFIG_SERIAL_8250_CONSOLE=y
-CONFIG_SERIAL_OF_PLATFORM=y
-CONFIG_SERIAL_UARTLITE=y
-CONFIG_SERIAL_UARTLITE_CONSOLE=y
-CONFIG_XILINX_HWICAP=y
-CONFIG_GPIOLIB=y
-CONFIG_GPIO_SYSFS=y
-CONFIG_GPIO_XILINX=y
-# CONFIG_HWMON is not set
-CONFIG_FB=y
-CONFIG_FB_XILINX=y
-CONFIG_FRAMEBUFFER_CONSOLE=y
-CONFIG_LOGO=y
-# CONFIG_USB_SUPPORT is not set
-CONFIG_EXT2_FS=y
-CONFIG_AUTOFS4_FS=y
-CONFIG_MSDOS_FS=y
-CONFIG_VFAT_FS=y
-CONFIG_TMPFS=y
-CONFIG_CRAMFS=y
-CONFIG_ROMFS_FS=y
-CONFIG_NFS_FS=y
-CONFIG_ROOT_NFS=y
-CONFIG_NLS_CODEPAGE_437=y
-CONFIG_NLS_ASCII=m
-CONFIG_NLS_ISO8859_1=m
-CONFIG_NLS_UTF8=m
-CONFIG_CRC_CCITT=y
-CONFIG_FONTS=y
-CONFIG_FONT_8x8=y
-CONFIG_FONT_8x16=y
-CONFIG_PRINTK_TIME=y
-CONFIG_DEBUG_INFO=y
-CONFIG_DEBUG_KERNEL=y
diff --git a/arch/powerpc/configs/44x/warp_defconfig b/arch/powerpc/configs/44x/warp_defconfig
index af66c69c49fe..20891c413149 100644
--- a/arch/powerpc/configs/44x/warp_defconfig
+++ b/arch/powerpc/configs/44x/warp_defconfig
@@ -14,7 +14,6 @@ CONFIG_MODULE_UNLOAD=y
CONFIG_WARP=y
CONFIG_PPC4xx_GPIO=y
CONFIG_HZ_1000=y
-CONFIG_CMDLINE_BOOL=y
CONFIG_CMDLINE="ip=on"
# CONFIG_PCI is not set
CONFIG_NET=y
@@ -89,7 +88,7 @@ CONFIG_NLS_UTF8=y
CONFIG_CRC_CCITT=y
CONFIG_CRC_T10DIF=y
CONFIG_PRINTK_TIME=y
-CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
CONFIG_DEBUG_FS=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DETECT_HUNG_TASK=y
diff --git a/arch/powerpc/configs/52xx/lite5200b_defconfig b/arch/powerpc/configs/52xx/lite5200b_defconfig
index 63368e677506..7db479dcbc0c 100644
--- a/arch/powerpc/configs/52xx/lite5200b_defconfig
+++ b/arch/powerpc/configs/52xx/lite5200b_defconfig
@@ -58,6 +58,6 @@ CONFIG_NFS_FS=y
CONFIG_NFS_V4=y
CONFIG_ROOT_NFS=y
CONFIG_PRINTK_TIME=y
-CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
CONFIG_DETECT_HUNG_TASK=y
# CONFIG_DEBUG_BUGVERBOSE is not set
diff --git a/arch/powerpc/configs/52xx/motionpro_defconfig b/arch/powerpc/configs/52xx/motionpro_defconfig
index 72762da94846..6186ead1e105 100644
--- a/arch/powerpc/configs/52xx/motionpro_defconfig
+++ b/arch/powerpc/configs/52xx/motionpro_defconfig
@@ -84,7 +84,7 @@ CONFIG_ROOT_NFS=y
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
CONFIG_PRINTK_TIME=y
-CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
CONFIG_DETECT_HUNG_TASK=y
# CONFIG_DEBUG_BUGVERBOSE is not set
CONFIG_CRYPTO_ECB=y
diff --git a/arch/powerpc/configs/52xx/tqm5200_defconfig b/arch/powerpc/configs/52xx/tqm5200_defconfig
index a3c8ca74032c..e6735b945327 100644
--- a/arch/powerpc/configs/52xx/tqm5200_defconfig
+++ b/arch/powerpc/configs/52xx/tqm5200_defconfig
@@ -85,7 +85,7 @@ CONFIG_ROOT_NFS=y
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
CONFIG_PRINTK_TIME=y
-CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
CONFIG_DETECT_HUNG_TASK=y
# CONFIG_DEBUG_BUGVERBOSE is not set
CONFIG_CRYPTO_ECB=y
diff --git a/arch/powerpc/configs/64-bit.config b/arch/powerpc/configs/64-bit.config
new file mode 100644
index 000000000000..0fe6406929e2
--- /dev/null
+++ b/arch/powerpc/configs/64-bit.config
@@ -0,0 +1 @@
+CONFIG_PPC64=y
diff --git a/arch/powerpc/configs/83xx/mpc837x_rdb_defconfig b/arch/powerpc/configs/83xx/mpc837x_rdb_defconfig
index cbcae2a927e9..4e3373381ab6 100644
--- a/arch/powerpc/configs/83xx/mpc837x_rdb_defconfig
+++ b/arch/powerpc/configs/83xx/mpc837x_rdb_defconfig
@@ -77,6 +77,5 @@ CONFIG_NFS_FS=y
CONFIG_NFS_V4=y
CONFIG_ROOT_NFS=y
CONFIG_CRC_T10DIF=y
-# CONFIG_ENABLE_MUST_CHECK is not set
CONFIG_CRYPTO_ECB=m
CONFIG_CRYPTO_PCBC=m
diff --git a/arch/powerpc/configs/85xx-hw.config b/arch/powerpc/configs/85xx-hw.config
index 9575a38c9155..524db76f47b7 100644
--- a/arch/powerpc/configs/85xx-hw.config
+++ b/arch/powerpc/configs/85xx-hw.config
@@ -2,7 +2,6 @@ CONFIG_AQUANTIA_PHY=y
CONFIG_AT803X_PHY=y
CONFIG_ATA=y
CONFIG_BLK_DEV_SD=y
-CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_BLK_DEV_SR=y
CONFIG_BROADCOM_PHY=y
CONFIG_C293_PCIE=y
@@ -68,7 +67,6 @@ CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_CFI_INTELEXT=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_M25P80=y
CONFIG_MTD_NAND_FSL_ELBC=y
CONFIG_MTD_NAND_FSL_IFC=y
CONFIG_MTD_RAW_NAND=y
diff --git a/arch/powerpc/configs/85xx/ge_imp3a_defconfig b/arch/powerpc/configs/85xx/ge_imp3a_defconfig
index f29c166998af..ea719898b581 100644
--- a/arch/powerpc/configs/85xx/ge_imp3a_defconfig
+++ b/arch/powerpc/configs/85xx/ge_imp3a_defconfig
@@ -30,7 +30,7 @@ CONFIG_PREEMPT=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_BINFMT_MISC=m
CONFIG_MATH_EMULATION=y
-CONFIG_FORCE_MAX_ZONEORDER=17
+CONFIG_ARCH_FORCE_MAX_ORDER=17
CONFIG_PCI=y
CONFIG_PCIEPORTBUS=y
CONFIG_PCI_MSI=y
@@ -74,7 +74,6 @@ CONFIG_MTD_PHYSMAP_OF=y
CONFIG_MTD_RAW_NAND=y
CONFIG_MTD_NAND_FSL_ELBC=y
CONFIG_BLK_DEV_LOOP=m
-CONFIG_BLK_DEV_CRYPTOLOOP=m
CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=131072
diff --git a/arch/powerpc/configs/85xx/mpc85xx_cds_defconfig b/arch/powerpc/configs/85xx/mpc85xx_cds_defconfig
index 0683d8c292a8..cea72e85ed26 100644
--- a/arch/powerpc/configs/85xx/mpc85xx_cds_defconfig
+++ b/arch/powerpc/configs/85xx/mpc85xx_cds_defconfig
@@ -29,9 +29,9 @@ CONFIG_SYN_COOKIES=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=32768
-CONFIG_IDE=y
-CONFIG_BLK_DEV_GENERIC=y
-CONFIG_BLK_DEV_VIA82CXXX=y
+CONFIG_ATA=y
+CONFIG_ATA_GENERIC=y
+CONFIG_PATA_VIA=y
CONFIG_NETDEVICES=y
CONFIG_GIANFAR=y
CONFIG_E1000=y
diff --git a/arch/powerpc/configs/85xx/ppa8548_defconfig b/arch/powerpc/configs/85xx/ppa8548_defconfig
index 190978a5b7d5..4bd5f993d26a 100644
--- a/arch/powerpc/configs/85xx/ppa8548_defconfig
+++ b/arch/powerpc/configs/85xx/ppa8548_defconfig
@@ -7,9 +7,7 @@ CONFIG_RAPIDIO=y
CONFIG_FSL_RIO=y
CONFIG_RAPIDIO_DMA_ENGINE=y
CONFIG_RAPIDIO_ENUM_BASIC=y
-CONFIG_RAPIDIO_TSI57X=y
CONFIG_RAPIDIO_CPS_XX=y
-CONFIG_RAPIDIO_TSI568=y
CONFIG_RAPIDIO_CPS_GEN2=y
CONFIG_ADVANCED_OPTIONS=y
CONFIG_LOWMEM_SIZE_BOOL=y
diff --git a/arch/powerpc/configs/85xx/sbc8548_defconfig b/arch/powerpc/configs/85xx/sbc8548_defconfig
deleted file mode 100644
index 258881727119..000000000000
--- a/arch/powerpc/configs/85xx/sbc8548_defconfig
+++ /dev/null
@@ -1,50 +0,0 @@
-CONFIG_PPC_85xx=y
-CONFIG_SYSVIPC=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_BLK_DEV_INITRD=y
-CONFIG_EXPERT=y
-CONFIG_SLAB=y
-# CONFIG_BLK_DEV_BSG is not set
-CONFIG_SBC8548=y
-CONFIG_GEN_RTC=y
-CONFIG_BINFMT_MISC=y
-CONFIG_MATH_EMULATION=y
-# CONFIG_SECCOMP is not set
-CONFIG_PCI=y
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_XFRM_USER=y
-CONFIG_INET=y
-CONFIG_IP_MULTICAST=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_DHCP=y
-CONFIG_IP_PNP_BOOTP=y
-CONFIG_SYN_COOKIES=y
-# CONFIG_IPV6 is not set
-# CONFIG_FW_LOADER is not set
-CONFIG_MTD=y
-CONFIG_MTD_BLOCK=y
-CONFIG_MTD_CFI=y
-CONFIG_MTD_CFI_ADV_OPTIONS=y
-CONFIG_MTD_CFI_GEOMETRY=y
-CONFIG_MTD_CFI_I4=y
-CONFIG_MTD_CFI_INTELEXT=y
-CONFIG_MTD_PHYSMAP_OF=y
-CONFIG_BLK_DEV_LOOP=y
-CONFIG_BLK_DEV_RAM=y
-CONFIG_NETDEVICES=y
-CONFIG_GIANFAR=y
-CONFIG_BROADCOM_PHY=y
-# CONFIG_INPUT_KEYBOARD is not set
-# CONFIG_INPUT_MOUSE is not set
-# CONFIG_SERIO is not set
-# CONFIG_VT is not set
-CONFIG_SERIAL_8250=y
-CONFIG_SERIAL_8250_CONSOLE=y
-# CONFIG_HW_RANDOM is not set
-# CONFIG_USB_SUPPORT is not set
-CONFIG_PROC_KCORE=y
-CONFIG_TMPFS=y
-CONFIG_NFS_FS=y
-CONFIG_ROOT_NFS=y
diff --git a/arch/powerpc/configs/85xx/tqm8540_defconfig b/arch/powerpc/configs/85xx/tqm8540_defconfig
index 98982a0e82d8..bbf040aa1f9a 100644
--- a/arch/powerpc/configs/85xx/tqm8540_defconfig
+++ b/arch/powerpc/configs/85xx/tqm8540_defconfig
@@ -30,9 +30,9 @@ CONFIG_MTD_CFI_AMDSTD=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=32768
-CONFIG_IDE=y
-CONFIG_BLK_DEV_GENERIC=y
-CONFIG_BLK_DEV_VIA82CXXX=y
+CONFIG_ATA=y
+CONFIG_ATA_GENERIC=y
+CONFIG_PATA_VIA=y
CONFIG_NETDEVICES=y
CONFIG_GIANFAR=y
CONFIG_E100=y
diff --git a/arch/powerpc/configs/85xx/tqm8541_defconfig b/arch/powerpc/configs/85xx/tqm8541_defconfig
index a6e21db1dafe..523ad8dcfd9d 100644
--- a/arch/powerpc/configs/85xx/tqm8541_defconfig
+++ b/arch/powerpc/configs/85xx/tqm8541_defconfig
@@ -30,9 +30,9 @@ CONFIG_MTD_CFI_AMDSTD=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=32768
-CONFIG_IDE=y
-CONFIG_BLK_DEV_GENERIC=y
-CONFIG_BLK_DEV_VIA82CXXX=y
+CONFIG_ATA=y
+CONFIG_ATA_GENERIC=y
+CONFIG_PATA_VIA=y
CONFIG_NETDEVICES=y
CONFIG_GIANFAR=y
CONFIG_E100=y
diff --git a/arch/powerpc/configs/85xx/tqm8555_defconfig b/arch/powerpc/configs/85xx/tqm8555_defconfig
index ca1de3979474..0032ce1e8c9c 100644
--- a/arch/powerpc/configs/85xx/tqm8555_defconfig
+++ b/arch/powerpc/configs/85xx/tqm8555_defconfig
@@ -30,9 +30,9 @@ CONFIG_MTD_CFI_AMDSTD=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=32768
-CONFIG_IDE=y
-CONFIG_BLK_DEV_GENERIC=y
-CONFIG_BLK_DEV_VIA82CXXX=y
+CONFIG_ATA=y
+CONFIG_ATA_GENERIC=y
+CONFIG_PATA_VIA=y
CONFIG_NETDEVICES=y
CONFIG_GIANFAR=y
CONFIG_E100=y
diff --git a/arch/powerpc/configs/85xx/tqm8560_defconfig b/arch/powerpc/configs/85xx/tqm8560_defconfig
index ca3b8c8ef30f..a80b971f7d6e 100644
--- a/arch/powerpc/configs/85xx/tqm8560_defconfig
+++ b/arch/powerpc/configs/85xx/tqm8560_defconfig
@@ -30,9 +30,9 @@ CONFIG_MTD_CFI_AMDSTD=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=32768
-CONFIG_IDE=y
-CONFIG_BLK_DEV_GENERIC=y
-CONFIG_BLK_DEV_VIA82CXXX=y
+CONFIG_ATA=y
+CONFIG_ATA_GENERIC=y
+CONFIG_PATA_VIA=y
CONFIG_NETDEVICES=y
CONFIG_GIANFAR=y
CONFIG_E100=y
diff --git a/arch/powerpc/configs/85xx/xes_mpc85xx_defconfig b/arch/powerpc/configs/85xx/xes_mpc85xx_defconfig
index d50aca608736..3a6381aa9fdc 100644
--- a/arch/powerpc/configs/85xx/xes_mpc85xx_defconfig
+++ b/arch/powerpc/configs/85xx/xes_mpc85xx_defconfig
@@ -51,9 +51,6 @@ CONFIG_NET_IPIP=y
CONFIG_IP_MROUTE=y
CONFIG_IP_PIMSM_V1=y
CONFIG_IP_PIMSM_V2=y
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
CONFIG_MTD=y
CONFIG_MTD_REDBOOT_PARTS=y
CONFIG_MTD_CMDLINE_PARTS=y
diff --git a/arch/powerpc/configs/86xx-hw.config b/arch/powerpc/configs/86xx-hw.config
index 151164cf8cb3..0cb24b33c88e 100644
--- a/arch/powerpc/configs/86xx-hw.config
+++ b/arch/powerpc/configs/86xx-hw.config
@@ -32,8 +32,6 @@ CONFIG_HW_RANDOM=y
CONFIG_HZ_1000=y
CONFIG_I2C_MPC=y
CONFIG_I2C=y
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
CONFIG_INPUT_FF_MEMLESS=m
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSEDEV is not set
diff --git a/arch/powerpc/configs/adder875_defconfig b/arch/powerpc/configs/adder875_defconfig
index f55e23cb176c..7f35d5bc1229 100644
--- a/arch/powerpc/configs/adder875_defconfig
+++ b/arch/powerpc/configs/adder875_defconfig
@@ -10,7 +10,6 @@ CONFIG_EXPERT=y
# CONFIG_BLK_DEV_BSG is not set
CONFIG_PARTITION_ADVANCED=y
CONFIG_PPC_ADDER875=y
-CONFIG_8xx_COPYBACK=y
CONFIG_GEN_RTC=y
CONFIG_HZ_1000=y
# CONFIG_SECCOMP is not set
@@ -46,7 +45,7 @@ CONFIG_CRAMFS=y
CONFIG_NFS_FS=y
CONFIG_ROOT_NFS=y
CONFIG_CRC32_SLICEBY4=y
-CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
CONFIG_DEBUG_FS=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DETECT_HUNG_TASK=y
diff --git a/arch/powerpc/configs/amigaone_defconfig b/arch/powerpc/configs/amigaone_defconfig
index f6d140f2d922..200bb1ecb560 100644
--- a/arch/powerpc/configs/amigaone_defconfig
+++ b/arch/powerpc/configs/amigaone_defconfig
@@ -44,7 +44,6 @@ CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=y
CONFIG_BLK_DEV_SR=y
-CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=y
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_SYM53C8XX_2=y
diff --git a/arch/powerpc/configs/cell_defconfig b/arch/powerpc/configs/cell_defconfig
index 42fbc70cec33..06391cc2af3a 100644
--- a/arch/powerpc/configs/cell_defconfig
+++ b/arch/powerpc/configs/cell_defconfig
@@ -14,7 +14,6 @@ CONFIG_CPUSETS=y
CONFIG_BLK_DEV_INITRD=y
# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
-CONFIG_OPROFILE=m
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_PARTITION_ADVANCED=y
@@ -37,7 +36,6 @@ CONFIG_GEN_RTC=y
CONFIG_BINFMT_MISC=m
CONFIG_IRQ_ALL_CPUS=y
CONFIG_NUMA=y
-CONFIG_PPC_64K_PAGES=y
CONFIG_SCHED_SMT=y
CONFIG_PCIEPORTBUS=y
CONFIG_NET=y
@@ -197,7 +195,6 @@ CONFIG_NLS_ISO8859_9=m
CONFIG_NLS_ISO8859_13=m
CONFIG_NLS_ISO8859_14=m
CONFIG_NLS_ISO8859_15=m
-# CONFIG_ENABLE_MUST_CHECK is not set
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_KERNEL=y
CONFIG_DEBUG_MUTEXES=y
diff --git a/arch/powerpc/configs/chrp32_defconfig b/arch/powerpc/configs/chrp32_defconfig
index 502a75d49789..fb314f75ad4b 100644
--- a/arch/powerpc/configs/chrp32_defconfig
+++ b/arch/powerpc/configs/chrp32_defconfig
@@ -42,7 +42,6 @@ CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=y
CONFIG_BLK_DEV_SR=y
-CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=y
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_SYM53C8XX_2=y
@@ -54,7 +53,6 @@ CONFIG_ATA_GENERIC=y
CONFIG_NETDEVICES=y
CONFIG_PCNET32=y
CONFIG_NET_TULIP=y
-CONFIG_DE4X5=y
CONFIG_MV643XX_ETH=y
CONFIG_8139CP=y
CONFIG_8139TOO=y
diff --git a/arch/powerpc/configs/disable-werror.config b/arch/powerpc/configs/disable-werror.config
new file mode 100644
index 000000000000..6ea12a12432c
--- /dev/null
+++ b/arch/powerpc/configs/disable-werror.config
@@ -0,0 +1 @@
+CONFIG_PPC_DISABLE_WERROR=y
diff --git a/arch/powerpc/configs/ep8248e_defconfig b/arch/powerpc/configs/ep8248e_defconfig
index 00d69965f898..8df6d3a293e3 100644
--- a/arch/powerpc/configs/ep8248e_defconfig
+++ b/arch/powerpc/configs/ep8248e_defconfig
@@ -59,7 +59,7 @@ CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ASCII=y
CONFIG_NLS_ISO8859_1=y
CONFIG_NLS_UTF8=y
-CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
CONFIG_MAGIC_SYSRQ=y
# CONFIG_SCHED_DEBUG is not set
CONFIG_BDI_SWITCH=y
diff --git a/arch/powerpc/configs/ep88xc_defconfig b/arch/powerpc/configs/ep88xc_defconfig
index 0e2e5e81a359..a98ef6a4abef 100644
--- a/arch/powerpc/configs/ep88xc_defconfig
+++ b/arch/powerpc/configs/ep88xc_defconfig
@@ -12,7 +12,6 @@ CONFIG_EXPERT=y
# CONFIG_BLK_DEV_BSG is not set
CONFIG_PARTITION_ADVANCED=y
CONFIG_PPC_EP88XC=y
-CONFIG_8xx_COPYBACK=y
CONFIG_GEN_RTC=y
CONFIG_HZ_100=y
# CONFIG_SECCOMP is not set
@@ -49,6 +48,6 @@ CONFIG_CRAMFS=y
CONFIG_NFS_FS=y
CONFIG_ROOT_NFS=y
CONFIG_CRC32_SLICEBY4=y
-CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DETECT_HUNG_TASK=y
diff --git a/arch/powerpc/configs/fsl-emb-nonhw.config b/arch/powerpc/configs/fsl-emb-nonhw.config
index 3c7dad19a691..ab8a8c4530d9 100644
--- a/arch/powerpc/configs/fsl-emb-nonhw.config
+++ b/arch/powerpc/configs/fsl-emb-nonhw.config
@@ -24,7 +24,7 @@ CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_SHA256=y
CONFIG_CRYPTO_SHA512=y
CONFIG_DEBUG_FS=y
-CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
CONFIG_DEBUG_KERNEL=y
CONFIG_DEBUG_SHIRQ=y
CONFIG_DETECT_HUNG_TASK=y
@@ -41,7 +41,7 @@ CONFIG_FIXED_PHY=y
CONFIG_FONT_8x16=y
CONFIG_FONT_8x8=y
CONFIG_FONTS=y
-CONFIG_FORCE_MAX_ZONEORDER=13
+CONFIG_ARCH_FORCE_MAX_ORDER=13
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_FRAME_WARN=1024
CONFIG_FTL=y
@@ -56,7 +56,6 @@ CONFIG_IKCONFIG=y
CONFIG_INET_AH=y
CONFIG_INET_ESP=y
CONFIG_INET_IPCOMP=y
-# CONFIG_INET_XFRM_MODE_BEET is not set
CONFIG_INET=y
CONFIG_IP_ADVANCED_ROUTER=y
CONFIG_IP_MROUTE=y
diff --git a/arch/powerpc/configs/g5_defconfig b/arch/powerpc/configs/g5_defconfig
index fbfcc85e4dc0..71d9d112c0b6 100644
--- a/arch/powerpc/configs/g5_defconfig
+++ b/arch/powerpc/configs/g5_defconfig
@@ -12,7 +12,6 @@ CONFIG_CGROUPS=y
CONFIG_BLK_DEV_INITRD=y
# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
-CONFIG_OPROFILE=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODVERSIONS=y
@@ -27,6 +26,7 @@ CONFIG_CPU_FREQ_PMAC64=y
CONFIG_GEN_RTC=y
CONFIG_KEXEC=y
CONFIG_IRQ_ALL_CPUS=y
+CONFIG_PPC_4K_PAGES=y
CONFIG_PCI_MSI=y
CONFIG_NET=y
CONFIG_PACKET=y
@@ -51,7 +51,6 @@ CONFIG_NF_CONNTRACK_FTP=m
CONFIG_NF_CONNTRACK_IRC=m
CONFIG_NF_CONNTRACK_TFTP=m
CONFIG_NF_CT_NETLINK=m
-CONFIG_NF_CONNTRACK_IPV4=m
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
CONFIG_BLK_DEV_LOOP=y
@@ -62,7 +61,6 @@ CONFIG_CDROM_PKTCDVD=m
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=y
CONFIG_BLK_DEV_SR=y
-CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=y
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_SPI_ATTRS=y
@@ -121,7 +119,6 @@ CONFIG_INPUT_EVDEV=y
# CONFIG_SERIO_I8042 is not set
# CONFIG_SERIO_SERPORT is not set
# CONFIG_HW_RANDOM is not set
-CONFIG_RAW_DRIVER=y
CONFIG_I2C_CHARDEV=y
CONFIG_AGP=m
CONFIG_AGP_UNINORTH=m
@@ -196,7 +193,6 @@ CONFIG_USB_SERIAL_SAFE=m
CONFIG_USB_SERIAL_SAFE_PADDED=y
CONFIG_USB_SERIAL_TI=m
CONFIG_USB_SERIAL_CYBERJACK=m
-CONFIG_USB_SERIAL_XIRCOM=m
CONFIG_USB_SERIAL_OMNINET=m
CONFIG_USB_APPLEDISPLAY=m
CONFIG_EXT2_FS=y
diff --git a/arch/powerpc/configs/gamecube_defconfig b/arch/powerpc/configs/gamecube_defconfig
index 24c0e0ea5aeb..91a1b99f4e8f 100644
--- a/arch/powerpc/configs/gamecube_defconfig
+++ b/arch/powerpc/configs/gamecube_defconfig
@@ -68,7 +68,7 @@ CONFIG_SND_SEQUENCER=y
CONFIG_SND_SEQUENCER_OSS=y
# CONFIG_USB_SUPPORT is not set
CONFIG_RTC_CLASS=y
-CONFIG_RTC_DRV_GENERIC=y
+CONFIG_RTC_DRV_GAMECUBE=y
CONFIG_EXT2_FS=y
CONFIG_EXT4_FS=y
CONFIG_ISO9660_FS=y
diff --git a/arch/powerpc/configs/holly_defconfig b/arch/powerpc/configs/holly_defconfig
index 067f433c8f5e..271daff47d1d 100644
--- a/arch/powerpc/configs/holly_defconfig
+++ b/arch/powerpc/configs/holly_defconfig
@@ -13,7 +13,6 @@ CONFIG_EMBEDDED6xx=y
CONFIG_PPC_HOLLY=y
CONFIG_GEN_RTC=y
CONFIG_BINFMT_MISC=y
-CONFIG_CMDLINE_BOOL=y
CONFIG_CMDLINE="console=ttyS0,115200"
# CONFIG_SECCOMP is not set
CONFIG_NET=y
diff --git a/arch/powerpc/configs/linkstation_defconfig b/arch/powerpc/configs/linkstation_defconfig
index ea59f3d146df..fa707de761be 100644
--- a/arch/powerpc/configs/linkstation_defconfig
+++ b/arch/powerpc/configs/linkstation_defconfig
@@ -37,7 +37,6 @@ CONFIG_NF_CONNTRACK_TFTP=m
CONFIG_NETFILTER_XT_MATCH_MAC=m
CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
CONFIG_NETFILTER_XT_MATCH_STATE=m
-CONFIG_NF_CONNTRACK_IPV4=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_FILTER=m
CONFIG_IP_NF_TARGET_REJECT=m
@@ -121,7 +120,6 @@ CONFIG_NFS_V3_ACL=y
CONFIG_NFS_V4=y
CONFIG_ROOT_NFS=y
CONFIG_NFSD=m
-CONFIG_NFSD_V3=y
CONFIG_CIFS=m
CONFIG_NLS_CODEPAGE_437=m
CONFIG_NLS_CODEPAGE_932=m
diff --git a/arch/powerpc/configs/maple_defconfig b/arch/powerpc/configs/maple_defconfig
index 2975e64629aa..c821a97f4a89 100644
--- a/arch/powerpc/configs/maple_defconfig
+++ b/arch/powerpc/configs/maple_defconfig
@@ -9,7 +9,6 @@ CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
-CONFIG_OPROFILE=m
CONFIG_KPROBES=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
@@ -26,6 +25,7 @@ CONFIG_UDBG_RTAS_CONSOLE=y
CONFIG_GEN_RTC=y
CONFIG_KEXEC=y
CONFIG_IRQ_ALL_CPUS=y
+CONFIG_PPC_4K_PAGES=y
CONFIG_PCI_MSI=y
CONFIG_NET=y
CONFIG_PACKET=y
@@ -41,7 +41,6 @@ CONFIG_BLK_DEV_RAM_SIZE=8192
# CONFIG_SCSI_PROC_FS is not set
CONFIG_BLK_DEV_SD=y
CONFIG_BLK_DEV_SR=y
-CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=y
CONFIG_SCSI_IPR=y
CONFIG_ATA=y
diff --git a/arch/powerpc/configs/mgcoge_defconfig b/arch/powerpc/configs/mgcoge_defconfig
index dcc8dccf54f3..498d35db7833 100644
--- a/arch/powerpc/configs/mgcoge_defconfig
+++ b/arch/powerpc/configs/mgcoge_defconfig
@@ -73,7 +73,7 @@ CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ASCII=y
CONFIG_NLS_ISO8859_1=y
CONFIG_NLS_UTF8=y
-CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
CONFIG_DEBUG_FS=y
CONFIG_MAGIC_SYSRQ=y
# CONFIG_SCHED_DEBUG is not set
diff --git a/arch/powerpc/configs/microwatt_defconfig b/arch/powerpc/configs/microwatt_defconfig
new file mode 100644
index 000000000000..ea2dbd778aad
--- /dev/null
+++ b/arch/powerpc/configs/microwatt_defconfig
@@ -0,0 +1,110 @@
+# CONFIG_SWAP is not set
+# CONFIG_CROSS_MEMORY_ATTACH is not set
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_PREEMPT_VOLUNTARY=y
+CONFIG_TICK_CPU_ACCOUNTING=y
+CONFIG_LOG_BUF_SHIFT=16
+CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT=12
+CONFIG_CGROUPS=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_EMBEDDED=y
+# CONFIG_VM_EVENT_COUNTERS is not set
+# CONFIG_SLUB_DEBUG is not set
+# CONFIG_COMPAT_BRK is not set
+# CONFIG_SLAB_MERGE_DEFAULT is not set
+CONFIG_PPC64=y
+CONFIG_POWER9_CPU=y
+# CONFIG_PPC_64S_HASH_MMU is not set
+# CONFIG_PPC_KUEP is not set
+# CONFIG_PPC_KUAP is not set
+CONFIG_CPU_LITTLE_ENDIAN=y
+CONFIG_NR_IRQS=64
+CONFIG_PANIC_TIMEOUT=10
+# CONFIG_PPC_POWERNV is not set
+# CONFIG_PPC_PSERIES is not set
+CONFIG_PPC_MICROWATT=y
+# CONFIG_PPC_OF_BOOT_TRAMPOLINE is not set
+CONFIG_CPU_FREQ=y
+CONFIG_HZ_100=y
+CONFIG_PPC_4K_PAGES=y
+# CONFIG_SECCOMP is not set
+# CONFIG_MQ_IOSCHED_KYBER is not set
+# CONFIG_COREDUMP is not set
+# CONFIG_COMPACTION is not set
+# CONFIG_MIGRATION is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_PACKET_DIAG=y
+CONFIG_UNIX=y
+CONFIG_UNIX_DIAG=y
+CONFIG_INET=y
+CONFIG_INET_UDP_DIAG=y
+CONFIG_INET_RAW_DIAG=y
+# CONFIG_WIRELESS is not set
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+# CONFIG_STANDALONE is not set
+# CONFIG_PREVENT_FIRMWARE_BUILD is not set
+# CONFIG_FW_LOADER is not set
+# CONFIG_ALLOW_DEV_COREDUMP is not set
+CONFIG_MTD=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_PARTITIONED_MASTER=y
+CONFIG_MTD_SPI_NOR=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_NETDEVICES=y
+CONFIG_LITEX_LITEETH=y
+# CONFIG_WLAN is not set
+# CONFIG_INPUT is not set
+# CONFIG_SERIO is not set
+# CONFIG_VT is not set
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_SERIAL_8250=y
+# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_OF_PLATFORM=y
+CONFIG_SERIAL_NONSTANDARD=y
+# CONFIG_NVRAM is not set
+CONFIG_RANDOM_TRUST_CPU=y
+CONFIG_SPI=y
+CONFIG_SPI_DEBUG=y
+CONFIG_SPI_BITBANG=y
+CONFIG_SPI_SPIDEV=y
+# CONFIG_HWMON is not set
+# CONFIG_USB_SUPPORT is not set
+CONFIG_MMC=y
+# CONFIG_PWRSEQ_EMMC is not set
+# CONFIG_PWRSEQ_SIMPLE is not set
+CONFIG_MMC_LITEX=y
+# CONFIG_VIRTIO_MENU is not set
+CONFIG_COMMON_CLK=y
+# CONFIG_IOMMU_SUPPORT is not set
+# CONFIG_NVMEM is not set
+CONFIG_EXT4_FS=y
+# CONFIG_FILE_LOCKING is not set
+# CONFIG_DNOTIFY is not set
+CONFIG_AUTOFS_FS=y
+CONFIG_TMPFS=y
+# CONFIG_MISC_FILESYSTEMS is not set
+CONFIG_CRYPTO_SHA256=y
+# CONFIG_CRYPTO_HW is not set
+# CONFIG_XZ_DEC_X86 is not set
+# CONFIG_XZ_DEC_IA64 is not set
+# CONFIG_XZ_DEC_ARM is not set
+# CONFIG_XZ_DEC_ARMTHUMB is not set
+# CONFIG_XZ_DEC_SPARC is not set
+CONFIG_PRINTK_TIME=y
+# CONFIG_SYMBOLIC_ERRNAME is not set
+# CONFIG_DEBUG_BUGVERBOSE is not set
+# CONFIG_DEBUG_MISC is not set
+# CONFIG_SCHED_DEBUG is not set
+# CONFIG_FTRACE is not set
+# CONFIG_STRICT_DEVMEM is not set
+CONFIG_PPC_DISABLE_WERROR=y
+CONFIG_XMON=y
+CONFIG_XMON_DEFAULT=y
+# CONFIG_XMON_DEFAULT_RO_MODE is not set
+# CONFIG_RUNTIME_TESTING_MENU is not set
diff --git a/arch/powerpc/configs/mpc512x_defconfig b/arch/powerpc/configs/mpc512x_defconfig
index e39346b3dc3b..10fe061c5e6d 100644
--- a/arch/powerpc/configs/mpc512x_defconfig
+++ b/arch/powerpc/configs/mpc512x_defconfig
@@ -47,7 +47,6 @@ CONFIG_MTD_UBI=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_COUNT=1
CONFIG_BLK_DEV_RAM_SIZE=8192
-CONFIG_BLK_DEV_RAM_DAX=y
CONFIG_EEPROM_AT24=y
CONFIG_EEPROM_AT25=y
CONFIG_SCSI=y
@@ -115,5 +114,4 @@ CONFIG_NFS_FS=y
CONFIG_ROOT_NFS=y
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
-# CONFIG_ENABLE_MUST_CHECK is not set
# CONFIG_CRYPTO_HW is not set
diff --git a/arch/powerpc/configs/mpc5200_defconfig b/arch/powerpc/configs/mpc5200_defconfig
index 83d801307178..c0fe5e76604a 100644
--- a/arch/powerpc/configs/mpc5200_defconfig
+++ b/arch/powerpc/configs/mpc5200_defconfig
@@ -122,6 +122,6 @@ CONFIG_ROOT_NFS=y
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
CONFIG_PRINTK_TIME=y
-CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
CONFIG_DEBUG_KERNEL=y
CONFIG_DETECT_HUNG_TASK=y
diff --git a/arch/powerpc/configs/mpc8272_ads_defconfig b/arch/powerpc/configs/mpc8272_ads_defconfig
index 00a4d2bf43b2..4145ef5689ca 100644
--- a/arch/powerpc/configs/mpc8272_ads_defconfig
+++ b/arch/powerpc/configs/mpc8272_ads_defconfig
@@ -67,7 +67,7 @@ CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ASCII=y
CONFIG_NLS_ISO8859_1=y
CONFIG_NLS_UTF8=y
-CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DETECT_HUNG_TASK=y
CONFIG_BDI_SWITCH=y
diff --git a/arch/powerpc/configs/mpc83xx_defconfig b/arch/powerpc/configs/mpc83xx_defconfig
index be125729635c..95d43f8a3869 100644
--- a/arch/powerpc/configs/mpc83xx_defconfig
+++ b/arch/powerpc/configs/mpc83xx_defconfig
@@ -19,7 +19,6 @@ CONFIG_MPC836x_MDS=y
CONFIG_MPC836x_RDK=y
CONFIG_MPC837x_MDS=y
CONFIG_MPC837x_RDB=y
-CONFIG_SBC834x=y
CONFIG_ASP834x=y
CONFIG_QE_GPIO=y
CONFIG_MATH_EMULATION=y
diff --git a/arch/powerpc/configs/mpc85xx_base.config b/arch/powerpc/configs/mpc85xx_base.config
index b1593fe6f70b..85907b776908 100644
--- a/arch/powerpc/configs/mpc85xx_base.config
+++ b/arch/powerpc/configs/mpc85xx_base.config
@@ -13,7 +13,6 @@ CONFIG_P1022_DS=y
CONFIG_P1022_RDK=y
CONFIG_P1023_RDB=y
CONFIG_TWR_P102x=y
-CONFIG_SBC8548=y
CONFIG_SOCRATES=y
CONFIG_STX_GP3=y
CONFIG_TQM8540=y
diff --git a/arch/powerpc/configs/mpc866_ads_defconfig b/arch/powerpc/configs/mpc866_ads_defconfig
index 5320735395e7..5c56d36cdfc5 100644
--- a/arch/powerpc/configs/mpc866_ads_defconfig
+++ b/arch/powerpc/configs/mpc866_ads_defconfig
@@ -12,7 +12,6 @@ CONFIG_EXPERT=y
# CONFIG_BLK_DEV_BSG is not set
CONFIG_PARTITION_ADVANCED=y
CONFIG_MPC86XADS=y
-CONFIG_8xx_COPYBACK=y
CONFIG_GEN_RTC=y
CONFIG_HZ_1000=y
CONFIG_MATH_EMULATION=y
diff --git a/arch/powerpc/configs/mpc86xx_base.config b/arch/powerpc/configs/mpc86xx_base.config
index 67bd1fa036ee..588870e6af3b 100644
--- a/arch/powerpc/configs/mpc86xx_base.config
+++ b/arch/powerpc/configs/mpc86xx_base.config
@@ -1,6 +1,5 @@
CONFIG_PPC_86xx=y
CONFIG_MPC8641_HPCN=y
-CONFIG_SBC8641D=y
CONFIG_MPC8610_HPCD=y
CONFIG_GEF_PPC9A=y
CONFIG_GEF_SBC310=y
diff --git a/arch/powerpc/configs/mpc885_ads_defconfig b/arch/powerpc/configs/mpc885_ads_defconfig
index 82a008c04eae..56b876e418e9 100644
--- a/arch/powerpc/configs/mpc885_ads_defconfig
+++ b/arch/powerpc/configs/mpc885_ads_defconfig
@@ -1,20 +1,30 @@
-CONFIG_PPC_8xx=y
# CONFIG_SWAP is not set
CONFIG_SYSVIPC=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
+CONFIG_BPF_JIT=y
+CONFIG_VIRT_CPU_ACCOUNTING_NATIVE=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_EXPERT=y
# CONFIG_ELF_CORE is not set
# CONFIG_BASE_FULL is not set
# CONFIG_FUTEX is not set
+CONFIG_PERF_EVENTS=y
# CONFIG_VM_EVENT_COUNTERS is not set
-# CONFIG_BLK_DEV_BSG is not set
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_8xx_COPYBACK=y
+CONFIG_PPC_8xx=y
+CONFIG_8xx_GPIO=y
+CONFIG_SMC_UCODE_PATCH=y
+CONFIG_PIN_TLB=y
CONFIG_GEN_RTC=y
CONFIG_HZ_100=y
+CONFIG_MATH_EMULATION=y
+CONFIG_PPC_16K_PAGES=y
+CONFIG_ADVANCED_OPTIONS=y
# CONFIG_SECCOMP is not set
+CONFIG_STRICT_KERNEL_RWX=y
+CONFIG_MODULES=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -22,7 +32,6 @@ CONFIG_INET=y
CONFIG_IP_MULTICAST=y
CONFIG_IP_PNP=y
CONFIG_SYN_COOKIES=y
-# CONFIG_IPV6 is not set
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
CONFIG_MTD_BLOCK=y
@@ -35,6 +44,7 @@ CONFIG_MTD_CFI_GEOMETRY=y
# CONFIG_MTD_CFI_I2 is not set
CONFIG_MTD_CFI_I4=y
CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_PHYSMAP=y
CONFIG_MTD_PHYSMAP_OF=y
# CONFIG_BLK_DEV is not set
CONFIG_NETDEVICES=y
@@ -47,14 +57,25 @@ CONFIG_DAVICOM_PHY=y
# CONFIG_LEGACY_PTYS is not set
CONFIG_SERIAL_CPM=y
CONFIG_SERIAL_CPM_CONSOLE=y
+CONFIG_SPI=y
+CONFIG_SPI_FSL_SPI=y
# CONFIG_HWMON is not set
+CONFIG_WATCHDOG=y
+CONFIG_8xxx_WDT=y
# CONFIG_USB_SUPPORT is not set
# CONFIG_DNOTIFY is not set
CONFIG_TMPFS=y
CONFIG_CRAMFS=y
CONFIG_NFS_FS=y
CONFIG_ROOT_NFS=y
+CONFIG_CRYPTO=y
+CONFIG_CRYPTO_DEV_TALITOS=y
CONFIG_CRC32_SLICEBY4=y
-CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_FS=y
+CONFIG_DEBUG_VM_PGTABLE=y
CONFIG_DETECT_HUNG_TASK=y
+CONFIG_BDI_SWITCH=y
+CONFIG_PPC_EARLY_DEBUG=y
+CONFIG_GENERIC_PTDUMP=y
diff --git a/arch/powerpc/configs/mvme5100_defconfig b/arch/powerpc/configs/mvme5100_defconfig
index 0a0d046fc445..d1c7fd5bf34b 100644
--- a/arch/powerpc/configs/mvme5100_defconfig
+++ b/arch/powerpc/configs/mvme5100_defconfig
@@ -20,10 +20,9 @@ CONFIG_EMBEDDED6xx=y
CONFIG_MVME5100=y
CONFIG_KVM_GUEST=y
CONFIG_HZ_100=y
+CONFIG_CMDLINE="console=ttyS0,9600 ip=dhcp root=/dev/nfs"
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
# CONFIG_COMPACTION is not set
-CONFIG_CMDLINE_BOOL=y
-CONFIG_CMDLINE="console=ttyS0,9600 ip=dhcp root=/dev/nfs"
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -46,7 +45,6 @@ CONFIG_NF_CONNTRACK_TFTP=m
CONFIG_NETFILTER_XT_MATCH_MAC=m
CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
CONFIG_NETFILTER_XT_MATCH_STATE=m
-CONFIG_NF_CONNTRACK_IPV4=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_FILTER=m
CONFIG_IP_NF_TARGET_REJECT=m
@@ -103,7 +101,6 @@ CONFIG_NFS_V3_ACL=y
CONFIG_NFS_V4=y
CONFIG_ROOT_NFS=y
CONFIG_NFSD=m
-CONFIG_NFSD_V3=y
CONFIG_CIFS=m
CONFIG_NLS=y
CONFIG_NLS_CODEPAGE_437=m
diff --git a/arch/powerpc/configs/pasemi_defconfig b/arch/powerpc/configs/pasemi_defconfig
index 4b6d31d4474e..96aa5355911f 100644
--- a/arch/powerpc/configs/pasemi_defconfig
+++ b/arch/powerpc/configs/pasemi_defconfig
@@ -7,7 +7,6 @@ CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_PROFILING=y
-CONFIG_OPROFILE=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
@@ -23,7 +22,6 @@ CONFIG_CPU_FREQ_GOV_POWERSAVE=y
CONFIG_CPU_FREQ_GOV_USERSPACE=y
CONFIG_CPU_FREQ_GOV_ONDEMAND=y
CONFIG_HZ_1000=y
-CONFIG_PPC_64K_PAGES=y
# CONFIG_SECCOMP is not set
CONFIG_PCI_MSI=y
CONFIG_PCCARD=y
@@ -58,9 +56,7 @@ CONFIG_BLK_DEV_RAM_SIZE=16384
CONFIG_EEPROM_LEGACY=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=y
-CONFIG_CHR_DEV_OSST=y
CONFIG_BLK_DEV_SR=y
-CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=y
CONFIG_CHR_DEV_SCH=y
CONFIG_SCSI_CONSTANTS=y
@@ -96,7 +92,6 @@ CONFIG_LEGACY_PTY_COUNT=4
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_HW_RANDOM=y
-CONFIG_RAW_DRIVER=y
CONFIG_I2C_CHARDEV=y
CONFIG_I2C_PASEMI=y
CONFIG_SENSORS_LM85=y
@@ -110,7 +105,6 @@ CONFIG_FB_NVIDIA=y
CONFIG_FB_NVIDIA_I2C=y
CONFIG_FB_RADEON=y
# CONFIG_LCD_CLASS_DEVICE is not set
-CONFIG_VGACON_SOFT_SCROLLBACK=y
CONFIG_LOGO=y
CONFIG_SOUND=y
CONFIG_SND=y
diff --git a/arch/powerpc/configs/pmac32_defconfig b/arch/powerpc/configs/pmac32_defconfig
index f492e7d35925..019163c2571e 100644
--- a/arch/powerpc/configs/pmac32_defconfig
+++ b/arch/powerpc/configs/pmac32_defconfig
@@ -10,7 +10,6 @@ CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
-CONFIG_OPROFILE=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
@@ -75,7 +74,6 @@ CONFIG_NETFILTER_XT_MATCH_STRING=m
CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
CONFIG_NETFILTER_XT_MATCH_TIME=m
CONFIG_NETFILTER_XT_MATCH_U32=m
-CONFIG_NF_CONNTRACK_IPV4=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
@@ -90,13 +88,6 @@ CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
CONFIG_IP_DCCP=m
-CONFIG_IRDA=m
-CONFIG_IRLAN=m
-CONFIG_IRNET=m
-CONFIG_IRCOMM=m
-CONFIG_IRDA_CACHE_LAST_LSAP=y
-CONFIG_IRDA_FAST_RR=y
-CONFIG_IRTTY_SIR=m
CONFIG_BT=m
CONFIG_BT_RFCOMM=m
CONFIG_BT_RFCOMM_TTY=y
@@ -117,7 +108,6 @@ CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=y
CONFIG_BLK_DEV_SR=y
-CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=y
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_FC_ATTRS=y
@@ -179,7 +169,6 @@ CONFIG_USB_USBNET=m
CONFIG_B43=m
CONFIG_B43LEGACY=m
CONFIG_P54_COMMON=m
-CONFIG_PRISM54=m
CONFIG_INPUT_EVDEV=y
# CONFIG_KEYBOARD_ATKBD is not set
# CONFIG_MOUSE_PS2 is not set
@@ -295,7 +284,6 @@ CONFIG_BOOTX_TEXT=y
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_SHA512=m
-CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_BLOWFISH=m
diff --git a/arch/powerpc/configs/powernv_defconfig b/arch/powerpc/configs/powernv_defconfig
index 71749377d164..c92652575064 100644
--- a/arch/powerpc/configs/powernv_defconfig
+++ b/arch/powerpc/configs/powernv_defconfig
@@ -17,7 +17,6 @@ CONFIG_LOG_CPU_MAX_BUF_SHIFT=13
CONFIG_NUMA_BALANCING=y
CONFIG_CGROUPS=y
CONFIG_MEMCG=y
-CONFIG_MEMCG_SWAP=y
CONFIG_CGROUP_SCHED=y
CONFIG_CGROUP_FREEZER=y
CONFIG_CPUSETS=y
@@ -30,7 +29,6 @@ CONFIG_BLK_DEV_INITRD=y
CONFIG_BPF_SYSCALL=y
# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
-CONFIG_OPROFILE=m
CONFIG_KPROBES=y
CONFIG_JUMP_LABEL=y
CONFIG_MODULES=y
@@ -51,6 +49,7 @@ CONFIG_CPU_IDLE=y
CONFIG_HZ_100=y
CONFIG_BINFMT_MISC=m
CONFIG_PPC_TRANSACTIONAL_MEM=y
+CONFIG_PPC_UV=y
CONFIG_HOTPLUG_CPU=y
CONFIG_KEXEC=y
CONFIG_KEXEC_FILE=y
@@ -63,11 +62,11 @@ CONFIG_MEMORY_FAILURE=y
CONFIG_HWPOISON_INJECT=m
CONFIG_TRANSPARENT_HUGEPAGE=y
CONFIG_DEFERRED_STRUCT_PAGE_INIT=y
-CONFIG_PPC_64K_PAGES=y
-CONFIG_PPC_SUBPAGE_PROT=y
CONFIG_SCHED_SMT=y
CONFIG_PM=y
CONFIG_HOTPLUG_PCI=y
+CONFIG_ZONE_DEVICE=y
+CONFIG_DEVICE_PRIVATE=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -108,7 +107,6 @@ CONFIG_BLK_DEV_NVME=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=m
CONFIG_BLK_DEV_SR=m
-CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=m
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_SCAN_ASYNC=y
@@ -210,7 +208,6 @@ CONFIG_FB_MATROX_G=y
CONFIG_FB_RADEON=m
CONFIG_FB_IBM_GXT4500=m
CONFIG_LCD_PLATFORM=m
-CONFIG_BACKLIGHT_GENERIC=m
# CONFIG_VGA_CONSOLE is not set
CONFIG_LOGO=y
CONFIG_HID_A4TECH=m
@@ -247,7 +244,6 @@ CONFIG_INFINIBAND=m
CONFIG_INFINIBAND_USER_MAD=m
CONFIG_INFINIBAND_USER_ACCESS=m
CONFIG_INFINIBAND_MTHCA=m
-CONFIG_INFINIBAND_CXGB3=m
CONFIG_INFINIBAND_CXGB4=m
CONFIG_MLX4_INFINIBAND=m
CONFIG_INFINIBAND_IPOIB=m
@@ -258,7 +254,6 @@ CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_GENERIC=y
# CONFIG_VIRTIO_MENU is not set
CONFIG_LIBNVDIMM=y
-# CONFIG_ND_BLK is not set
CONFIG_EXT2_FS=y
CONFIG_EXT2_FS_XATTR=y
CONFIG_EXT2_FS_POSIX_ACL=y
@@ -314,6 +309,7 @@ CONFIG_SOFTLOCKUP_DETECTOR=y
CONFIG_HARDLOCKUP_DETECTOR=y
CONFIG_FUNCTION_TRACER=y
CONFIG_SCHED_TRACER=y
+CONFIG_STACK_TRACER=y
CONFIG_FTRACE_SYSCALLS=y
CONFIG_BLK_DEV_IO_TRACE=y
CONFIG_PPC_EMULATED_STATS=y
@@ -330,13 +326,11 @@ CONFIG_CRYPTO_MD5_PPC=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_SHA1_PPC=m
CONFIG_CRYPTO_SHA256=y
-CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_CAST6=m
CONFIG_CRYPTO_KHAZAD=m
-CONFIG_CRYPTO_SALSA20=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
@@ -348,3 +342,4 @@ CONFIG_KVM_BOOK3S_64=m
CONFIG_KVM_BOOK3S_64_HV=m
CONFIG_VHOST_NET=m
CONFIG_PRINTK_TIME=y
+CONFIG_PRINTK_CALLER=y
diff --git a/arch/powerpc/configs/ppc40x_defconfig b/arch/powerpc/configs/ppc40x_defconfig
index a5f683aed328..7e48693775f4 100644
--- a/arch/powerpc/configs/ppc40x_defconfig
+++ b/arch/powerpc/configs/ppc40x_defconfig
@@ -10,11 +10,9 @@ CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
CONFIG_PPC4xx_GPIO=y
CONFIG_ACADIA=y
-CONFIG_EP405=y
CONFIG_HOTFOOT=y
CONFIG_KILAUEA=y
CONFIG_MAKALU=y
-CONFIG_XILINX_VIRTEX_GENERIC_BOARD=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -22,9 +20,6 @@ CONFIG_INET=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
CONFIG_CONNECTOR=y
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
@@ -37,33 +32,26 @@ CONFIG_MTD_UBI=m
CONFIG_MTD_UBI_GLUEBI=m
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=35000
-CONFIG_XILINX_SYSACE=m
CONFIG_NETDEVICES=y
CONFIG_IBM_EMAC=y
# CONFIG_INPUT is not set
CONFIG_SERIO=m
# CONFIG_SERIO_I8042 is not set
# CONFIG_SERIO_SERPORT is not set
-CONFIG_SERIO_XILINX_XPS_PS2=m
# CONFIG_VT is not set
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_8250_EXTENDED=y
CONFIG_SERIAL_8250_SHARE_IRQ=y
CONFIG_SERIAL_OF_PLATFORM=y
-CONFIG_SERIAL_UARTLITE=y
-CONFIG_SERIAL_UARTLITE_CONSOLE=y
# CONFIG_HW_RANDOM is not set
-CONFIG_XILINX_HWICAP=m
CONFIG_I2C=m
CONFIG_I2C_CHARDEV=m
CONFIG_I2C_GPIO=m
CONFIG_I2C_IBM_IIC=m
-CONFIG_GPIO_XILINX=y
# CONFIG_HWMON is not set
CONFIG_THERMAL=y
CONFIG_FB=m
-CONFIG_FB_XILINX=m
CONFIG_EXT2_FS=y
CONFIG_EXT4_FS=m
CONFIG_VFAT_FS=m
diff --git a/arch/powerpc/configs/ppc44x_defconfig b/arch/powerpc/configs/ppc44x_defconfig
index a41eedfe0a5f..8b595f67068c 100644
--- a/arch/powerpc/configs/ppc44x_defconfig
+++ b/arch/powerpc/configs/ppc44x_defconfig
@@ -22,7 +22,6 @@ CONFIG_GLACIER=y
CONFIG_REDWOOD=y
CONFIG_EIGER=y
CONFIG_YOSEMITE=y
-CONFIG_XILINX_VIRTEX440_GENERIC_BOARD=y
CONFIG_PPC4xx_GPIO=y
CONFIG_MATH_EMULATION=y
CONFIG_NET=y
@@ -46,7 +45,6 @@ CONFIG_MTD_UBI=m
CONFIG_MTD_UBI_GLUEBI=m
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=35000
-CONFIG_XILINX_SYSACE=m
CONFIG_SCSI=m
CONFIG_BLK_DEV_SD=m
# CONFIG_SCSI_LOWLEVEL is not set
@@ -57,7 +55,6 @@ CONFIG_IBM_EMAC=y
CONFIG_SERIO=m
# CONFIG_SERIO_I8042 is not set
# CONFIG_SERIO_SERPORT is not set
-CONFIG_SERIO_XILINX_XPS_PS2=m
# CONFIG_VT is not set
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
@@ -65,18 +62,13 @@ CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_8250_EXTENDED=y
CONFIG_SERIAL_8250_SHARE_IRQ=y
CONFIG_SERIAL_OF_PLATFORM=y
-CONFIG_SERIAL_UARTLITE=y
-CONFIG_SERIAL_UARTLITE_CONSOLE=y
# CONFIG_HW_RANDOM is not set
-CONFIG_XILINX_HWICAP=m
CONFIG_I2C=m
CONFIG_I2C_CHARDEV=m
CONFIG_I2C_GPIO=m
CONFIG_I2C_IBM_IIC=m
-CONFIG_GPIO_XILINX=y
# CONFIG_HWMON is not set
CONFIG_FB=m
-CONFIG_FB_XILINX=m
CONFIG_USB=m
CONFIG_USB_EHCI_HCD=m
CONFIG_USB_OHCI_HCD=m
diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig
index 7e68cb222c7b..d6949a6c5b2b 100644
--- a/arch/powerpc/configs/ppc64_defconfig
+++ b/arch/powerpc/configs/ppc64_defconfig
@@ -1,7 +1,9 @@
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
+# CONFIG_CONTEXT_TRACKING_USER_FORCE is not set
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
+CONFIG_VIRT_CPU_ACCOUNTING_GEN=y
CONFIG_TASKSTATS=y
CONFIG_TASK_DELAY_ACCT=y
CONFIG_IKCONFIG=y
@@ -26,7 +28,6 @@ CONFIG_PPC64=y
CONFIG_NR_CPUS=2048
CONFIG_PPC_SPLPAR=y
CONFIG_DTL=y
-CONFIG_SCANLOG=m
CONFIG_PPC_SMLPAR=y
CONFIG_IBMEBUS=y
CONFIG_PPC_SVM=y
@@ -50,8 +51,8 @@ CONFIG_PPC_TRANSACTIONAL_MEM=y
CONFIG_KEXEC=y
CONFIG_KEXEC_FILE=y
CONFIG_CRASH_DUMP=y
+CONFIG_FA_DUMP=y
CONFIG_IRQ_ALL_CPUS=y
-CONFIG_PPC_64K_PAGES=y
CONFIG_SCHED_SMT=y
CONFIG_HOTPLUG_PCI=y
CONFIG_HOTPLUG_PCI_RPA=m
@@ -62,7 +63,6 @@ CONFIG_VIRTUALIZATION=y
CONFIG_KVM_BOOK3S_64=m
CONFIG_KVM_BOOK3S_64_HV=m
CONFIG_VHOST_NET=m
-CONFIG_OPROFILE=m
CONFIG_KPROBES=y
CONFIG_JUMP_LABEL=y
CONFIG_MODULES=y
@@ -110,7 +110,6 @@ CONFIG_VIRTIO_BLK=m
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=m
CONFIG_BLK_DEV_SR=y
-CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=y
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_FC_ATTRS=y
@@ -179,6 +178,7 @@ CONFIG_CHELSIO_T1=m
CONFIG_BE2NET=m
CONFIG_IBMVETH=m
CONFIG_EHEA=m
+CONFIG_IBMVNIC=m
CONFIG_E100=y
CONFIG_E1000=y
CONFIG_E1000E=y
@@ -215,7 +215,6 @@ CONFIG_HVC_RTAS=y
CONFIG_HVCS=m
CONFIG_VIRTIO_CONSOLE=m
CONFIG_IBM_BSR=m
-CONFIG_RAW_DRIVER=y
CONFIG_I2C_CHARDEV=y
CONFIG_I2C_AMD8111=y
CONFIG_I2C_PASEMI=y
@@ -269,7 +268,6 @@ CONFIG_INFINIBAND=m
CONFIG_INFINIBAND_USER_MAD=m
CONFIG_INFINIBAND_USER_ACCESS=m
CONFIG_INFINIBAND_MTHCA=m
-CONFIG_INFINIBAND_CXGB3=m
CONFIG_INFINIBAND_CXGB4=m
CONFIG_MLX4_INFINIBAND=m
CONFIG_INFINIBAND_IPOIB=m
@@ -282,6 +280,7 @@ CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_DS1307=y
CONFIG_VIRTIO_PCI=m
CONFIG_VIRTIO_BALLOON=m
+CONFIG_LIBNVDIMM=y
CONFIG_RAS=y
CONFIG_EXT2_FS=y
CONFIG_EXT2_FS_XATTR=y
@@ -344,13 +343,11 @@ CONFIG_CRYPTO_MD5_PPC=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_SHA1_PPC=m
CONFIG_CRYPTO_SHA256=y
-CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_CAST6=m
CONFIG_CRYPTO_KHAZAD=m
-CONFIG_CRYPTO_SALSA20=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
@@ -359,6 +356,7 @@ CONFIG_CRYPTO_DEV_NX=y
CONFIG_CRYPTO_DEV_NX_ENCRYPT=m
CONFIG_CRYPTO_DEV_VMX=y
CONFIG_PRINTK_TIME=y
+CONFIG_PRINTK_CALLER=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_KERNEL=y
CONFIG_DEBUG_STACK_USAGE=y
@@ -367,7 +365,9 @@ CONFIG_SOFTLOCKUP_DETECTOR=y
CONFIG_HARDLOCKUP_DETECTOR=y
CONFIG_DEBUG_MUTEXES=y
CONFIG_FUNCTION_TRACER=y
+CONFIG_FTRACE_SYSCALLS=y
CONFIG_SCHED_TRACER=y
+CONFIG_STACK_TRACER=y
CONFIG_BLK_DEV_IO_TRACE=y
CONFIG_CODE_PATCHING_SELFTEST=y
CONFIG_FTR_FIXUP_SELFTEST=y
diff --git a/arch/powerpc/configs/ppc64e_defconfig b/arch/powerpc/configs/ppc64e_defconfig
index 0d746774c2bd..f97a2d31bbf7 100644
--- a/arch/powerpc/configs/ppc64e_defconfig
+++ b/arch/powerpc/configs/ppc64e_defconfig
@@ -14,7 +14,6 @@ CONFIG_CPUSETS=y
CONFIG_BLK_DEV_INITRD=y
# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
-CONFIG_OPROFILE=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODVERSIONS=y
@@ -60,7 +59,6 @@ CONFIG_BLK_DEV_RAM_SIZE=65536
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=y
CONFIG_BLK_DEV_SR=y
-CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=y
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_FC_ATTRS=y
@@ -120,7 +118,6 @@ CONFIG_INPUT_MISC=y
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
# CONFIG_HW_RANDOM is not set
-CONFIG_RAW_DRIVER=y
CONFIG_I2C_CHARDEV=y
CONFIG_I2C_AMD8111=y
CONFIG_FB=y
@@ -236,13 +233,11 @@ CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_SHA512=m
-CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_CAST6=m
CONFIG_CRYPTO_KHAZAD=m
-CONFIG_CRYPTO_SALSA20=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
diff --git a/arch/powerpc/configs/ppc64le.config b/arch/powerpc/configs/ppc64le.config
new file mode 100644
index 000000000000..14dca1062c1b
--- /dev/null
+++ b/arch/powerpc/configs/ppc64le.config
@@ -0,0 +1,2 @@
+CONFIG_PPC64=y
+CONFIG_CPU_LITTLE_ENDIAN=y
diff --git a/arch/powerpc/configs/ppc6xx_defconfig b/arch/powerpc/configs/ppc6xx_defconfig
index 3e2f44f38ac5..d23deb94b36e 100644
--- a/arch/powerpc/configs/ppc6xx_defconfig
+++ b/arch/powerpc/configs/ppc6xx_defconfig
@@ -19,7 +19,6 @@ CONFIG_USER_NS=y
CONFIG_BLK_DEV_INITRD=y
# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
-CONFIG_OPROFILE=m
CONFIG_KPROBES=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
@@ -53,11 +52,9 @@ CONFIG_MPC836x_MDS=y
CONFIG_MPC836x_RDK=y
CONFIG_MPC837x_MDS=y
CONFIG_MPC837x_RDB=y
-CONFIG_SBC834x=y
CONFIG_ASP834x=y
CONFIG_PPC_86xx=y
CONFIG_MPC8641_HPCN=y
-CONFIG_SBC8641D=y
CONFIG_MPC8610_HPCD=y
CONFIG_GEF_SBC610=y
CONFIG_CPU_FREQ=y
@@ -187,7 +184,6 @@ CONFIG_NETFILTER_XT_MATCH_STRING=m
CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
CONFIG_NETFILTER_XT_MATCH_TIME=m
CONFIG_NETFILTER_XT_MATCH_U32=m
-CONFIG_NF_CONNTRACK_IPV4=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
@@ -203,7 +199,6 @@ CONFIG_IP_NF_SECURITY=m
CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
-CONFIG_NF_CONNTRACK_IPV6=m
CONFIG_IP6_NF_IPTABLES=m
CONFIG_IP6_NF_MATCH_AH=m
CONFIG_IP6_NF_MATCH_EUI64=m
@@ -241,7 +236,6 @@ CONFIG_BRIDGE_EBT_SNAT=m
CONFIG_BRIDGE_EBT_LOG=m
CONFIG_BRIDGE_EBT_NFLOG=m
CONFIG_IP_DCCP=m
-CONFIG_NET_DCCPPROBE=m
CONFIG_TIPC=m
CONFIG_ATM=m
CONFIG_ATM_CLIP=m
@@ -249,9 +243,6 @@ CONFIG_ATM_LANE=m
CONFIG_ATM_BR2684=m
CONFIG_BRIDGE=m
CONFIG_VLAN_8021Q=m
-CONFIG_DECNET=m
-CONFIG_DECNET_ROUTER=y
-CONFIG_IPX=m
CONFIG_ATALK=m
CONFIG_DEV_APPLETALK=m
CONFIG_IPDDP=m
@@ -297,26 +288,6 @@ CONFIG_NET_ACT_NAT=m
CONFIG_NET_ACT_PEDIT=m
CONFIG_NET_ACT_SIMP=m
CONFIG_NET_ACT_SKBEDIT=m
-CONFIG_IRDA=m
-CONFIG_IRLAN=m
-CONFIG_IRNET=m
-CONFIG_IRCOMM=m
-CONFIG_IRDA_CACHE_LAST_LSAP=y
-CONFIG_IRDA_FAST_RR=y
-CONFIG_IRTTY_SIR=m
-CONFIG_KINGSUN_DONGLE=m
-CONFIG_KSDAZZLE_DONGLE=m
-CONFIG_KS959_DONGLE=m
-CONFIG_USB_IRDA=m
-CONFIG_SIGMATEL_FIR=m
-CONFIG_NSC_FIR=m
-CONFIG_WINBOND_FIR=m
-CONFIG_TOSHIBA_FIR=m
-CONFIG_SMC_IRCC_FIR=m
-CONFIG_ALI_FIR=m
-CONFIG_VLSI_FIR=m
-CONFIG_VIA_FIR=m
-CONFIG_MCS_FIR=m
CONFIG_BT=m
CONFIG_BT_RFCOMM=m
CONFIG_BT_RFCOMM_TTY=y
@@ -332,7 +303,6 @@ CONFIG_BT_HCIBFUSB=m
CONFIG_BT_HCIDTL1=m
CONFIG_BT_HCIBT3C=m
CONFIG_BT_HCIBLUECARD=m
-CONFIG_BT_HCIBTUART=m
CONFIG_BT_HCIVHCI=m
CONFIG_CFG80211=m
CONFIG_MAC80211=m
@@ -351,7 +321,6 @@ CONFIG_PNP=y
CONFIG_ISAPNP=y
CONFIG_MAC_FLOPPY=m
CONFIG_BLK_DEV_LOOP=m
-CONFIG_BLK_DEV_CRYPTOLOOP=m
CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=16384
@@ -366,9 +335,7 @@ CONFIG_EEPROM_93CX6=m
CONFIG_RAID_ATTRS=m
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=m
-CONFIG_CHR_DEV_OSST=m
CONFIG_BLK_DEV_SR=m
-CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=y
CONFIG_CHR_DEV_SCH=m
CONFIG_SCSI_ENCLOSURE=m
@@ -474,7 +441,6 @@ CONFIG_NET_TULIP=y
CONFIG_DE2104X=m
CONFIG_TULIP=m
CONFIG_TULIP_MMIO=y
-CONFIG_DE4X5=m
CONFIG_WINBOND_840=m
CONFIG_DM9102=m
CONFIG_ULI526X=m
@@ -607,7 +573,6 @@ CONFIG_JOYSTICK_XPAD_LEDS=y
CONFIG_INPUT_TABLET=y
CONFIG_TABLET_USB_ACECAD=m
CONFIG_TABLET_USB_AIPTEK=m
-CONFIG_TABLET_USB_GTCO=m
CONFIG_TABLET_USB_KBTAB=m
CONFIG_INPUT_MISC=y
CONFIG_INPUT_PCSPKR=m
@@ -624,10 +589,6 @@ CONFIG_GAMEPORT_EMU10K1=m
CONFIG_GAMEPORT_FM801=m
# CONFIG_LEGACY_PTYS is not set
CONFIG_SERIAL_NONSTANDARD=y
-CONFIG_ROCKETPORT=m
-CONFIG_CYCLADES=m
-CONFIG_SYNCLINK=m
-CONFIG_SYNCLINKMP=m
CONFIG_SYNCLINK_GT=m
CONFIG_NOZOMI=m
CONFIG_N_HDLC=m
@@ -654,7 +615,6 @@ CONFIG_HW_RANDOM=y
CONFIG_HW_RANDOM_VIRTIO=m
CONFIG_NVRAM=y
CONFIG_DTLK=m
-CONFIG_R3964=m
CONFIG_CARDMAN_4000=m
CONFIG_CARDMAN_4040=m
CONFIG_IPWIRELESS=m
@@ -664,7 +624,6 @@ CONFIG_I2C_MPC=m
CONFIG_I2C_PCA_PLATFORM=m
CONFIG_I2C_SIMTEC=m
CONFIG_I2C_PARPORT=m
-CONFIG_I2C_PARPORT_LIGHT=m
CONFIG_I2C_TINY_USB=m
CONFIG_I2C_PCA_ISA=m
CONFIG_I2C_STUB=m
@@ -677,7 +636,6 @@ CONFIG_W1_SLAVE_THERM=m
CONFIG_W1_SLAVE_SMEM=m
CONFIG_W1_SLAVE_DS2433=m
CONFIG_W1_SLAVE_DS2433_CRC=y
-CONFIG_W1_SLAVE_DS2760=m
CONFIG_APM_POWER=m
CONFIG_BATTERY_PMU=m
CONFIG_HWMON=m
@@ -773,7 +731,6 @@ CONFIG_FB_TRIDENT=m
CONFIG_FB_SM501=m
CONFIG_FB_IBM_GXT4500=y
CONFIG_LCD_PLATFORM=m
-CONFIG_VGACON_SOFT_SCROLLBACK=y
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y
CONFIG_LOGO=y
@@ -942,7 +899,6 @@ CONFIG_USB_SERIAL_SAFE_PADDED=y
CONFIG_USB_SERIAL_SIERRAWIRELESS=m
CONFIG_USB_SERIAL_TI=m
CONFIG_USB_SERIAL_CYBERJACK=m
-CONFIG_USB_SERIAL_XIRCOM=m
CONFIG_USB_SERIAL_OPTION=m
CONFIG_USB_SERIAL_OMNINET=m
CONFIG_USB_SERIAL_DEBUG=m
@@ -1061,20 +1017,10 @@ CONFIG_NFSD=m
CONFIG_NFSD_V3_ACL=y
CONFIG_NFSD_V4=y
CONFIG_CIFS=m
-CONFIG_CIFS_WEAK_PW_HASH=y
CONFIG_CIFS_UPCALL=y
CONFIG_CIFS_XATTR=y
CONFIG_CIFS_POSIX=y
CONFIG_CIFS_DFS_UPCALL=y
-CONFIG_NCP_FS=m
-CONFIG_NCPFS_PACKET_SIGNING=y
-CONFIG_NCPFS_IOCTL_LOCKING=y
-CONFIG_NCPFS_STRONG=y
-CONFIG_NCPFS_NFS_NS=y
-CONFIG_NCPFS_OS2_NS=y
-CONFIG_NCPFS_SMALLDOS=y
-CONFIG_NCPFS_NLS=y
-CONFIG_NCPFS_EXTRAS=y
CONFIG_CODA_FS=m
CONFIG_9P_FS=m
CONFIG_NLS_DEFAULT="utf8"
@@ -1115,10 +1061,8 @@ CONFIG_NLS_ISO8859_14=m
CONFIG_NLS_ISO8859_15=m
CONFIG_NLS_KOI8_R=m
CONFIG_NLS_KOI8_U=m
-CONFIG_DEBUG_INFO=y
-CONFIG_UNUSED_SYMBOLS=y
+CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
CONFIG_HEADERS_INSTALL=y
-CONFIG_HEADERS_CHECK=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_KERNEL=y
CONFIG_DEBUG_OBJECTS=y
@@ -1161,13 +1105,9 @@ CONFIG_CRYPTO_XTS=m
CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_MICHAEL_MIC=m
-CONFIG_CRYPTO_RMD128=m
CONFIG_CRYPTO_RMD160=m
-CONFIG_CRYPTO_RMD256=m
-CONFIG_CRYPTO_RMD320=m
CONFIG_CRYPTO_SHA1=y
CONFIG_CRYPTO_SHA512=m
-CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_BLOWFISH=m
@@ -1175,7 +1115,6 @@ CONFIG_CRYPTO_CAMELLIA=m
CONFIG_CRYPTO_CAST6=m
CONFIG_CRYPTO_FCRYPT=m
CONFIG_CRYPTO_KHAZAD=m
-CONFIG_CRYPTO_SALSA20=m
CONFIG_CRYPTO_SEED=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TEA=m
diff --git a/arch/powerpc/configs/pq2fads_defconfig b/arch/powerpc/configs/pq2fads_defconfig
index 9d8a76857c6f..9d63e2e65211 100644
--- a/arch/powerpc/configs/pq2fads_defconfig
+++ b/arch/powerpc/configs/pq2fads_defconfig
@@ -68,7 +68,7 @@ CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ASCII=y
CONFIG_NLS_ISO8859_1=y
CONFIG_NLS_UTF8=y
-CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DETECT_HUNG_TASK=y
# CONFIG_SCHED_DEBUG is not set
diff --git a/arch/powerpc/configs/ps3_defconfig b/arch/powerpc/configs/ps3_defconfig
index 4db51719342a..0a1b42c4f26a 100644
--- a/arch/powerpc/configs/ps3_defconfig
+++ b/arch/powerpc/configs/ps3_defconfig
@@ -13,7 +13,6 @@ CONFIG_EMBEDDED=y
# CONFIG_COMPAT_BRK is not set
CONFIG_SLAB=y
CONFIG_PROFILING=y
-CONFIG_OPROFILE=m
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_PPC_POWERNV is not set
@@ -31,11 +30,10 @@ CONFIG_PS3_LPM=m
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_BINFMT_MISC=y
CONFIG_KEXEC=y
+CONFIG_PPC_4K_PAGES=y
# CONFIG_SPARSEMEM_VMEMMAP is not set
# CONFIG_COMPACTION is not set
CONFIG_SCHED_SMT=y
-CONFIG_CMDLINE_BOOL=y
-CONFIG_CMDLINE=""
CONFIG_PM=y
CONFIG_PM_DEBUG=y
# CONFIG_SECCOMP is not set
@@ -60,6 +58,8 @@ CONFIG_CFG80211=m
CONFIG_CFG80211_WEXT=y
CONFIG_MAC80211=m
# CONFIG_MAC80211_RC_MINSTREL is not set
+CONFIG_UEVENT_HELPER=y
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=65535
@@ -153,7 +153,7 @@ CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
CONFIG_CRC_CCITT=m
CONFIG_CRC_T10DIF=y
-CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_MEMORY_INIT=y
CONFIG_DEBUG_STACKOVERFLOW=y
@@ -165,6 +165,5 @@ CONFIG_RCU_CPU_STALL_TIMEOUT=60
# CONFIG_FTRACE is not set
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_MICHAEL_MIC=m
-CONFIG_CRYPTO_SALSA20=m
CONFIG_CRYPTO_LZO=m
CONFIG_PRINTK_TIME=y
diff --git a/arch/powerpc/configs/pseries_defconfig b/arch/powerpc/configs/pseries_defconfig
index 6b68109e248f..7497e17ea657 100644
--- a/arch/powerpc/configs/pseries_defconfig
+++ b/arch/powerpc/configs/pseries_defconfig
@@ -3,8 +3,10 @@ CONFIG_NR_CPUS=2048
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_AUDIT=y
+# CONFIG_CONTEXT_TRACKING_USER_FORCE is not set
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
+CONFIG_VIRT_CPU_ACCOUNTING_GEN=y
CONFIG_TASKSTATS=y
CONFIG_TASK_DELAY_ACCT=y
CONFIG_TASK_XACCT=y
@@ -16,7 +18,6 @@ CONFIG_LOG_CPU_MAX_BUF_SHIFT=13
CONFIG_NUMA_BALANCING=y
CONFIG_CGROUPS=y
CONFIG_MEMCG=y
-CONFIG_MEMCG_SWAP=y
CONFIG_CGROUP_SCHED=y
CONFIG_CGROUP_FREEZER=y
CONFIG_CPUSETS=y
@@ -29,7 +30,6 @@ CONFIG_BLK_DEV_INITRD=y
CONFIG_BPF_SYSCALL=y
# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
-CONFIG_OPROFILE=m
CONFIG_KPROBES=y
CONFIG_JUMP_LABEL=y
CONFIG_MODULES=y
@@ -39,9 +39,10 @@ CONFIG_MODULE_SRCVERSION_ALL=y
CONFIG_PARTITION_ADVANCED=y
CONFIG_PPC_SPLPAR=y
CONFIG_DTL=y
-CONFIG_SCANLOG=m
CONFIG_PPC_SMLPAR=y
CONFIG_IBMEBUS=y
+CONFIG_LIBNVDIMM=m
+CONFIG_PAPR_SCM=m
CONFIG_PPC_SVM=y
# CONFIG_PPC_PMAC is not set
CONFIG_RTAS_FLASH=m
@@ -56,8 +57,6 @@ CONFIG_MEMORY_HOTPLUG=y
CONFIG_MEMORY_HOTREMOVE=y
CONFIG_KSM=y
CONFIG_TRANSPARENT_HUGEPAGE=y
-CONFIG_PPC_64K_PAGES=y
-CONFIG_PPC_SUBPAGE_PROT=y
CONFIG_SCHED_SMT=y
CONFIG_HOTPLUG_PCI=y
CONFIG_HOTPLUG_PCI_RPA=m
@@ -94,10 +93,10 @@ CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=65536
CONFIG_VIRTIO_BLK=m
+CONFIG_BLK_DEV_NVME=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=m
CONFIG_BLK_DEV_SR=y
-CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=y
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_FC_ATTRS=y
@@ -161,6 +160,7 @@ CONFIG_BE2NET=m
CONFIG_S2IO=m
CONFIG_IBMVETH=y
CONFIG_EHEA=y
+CONFIG_IBMVNIC=y
CONFIG_E100=y
CONFIG_E1000=y
CONFIG_E1000E=y
@@ -189,8 +189,6 @@ CONFIG_HVC_RTAS=y
CONFIG_HVCS=m
CONFIG_VIRTIO_CONSOLE=m
CONFIG_IBM_BSR=m
-CONFIG_RAW_DRIVER=y
-CONFIG_MAX_RAW_DEVS=1024
CONFIG_I2C_CHARDEV=y
CONFIG_FB=y
CONFIG_FIRMWARE_EDID=y
@@ -225,7 +223,6 @@ CONFIG_INFINIBAND=m
CONFIG_INFINIBAND_USER_MAD=m
CONFIG_INFINIBAND_USER_ACCESS=m
CONFIG_INFINIBAND_MTHCA=m
-CONFIG_INFINIBAND_CXGB3=m
CONFIG_INFINIBAND_CXGB4=m
CONFIG_MLX4_INFINIBAND=m
CONFIG_INFINIBAND_IPOIB=m
@@ -290,7 +287,9 @@ CONFIG_DEBUG_STACKOVERFLOW=y
CONFIG_SOFTLOCKUP_DETECTOR=y
CONFIG_HARDLOCKUP_DETECTOR=y
CONFIG_FUNCTION_TRACER=y
+CONFIG_FTRACE_SYSCALLS=y
CONFIG_SCHED_TRACER=y
+CONFIG_STACK_TRACER=y
CONFIG_BLK_DEV_IO_TRACE=y
CONFIG_CODE_PATCHING_SELFTEST=y
CONFIG_FTR_FIXUP_SELFTEST=y
@@ -304,13 +303,11 @@ CONFIG_CRYPTO_MD5_PPC=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_SHA1_PPC=m
CONFIG_CRYPTO_SHA256=y
-CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_CAST6=m
CONFIG_CRYPTO_KHAZAD=m
-CONFIG_CRYPTO_SALSA20=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
@@ -323,3 +320,4 @@ CONFIG_KVM_BOOK3S_64=m
CONFIG_KVM_BOOK3S_64_HV=m
CONFIG_VHOST_NET=m
CONFIG_PRINTK_TIME=y
+CONFIG_PRINTK_CALLER=y
diff --git a/arch/powerpc/configs/security.config b/arch/powerpc/configs/security.config
new file mode 100644
index 000000000000..1c91a35c6a73
--- /dev/null
+++ b/arch/powerpc/configs/security.config
@@ -0,0 +1,15 @@
+# This is the equivalent of booting with lockdown=integrity
+CONFIG_SECURITY=y
+CONFIG_SECURITYFS=y
+CONFIG_SECURITY_LOCKDOWN_LSM=y
+CONFIG_SECURITY_LOCKDOWN_LSM_EARLY=y
+CONFIG_LOCK_DOWN_KERNEL_FORCE_INTEGRITY=y
+
+# These are some general, reasonably inexpensive hardening options
+CONFIG_HARDENED_USERCOPY=y
+CONFIG_FORTIFY_SOURCE=y
+CONFIG_INIT_ON_ALLOC_DEFAULT_ON=y
+
+# UBSAN bounds checking is very cheap and good for hardening
+CONFIG_UBSAN=y
+# CONFIG_UBSAN_MISC is not set
\ No newline at end of file
diff --git a/arch/powerpc/configs/skiroot_defconfig b/arch/powerpc/configs/skiroot_defconfig
index 1b6bdad36b13..e0964210f259 100644
--- a/arch/powerpc/configs/skiroot_defconfig
+++ b/arch/powerpc/configs/skiroot_defconfig
@@ -43,9 +43,7 @@ CONFIG_KEXEC_FILE=y
CONFIG_PRESERVE_FA_DUMP=y
CONFIG_IRQ_ALL_CPUS=y
CONFIG_NUMA=y
-CONFIG_PPC_64K_PAGES=y
CONFIG_SCHED_SMT=y
-CONFIG_CMDLINE_BOOL=y
CONFIG_CMDLINE="console=tty0 console=hvc0 ipr.fast_reboot=1 quiet"
# CONFIG_SECCOMP is not set
# CONFIG_PPC_MEM_KEYS is not set
@@ -84,7 +82,6 @@ CONFIG_EEPROM_AT24=m
# CONFIG_OCXL is not set
CONFIG_BLK_DEV_SD=m
CONFIG_BLK_DEV_SR=m
-CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=m
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_SCAN_ASYNC=y
@@ -136,7 +133,6 @@ CONFIG_ACENIC_OMIT_TIGON_I=y
# CONFIG_NET_VENDOR_AQUANTIA is not set
# CONFIG_NET_VENDOR_ARC is not set
# CONFIG_NET_VENDOR_ATHEROS is not set
-# CONFIG_NET_VENDOR_AURORA is not set
CONFIG_TIGON3=m
CONFIG_BNX2X=m
# CONFIG_NET_VENDOR_BROCADE is not set
@@ -277,8 +273,6 @@ CONFIG_NLS_UTF8=y
CONFIG_ENCRYPTED_KEYS=y
CONFIG_SECURITY=y
CONFIG_HARDENED_USERCOPY=y
-# CONFIG_HARDENED_USERCOPY_FALLBACK is not set
-CONFIG_HARDENED_USERCOPY_PAGESPAN=y
CONFIG_FORTIFY_SOURCE=y
CONFIG_SECURITY_LOCKDOWN_LSM=y
CONFIG_SECURITY_LOCKDOWN_LSM_EARLY=y
diff --git a/arch/powerpc/configs/storcenter_defconfig b/arch/powerpc/configs/storcenter_defconfig
index b964084e4056..7a978d396991 100644
--- a/arch/powerpc/configs/storcenter_defconfig
+++ b/arch/powerpc/configs/storcenter_defconfig
@@ -12,7 +12,6 @@ CONFIG_EMBEDDED6xx=y
CONFIG_STORCENTER=y
CONFIG_HZ_100=y
CONFIG_BINFMT_MISC=y
-CONFIG_CMDLINE_BOOL=y
CONFIG_CMDLINE="console=ttyS0,115200"
# CONFIG_SECCOMP is not set
CONFIG_NET=y
@@ -77,4 +76,3 @@ CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
CONFIG_NLS_UTF8=y
CONFIG_CRC_T10DIF=y
-# CONFIG_ENABLE_MUST_CHECK is not set
diff --git a/arch/powerpc/configs/tqm8xx_defconfig b/arch/powerpc/configs/tqm8xx_defconfig
index eda8bfb2d0a3..083c2e57520a 100644
--- a/arch/powerpc/configs/tqm8xx_defconfig
+++ b/arch/powerpc/configs/tqm8xx_defconfig
@@ -15,7 +15,6 @@ CONFIG_MODULE_SRCVERSION_ALL=y
# CONFIG_BLK_DEV_BSG is not set
CONFIG_PARTITION_ADVANCED=y
CONFIG_TQM8XX=y
-CONFIG_8xx_COPYBACK=y
# CONFIG_8xx_CPU15 is not set
CONFIG_GEN_RTC=y
CONFIG_HZ_100=y
@@ -56,6 +55,6 @@ CONFIG_CRAMFS=y
CONFIG_NFS_FS=y
CONFIG_ROOT_NFS=y
CONFIG_CRC32_SLICEBY4=y
-CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DETECT_HUNG_TASK=y
diff --git a/arch/powerpc/configs/wii_defconfig b/arch/powerpc/configs/wii_defconfig
index 379c171f3ddd..0ab78c51455d 100644
--- a/arch/powerpc/configs/wii_defconfig
+++ b/arch/powerpc/configs/wii_defconfig
@@ -98,7 +98,8 @@ CONFIG_LEDS_TRIGGERS=y
CONFIG_LEDS_TRIGGER_HEARTBEAT=y
CONFIG_LEDS_TRIGGER_PANIC=y
CONFIG_RTC_CLASS=y
-CONFIG_RTC_DRV_GENERIC=y
+CONFIG_RTC_DRV_GAMECUBE=y
+CONFIG_NVMEM_NINTENDO_OTP=y
CONFIG_EXT2_FS=y
CONFIG_EXT4_FS=y
CONFIG_FUSE_FS=m
diff --git a/arch/powerpc/crypto/Kconfig b/arch/powerpc/crypto/Kconfig
new file mode 100644
index 000000000000..c1b964447401
--- /dev/null
+++ b/arch/powerpc/crypto/Kconfig
@@ -0,0 +1,97 @@
+# SPDX-License-Identifier: GPL-2.0
+
+menu "Accelerated Cryptographic Algorithms for CPU (powerpc)"
+
+config CRYPTO_CRC32C_VPMSUM
+ tristate "CRC32c"
+ depends on PPC64 && ALTIVEC
+ select CRYPTO_HASH
+ select CRC32
+ help
+ CRC32c CRC algorithm with the iSCSI polynomial (RFC 3385 and RFC 3720)
+
+ Architecture: powerpc64 using
+ - AltiVec extensions
+
+ Enable on POWER8 and newer processors for improved performance.
+
+config CRYPTO_CRCT10DIF_VPMSUM
+ tristate "CRC32T10DIF"
+ depends on PPC64 && ALTIVEC && CRC_T10DIF
+ select CRYPTO_HASH
+ help
+ CRC16 CRC algorithm used for the T10 (SCSI) Data Integrity Field (DIF)
+
+ Architecture: powerpc64 using
+ - AltiVec extensions
+
+ Enable on POWER8 and newer processors for improved performance.
+
+config CRYPTO_VPMSUM_TESTER
+ tristate "CRC32c and CRC32T10DIF hardware acceleration tester"
+ depends on CRYPTO_CRCT10DIF_VPMSUM && CRYPTO_CRC32C_VPMSUM
+ help
+ Stress test for CRC32c and CRCT10DIF algorithms implemented with
+ powerpc64 AltiVec extensions (POWER8 vpmsum instructions).
+ Unless you are testing these algorithms, you don't need this.
+
+config CRYPTO_MD5_PPC
+ tristate "Digests: MD5"
+ depends on PPC
+ select CRYPTO_HASH
+ help
+ MD5 message digest algorithm (RFC1321)
+
+ Architecture: powerpc
+
+config CRYPTO_SHA1_PPC
+ tristate "Hash functions: SHA-1"
+ depends on PPC
+ help
+ SHA-1 secure hash algorithm (FIPS 180)
+
+ Architecture: powerpc
+
+config CRYPTO_SHA1_PPC_SPE
+ tristate "Hash functions: SHA-1 (SPE)"
+ depends on PPC && SPE
+ help
+ SHA-1 secure hash algorithm (FIPS 180)
+
+ Architecture: powerpc using
+ - SPE (Signal Processing Engine) extensions
+
+config CRYPTO_SHA256_PPC_SPE
+ tristate "Hash functions: SHA-224 and SHA-256 (SPE)"
+ depends on PPC && SPE
+ select CRYPTO_SHA256
+ select CRYPTO_HASH
+ help
+ SHA-224 and SHA-256 secure hash algorithms (FIPS 180)
+
+ Architecture: powerpc using
+ - SPE (Signal Processing Engine) extensions
+
+config CRYPTO_AES_PPC_SPE
+ tristate "Ciphers: AES, modes: ECB/CBC/CTR/XTS (SPE)"
+ depends on PPC && SPE
+ select CRYPTO_SKCIPHER
+ help
+ Block ciphers: AES cipher algorithms (FIPS-197)
+ Length-preserving ciphers: AES with ECB, CBC, CTR, and XTS modes
+
+ Architecture: powerpc using:
+ - SPE (Signal Processing Engine) extensions
+
+ SPE is available for:
+ - Processor Type: Freescale 8500
+ - CPU selection: e500 (8540)
+
+ This module should only be used for low power (router) devices
+ without hardware AES acceleration (e.g. caam crypto). It reduces the
+ size of the AES tables from 16KB to 8KB + 256 bytes and mitigates
+	  timing attacks. Nevertheless it might not be as secure as other
+ architecture specific assembler implementations that work on 1KB
+ tables or 256 bytes S-boxes.
+
+endmenu
diff --git a/arch/powerpc/crypto/aes-spe-glue.c b/arch/powerpc/crypto/aes-spe-glue.c
index c2b23b69d7b1..efab78a3a8f6 100644
--- a/arch/powerpc/crypto/aes-spe-glue.c
+++ b/arch/powerpc/crypto/aes-spe-glue.c
@@ -28,7 +28,7 @@
* instructions per clock cycle using one 32/64 bit unit (SU1) and one 32
* bit unit (SU2). One of these can be a memory access that is executed via
* a single load and store unit (LSU). XTS-AES-256 takes ~780 operations per
- * 16 byte block block or 25 cycles per byte. Thus 768 bytes of input data
+ * 16 byte block or 25 cycles per byte. Thus 768 bytes of input data
* will need an estimated maximum of 20,000 cycles. Headroom for cache misses
* included. Even with the low end model clocked at 667 MHz this equals to a
* critical time window of less than 30us. The value has been chosen to
@@ -404,7 +404,7 @@ static int ppc_xts_decrypt(struct skcipher_request *req)
/*
* Algorithm definitions. Disabling alignment (cra_alignmask=0) was chosen
- * because the e500 platform can handle unaligned reads/writes very efficently.
+ * because the e500 platform can handle unaligned reads/writes very efficiently.
 * This improves IPsec throughput by another few percent. Additionally we assume
* that AES context is always aligned to at least 8 bytes because it is created
* with kmalloc() in the crypto infrastructure
diff --git a/arch/powerpc/crypto/crc-vpmsum_test.c b/arch/powerpc/crypto/crc-vpmsum_test.c
index dce86e75f1a8..273c527868db 100644
--- a/arch/powerpc/crypto/crc-vpmsum_test.c
+++ b/arch/powerpc/crypto/crc-vpmsum_test.c
@@ -9,6 +9,7 @@
#include <crypto/internal/hash.h>
#include <linux/init.h>
#include <linux/module.h>
+#include <linux/random.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/cpufeature.h>
@@ -22,10 +23,11 @@ static unsigned long iterations = 10000;
static int __init crc_test_init(void)
{
u16 crc16 = 0, verify16 = 0;
- u32 crc32 = 0, verify32 = 0;
__le32 verify32le = 0;
unsigned char *data;
+ u32 verify32 = 0;
unsigned long i;
+ __le32 crc32;
int ret;
struct crypto_shash *crct10dif_tfm;
@@ -80,7 +82,7 @@ static int __init crc_test_init(void)
if (len <= offset)
continue;
- prandom_bytes(data, len);
+ get_random_bytes(data, len);
len -= offset;
crypto_shash_update(crct10dif_shash, data+offset, len);
@@ -98,7 +100,7 @@ static int __init crc_test_init(void)
crypto_shash_final(crc32c_shash, (u8 *)(&crc32));
verify32 = le32_to_cpu(verify32le);
verify32le = ~cpu_to_le32(__crc32c_le(~verify32, data+offset, len));
- if (crc32 != (u32)verify32le) {
+ if (crc32 != verify32le) {
pr_err("FAILURE in CRC32: got 0x%08x expected 0x%08x (len %lu)\n",
crc32, verify32, len);
break;
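
Putting the hunks above together, the check for each random length boils down to comparing the vpmsum-backed shash against the generic software CRC32C; a condensed sketch, assuming the "crc32c" shash has already been allocated and reset earlier in the function as the module does:

	crypto_shash_update(crc32c_shash, data + offset, len);
	crypto_shash_final(crc32c_shash, (u8 *)&crc32);		/* hardware result, __le32 */

	verify32 = le32_to_cpu(verify32le);			/* chain from the previous round */
	verify32le = ~cpu_to_le32(__crc32c_le(~verify32, data + offset, len));	/* software reference */
	if (crc32 != verify32le)
		pr_err("FAILURE in CRC32 (len %lu)\n", len);
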
diff --git a/arch/powerpc/crypto/crc32-vpmsum_core.S b/arch/powerpc/crypto/crc32-vpmsum_core.S
index c3524eba4d0d..a16a717c809c 100644
--- a/arch/powerpc/crypto/crc32-vpmsum_core.S
+++ b/arch/powerpc/crypto/crc32-vpmsum_core.S
@@ -19,7 +19,7 @@
* We then use fixed point Barrett reduction to compute a mod n over GF(2)
* for n = CRC using POWER8 instructions. We use x = 32.
*
- * http://en.wikipedia.org/wiki/Barrett_reduction
+ * https://en.wikipedia.org/wiki/Barrett_reduction
*
* Copyright (C) 2015 Anton Blanchard <anton@au.ibm.com>, IBM
*/
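
For readers unfamiliar with the reference above, fixed-point Barrett reduction in ordinary integer arithmetic looks roughly like the sketch below; the vpmsum code does the same thing over GF(2), where the multiplications become carry-less (vpmsumd) and the subtraction becomes XOR:

	/* reduce a mod n (a < 2^(2k), n at most k bits), with m = floor(2^(2k) / n) precomputed */
	q = (a * m) >> (2 * k);		/* approximate quotient                            */
	r = a - q * n;			/* candidate remainder, too large by at most one n */
	if (r >= n)
		r -= n;
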
diff --git a/arch/powerpc/crypto/md5-asm.S b/arch/powerpc/crypto/md5-asm.S
index 948d100a2934..fa6bc440cf4a 100644
--- a/arch/powerpc/crypto/md5-asm.S
+++ b/arch/powerpc/crypto/md5-asm.S
@@ -38,15 +38,11 @@
#define INITIALIZE \
PPC_STLU r1,-INT_FRAME_SIZE(r1); \
- SAVE_8GPRS(14, r1); /* push registers onto stack */ \
- SAVE_4GPRS(22, r1); \
- SAVE_GPR(26, r1)
+ SAVE_GPRS(14, 26, r1) /* push registers onto stack */
#define FINALIZE \
- REST_8GPRS(14, r1); /* pop registers from stack */ \
- REST_4GPRS(22, r1); \
- REST_GPR(26, r1); \
- addi r1,r1,INT_FRAME_SIZE;
+ REST_GPRS(14, 26, r1); /* pop registers from stack */ \
+ addi r1,r1,INT_FRAME_SIZE
#ifdef __BIG_ENDIAN__
#define LOAD_DATA(reg, off) \
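
SAVE_GPRS(first, last, base) and REST_GPRS(first, last, base) are range versions of the old SAVE_nGPRS helpers: they store or load every GPR in [first, last] at its usual slot in the register save area of the frame. Conceptually, the 64-bit expansion of SAVE_GPRS(14, 26, r1) above is something like the following (offsets are illustrative; the real ones come from the pt_regs GPR layout in asm-offsets):

	std	r14, <save area + 14*8>(r1)
	std	r15, <save area + 15*8>(r1)
	...
	std	r26, <save area + 26*8>(r1)
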
diff --git a/arch/powerpc/crypto/md5-glue.c b/arch/powerpc/crypto/md5-glue.c
index 7d1bf2fcf668..c24f605033bd 100644
--- a/arch/powerpc/crypto/md5-glue.c
+++ b/arch/powerpc/crypto/md5-glue.c
@@ -11,7 +11,6 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
-#include <linux/cryptohash.h>
#include <linux/types.h>
#include <crypto/md5.h>
#include <asm/byteorder.h>
diff --git a/arch/powerpc/crypto/sha1-powerpc-asm.S b/arch/powerpc/crypto/sha1-powerpc-asm.S
index 23e248beff71..f0d5ed557ab1 100644
--- a/arch/powerpc/crypto/sha1-powerpc-asm.S
+++ b/arch/powerpc/crypto/sha1-powerpc-asm.S
@@ -125,8 +125,7 @@
_GLOBAL(powerpc_sha_transform)
PPC_STLU r1,-INT_FRAME_SIZE(r1)
- SAVE_8GPRS(14, r1)
- SAVE_10GPRS(22, r1)
+ SAVE_GPRS(14, 31, r1)
/* Load up A - E */
lwz RA(0),0(r3) /* A */
@@ -184,7 +183,6 @@ _GLOBAL(powerpc_sha_transform)
stw RD(0),12(r3)
stw RE(0),16(r3)
- REST_8GPRS(14, r1)
- REST_10GPRS(22, r1)
+ REST_GPRS(14, 31, r1)
addi r1,r1,INT_FRAME_SIZE
blr
diff --git a/arch/powerpc/crypto/sha1-spe-glue.c b/arch/powerpc/crypto/sha1-spe-glue.c
index 6379990bd604..9170892a8557 100644
--- a/arch/powerpc/crypto/sha1-spe-glue.c
+++ b/arch/powerpc/crypto/sha1-spe-glue.c
@@ -11,9 +11,9 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
-#include <linux/cryptohash.h>
#include <linux/types.h>
-#include <crypto/sha.h>
+#include <crypto/sha1.h>
+#include <crypto/sha1_base.h>
#include <asm/byteorder.h>
#include <asm/switch_to.h>
#include <linux/hardirq.h>
@@ -56,20 +56,6 @@ static inline void ppc_sha1_clear_context(struct sha1_state *sctx)
do { *ptr++ = 0; } while (--count);
}
-static int ppc_spe_sha1_init(struct shash_desc *desc)
-{
- struct sha1_state *sctx = shash_desc_ctx(desc);
-
- sctx->state[0] = SHA1_H0;
- sctx->state[1] = SHA1_H1;
- sctx->state[2] = SHA1_H2;
- sctx->state[3] = SHA1_H3;
- sctx->state[4] = SHA1_H4;
- sctx->count = 0;
-
- return 0;
-}
-
static int ppc_spe_sha1_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
@@ -108,7 +94,7 @@ static int ppc_spe_sha1_update(struct shash_desc *desc, const u8 *data,
src += bytes;
len -= bytes;
- };
+ }
memcpy((char *)sctx->buffer, src, len);
return 0;
@@ -169,7 +155,7 @@ static int ppc_spe_sha1_import(struct shash_desc *desc, const void *in)
static struct shash_alg alg = {
.digestsize = SHA1_DIGEST_SIZE,
- .init = ppc_spe_sha1_init,
+ .init = sha1_base_init,
.update = ppc_spe_sha1_update,
.final = ppc_spe_sha1_final,
.export = ppc_spe_sha1_export,
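
The deleted ppc_spe_sha1_init() duplicated what the generic helper already provides; sha1_base_init() from <crypto/sha1_base.h> is essentially the same initialisation:

	static inline int sha1_base_init(struct shash_desc *desc)
	{
		struct sha1_state *sctx = shash_desc_ctx(desc);

		sctx->state[0] = SHA1_H0;
		sctx->state[1] = SHA1_H1;
		sctx->state[2] = SHA1_H2;
		sctx->state[3] = SHA1_H3;
		sctx->state[4] = SHA1_H4;
		sctx->count = 0;

		return 0;
	}
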
diff --git a/arch/powerpc/crypto/sha1.c b/arch/powerpc/crypto/sha1.c
index 7b43fc352089..f283bbd3f121 100644
--- a/arch/powerpc/crypto/sha1.c
+++ b/arch/powerpc/crypto/sha1.c
@@ -16,26 +16,15 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
-#include <linux/cryptohash.h>
#include <linux/types.h>
-#include <crypto/sha.h>
+#include <crypto/sha1.h>
+#include <crypto/sha1_base.h>
#include <asm/byteorder.h>
-extern void powerpc_sha_transform(u32 *state, const u8 *src, u32 *temp);
+void powerpc_sha_transform(u32 *state, const u8 *src);
-static int sha1_init(struct shash_desc *desc)
-{
- struct sha1_state *sctx = shash_desc_ctx(desc);
-
- *sctx = (struct sha1_state){
- .state = { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 },
- };
-
- return 0;
-}
-
-static int sha1_update(struct shash_desc *desc, const u8 *data,
- unsigned int len)
+static int powerpc_sha1_update(struct shash_desc *desc, const u8 *data,
+ unsigned int len)
{
struct sha1_state *sctx = shash_desc_ctx(desc);
unsigned int partial, done;
@@ -47,7 +36,6 @@ static int sha1_update(struct shash_desc *desc, const u8 *data,
src = data;
if ((partial + len) > 63) {
- u32 temp[SHA_WORKSPACE_WORDS];
if (partial) {
done = -partial;
@@ -56,12 +44,11 @@ static int sha1_update(struct shash_desc *desc, const u8 *data,
}
do {
- powerpc_sha_transform(sctx->state, src, temp);
+ powerpc_sha_transform(sctx->state, src);
done += 64;
src = data + done;
} while (done + 63 < len);
- memzero_explicit(temp, sizeof(temp));
partial = 0;
}
memcpy(sctx->buffer + partial, src, len - done);
@@ -71,7 +58,7 @@ static int sha1_update(struct shash_desc *desc, const u8 *data,
/* Add padding and return the message digest. */
-static int sha1_final(struct shash_desc *desc, u8 *out)
+static int powerpc_sha1_final(struct shash_desc *desc, u8 *out)
{
struct sha1_state *sctx = shash_desc_ctx(desc);
__be32 *dst = (__be32 *)out;
@@ -84,10 +71,10 @@ static int sha1_final(struct shash_desc *desc, u8 *out)
/* Pad out to 56 mod 64 */
index = sctx->count & 0x3f;
padlen = (index < 56) ? (56 - index) : ((64+56) - index);
- sha1_update(desc, padding, padlen);
+ powerpc_sha1_update(desc, padding, padlen);
/* Append length */
- sha1_update(desc, (const u8 *)&bits, sizeof(bits));
+ powerpc_sha1_update(desc, (const u8 *)&bits, sizeof(bits));
/* Store state in digest */
for (i = 0; i < 5; i++)
@@ -99,7 +86,7 @@ static int sha1_final(struct shash_desc *desc, u8 *out)
return 0;
}
-static int sha1_export(struct shash_desc *desc, void *out)
+static int powerpc_sha1_export(struct shash_desc *desc, void *out)
{
struct sha1_state *sctx = shash_desc_ctx(desc);
@@ -107,7 +94,7 @@ static int sha1_export(struct shash_desc *desc, void *out)
return 0;
}
-static int sha1_import(struct shash_desc *desc, const void *in)
+static int powerpc_sha1_import(struct shash_desc *desc, const void *in)
{
struct sha1_state *sctx = shash_desc_ctx(desc);
@@ -117,11 +104,11 @@ static int sha1_import(struct shash_desc *desc, const void *in)
static struct shash_alg alg = {
.digestsize = SHA1_DIGEST_SIZE,
- .init = sha1_init,
- .update = sha1_update,
- .final = sha1_final,
- .export = sha1_export,
- .import = sha1_import,
+ .init = sha1_base_init,
+ .update = powerpc_sha1_update,
+ .final = powerpc_sha1_final,
+ .export = powerpc_sha1_export,
+ .import = powerpc_sha1_import,
.descsize = sizeof(struct sha1_state),
.statesize = sizeof(struct sha1_state),
.base = {
diff --git a/arch/powerpc/crypto/sha256-spe-glue.c b/arch/powerpc/crypto/sha256-spe-glue.c
index 84939e563b81..2997d13236e0 100644
--- a/arch/powerpc/crypto/sha256-spe-glue.c
+++ b/arch/powerpc/crypto/sha256-spe-glue.c
@@ -12,9 +12,9 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
-#include <linux/cryptohash.h>
#include <linux/types.h>
-#include <crypto/sha.h>
+#include <crypto/sha2.h>
+#include <crypto/sha256_base.h>
#include <asm/byteorder.h>
#include <asm/switch_to.h>
#include <linux/hardirq.h>
@@ -57,40 +57,6 @@ static inline void ppc_sha256_clear_context(struct sha256_state *sctx)
do { *ptr++ = 0; } while (--count);
}
-static int ppc_spe_sha256_init(struct shash_desc *desc)
-{
- struct sha256_state *sctx = shash_desc_ctx(desc);
-
- sctx->state[0] = SHA256_H0;
- sctx->state[1] = SHA256_H1;
- sctx->state[2] = SHA256_H2;
- sctx->state[3] = SHA256_H3;
- sctx->state[4] = SHA256_H4;
- sctx->state[5] = SHA256_H5;
- sctx->state[6] = SHA256_H6;
- sctx->state[7] = SHA256_H7;
- sctx->count = 0;
-
- return 0;
-}
-
-static int ppc_spe_sha224_init(struct shash_desc *desc)
-{
- struct sha256_state *sctx = shash_desc_ctx(desc);
-
- sctx->state[0] = SHA224_H0;
- sctx->state[1] = SHA224_H1;
- sctx->state[2] = SHA224_H2;
- sctx->state[3] = SHA224_H3;
- sctx->state[4] = SHA224_H4;
- sctx->state[5] = SHA224_H5;
- sctx->state[6] = SHA224_H6;
- sctx->state[7] = SHA224_H7;
- sctx->count = 0;
-
- return 0;
-}
-
static int ppc_spe_sha256_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
@@ -130,7 +96,7 @@ static int ppc_spe_sha256_update(struct shash_desc *desc, const u8 *data,
src += bytes;
len -= bytes;
- };
+ }
memcpy((char *)sctx->buf, src, len);
return 0;
@@ -178,7 +144,7 @@ static int ppc_spe_sha256_final(struct shash_desc *desc, u8 *out)
static int ppc_spe_sha224_final(struct shash_desc *desc, u8 *out)
{
- u32 D[SHA256_DIGEST_SIZE >> 2];
+ __be32 D[SHA256_DIGEST_SIZE >> 2];
__be32 *dst = (__be32 *)out;
ppc_spe_sha256_final(desc, (u8 *)D);
@@ -215,7 +181,7 @@ static int ppc_spe_sha256_import(struct shash_desc *desc, const void *in)
static struct shash_alg algs[2] = { {
.digestsize = SHA256_DIGEST_SIZE,
- .init = ppc_spe_sha256_init,
+ .init = sha256_base_init,
.update = ppc_spe_sha256_update,
.final = ppc_spe_sha256_final,
.export = ppc_spe_sha256_export,
@@ -231,7 +197,7 @@ static struct shash_alg algs[2] = { {
}
}, {
.digestsize = SHA224_DIGEST_SIZE,
- .init = ppc_spe_sha224_init,
+ .init = sha224_base_init,
.update = ppc_spe_sha256_update,
.final = ppc_spe_sha224_final,
.export = ppc_spe_sha256_export,
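
The same pattern applies here: sha256_base_init() and sha224_base_init() load the FIPS 180 initial values that the two deleted ppc_spe_*_init() helpers hard-coded, and SHA-224 reuses the SHA-256 update path, only truncating at the end. The __be32 D[] change above simply gives the intermediate digest buffer its real (big-endian) type; the truncating final is roughly (a sketch, the real code copies word by word):

	static int ppc_spe_sha224_final(struct shash_desc *desc, u8 *out)
	{
		__be32 D[SHA256_DIGEST_SIZE >> 2];

		ppc_spe_sha256_final(desc, (u8 *)D);	/* full 32-byte SHA-256 digest      */
		memcpy(out, D, SHA224_DIGEST_SIZE);	/* SHA-224 keeps the first 28 bytes */
		memzero_explicit(D, sizeof(D));		/* scrub the discarded words        */
		return 0;
	}
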
diff --git a/arch/powerpc/include/asm/Kbuild b/arch/powerpc/include/asm/Kbuild
index d0a23d0db863..bcf95ce0964f 100644
--- a/arch/powerpc/include/asm/Kbuild
+++ b/arch/powerpc/include/asm/Kbuild
@@ -1,14 +1,10 @@
# SPDX-License-Identifier: GPL-2.0
generated-y += syscall_table_32.h
generated-y += syscall_table_64.h
-generated-y += syscall_table_c32.h
generated-y += syscall_table_spu.h
-generic-y += div64.h
-generic-y += dma-mapping.h
generic-y += export.h
-generic-y += irq_regs.h
-generic-y += local64.h
+generic-y += kvm_types.h
generic-y += mcs_spinlock.h
-generic-y += preempt.h
+generic-y += qrwlock.h
generic-y += vtime.h
generic-y += early_ioremap.h
diff --git a/arch/powerpc/include/asm/agp.h b/arch/powerpc/include/asm/agp.h
index b29b1186f819..6b6485c988dd 100644
--- a/arch/powerpc/include/asm/agp.h
+++ b/arch/powerpc/include/asm/agp.h
@@ -5,8 +5,8 @@
#include <asm/io.h>
-#define map_page_into_agp(page)
-#define unmap_page_from_agp(page)
+#define map_page_into_agp(page) do {} while (0)
+#define unmap_page_from_agp(page) do {} while (0)
#define flush_agp_cache() mb()
/* GATT allocation. Returns/accepts GATT kernel virtual address. */
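
The do {} while (0) change is the standard idiom for statement-like no-op macros: an empty expansion leaves a bare ';' behind, which trips -Wempty-body and dangling-else surprises, while the do/while form always behaves as exactly one statement. A hypothetical caller (not from this tree; handle_non_agp() is illustrative only) shows where it matters:

	if (needs_agp_mapping)
		map_page_into_agp(page);	/* empty macro: expands to a stray ';'        */
	else					/* do {} while (0): still a single statement  */
		handle_non_agp(page);
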
diff --git a/arch/powerpc/include/asm/archrandom.h b/arch/powerpc/include/asm/archrandom.h
index 9a53e29680f4..51b093f67528 100644
--- a/arch/powerpc/include/asm/archrandom.h
+++ b/arch/powerpc/include/asm/archrandom.h
@@ -2,48 +2,15 @@
#ifndef _ASM_POWERPC_ARCHRANDOM_H
#define _ASM_POWERPC_ARCHRANDOM_H
-#ifdef CONFIG_ARCH_RANDOM
-
-#include <asm/machdep.h>
-
-static inline bool __must_check arch_get_random_long(unsigned long *v)
+static inline size_t __must_check arch_get_random_longs(unsigned long *v, size_t max_longs)
{
- return false;
+ return 0;
}
-static inline bool __must_check arch_get_random_int(unsigned int *v)
-{
- return false;
-}
-
-static inline bool __must_check arch_get_random_seed_long(unsigned long *v)
-{
- if (ppc_md.get_random_seed)
- return ppc_md.get_random_seed(v);
-
- return false;
-}
-
-static inline bool __must_check arch_get_random_seed_int(unsigned int *v)
-{
- unsigned long val;
- bool rc;
-
- rc = arch_get_random_seed_long(&val);
- if (rc)
- *v = val;
-
- return rc;
-}
-#endif /* CONFIG_ARCH_RANDOM */
+size_t __must_check arch_get_random_seed_longs(unsigned long *v, size_t max_longs);
#ifdef CONFIG_PPC_POWERNV
-int powernv_hwrng_present(void);
-int powernv_get_random_long(unsigned long *v);
-int powernv_get_random_real_mode(unsigned long *v);
-#else
-static inline int powernv_hwrng_present(void) { return 0; }
-static inline int powernv_get_random_real_mode(unsigned long *v) { return 0; }
+int pnv_get_random_long(unsigned long *v);
#endif
#endif /* _ASM_POWERPC_ARCHRANDOM_H */
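
The replacement interface reports how many longs were actually filled instead of a bool per word, so 0 doubles as "no hardware source available". A hedged usage sketch (mix_seed() is a made-up consumer; the rest is the API above):

	unsigned long seed[2];
	size_t n = arch_get_random_seed_longs(seed, ARRAY_SIZE(seed));

	if (n)				/* n is 0..2; only seed[0..n-1] hold entropy */
		mix_seed(seed, n * sizeof(seed[0]));
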
diff --git a/arch/powerpc/include/asm/asm-405.h b/arch/powerpc/include/asm/asm-405.h
deleted file mode 100644
index 7270d3ae7c8e..000000000000
--- a/arch/powerpc/include/asm/asm-405.h
+++ /dev/null
@@ -1,19 +0,0 @@
-#ifndef _ASM_POWERPC_ASM_405_H
-#define _ASM_POWERPC_ASM_405_H
-
-#include <asm/asm-const.h>
-
-#ifdef __KERNEL__
-#ifdef CONFIG_IBM405_ERR77
-/* Erratum #77 on the 405 means we need a sync or dcbt before every
- * stwcx. The old ATOMIC_SYNC_FIX covered some but not all of this.
- */
-#define PPC405_ERR77(ra,rb) stringify_in_c(dcbt ra, rb;)
-#define PPC405_ERR77_SYNC stringify_in_c(sync;)
-#else
-#define PPC405_ERR77(ra,rb)
-#define PPC405_ERR77_SYNC
-#endif
-#endif
-
-#endif /* _ASM_POWERPC_ASM_405_H */
diff --git a/arch/powerpc/include/asm/asm-compat.h b/arch/powerpc/include/asm/asm-compat.h
index 19b70c5b5f18..2bc53c646ccd 100644
--- a/arch/powerpc/include/asm/asm-compat.h
+++ b/arch/powerpc/include/asm/asm-compat.h
@@ -17,10 +17,11 @@
#define PPC_LONG stringify_in_c(.8byte)
#define PPC_LONG_ALIGN stringify_in_c(.balign 8)
#define PPC_TLNEI stringify_in_c(tdnei)
-#define PPC_LLARX(t, a, b, eh) PPC_LDARX(t, a, b, eh)
+#define PPC_LLARX stringify_in_c(ldarx)
#define PPC_STLCX stringify_in_c(stdcx.)
#define PPC_CNTLZL stringify_in_c(cntlzd)
#define PPC_MTOCRF(FXM, RS) MTOCRF((FXM), RS)
+#define PPC_SRL stringify_in_c(srd)
#define PPC_LR_STKOFF 16
#define PPC_MIN_STKFRM 112
@@ -50,10 +51,11 @@
#define PPC_LONG stringify_in_c(.long)
#define PPC_LONG_ALIGN stringify_in_c(.balign 4)
#define PPC_TLNEI stringify_in_c(twnei)
-#define PPC_LLARX(t, a, b, eh) PPC_LWARX(t, a, b, eh)
+#define PPC_LLARX stringify_in_c(lwarx)
#define PPC_STLCX stringify_in_c(stwcx.)
#define PPC_CNTLZL stringify_in_c(cntlzw)
#define PPC_MTOCRF stringify_in_c(mtcrf)
+#define PPC_SRL stringify_in_c(srw)
#define PPC_LR_STKOFF 4
#define PPC_MIN_STKFRM 16
diff --git a/arch/powerpc/include/asm/asm-const.h b/arch/powerpc/include/asm/asm-const.h
index 082c1538c562..bfb3c3534877 100644
--- a/arch/powerpc/include/asm/asm-const.h
+++ b/arch/powerpc/include/asm/asm-const.h
@@ -11,4 +11,5 @@
# define __ASM_CONST(x) x##UL
# define ASM_CONST(x) __ASM_CONST(x)
#endif
+
#endif /* _ASM_POWERPC_ASM_CONST_H */
diff --git a/arch/powerpc/include/asm/asm-prototypes.h b/arch/powerpc/include/asm/asm-prototypes.h
index 983c0084fb3f..274bce76f5da 100644
--- a/arch/powerpc/include/asm/asm-prototypes.h
+++ b/arch/powerpc/include/asm/asm-prototypes.h
@@ -2,8 +2,9 @@
#ifndef _ASM_POWERPC_ASM_PROTOTYPES_H
#define _ASM_POWERPC_ASM_PROTOTYPES_H
/*
- * This file is for prototypes of C functions that are only called
- * from asm, and any associated variables.
+ * This file is for C prototypes of asm symbols that are EXPORTed.
+ * It allows the modversions logic to see their prototype and
+ * generate proper CRCs for them.
*
* Copyright 2016, Daniel Axtens, IBM Corporation.
*/
@@ -19,22 +20,6 @@
#include <uapi/asm/ucontext.h>
-/* SMP */
-extern struct task_struct *current_set[NR_CPUS];
-extern struct task_struct *secondary_current;
-void start_secondary(void *unused);
-
-/* kexec */
-struct paca_struct;
-struct kimage;
-extern struct paca_struct kexec_paca;
-void kexec_copy_flush(struct kimage *image);
-
-/* pseries hcall tracing */
-extern struct static_key hcall_tracepoint_key;
-void __trace_hcall_entry(unsigned long opcode, unsigned long *args);
-void __trace_hcall_exit(long opcode, long retval, unsigned long *retbuf);
-
/* Ultravisor */
#if defined(CONFIG_PPC_POWERNV) || defined(CONFIG_PPC_SVM)
long ucall_norets(unsigned long opcode, ...);
@@ -50,82 +35,8 @@ int64_t __opal_call(int64_t a0, int64_t a1, int64_t a2, int64_t a3,
int64_t a4, int64_t a5, int64_t a6, int64_t a7,
int64_t opcode, uint64_t msr);
-/* VMX copying */
-int enter_vmx_usercopy(void);
-int exit_vmx_usercopy(void);
-int enter_vmx_ops(void);
-void *exit_vmx_ops(void *dest);
-
-/* Traps */
-long machine_check_early(struct pt_regs *regs);
-long hmi_exception_realmode(struct pt_regs *regs);
-void SMIException(struct pt_regs *regs);
-void handle_hmi_exception(struct pt_regs *regs);
-void instruction_breakpoint_exception(struct pt_regs *regs);
-void RunModeException(struct pt_regs *regs);
-void single_step_exception(struct pt_regs *regs);
-void program_check_exception(struct pt_regs *regs);
-void alignment_exception(struct pt_regs *regs);
-void StackOverflow(struct pt_regs *regs);
-void kernel_fp_unavailable_exception(struct pt_regs *regs);
-void altivec_unavailable_exception(struct pt_regs *regs);
-void vsx_unavailable_exception(struct pt_regs *regs);
-void fp_unavailable_tm(struct pt_regs *regs);
-void altivec_unavailable_tm(struct pt_regs *regs);
-void vsx_unavailable_tm(struct pt_regs *regs);
-void facility_unavailable_exception(struct pt_regs *regs);
-void TAUException(struct pt_regs *regs);
-void altivec_assist_exception(struct pt_regs *regs);
-void unrecoverable_exception(struct pt_regs *regs);
-void kernel_bad_stack(struct pt_regs *regs);
-void system_reset_exception(struct pt_regs *regs);
-void machine_check_exception(struct pt_regs *regs);
-void emulation_assist_interrupt(struct pt_regs *regs);
-long do_slb_fault(struct pt_regs *regs, unsigned long ea);
-void do_bad_slb_fault(struct pt_regs *regs, unsigned long ea, long err);
-
-/* signals, syscalls and interrupts */
-long sys_swapcontext(struct ucontext __user *old_ctx,
- struct ucontext __user *new_ctx,
- long ctx_size);
-#ifdef CONFIG_PPC32
-long sys_debug_setcontext(struct ucontext __user *ctx,
- int ndbg, struct sig_dbg_op __user *dbg);
-int
-ppc_select(int n, fd_set __user *inp, fd_set __user *outp, fd_set __user *exp,
- struct __kernel_old_timeval __user *tvp);
-unsigned long __init early_init(unsigned long dt_ptr);
-void __init machine_init(u64 dt_ptr);
-#endif
-
-long ppc_fadvise64_64(int fd, int advice, u32 offset_high, u32 offset_low,
- u32 len_high, u32 len_low);
-long sys_switch_endian(void);
-notrace unsigned int __check_irq_replay(void);
-void notrace restore_interrupts(void);
-
-/* ptrace */
-long do_syscall_trace_enter(struct pt_regs *regs);
-void do_syscall_trace_leave(struct pt_regs *regs);
-
-/* process */
-void restore_math(struct pt_regs *regs);
-void restore_tm_state(struct pt_regs *regs);
-
-/* prom_init (OpenFirmware) */
-unsigned long __init prom_init(unsigned long r3, unsigned long r4,
- unsigned long pp,
- unsigned long r6, unsigned long r7,
- unsigned long kbase);
-
-/* setup */
-void __init early_setup(unsigned long dt_ptr);
-void early_setup_secondary(void);
-
-/* time */
-void accumulate_stolen_time(void);
-
/* misc runtime */
+void enable_machine_check(void);
extern u64 __bswapdi2(u64);
extern s64 __lshrdi3(s64, int);
extern s64 __ashldi3(s64, int);
@@ -135,11 +46,6 @@ extern int __ucmpdi2(u64, u64);
/* tracing */
void _mcount(void);
-unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip,
- unsigned long sp);
-
-void pnv_power9_force_smt4_catch(void);
-void pnv_power9_force_smt4_release(void);
/* Transaction memory related */
void tm_enable(void);
@@ -150,16 +56,6 @@ struct kvm_vcpu;
void _kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu, u64 guest_msr);
void _kvmppc_save_tm_pr(struct kvm_vcpu *vcpu, u64 guest_msr);
-/* Patch sites */
-extern s32 patch__call_flush_count_cache;
-extern s32 patch__flush_count_cache_return;
-extern s32 patch__flush_link_stack_return;
-extern s32 patch__call_kvm_flush_link_stack;
-extern s32 patch__memset_nocache, patch__memcpy_nocache;
-
-extern long flush_count_cache;
-extern long kvm_flush_link_stack;
-
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
void kvmppc_save_tm_hv(struct kvm_vcpu *vcpu, u64 msr, bool preserve_nv);
void kvmppc_restore_tm_hv(struct kvm_vcpu *vcpu, u64 msr, bool preserve_nv);
@@ -170,12 +66,7 @@ static inline void kvmppc_restore_tm_hv(struct kvm_vcpu *vcpu, u64 msr,
bool preserve_nv) { }
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
-void kvmhv_save_host_pmu(void);
-void kvmhv_load_host_pmu(void);
-void kvmhv_save_guest_pmu(struct kvm_vcpu *vcpu, bool pmu_in_use);
-void kvmhv_load_guest_pmu(struct kvm_vcpu *vcpu);
-
-int __kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu);
+void kvmppc_p9_enter_guest(struct kvm_vcpu *vcpu);
long kvmppc_h_set_dabr(struct kvm_vcpu *vcpu, unsigned long dabr);
long kvmppc_h_set_xdabr(struct kvm_vcpu *vcpu, unsigned long dabr,
diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
index 31c231ea56b7..486ab7889121 100644
--- a/arch/powerpc/include/asm/atomic.h
+++ b/arch/powerpc/include/asm/atomic.h
@@ -10,9 +10,7 @@
#include <linux/types.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>
-#include <asm/asm-405.h>
-
-#define ATOMIC_INIT(i) { (i) }
+#include <asm/asm-const.h>
/*
* Since *_return_relaxed and {cmp}xchg_relaxed are implemented with
@@ -25,182 +23,142 @@
#define __atomic_release_fence() \
__asm__ __volatile__(PPC_RELEASE_BARRIER "" : : : "memory")
-static __inline__ int atomic_read(const atomic_t *v)
+static __inline__ int arch_atomic_read(const atomic_t *v)
{
int t;
- __asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));
+ __asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r"(t) : "m<>"(v->counter));
return t;
}
-static __inline__ void atomic_set(atomic_t *v, int i)
+static __inline__ void arch_atomic_set(atomic_t *v, int i)
{
- __asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
+ __asm__ __volatile__("stw%U0%X0 %1,%0" : "=m<>"(v->counter) : "r"(i));
}
-#define ATOMIC_OP(op, asm_op) \
-static __inline__ void atomic_##op(int a, atomic_t *v) \
+#define ATOMIC_OP(op, asm_op, suffix, sign, ...) \
+static __inline__ void arch_atomic_##op(int a, atomic_t *v) \
{ \
int t; \
\
__asm__ __volatile__( \
"1: lwarx %0,0,%3 # atomic_" #op "\n" \
- #asm_op " %0,%2,%0\n" \
- PPC405_ERR77(0,%3) \
+ #asm_op "%I2" suffix " %0,%0,%2\n" \
" stwcx. %0,0,%3 \n" \
" bne- 1b\n" \
: "=&r" (t), "+m" (v->counter) \
- : "r" (a), "r" (&v->counter) \
- : "cc"); \
+ : "r"#sign (a), "r" (&v->counter) \
+ : "cc", ##__VA_ARGS__); \
} \
-#define ATOMIC_OP_RETURN_RELAXED(op, asm_op) \
-static inline int atomic_##op##_return_relaxed(int a, atomic_t *v) \
+#define ATOMIC_OP_RETURN_RELAXED(op, asm_op, suffix, sign, ...) \
+static inline int arch_atomic_##op##_return_relaxed(int a, atomic_t *v) \
{ \
int t; \
\
__asm__ __volatile__( \
"1: lwarx %0,0,%3 # atomic_" #op "_return_relaxed\n" \
- #asm_op " %0,%2,%0\n" \
- PPC405_ERR77(0, %3) \
+ #asm_op "%I2" suffix " %0,%0,%2\n" \
" stwcx. %0,0,%3\n" \
" bne- 1b\n" \
: "=&r" (t), "+m" (v->counter) \
- : "r" (a), "r" (&v->counter) \
- : "cc"); \
+ : "r"#sign (a), "r" (&v->counter) \
+ : "cc", ##__VA_ARGS__); \
\
return t; \
}
-#define ATOMIC_FETCH_OP_RELAXED(op, asm_op) \
-static inline int atomic_fetch_##op##_relaxed(int a, atomic_t *v) \
+#define ATOMIC_FETCH_OP_RELAXED(op, asm_op, suffix, sign, ...) \
+static inline int arch_atomic_fetch_##op##_relaxed(int a, atomic_t *v) \
{ \
int res, t; \
\
__asm__ __volatile__( \
"1: lwarx %0,0,%4 # atomic_fetch_" #op "_relaxed\n" \
- #asm_op " %1,%3,%0\n" \
- PPC405_ERR77(0, %4) \
+ #asm_op "%I3" suffix " %1,%0,%3\n" \
" stwcx. %1,0,%4\n" \
" bne- 1b\n" \
: "=&r" (res), "=&r" (t), "+m" (v->counter) \
- : "r" (a), "r" (&v->counter) \
- : "cc"); \
+ : "r"#sign (a), "r" (&v->counter) \
+ : "cc", ##__VA_ARGS__); \
\
return res; \
}
-#define ATOMIC_OPS(op, asm_op) \
- ATOMIC_OP(op, asm_op) \
- ATOMIC_OP_RETURN_RELAXED(op, asm_op) \
- ATOMIC_FETCH_OP_RELAXED(op, asm_op)
+#define ATOMIC_OPS(op, asm_op, suffix, sign, ...) \
+ ATOMIC_OP(op, asm_op, suffix, sign, ##__VA_ARGS__) \
+ ATOMIC_OP_RETURN_RELAXED(op, asm_op, suffix, sign, ##__VA_ARGS__)\
+ ATOMIC_FETCH_OP_RELAXED(op, asm_op, suffix, sign, ##__VA_ARGS__)
-ATOMIC_OPS(add, add)
-ATOMIC_OPS(sub, subf)
+ATOMIC_OPS(add, add, "c", I, "xer")
+ATOMIC_OPS(sub, sub, "c", I, "xer")
-#define atomic_add_return_relaxed atomic_add_return_relaxed
-#define atomic_sub_return_relaxed atomic_sub_return_relaxed
+#define arch_atomic_add_return_relaxed arch_atomic_add_return_relaxed
+#define arch_atomic_sub_return_relaxed arch_atomic_sub_return_relaxed
-#define atomic_fetch_add_relaxed atomic_fetch_add_relaxed
-#define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed
+#define arch_atomic_fetch_add_relaxed arch_atomic_fetch_add_relaxed
+#define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub_relaxed
#undef ATOMIC_OPS
-#define ATOMIC_OPS(op, asm_op) \
- ATOMIC_OP(op, asm_op) \
- ATOMIC_FETCH_OP_RELAXED(op, asm_op)
+#define ATOMIC_OPS(op, asm_op, suffix, sign) \
+ ATOMIC_OP(op, asm_op, suffix, sign) \
+ ATOMIC_FETCH_OP_RELAXED(op, asm_op, suffix, sign)
-ATOMIC_OPS(and, and)
-ATOMIC_OPS(or, or)
-ATOMIC_OPS(xor, xor)
+ATOMIC_OPS(and, and, ".", K)
+ATOMIC_OPS(or, or, "", K)
+ATOMIC_OPS(xor, xor, "", K)
-#define atomic_fetch_and_relaxed atomic_fetch_and_relaxed
-#define atomic_fetch_or_relaxed atomic_fetch_or_relaxed
-#define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed
+#define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and_relaxed
+#define arch_atomic_fetch_or_relaxed arch_atomic_fetch_or_relaxed
+#define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor_relaxed
#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP_RELAXED
#undef ATOMIC_OP_RETURN_RELAXED
#undef ATOMIC_OP
-static __inline__ void atomic_inc(atomic_t *v)
-{
- int t;
-
- __asm__ __volatile__(
-"1: lwarx %0,0,%2 # atomic_inc\n\
- addic %0,%0,1\n"
- PPC405_ERR77(0,%2)
-" stwcx. %0,0,%2 \n\
- bne- 1b"
- : "=&r" (t), "+m" (v->counter)
- : "r" (&v->counter)
- : "cc", "xer");
-}
-#define atomic_inc atomic_inc
+#define arch_atomic_cmpxchg(v, o, n) \
+ (arch_cmpxchg(&((v)->counter), (o), (n)))
+#define arch_atomic_cmpxchg_relaxed(v, o, n) \
+ arch_cmpxchg_relaxed(&((v)->counter), (o), (n))
+#define arch_atomic_cmpxchg_acquire(v, o, n) \
+ arch_cmpxchg_acquire(&((v)->counter), (o), (n))
-static __inline__ int atomic_inc_return_relaxed(atomic_t *v)
-{
- int t;
-
- __asm__ __volatile__(
-"1: lwarx %0,0,%2 # atomic_inc_return_relaxed\n"
-" addic %0,%0,1\n"
- PPC405_ERR77(0, %2)
-" stwcx. %0,0,%2\n"
-" bne- 1b"
- : "=&r" (t), "+m" (v->counter)
- : "r" (&v->counter)
- : "cc", "xer");
-
- return t;
-}
-
-static __inline__ void atomic_dec(atomic_t *v)
-{
- int t;
-
- __asm__ __volatile__(
-"1: lwarx %0,0,%2 # atomic_dec\n\
- addic %0,%0,-1\n"
- PPC405_ERR77(0,%2)\
-" stwcx. %0,0,%2\n\
- bne- 1b"
- : "=&r" (t), "+m" (v->counter)
- : "r" (&v->counter)
- : "cc", "xer");
-}
-#define atomic_dec atomic_dec
+#define arch_atomic_xchg(v, new) \
+ (arch_xchg(&((v)->counter), new))
+#define arch_atomic_xchg_relaxed(v, new) \
+ arch_xchg_relaxed(&((v)->counter), (new))
-static __inline__ int atomic_dec_return_relaxed(atomic_t *v)
+/*
+ * Don't want to override the generic atomic_try_cmpxchg_acquire, because
+ * we add a lock hint to the lwarx, which may not be wanted for the
+ * _acquire case (and is not used by the other _acquire variants so it
+ * would be a surprise).
+ */
+static __always_inline bool
+arch_atomic_try_cmpxchg_lock(atomic_t *v, int *old, int new)
{
- int t;
-
- __asm__ __volatile__(
-"1: lwarx %0,0,%2 # atomic_dec_return_relaxed\n"
-" addic %0,%0,-1\n"
- PPC405_ERR77(0, %2)
-" stwcx. %0,0,%2\n"
-" bne- 1b"
- : "=&r" (t), "+m" (v->counter)
- : "r" (&v->counter)
- : "cc", "xer");
+ int r, o = *old;
+ unsigned int eh = IS_ENABLED(CONFIG_PPC64);
- return t;
+ __asm__ __volatile__ (
+"1: lwarx %0,0,%2,%[eh] # atomic_try_cmpxchg_acquire \n"
+" cmpw 0,%0,%3 \n"
+" bne- 2f \n"
+" stwcx. %4,0,%2 \n"
+" bne- 1b \n"
+"\t" PPC_ACQUIRE_BARRIER " \n"
+"2: \n"
+ : "=&r" (r), "+m" (v->counter)
+ : "r" (&v->counter), "r" (o), "r" (new), [eh] "n" (eh)
+ : "cr0", "memory");
+
+ if (unlikely(r != o))
+ *old = r;
+ return likely(r == o);
}
-#define atomic_inc_return_relaxed atomic_inc_return_relaxed
-#define atomic_dec_return_relaxed atomic_dec_return_relaxed
-
-#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
-#define atomic_cmpxchg_relaxed(v, o, n) \
- cmpxchg_relaxed(&((v)->counter), (o), (n))
-#define atomic_cmpxchg_acquire(v, o, n) \
- cmpxchg_acquire(&((v)->counter), (o), (n))
-
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
-#define atomic_xchg_relaxed(v, new) xchg_relaxed(&((v)->counter), (new))
-
/**
* atomic_fetch_add_unless - add unless the number is a given value
* @v: pointer of type atomic_t
@@ -210,7 +168,7 @@ static __inline__ int atomic_dec_return_relaxed(atomic_t *v)
* Atomically adds @a to @v, so long as it was not @u.
* Returns the old value of @v.
*/
-static __inline__ int atomic_fetch_add_unless(atomic_t *v, int a, int u)
+static __inline__ int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
int t;
@@ -219,58 +177,26 @@ static __inline__ int atomic_fetch_add_unless(atomic_t *v, int a, int u)
"1: lwarx %0,0,%1 # atomic_fetch_add_unless\n\
cmpw 0,%0,%3 \n\
beq 2f \n\
- add %0,%2,%0 \n"
- PPC405_ERR77(0,%2)
+ add%I2c %0,%0,%2 \n"
" stwcx. %0,0,%1 \n\
bne- 1b \n"
PPC_ATOMIC_EXIT_BARRIER
-" subf %0,%2,%0 \n\
+" sub%I2c %0,%0,%2 \n\
2:"
: "=&r" (t)
- : "r" (&v->counter), "r" (a), "r" (u)
- : "cc", "memory");
+ : "r" (&v->counter), "rI" (a), "r" (u)
+ : "cc", "memory", "xer");
return t;
}
-#define atomic_fetch_add_unless atomic_fetch_add_unless
-
-/**
- * atomic_inc_not_zero - increment unless the number is zero
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1, so long as @v is non-zero.
- * Returns non-zero if @v was non-zero, and zero otherwise.
- */
-static __inline__ int atomic_inc_not_zero(atomic_t *v)
-{
- int t1, t2;
-
- __asm__ __volatile__ (
- PPC_ATOMIC_ENTRY_BARRIER
-"1: lwarx %0,0,%2 # atomic_inc_not_zero\n\
- cmpwi 0,%0,0\n\
- beq- 2f\n\
- addic %1,%0,1\n"
- PPC405_ERR77(0,%2)
-" stwcx. %1,0,%2\n\
- bne- 1b\n"
- PPC_ATOMIC_EXIT_BARRIER
- "\n\
-2:"
- : "=&r" (t1), "=&r" (t2)
- : "r" (&v->counter)
- : "cc", "xer", "memory");
-
- return t1;
-}
-#define atomic_inc_not_zero(v) atomic_inc_not_zero((v))
+#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
/*
* Atomically test *v and decrement if it is greater than 0.
* The function returns the old value of *v minus 1, even if
* the atomic variable, v, was not decremented.
*/
-static __inline__ int atomic_dec_if_positive(atomic_t *v)
+static __inline__ int arch_atomic_dec_if_positive(atomic_t *v)
{
int t;
@@ -280,7 +206,6 @@ static __inline__ int atomic_dec_if_positive(atomic_t *v)
cmpwi %0,1\n\
addi %0,%0,-1\n\
blt- 2f\n"
- PPC405_ERR77(0,%1)
" stwcx. %0,0,%1\n\
bne- 1b"
PPC_ATOMIC_EXIT_BARRIER
@@ -291,28 +216,28 @@ static __inline__ int atomic_dec_if_positive(atomic_t *v)
return t;
}
-#define atomic_dec_if_positive atomic_dec_if_positive
+#define arch_atomic_dec_if_positive arch_atomic_dec_if_positive
#ifdef __powerpc64__
#define ATOMIC64_INIT(i) { (i) }
-static __inline__ s64 atomic64_read(const atomic64_t *v)
+static __inline__ s64 arch_atomic64_read(const atomic64_t *v)
{
s64 t;
- __asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));
+ __asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m<>"(v->counter));
return t;
}
-static __inline__ void atomic64_set(atomic64_t *v, s64 i)
+static __inline__ void arch_atomic64_set(atomic64_t *v, s64 i)
{
- __asm__ __volatile__("std%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
+ __asm__ __volatile__("std%U0%X0 %1,%0" : "=m<>"(v->counter) : "r"(i));
}
#define ATOMIC64_OP(op, asm_op) \
-static __inline__ void atomic64_##op(s64 a, atomic64_t *v) \
+static __inline__ void arch_atomic64_##op(s64 a, atomic64_t *v) \
{ \
s64 t; \
\
@@ -328,7 +253,7 @@ static __inline__ void atomic64_##op(s64 a, atomic64_t *v) \
#define ATOMIC64_OP_RETURN_RELAXED(op, asm_op) \
static inline s64 \
-atomic64_##op##_return_relaxed(s64 a, atomic64_t *v) \
+arch_atomic64_##op##_return_relaxed(s64 a, atomic64_t *v) \
{ \
s64 t; \
\
@@ -346,7 +271,7 @@ atomic64_##op##_return_relaxed(s64 a, atomic64_t *v) \
#define ATOMIC64_FETCH_OP_RELAXED(op, asm_op) \
static inline s64 \
-atomic64_fetch_##op##_relaxed(s64 a, atomic64_t *v) \
+arch_atomic64_fetch_##op##_relaxed(s64 a, atomic64_t *v) \
{ \
s64 res, t; \
\
@@ -370,11 +295,11 @@ atomic64_fetch_##op##_relaxed(s64 a, atomic64_t *v) \
ATOMIC64_OPS(add, add)
ATOMIC64_OPS(sub, subf)
-#define atomic64_add_return_relaxed atomic64_add_return_relaxed
-#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed
+#define arch_atomic64_add_return_relaxed arch_atomic64_add_return_relaxed
+#define arch_atomic64_sub_return_relaxed arch_atomic64_sub_return_relaxed
-#define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
-#define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed
+#define arch_atomic64_fetch_add_relaxed arch_atomic64_fetch_add_relaxed
+#define arch_atomic64_fetch_sub_relaxed arch_atomic64_fetch_sub_relaxed
#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, asm_op) \
@@ -385,16 +310,16 @@ ATOMIC64_OPS(and, and)
ATOMIC64_OPS(or, or)
ATOMIC64_OPS(xor, xor)
-#define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed
-#define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed
-#define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed
+#define arch_atomic64_fetch_and_relaxed arch_atomic64_fetch_and_relaxed
+#define arch_atomic64_fetch_or_relaxed arch_atomic64_fetch_or_relaxed
+#define arch_atomic64_fetch_xor_relaxed arch_atomic64_fetch_xor_relaxed
#undef ATOPIC64_OPS
#undef ATOMIC64_FETCH_OP_RELAXED
#undef ATOMIC64_OP_RETURN_RELAXED
#undef ATOMIC64_OP
-static __inline__ void atomic64_inc(atomic64_t *v)
+static __inline__ void arch_atomic64_inc(atomic64_t *v)
{
s64 t;
@@ -407,9 +332,9 @@ static __inline__ void atomic64_inc(atomic64_t *v)
: "r" (&v->counter)
: "cc", "xer");
}
-#define atomic64_inc atomic64_inc
+#define arch_atomic64_inc arch_atomic64_inc
-static __inline__ s64 atomic64_inc_return_relaxed(atomic64_t *v)
+static __inline__ s64 arch_atomic64_inc_return_relaxed(atomic64_t *v)
{
s64 t;
@@ -425,7 +350,7 @@ static __inline__ s64 atomic64_inc_return_relaxed(atomic64_t *v)
return t;
}
-static __inline__ void atomic64_dec(atomic64_t *v)
+static __inline__ void arch_atomic64_dec(atomic64_t *v)
{
s64 t;
@@ -438,9 +363,9 @@ static __inline__ void atomic64_dec(atomic64_t *v)
: "r" (&v->counter)
: "cc", "xer");
}
-#define atomic64_dec atomic64_dec
+#define arch_atomic64_dec arch_atomic64_dec
-static __inline__ s64 atomic64_dec_return_relaxed(atomic64_t *v)
+static __inline__ s64 arch_atomic64_dec_return_relaxed(atomic64_t *v)
{
s64 t;
@@ -456,14 +381,14 @@ static __inline__ s64 atomic64_dec_return_relaxed(atomic64_t *v)
return t;
}
-#define atomic64_inc_return_relaxed atomic64_inc_return_relaxed
-#define atomic64_dec_return_relaxed atomic64_dec_return_relaxed
+#define arch_atomic64_inc_return_relaxed arch_atomic64_inc_return_relaxed
+#define arch_atomic64_dec_return_relaxed arch_atomic64_dec_return_relaxed
/*
* Atomically test *v and decrement if it is greater than 0.
* The function returns the old value of *v minus 1.
*/
-static __inline__ s64 atomic64_dec_if_positive(atomic64_t *v)
+static __inline__ s64 arch_atomic64_dec_if_positive(atomic64_t *v)
{
s64 t;
@@ -482,16 +407,19 @@ static __inline__ s64 atomic64_dec_if_positive(atomic64_t *v)
return t;
}
-#define atomic64_dec_if_positive atomic64_dec_if_positive
+#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
-#define atomic64_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
-#define atomic64_cmpxchg_relaxed(v, o, n) \
- cmpxchg_relaxed(&((v)->counter), (o), (n))
-#define atomic64_cmpxchg_acquire(v, o, n) \
- cmpxchg_acquire(&((v)->counter), (o), (n))
+#define arch_atomic64_cmpxchg(v, o, n) \
+ (arch_cmpxchg(&((v)->counter), (o), (n)))
+#define arch_atomic64_cmpxchg_relaxed(v, o, n) \
+ arch_cmpxchg_relaxed(&((v)->counter), (o), (n))
+#define arch_atomic64_cmpxchg_acquire(v, o, n) \
+ arch_cmpxchg_acquire(&((v)->counter), (o), (n))
-#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
-#define atomic64_xchg_relaxed(v, new) xchg_relaxed(&((v)->counter), (new))
+#define arch_atomic64_xchg(v, new) \
+ (arch_xchg(&((v)->counter), new))
+#define arch_atomic64_xchg_relaxed(v, new) \
+ arch_xchg_relaxed(&((v)->counter), (new))
/**
* atomic64_fetch_add_unless - add unless the number is a given value
@@ -502,7 +430,7 @@ static __inline__ s64 atomic64_dec_if_positive(atomic64_t *v)
* Atomically adds @a to @v, so long as it was not @u.
* Returns the old value of @v.
*/
-static __inline__ s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
+static __inline__ s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
s64 t;
@@ -523,7 +451,7 @@ static __inline__ s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
return t;
}
-#define atomic64_fetch_add_unless atomic64_fetch_add_unless
+#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless
/**
* atomic64_inc_not_zero - increment unless the number is zero
@@ -532,7 +460,7 @@ static __inline__ s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
* Atomically increments @v by 1, so long as @v is non-zero.
* Returns non-zero if @v was non-zero, and zero otherwise.
*/
-static __inline__ int atomic64_inc_not_zero(atomic64_t *v)
+static __inline__ int arch_atomic64_inc_not_zero(atomic64_t *v)
{
s64 t1, t2;
@@ -553,7 +481,7 @@ static __inline__ int atomic64_inc_not_zero(atomic64_t *v)
return t1 != 0;
}
-#define atomic64_inc_not_zero(v) atomic64_inc_not_zero((v))
+#define arch_atomic64_inc_not_zero(v) arch_atomic64_inc_not_zero((v))
#endif /* __powerpc64__ */
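
arch_atomic_try_cmpxchg_lock() exists for the lock-acquisition fast path, where the extra lwarx operand (EH=1 on 64-bit) hints that the reservation is being taken to acquire a lock. A rough sketch of the intended use, qspinlock-style (the lock structure and slow-path names here are illustrative, not taken from this patch):

	int old = 0;

	/* try to move the lock word from 0 to 1 with acquire semantics */
	if (likely(arch_atomic_try_cmpxchg_lock(&lock->val, &old, 1)))
		return;				/* uncontended acquire            */

	queued_spin_lock_slowpath(lock, old);	/* 'old' holds the observed value */
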
diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h
index 123adcefd40f..e80b2c0e9315 100644
--- a/arch/powerpc/include/asm/barrier.h
+++ b/arch/powerpc/include/asm/barrier.h
@@ -7,6 +7,10 @@
#include <asm/asm-const.h>
+#ifndef __ASSEMBLY__
+#include <asm/ppc-opcode.h>
+#endif
+
/*
* Memory barrier.
* The sync instruction guarantees that all memory accesses initiated
@@ -36,12 +40,16 @@
#define wmb() __asm__ __volatile__ ("sync" : : : "memory")
/* The sub-arch has lwsync */
-#if defined(__powerpc64__) || defined(CONFIG_PPC_E500MC)
+#if defined(CONFIG_PPC64) || defined(CONFIG_PPC_E500MC)
# define SMPWMB LWSYNC
+#elif defined(CONFIG_BOOKE)
+# define SMPWMB mbar
#else
# define SMPWMB eieio
#endif
+/* clang defines this macro for a builtin, which will not work with runtime patching */
+#undef __lwsync
#define __lwsync() __asm__ __volatile__ (stringify_in_c(LWSYNC) : : :"memory")
#define dma_rmb() __lwsync()
#define dma_wmb() __asm__ __volatile__ (stringify_in_c(SMPWMB) : : :"memory")
@@ -78,7 +86,7 @@ do { \
#ifdef CONFIG_PPC_BOOK3S_64
#define NOSPEC_BARRIER_SLOT nop
-#elif defined(CONFIG_PPC_FSL_BOOK3E)
+#elif defined(CONFIG_PPC_E500)
#define NOSPEC_BARRIER_SLOT nop; nop
#endif
@@ -97,6 +105,15 @@ do { \
#define barrier_nospec()
#endif /* CONFIG_PPC_BARRIER_NOSPEC */
+/*
+ * pmem_wmb() ensures that all stores for which the modification
+ * is written to persistent storage by preceding dcbfps/dcbstps
+ * instructions have updated persistent storage before any data
+ * access or data transfer caused by subsequent instructions is
+ * initiated.
+ */
+#define pmem_wmb() __asm__ __volatile__(PPC_PHWSYNC ::: "memory")
+
#include <asm-generic/barrier.h>
#endif /* _ASM_POWERPC_BARRIER_H */
diff --git a/arch/powerpc/include/asm/bitops.h b/arch/powerpc/include/asm/bitops.h
index 28dcf8222943..7e0f0322912b 100644
--- a/arch/powerpc/include/asm/bitops.h
+++ b/arch/powerpc/include/asm/bitops.h
@@ -41,7 +41,6 @@
#include <linux/compiler.h>
#include <asm/asm-compat.h>
#include <asm/synch.h>
-#include <asm/asm-405.h>
/* PPC bit number conversion */
#define PPC_BITLSHIFT(be) (BITS_PER_LONG - 1 - (be))
@@ -71,21 +70,62 @@ static inline void fn(unsigned long mask, \
unsigned long *p = (unsigned long *)_p; \
__asm__ __volatile__ ( \
prefix \
-"1:" PPC_LLARX(%0,0,%3,0) "\n" \
- stringify_in_c(op) "%0,%0,%2\n" \
- PPC405_ERR77(0,%3) \
+"1:" PPC_LLARX "%0,0,%3,0\n" \
+ #op "%I2 %0,%0,%2\n" \
PPC_STLCX "%0,0,%3\n" \
"bne- 1b\n" \
: "=&r" (old), "+m" (*p) \
- : "r" (mask), "r" (p) \
+ : "rK" (mask), "r" (p) \
: "cc", "memory"); \
}
DEFINE_BITOP(set_bits, or, "")
-DEFINE_BITOP(clear_bits, andc, "")
-DEFINE_BITOP(clear_bits_unlock, andc, PPC_RELEASE_BARRIER)
DEFINE_BITOP(change_bits, xor, "")
+static __always_inline bool is_rlwinm_mask_valid(unsigned long x)
+{
+ if (!x)
+ return false;
+ if (x & 1)
+ x = ~x; // make the mask non-wrapping
+ x += x & -x; // adding the low set bit results in at most one bit set
+
+ return !(x & (x - 1));
+}
+
+#define DEFINE_CLROP(fn, prefix) \
+static inline void fn(unsigned long mask, volatile unsigned long *_p) \
+{ \
+ unsigned long old; \
+ unsigned long *p = (unsigned long *)_p; \
+ \
+ if (IS_ENABLED(CONFIG_PPC32) && \
+ __builtin_constant_p(mask) && is_rlwinm_mask_valid(~mask)) {\
+ asm volatile ( \
+ prefix \
+ "1:" "lwarx %0,0,%3\n" \
+ "rlwinm %0,%0,0,%2\n" \
+ "stwcx. %0,0,%3\n" \
+ "bne- 1b\n" \
+ : "=&r" (old), "+m" (*p) \
+ : "n" (~mask), "r" (p) \
+ : "cc", "memory"); \
+ } else { \
+ asm volatile ( \
+ prefix \
+ "1:" PPC_LLARX "%0,0,%3,0\n" \
+ "andc %0,%0,%2\n" \
+ PPC_STLCX "%0,0,%3\n" \
+ "bne- 1b\n" \
+ : "=&r" (old), "+m" (*p) \
+ : "r" (mask), "r" (p) \
+ : "cc", "memory"); \
+ } \
+}
+
+DEFINE_CLROP(clear_bits, "")
+DEFINE_CLROP(clear_bits_unlock, PPC_RELEASE_BARRIER)
+
static inline void arch_set_bit(int nr, volatile unsigned long *addr)
{
set_bits(BIT_MASK(nr), addr + BIT_WORD(nr));
@@ -117,14 +157,13 @@ static inline unsigned long fn( \
unsigned long *p = (unsigned long *)_p; \
__asm__ __volatile__ ( \
prefix \
-"1:" PPC_LLARX(%0,0,%3,eh) "\n" \
- stringify_in_c(op) "%1,%0,%2\n" \
- PPC405_ERR77(0,%3) \
+"1:" PPC_LLARX "%0,0,%3,%4\n" \
+ #op "%I2 %1,%0,%2\n" \
PPC_STLCX "%1,0,%3\n" \
"bne- 1b\n" \
postfix \
: "=&r" (old), "=&r" (t) \
- : "r" (mask), "r" (p) \
+ : "rK" (mask), "r" (p), "n" (eh) \
: "cc", "memory"); \
return (old & mask); \
}
@@ -132,12 +171,43 @@ static inline unsigned long fn( \
DEFINE_TESTOP(test_and_set_bits, or, PPC_ATOMIC_ENTRY_BARRIER,
PPC_ATOMIC_EXIT_BARRIER, 0)
DEFINE_TESTOP(test_and_set_bits_lock, or, "",
- PPC_ACQUIRE_BARRIER, 1)
-DEFINE_TESTOP(test_and_clear_bits, andc, PPC_ATOMIC_ENTRY_BARRIER,
- PPC_ATOMIC_EXIT_BARRIER, 0)
+ PPC_ACQUIRE_BARRIER, IS_ENABLED(CONFIG_PPC64))
DEFINE_TESTOP(test_and_change_bits, xor, PPC_ATOMIC_ENTRY_BARRIER,
PPC_ATOMIC_EXIT_BARRIER, 0)
+static inline unsigned long test_and_clear_bits(unsigned long mask, volatile unsigned long *_p)
+{
+ unsigned long old, t;
+ unsigned long *p = (unsigned long *)_p;
+
+ if (IS_ENABLED(CONFIG_PPC32) &&
+ __builtin_constant_p(mask) && is_rlwinm_mask_valid(~mask)) {
+ asm volatile (
+ PPC_ATOMIC_ENTRY_BARRIER
+ "1:" "lwarx %0,0,%3\n"
+ "rlwinm %1,%0,0,%2\n"
+ "stwcx. %1,0,%3\n"
+ "bne- 1b\n"
+ PPC_ATOMIC_EXIT_BARRIER
+ : "=&r" (old), "=&r" (t)
+ : "n" (~mask), "r" (p)
+ : "cc", "memory");
+ } else {
+ asm volatile (
+ PPC_ATOMIC_ENTRY_BARRIER
+ "1:" PPC_LLARX "%0,0,%3,0\n"
+ "andc %1,%0,%2\n"
+ PPC_STLCX "%1,0,%3\n"
+ "bne- 1b\n"
+ PPC_ATOMIC_EXIT_BARRIER
+ : "=&r" (old), "=&r" (t)
+ : "r" (mask), "r" (p)
+ : "cc", "memory");
+ }
+
+ return (old & mask);
+}
+
static inline int arch_test_and_set_bit(unsigned long nr,
volatile unsigned long *addr)
{
@@ -173,9 +243,8 @@ clear_bit_unlock_return_word(int nr, volatile unsigned long *addr)
__asm__ __volatile__ (
PPC_RELEASE_BARRIER
-"1:" PPC_LLARX(%0,0,%3,0) "\n"
+"1:" PPC_LLARX "%0,0,%3,0\n"
"andc %1,%0,%2\n"
- PPC405_ERR77(0,%3)
PPC_STLCX "%1,0,%3\n"
"bne- 1b\n"
: "=&r" (old), "=&r" (t)
@@ -218,17 +287,36 @@ static inline void arch___clear_bit_unlock(int nr, volatile unsigned long *addr)
* fls: find last (most-significant) bit set.
* Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
*/
-static inline int fls(unsigned int x)
+static __always_inline int fls(unsigned int x)
{
- return 32 - __builtin_clz(x);
+ int lz;
+
+ if (__builtin_constant_p(x))
+ return x ? 32 - __builtin_clz(x) : 0;
+ asm("cntlzw %0,%1" : "=r" (lz) : "r" (x));
+ return 32 - lz;
}
#include <asm-generic/bitops/builtin-__fls.h>
-static inline int fls64(__u64 x)
+/*
+ * 64-bit can do this using one cntlzd (count leading zeroes doubleword)
+ * instruction; for 32-bit we use the generic version, which does two
+ * 32-bit fls calls.
+ */
+#ifdef CONFIG_PPC64
+static __always_inline int fls64(__u64 x)
{
- return 64 - __builtin_clzll(x);
+ int lz;
+
+ if (__builtin_constant_p(x))
+ return x ? 64 - __builtin_clzll(x) : 0;
+ asm("cntlzd %0,%1" : "=r" (lz) : "r" (x));
+ return 64 - lz;
}
+#else
+#include <asm-generic/bitops/fls64.h>
+#endif
#ifdef CONFIG_PPC64
unsigned int __arch_hweight8(unsigned int w);
@@ -240,8 +328,6 @@ unsigned long __arch_hweight64(__u64 w);
#include <asm-generic/bitops/hweight.h>
#endif
-#include <asm-generic/bitops/find.h>
-
/* wrappers that deal with KASAN instrumentation */
#include <asm-generic/bitops/instrumented-atomic.h>
#include <asm-generic/bitops/instrumented-lock.h>
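
The rlwinm variant is only safe when ~mask is a single contiguous run of ones (possibly wrapping through bit 0), because that is the only shape an rlwinm mask can take; is_rlwinm_mask_valid() encodes exactly that test. Two worked cases, evaluated by hand as a sketch:

	/* clear_bits(0x000000f0, p): check ~mask = 0xffffff0f (a wrapping run of ones)     */
	/*   x & 1 == 1  ->  x = ~x       = 0x000000f0   (now a plain run)                  */
	/*   x += x & -x ->  0xf0 + 0x10  = 0x00000100   (a single bit left)                 */
	/*   !(x & (x - 1)) is true       ->  the rlwinm/stwcx. path can be used             */

	/* clear_bits(0x00000090, p): ~mask = 0xffffff6f has a hole; the same steps end with */
	/* x = 0xa0, and 0xa0 & 0x9f != 0 ->  false, so the generic andc loop is used        */
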
diff --git a/arch/powerpc/include/asm/book3s/32/hash.h b/arch/powerpc/include/asm/book3s/32/hash.h
deleted file mode 100644
index 2a0a467d2985..000000000000
--- a/arch/powerpc/include/asm/book3s/32/hash.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_POWERPC_BOOK3S_32_HASH_H
-#define _ASM_POWERPC_BOOK3S_32_HASH_H
-#ifdef __KERNEL__
-
-/*
- * The "classic" 32-bit implementation of the PowerPC MMU uses a hash
- * table containing PTEs, together with a set of 16 segment registers,
- * to define the virtual to physical address mapping.
- *
- * We use the hash table as an extended TLB, i.e. a cache of currently
- * active mappings. We maintain a two-level page table tree, much
- * like that used by the i386, for the sake of the Linux memory
- * management code. Low-level assembler code in hash_low_32.S
- * (procedure hash_page) is responsible for extracting ptes from the
- * tree and putting them into the hash table when necessary, and
- * updating the accessed and modified bits in the page table tree.
- */
-
-#define _PAGE_PRESENT 0x001 /* software: pte contains a translation */
-#define _PAGE_HASHPTE 0x002 /* hash_page has made an HPTE for this pte */
-#define _PAGE_USER 0x004 /* usermode access allowed */
-#define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
-#define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
-#define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
-#define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
-#define _PAGE_DIRTY 0x080 /* C: page changed */
-#define _PAGE_ACCESSED 0x100 /* R: page referenced */
-#define _PAGE_EXEC 0x200 /* software: exec allowed */
-#define _PAGE_RW 0x400 /* software: user write access allowed */
-#define _PAGE_SPECIAL 0x800 /* software: Special page */
-
-#ifdef CONFIG_PTE_64BIT
-/* We never clear the high word of the pte */
-#define _PTE_NONE_MASK (0xffffffff00000000ULL | _PAGE_HASHPTE)
-#else
-#define _PTE_NONE_MASK _PAGE_HASHPTE
-#endif
-
-#define _PMD_PRESENT 0
-#define _PMD_PRESENT_MASK (PAGE_MASK)
-#define _PMD_BAD (~PAGE_MASK)
-
-#endif /* __KERNEL__ */
-#endif /* _ASM_POWERPC_BOOK3S_32_HASH_H */
diff --git a/arch/powerpc/include/asm/book3s/32/kup.h b/arch/powerpc/include/asm/book3s/32/kup.h
index 3c0ba22dc360..678f9c9d89b6 100644
--- a/arch/powerpc/include/asm/book3s/32/kup.h
+++ b/arch/powerpc/include/asm/book3s/32/kup.h
@@ -2,191 +2,185 @@
#ifndef _ASM_POWERPC_BOOK3S_32_KUP_H
#define _ASM_POWERPC_BOOK3S_32_KUP_H
+#include <asm/bug.h>
#include <asm/book3s/32/mmu-hash.h>
+#include <asm/mmu.h>
+#include <asm/synch.h>
-#ifdef __ASSEMBLY__
-
-.macro kuep_update_sr gpr1, gpr2 /* NEVER use r0 as gpr2 due to addis */
-101: mtsrin \gpr1, \gpr2
- addi \gpr1, \gpr1, 0x111 /* next VSID */
- rlwinm \gpr1, \gpr1, 0, 0xf0ffffff /* clear VSID overflow */
- addis \gpr2, \gpr2, 0x1000 /* address of next segment */
- bdnz 101b
- isync
-.endm
-
-.macro kuep_lock gpr1, gpr2
-#ifdef CONFIG_PPC_KUEP
- li \gpr1, NUM_USER_SEGMENTS
- li \gpr2, 0
- mtctr \gpr1
- mfsrin \gpr1, \gpr2
- oris \gpr1, \gpr1, SR_NX@h /* set Nx */
- kuep_update_sr \gpr1, \gpr2
-#endif
-.endm
-
-.macro kuep_unlock gpr1, gpr2
-#ifdef CONFIG_PPC_KUEP
- li \gpr1, NUM_USER_SEGMENTS
- li \gpr2, 0
- mtctr \gpr1
- mfsrin \gpr1, \gpr2
- rlwinm \gpr1, \gpr1, 0, ~SR_NX /* Clear Nx */
- kuep_update_sr \gpr1, \gpr2
-#endif
-.endm
+#ifndef __ASSEMBLY__
-#ifdef CONFIG_PPC_KUAP
-
-.macro kuap_update_sr gpr1, gpr2, gpr3 /* NEVER use r0 as gpr2 due to addis */
-101: mtsrin \gpr1, \gpr2
- addi \gpr1, \gpr1, 0x111 /* next VSID */
- rlwinm \gpr1, \gpr1, 0, 0xf0ffffff /* clear VSID overflow */
- addis \gpr2, \gpr2, 0x1000 /* address of next segment */
- cmplw \gpr2, \gpr3
- blt- 101b
- isync
-.endm
-
-.macro kuap_save_and_lock sp, thread, gpr1, gpr2, gpr3
- lwz \gpr2, KUAP(\thread)
- rlwinm. \gpr3, \gpr2, 28, 0xf0000000
- stw \gpr2, STACK_REGS_KUAP(\sp)
- beq+ 102f
- li \gpr1, 0
- stw \gpr1, KUAP(\thread)
- mfsrin \gpr1, \gpr2
- oris \gpr1, \gpr1, SR_KS@h /* set Ks */
- kuap_update_sr \gpr1, \gpr2, \gpr3
-102:
-.endm
-
-.macro kuap_restore sp, current, gpr1, gpr2, gpr3
- lwz \gpr2, STACK_REGS_KUAP(\sp)
- rlwinm. \gpr3, \gpr2, 28, 0xf0000000
- stw \gpr2, THREAD + KUAP(\current)
- beq+ 102f
- mfsrin \gpr1, \gpr2
- rlwinm \gpr1, \gpr1, 0, ~SR_KS /* Clear Ks */
- kuap_update_sr \gpr1, \gpr2, \gpr3
-102:
-.endm
-
-.macro kuap_check current, gpr
-#ifdef CONFIG_PPC_KUAP_DEBUG
- lwz \gpr2, KUAP(thread)
-999: twnei \gpr, 0
- EMIT_BUG_ENTRY 999b, __FILE__, __LINE__, (BUGFLAG_WARNING | BUGFLAG_ONCE)
-#endif
-.endm
+#include <linux/jump_label.h>
-#endif /* CONFIG_PPC_KUAP */
+extern struct static_key_false disable_kuap_key;
-#else /* !__ASSEMBLY__ */
+static __always_inline bool kuep_is_disabled(void)
+{
+ return !IS_ENABLED(CONFIG_PPC_KUEP);
+}
#ifdef CONFIG_PPC_KUAP
#include <linux/sched.h>
-static inline void kuap_update_sr(u32 sr, u32 addr, u32 end)
+#define KUAP_NONE (~0UL)
+#define KUAP_ALL (~1UL)
+
+static __always_inline bool kuap_is_disabled(void)
{
- addr &= 0xf0000000; /* align addr to start of segment */
- barrier(); /* make sure thread.kuap is updated before playing with SRs */
- while (addr < end) {
- mtsrin(sr, addr);
- sr += 0x111; /* next VSID */
- sr &= 0xf0ffffff; /* clear VSID overflow */
- addr += 0x10000000; /* address of next segment */
- }
- isync(); /* Context sync required after mtsrin() */
+ return static_branch_unlikely(&disable_kuap_key);
}
-static __always_inline void allow_user_access(void __user *to, const void __user *from,
- u32 size, unsigned long dir)
+static inline void kuap_lock_one(unsigned long addr)
{
- u32 addr, end;
+ mtsr(mfsr(addr) | SR_KS, addr);
+ isync(); /* Context sync required after mtsr() */
+}
- BUILD_BUG_ON(!__builtin_constant_p(dir));
- BUILD_BUG_ON(dir == KUAP_CURRENT);
+static inline void kuap_unlock_one(unsigned long addr)
+{
+ mtsr(mfsr(addr) & ~SR_KS, addr);
+ isync(); /* Context sync required after mtsr() */
+}
- if (!(dir & KUAP_WRITE))
+static inline void kuap_lock_all(void)
+{
+ update_user_segments(mfsr(0) | SR_KS);
+ isync(); /* Context sync required after mtsr() */
+}
+
+static inline void kuap_unlock_all(void)
+{
+ update_user_segments(mfsr(0) & ~SR_KS);
+ isync(); /* Context sync required after mtsr() */
+}
+
+void kuap_lock_all_ool(void);
+void kuap_unlock_all_ool(void);
+
+static inline void kuap_lock_addr(unsigned long addr, bool ool)
+{
+ if (likely(addr != KUAP_ALL))
+ kuap_lock_one(addr);
+ else if (!ool)
+ kuap_lock_all();
+ else
+ kuap_lock_all_ool();
+}
+
+static inline void kuap_unlock(unsigned long addr, bool ool)
+{
+ if (likely(addr != KUAP_ALL))
+ kuap_unlock_one(addr);
+ else if (!ool)
+ kuap_unlock_all();
+ else
+ kuap_unlock_all_ool();
+}
+
+static inline void __kuap_lock(void)
+{
+}
+
+static inline void __kuap_save_and_lock(struct pt_regs *regs)
+{
+ unsigned long kuap = current->thread.kuap;
+
+ regs->kuap = kuap;
+ if (unlikely(kuap == KUAP_NONE))
return;
- addr = (__force u32)to;
+ current->thread.kuap = KUAP_NONE;
+ kuap_lock_addr(kuap, false);
+}
+
+static inline void kuap_user_restore(struct pt_regs *regs)
+{
+}
+
+static inline void __kuap_kernel_restore(struct pt_regs *regs, unsigned long kuap)
+{
+ if (unlikely(kuap != KUAP_NONE)) {
+ current->thread.kuap = KUAP_NONE;
+ kuap_lock_addr(kuap, false);
+ }
- if (unlikely(addr >= TASK_SIZE || !size))
+ if (likely(regs->kuap == KUAP_NONE))
return;
- end = min(addr + size, TASK_SIZE);
+ current->thread.kuap = regs->kuap;
- current->thread.kuap = (addr & 0xf0000000) | ((((end - 1) >> 28) + 1) & 0xf);
- kuap_update_sr(mfsrin(addr) & ~SR_KS, addr, end); /* Clear Ks */
+ kuap_unlock(regs->kuap, false);
}
-static __always_inline void prevent_user_access(void __user *to, const void __user *from,
- u32 size, unsigned long dir)
+static inline unsigned long __kuap_get_and_assert_locked(void)
{
- u32 addr, end;
+ unsigned long kuap = current->thread.kuap;
+
+ WARN_ON_ONCE(IS_ENABLED(CONFIG_PPC_KUAP_DEBUG) && kuap != KUAP_NONE);
+ return kuap;
+}
+
+static __always_inline void __allow_user_access(void __user *to, const void __user *from,
+ u32 size, unsigned long dir)
+{
BUILD_BUG_ON(!__builtin_constant_p(dir));
- if (dir == KUAP_CURRENT) {
- u32 kuap = current->thread.kuap;
+ if (!(dir & KUAP_WRITE))
+ return;
- if (unlikely(!kuap))
- return;
+ current->thread.kuap = (__force u32)to;
+ kuap_unlock_one((__force u32)to);
+}
- addr = kuap & 0xf0000000;
- end = kuap << 28;
- } else if (dir & KUAP_WRITE) {
- addr = (__force u32)to;
- end = min(addr + size, TASK_SIZE);
+static __always_inline void __prevent_user_access(unsigned long dir)
+{
+ u32 kuap = current->thread.kuap;
+
+ BUILD_BUG_ON(!__builtin_constant_p(dir));
- if (unlikely(addr >= TASK_SIZE || !size))
- return;
- } else {
+ if (!(dir & KUAP_WRITE))
return;
- }
- current->thread.kuap = 0;
- kuap_update_sr(mfsrin(addr) | SR_KS, addr, end); /* set Ks */
+ current->thread.kuap = KUAP_NONE;
+ kuap_lock_addr(kuap, true);
}
-static inline unsigned long prevent_user_access_return(void)
+static inline unsigned long __prevent_user_access_return(void)
{
unsigned long flags = current->thread.kuap;
- unsigned long addr = flags & 0xf0000000;
- unsigned long end = flags << 28;
- void __user *to = (__force void __user *)addr;
- if (flags)
- prevent_user_access(to, to, end - addr, KUAP_READ_WRITE);
+ if (flags != KUAP_NONE) {
+ current->thread.kuap = KUAP_NONE;
+ kuap_lock_addr(flags, true);
+ }
return flags;
}
-static inline void restore_user_access(unsigned long flags)
+static inline void __restore_user_access(unsigned long flags)
{
- unsigned long addr = flags & 0xf0000000;
- unsigned long end = flags << 28;
- void __user *to = (__force void __user *)addr;
-
- if (flags)
- allow_user_access(to, to, end - addr, KUAP_READ_WRITE);
+ if (flags != KUAP_NONE) {
+ current->thread.kuap = flags;
+ kuap_unlock(flags, true);
+ }
}
static inline bool
-bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
+__bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
{
- unsigned long begin = regs->kuap & 0xf0000000;
- unsigned long end = regs->kuap << 28;
+ unsigned long kuap = regs->kuap;
- if (!is_write)
+ if (!is_write || kuap == KUAP_ALL)
return false;
+ if (kuap == KUAP_NONE)
+ return true;
+
+ /* If faulting address doesn't match unlocked segment, unlock all */
+ if ((kuap ^ address) & 0xf0000000)
+ regs->kuap = KUAP_ALL;
- return WARN(address < begin || address >= end,
- "Bug: write fault blocked by segment registers !");
+ return false;
}
#endif /* CONFIG_PPC_KUAP */
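
With this rework the per-thread KUAP state on 32-bit Book3S is just an address (or KUAP_NONE / KUAP_ALL), and opening a user-access window means clearing SR_KS on the one segment that covers the target address. The sequence the uaccess wrappers end up performing is roughly the sketch below, written with the helpers defined above; real callers go through allow_write_to_user()/prevent_write_to_user() rather than the __ variants:

	__allow_user_access(uaddr, NULL, len, KUAP_WRITE);	/* thread.kuap = uaddr, SR_KS cleared  */
	ret = __copy_to_user(uaddr, kbuf, len);			/* user stores are now permitted       */
	__prevent_user_access(KUAP_WRITE);			/* thread.kuap = KUAP_NONE, re-locked   */
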
diff --git a/arch/powerpc/include/asm/book3s/32/mmu-hash.h b/arch/powerpc/include/asm/book3s/32/mmu-hash.h
index 2e277ca0170f..78c6a5fde1d6 100644
--- a/arch/powerpc/include/asm/book3s/32/mmu-hash.h
+++ b/arch/powerpc/include/asm/book3s/32/mmu-hash.h
@@ -64,7 +64,92 @@ struct ppc_bat {
#define SR_KP 0x20000000 /* User key */
#define SR_KS 0x40000000 /* Supervisor key */
-#ifndef __ASSEMBLY__
+#ifdef __ASSEMBLY__
+
+#include <asm/asm-offsets.h>
+
+.macro uus_addi sr reg1 reg2 imm
+ .if NUM_USER_SEGMENTS > \sr
+ addi \reg1,\reg2,\imm
+ .endif
+.endm
+
+.macro uus_mtsr sr reg1
+ .if NUM_USER_SEGMENTS > \sr
+ mtsr \sr, \reg1
+ .endif
+.endm
+
+/*
+ * This isync() shouldn't be necessary as the kernel is not expected to run
+ * any instruction in userspace soon after the update of segments, and the
+ * 'rfi' instruction is used to return to userspace, but hash based cores
+ * (at least G3) seem to exhibit random behaviour when the 'isync' is not
+ * there. 603 cores don't have this behaviour, so skip the 'isync' there to
+ * save several CPU cycles.
+ */
+.macro uus_isync
+#ifdef CONFIG_PPC_BOOK3S_604
+BEGIN_MMU_FTR_SECTION
+ isync
+END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
+#endif
+.endm
+
+.macro update_user_segments_by_4 tmp1 tmp2 tmp3 tmp4
+ uus_addi 1, \tmp2, \tmp1, 0x111
+ uus_addi 2, \tmp3, \tmp1, 0x222
+ uus_addi 3, \tmp4, \tmp1, 0x333
+
+ uus_mtsr 0, \tmp1
+ uus_mtsr 1, \tmp2
+ uus_mtsr 2, \tmp3
+ uus_mtsr 3, \tmp4
+
+ uus_addi 4, \tmp1, \tmp1, 0x444
+ uus_addi 5, \tmp2, \tmp2, 0x444
+ uus_addi 6, \tmp3, \tmp3, 0x444
+ uus_addi 7, \tmp4, \tmp4, 0x444
+
+ uus_mtsr 4, \tmp1
+ uus_mtsr 5, \tmp2
+ uus_mtsr 6, \tmp3
+ uus_mtsr 7, \tmp4
+
+ uus_addi 8, \tmp1, \tmp1, 0x444
+ uus_addi 9, \tmp2, \tmp2, 0x444
+ uus_addi 10, \tmp3, \tmp3, 0x444
+ uus_addi 11, \tmp4, \tmp4, 0x444
+
+ uus_mtsr 8, \tmp1
+ uus_mtsr 9, \tmp2
+ uus_mtsr 10, \tmp3
+ uus_mtsr 11, \tmp4
+
+ uus_addi 12, \tmp1, \tmp1, 0x444
+ uus_addi 13, \tmp2, \tmp2, 0x444
+ uus_addi 14, \tmp3, \tmp3, 0x444
+ uus_addi 15, \tmp4, \tmp4, 0x444
+
+ uus_mtsr 12, \tmp1
+ uus_mtsr 13, \tmp2
+ uus_mtsr 14, \tmp3
+ uus_mtsr 15, \tmp4
+
+ uus_isync
+.endm
+
+#else
+
+/*
+ * This macro defines the mapping from contexts to VSIDs (virtual
+ * segment IDs). We use a skew on both the context and the high 4 bits
+ * of the 32-bit virtual address (the "effective segment ID") in order
+ * to spread out the entries in the MMU hash table. Note, if this
+ * function is changed then hash functions will have to be
+ * changed to correspond.
+ */
+#define CTX_TO_VSID(c, id) ((((c) * (897 * 16)) + (id * 0x111)) & 0xffffff)
/*
* Hardware Page Table Entry
@@ -90,10 +175,16 @@ struct hash_pte {
typedef struct {
unsigned long id;
- unsigned long vdso_base;
+ unsigned long sr0;
+ void __user *vdso;
} mm_context_t;
+#ifdef CONFIG_PPC_KUEP
+#define INIT_MM_CONTEXT(mm) .context.sr0 = SR_NX
+#endif
+
void update_bats(void);
+static inline void cleanup_cpu_mmu_context(void) { }
/* patch sites */
extern s32 patch__hash_page_A0, patch__hash_page_A1, patch__hash_page_A2;
@@ -101,6 +192,39 @@ extern s32 patch__hash_page_B, patch__hash_page_C;
extern s32 patch__flush_hash_A0, patch__flush_hash_A1, patch__flush_hash_A2;
extern s32 patch__flush_hash_B;
+#include <asm/reg.h>
+#include <asm/task_size_32.h>
+
+static __always_inline void update_user_segment(u32 n, u32 val)
+{
+ if (n << 28 < TASK_SIZE)
+ mtsr(val + n * 0x111, n << 28);
+}
+
+static __always_inline void update_user_segments(u32 val)
+{
+ val &= 0xf0ffffff;
+
+ update_user_segment(0, val);
+ update_user_segment(1, val);
+ update_user_segment(2, val);
+ update_user_segment(3, val);
+ update_user_segment(4, val);
+ update_user_segment(5, val);
+ update_user_segment(6, val);
+ update_user_segment(7, val);
+ update_user_segment(8, val);
+ update_user_segment(9, val);
+ update_user_segment(10, val);
+ update_user_segment(11, val);
+ update_user_segment(12, val);
+ update_user_segment(13, val);
+ update_user_segment(14, val);
+ update_user_segment(15, val);
+}
+
+int __init find_free_bat(void);
+unsigned int bat_block_size(unsigned long base, unsigned long top);
#endif /* !__ASSEMBLY__ */
/* We happily ignore the smaller BATs on 601, we don't actually use
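
A small host-side check of the CTX_TO_VSID() skew added above, using nothing beyond the macro itself: consecutive 256MB segments of the same context get VSIDs that differ by 0x111, which is also why update_user_segments() feeds val + n * 0x111 into segment register n.

#include <stdio.h>

/* Same formula as the CTX_TO_VSID() macro in book3s/32/mmu-hash.h. */
#define CTX_TO_VSID(c, id) ((((c) * (897 * 16)) + ((id) * 0x111)) & 0xffffff)

int main(void)
{
	unsigned long ctx = 5;

	/* VSIDs for the 16 user segments of one context. */
	for (int id = 0; id < 16; id++)
		printf("segment %2d -> vsid 0x%06lx\n", id,
		       (unsigned long)CTX_TO_VSID(ctx, id));
	return 0;
}
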
diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h
index 5b39c11e884a..75823f39e042 100644
--- a/arch/powerpc/include/asm/book3s/32/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/32/pgtable.h
@@ -2,10 +2,45 @@
#ifndef _ASM_POWERPC_BOOK3S_32_PGTABLE_H
#define _ASM_POWERPC_BOOK3S_32_PGTABLE_H
-#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopmd.h>
-#include <asm/book3s/32/hash.h>
+/*
+ * The "classic" 32-bit implementation of the PowerPC MMU uses a hash
+ * table containing PTEs, together with a set of 16 segment registers,
+ * to define the virtual to physical address mapping.
+ *
+ * We use the hash table as an extended TLB, i.e. a cache of currently
+ * active mappings. We maintain a two-level page table tree, much
+ * like that used by the i386, for the sake of the Linux memory
+ * management code. Low-level assembler code in hash_low_32.S
+ * (procedure hash_page) is responsible for extracting ptes from the
+ * tree and putting them into the hash table when necessary, and
+ * updating the accessed and modified bits in the page table tree.
+ */
+
+#define _PAGE_PRESENT 0x001 /* software: pte contains a translation */
+#define _PAGE_HASHPTE 0x002 /* hash_page has made an HPTE for this pte */
+#define _PAGE_USER 0x004 /* usermode access allowed */
+#define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
+#define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
+#define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
+#define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
+#define _PAGE_DIRTY 0x080 /* C: page changed */
+#define _PAGE_ACCESSED 0x100 /* R: page referenced */
+#define _PAGE_EXEC 0x200 /* software: exec allowed */
+#define _PAGE_RW 0x400 /* software: user write access allowed */
+#define _PAGE_SPECIAL 0x800 /* software: Special page */
+
+#ifdef CONFIG_PTE_64BIT
+/* We never clear the high word of the pte */
+#define _PTE_NONE_MASK (0xffffffff00000000ULL | _PAGE_HASHPTE)
+#else
+#define _PTE_NONE_MASK _PAGE_HASHPTE
+#endif
+
+#define _PMD_PRESENT 0
+#define _PMD_PRESENT_MASK (PAGE_MASK)
+#define _PMD_BAD (~PAGE_MASK)
/* And here we include common definitions */
@@ -37,8 +72,10 @@ static inline bool pte_user(pte_t pte)
*/
#ifdef CONFIG_PTE_64BIT
#define PTE_RPN_MASK (~((1ULL << PTE_RPN_SHIFT) - 1))
+#define MAX_POSSIBLE_PHYSMEM_BITS 36
#else
#define PTE_RPN_MASK (~((1UL << PTE_RPN_SHIFT) - 1))
+#define MAX_POSSIBLE_PHYSMEM_BITS 32
#endif
/*
@@ -75,31 +112,11 @@ static inline bool pte_user(pte_t pte)
/* Permission masks used for kernel mappings */
#define PAGE_KERNEL __pgprot(_PAGE_BASE | _PAGE_KERNEL_RW)
#define PAGE_KERNEL_NC __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | _PAGE_NO_CACHE)
-#define PAGE_KERNEL_NCG __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | \
- _PAGE_NO_CACHE | _PAGE_GUARDED)
+#define PAGE_KERNEL_NCG __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | _PAGE_NO_CACHE | _PAGE_GUARDED)
#define PAGE_KERNEL_X __pgprot(_PAGE_BASE | _PAGE_KERNEL_RWX)
#define PAGE_KERNEL_RO __pgprot(_PAGE_BASE | _PAGE_KERNEL_RO)
#define PAGE_KERNEL_ROX __pgprot(_PAGE_BASE | _PAGE_KERNEL_ROX)
-/*
- * Protection used for kernel text. We want the debuggers to be able to
- * set breakpoints anywhere, so don't write protect the kernel text
- * on platforms where such control is possible.
- */
-#if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH) ||\
- defined(CONFIG_KPROBES) || defined(CONFIG_DYNAMIC_FTRACE)
-#define PAGE_KERNEL_TEXT PAGE_KERNEL_X
-#else
-#define PAGE_KERNEL_TEXT PAGE_KERNEL_ROX
-#endif
-
-/* Make modules code happy. We don't set RO yet */
-#define PAGE_KERNEL_EXEC PAGE_KERNEL_X
-
-/* Advertise special mapping type for AGP */
-#define PAGE_AGP (PAGE_KERNEL_NC)
-#define HAVE_PAGE_AGP
-
#define PTE_INDEX_SIZE PTE_SHIFT
#define PMD_INDEX_SIZE 0
#define PUD_INDEX_SIZE 0
@@ -113,6 +130,9 @@ static inline bool pte_user(pte_t pte)
#define PMD_TABLE_SIZE 0
#define PUD_TABLE_SIZE 0
#define PGD_TABLE_SIZE (sizeof(pgd_t) << PGD_INDEX_SIZE)
+
+/* Bits to mask out from a PMD to get to the PTE page */
+#define PMD_MASKED_BITS (PTE_TABLE_SIZE - 1)
#endif /* __ASSEMBLY__ */
#define PTRS_PER_PTE (1 << PTE_INDEX_SIZE)
@@ -138,6 +158,7 @@ static inline bool pte_user(pte_t pte)
#ifndef __ASSEMBLY__
int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot);
+void unmap_kernel_page(unsigned long va);
#endif /* !__ASSEMBLY__ */
@@ -182,24 +203,17 @@ int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot);
*/
#define VMALLOC_OFFSET (0x1000000) /* 16M */
-/*
- * With CONFIG_STRICT_KERNEL_RWX, kernel segments are set NX. But when modules
- * are used, NX cannot be set on VMALLOC space. So vmalloc VM space and linear
- * memory shall not share segments.
- */
-#if defined(CONFIG_STRICT_KERNEL_RWX) && defined(CONFIG_MODULES)
-#define VMALLOC_START ((_ALIGN((long)high_memory, 256L << 20) + VMALLOC_OFFSET) & \
- ~(VMALLOC_OFFSET - 1))
-#else
#define VMALLOC_START ((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
-#endif
#ifdef CONFIG_KASAN_VMALLOC
-#define VMALLOC_END _ALIGN_DOWN(ioremap_bot, PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT)
+#define VMALLOC_END ALIGN_DOWN(ioremap_bot, PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT)
#else
#define VMALLOC_END ioremap_bot
#endif
+#define MODULES_END ALIGN_DOWN(PAGE_OFFSET, SZ_256M)
+#define MODULES_VADDR (MODULES_END - SZ_256M)
+
#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/threads.h>
@@ -218,7 +232,7 @@ int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot);
*/
#define pte_clear(mm, addr, ptep) \
- do { pte_update(ptep, ~_PAGE_HASHPTE, 0); } while (0)
+ do { pte_update(mm, addr, ptep, ~_PAGE_HASHPTE, 0, 0); } while (0)
#define pmd_none(pmd) (!pmd_val(pmd))
#define pmd_bad(pmd) (pmd_val(pmd) & _PMD_BAD)
@@ -241,8 +255,14 @@ extern void add_hash_page(unsigned context, unsigned long va,
unsigned long pmdval);
/* Flush an entry from the TLB/hash table */
-extern void flush_hash_entry(struct mm_struct *mm, pte_t *ptep,
- unsigned long address);
+static inline void flush_hash_entry(struct mm_struct *mm, pte_t *ptep, unsigned long addr)
+{
+ if (mmu_has_feature(MMU_FTR_HPTE_TABLE)) {
+ unsigned long ptephys = __pa(ptep) & PAGE_MASK;
+
+ flush_hash_pages(mm->context.id, addr, ptephys, 1);
+ }
+}
/*
* PTE updates. This function is called whenever an existing
@@ -253,84 +273,74 @@ extern void flush_hash_entry(struct mm_struct *mm, pte_t *ptep,
 * and the PTE may be either 32 or 64 bit wide. In the latter case,
* when using atomic updates, only the low part of the PTE is
* accessed atomically.
- *
- * In addition, on 44x, we also maintain a global flag indicating
- * that an executable user mapping was modified, which is needed
- * to properly flush the virtually tagged instruction cache of
- * those implementations.
*/
-#ifndef CONFIG_PTE_64BIT
-static inline unsigned long pte_update(pte_t *p,
- unsigned long clr,
- unsigned long set)
+static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, pte_t *p,
+ unsigned long clr, unsigned long set, int huge)
{
- unsigned long old, tmp;
+ pte_basic_t old;
- __asm__ __volatile__("\
-1: lwarx %0,0,%3\n\
- andc %1,%0,%4\n\
- or %1,%1,%5\n"
-" stwcx. %1,0,%3\n\
- bne- 1b"
- : "=&r" (old), "=&r" (tmp), "=m" (*p)
- : "r" (p), "r" (clr), "r" (set), "m" (*p)
- : "cc" );
+ if (mmu_has_feature(MMU_FTR_HPTE_TABLE)) {
+ unsigned long tmp;
- return old;
-}
-#else /* CONFIG_PTE_64BIT */
-static inline unsigned long long pte_update(pte_t *p,
- unsigned long clr,
- unsigned long set)
-{
- unsigned long long old;
- unsigned long tmp;
+ asm volatile(
+#ifndef CONFIG_PTE_64BIT
+ "1: lwarx %0, 0, %3\n"
+ " andc %1, %0, %4\n"
+#else
+ "1: lwarx %L0, 0, %3\n"
+ " lwz %0, -4(%3)\n"
+ " andc %1, %L0, %4\n"
+#endif
+ " or %1, %1, %5\n"
+ " stwcx. %1, 0, %3\n"
+ " bne- 1b"
+ : "=&r" (old), "=&r" (tmp), "=m" (*p)
+#ifndef CONFIG_PTE_64BIT
+ : "r" (p),
+#else
+ : "b" ((unsigned long)(p) + 4),
+#endif
+ "r" (clr), "r" (set), "m" (*p)
+ : "cc" );
+ } else {
+ old = pte_val(*p);
- __asm__ __volatile__("\
-1: lwarx %L0,0,%4\n\
- lwzx %0,0,%3\n\
- andc %1,%L0,%5\n\
- or %1,%1,%6\n"
-" stwcx. %1,0,%4\n\
- bne- 1b"
- : "=&r" (old), "=&r" (tmp), "=m" (*p)
- : "r" (p), "r" ((unsigned long)(p) + 4), "r" (clr), "r" (set), "m" (*p)
- : "cc" );
+ *p = __pte((old & ~(pte_basic_t)clr) | set);
+ }
return old;
}
-#endif /* CONFIG_PTE_64BIT */
/*
* 2.6 calls this without flushing the TLB entry; this is wrong
* for our hash-based implementation, we fix that up here.
*/
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
-static inline int __ptep_test_and_clear_young(unsigned int context, unsigned long addr, pte_t *ptep)
+static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep)
{
unsigned long old;
- old = pte_update(ptep, _PAGE_ACCESSED, 0);
- if (old & _PAGE_HASHPTE) {
- unsigned long ptephys = __pa(ptep) & PAGE_MASK;
- flush_hash_pages(context, addr, ptephys, 1);
- }
+ old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0, 0);
+ if (old & _PAGE_HASHPTE)
+ flush_hash_entry(mm, ptep, addr);
+
return (old & _PAGE_ACCESSED) != 0;
}
#define ptep_test_and_clear_young(__vma, __addr, __ptep) \
- __ptep_test_and_clear_young((__vma)->vm_mm->context.id, __addr, __ptep)
+ __ptep_test_and_clear_young((__vma)->vm_mm, __addr, __ptep)
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
pte_t *ptep)
{
- return __pte(pte_update(ptep, ~_PAGE_HASHPTE, 0));
+ return __pte(pte_update(mm, addr, ptep, ~_PAGE_HASHPTE, 0, 0));
}
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
pte_t *ptep)
{
- pte_update(ptep, _PAGE_RW, 0);
+ pte_update(mm, addr, ptep, _PAGE_RW, 0, 0);
}
static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
@@ -341,7 +351,7 @@ static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
unsigned long set = pte_val(entry) &
(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
- pte_update(ptep, 0, set);
+ pte_update(vma->vm_mm, address, ptep, 0, set, 0);
flush_tlb_page(vma, address);
}
@@ -349,27 +359,8 @@ static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
#define __HAVE_ARCH_PTE_SAME
#define pte_same(A,B) (((pte_val(A) ^ pte_val(B)) & ~_PAGE_HASHPTE) == 0)
-#define pmd_page_vaddr(pmd) \
- ((unsigned long)__va(pmd_val(pmd) & ~(PTE_TABLE_SIZE - 1)))
-#define pmd_page(pmd) \
- pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)
-
-/* to find an entry in a kernel page-table-directory */
-#define pgd_offset_k(address) pgd_offset(&init_mm, address)
-
-/* to find an entry in a page-table-directory */
-#define pgd_index(address) ((address) >> PGDIR_SHIFT)
-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
-
-/* Find an entry in the third-level page table.. */
-#define pte_index(address) \
- (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-#define pte_offset_kernel(dir, addr) \
- ((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(addr))
-#define pte_offset_map(dir, addr) \
- ((pte_t *)(kmap_atomic(pmd_page(*(dir))) + \
- (pmd_page_vaddr(*(dir)) & ~PAGE_MASK)) + pte_index(addr))
-#define pte_unmap(pte) kunmap_atomic(pte)
+#define pmd_pfn(pmd) (pmd_val(pmd) >> PAGE_SHIFT)
+#define pmd_page(pmd) pfn_to_page(pmd_pfn(pmd))
/*
* Encode and decode a swap entry.
@@ -541,7 +532,7 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
| (pte_val(pte) & ~_PAGE_HASHPTE));
else
- pte_update(ptep, ~_PAGE_HASHPTE, pte_val(pte));
+ pte_update(mm, addr, ptep, ~_PAGE_HASHPTE, pte_val(pte), 0);
#elif defined(CONFIG_PTE_64BIT)
/* Second case is 32-bit with 64-bit PTE. In this case, we
@@ -560,9 +551,9 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
if (pte_val(*ptep) & _PAGE_HASHPTE)
flush_hash_entry(mm, ptep, addr);
__asm__ __volatile__("\
- stw%U0%X0 %2,%0\n\
+ stw%X0 %2,%0\n\
eieio\n\
- stw%U0%X0 %L2,%1"
+ stw%X1 %L2,%1"
: "=m" (*ptep), "=m" (*((unsigned char *)ptep+4))
: "r" (pte) : "memory");
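
A minimal user-space model of the pte_update() clear/set semantics above, covering only the non-atomic (no hash table) path: callers pass a mask of bits to clear and bits to set and get the old value back, which is how pte_clear(), ptep_test_and_clear_young() and ptep_set_wrprotect() are all expressed. The flag values are copied from the defines earlier in this file; locking and hash flushing are deliberately omitted.

#include <stdio.h>

typedef unsigned long pte_basic_t;

/* Subset of the _PAGE_* flags defined in book3s/32/pgtable.h. */
#define _PAGE_HASHPTE  0x002
#define _PAGE_ACCESSED 0x100
#define _PAGE_RW       0x400

/* Non-atomic flavour of pte_update(): clear 'clr', set 'set', return old. */
static pte_basic_t pte_update(pte_basic_t *p, unsigned long clr, unsigned long set)
{
	pte_basic_t old = *p;

	*p = (old & ~(pte_basic_t)clr) | set;
	return old;
}

int main(void)
{
	pte_basic_t pte = 0x1000 | _PAGE_RW | _PAGE_ACCESSED | _PAGE_HASHPTE;

	/* ptep_test_and_clear_young(): clear ACCESSED and report whether it was set. */
	printf("young: %d\n",
	       (pte_update(&pte, _PAGE_ACCESSED, 0) & _PAGE_ACCESSED) != 0);

	/* ptep_set_wrprotect(): drop the RW bit. */
	pte_update(&pte, _PAGE_RW, 0);

	/* pte_clear(): clear everything except _PAGE_HASHPTE. */
	pte_update(&pte, ~_PAGE_HASHPTE, 0);
	printf("final pte: 0x%lx\n", pte);
	return 0;
}
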
diff --git a/arch/powerpc/include/asm/book3s/32/tlbflush.h b/arch/powerpc/include/asm/book3s/32/tlbflush.h
index 068085b709fb..ba1743c52b56 100644
--- a/arch/powerpc/include/asm/book3s/32/tlbflush.h
+++ b/arch/powerpc/include/asm/book3s/32/tlbflush.h
@@ -6,12 +6,69 @@
/*
* TLB flushing for "classic" hash-MMU 32-bit CPUs, 6xx, 7xx, 7xxx
*/
-extern void flush_tlb_mm(struct mm_struct *mm);
-extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
-extern void flush_tlb_page_nohash(struct vm_area_struct *vma, unsigned long addr);
-extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
- unsigned long end);
-extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
+void hash__flush_tlb_mm(struct mm_struct *mm);
+void hash__flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
+void hash__flush_range(struct mm_struct *mm, unsigned long start, unsigned long end);
+
+#ifdef CONFIG_SMP
+void _tlbie(unsigned long address);
+#else
+static inline void _tlbie(unsigned long address)
+{
+ asm volatile ("tlbie %0; sync" : : "r" (address) : "memory");
+}
+#endif
+void _tlbia(void);
+
+/*
+ * Called at the end of a mmu_gather operation to make sure the
+ * TLB flush is completely done.
+ */
+static inline void tlb_flush(struct mmu_gather *tlb)
+{
+ /* 603 needs to flush the whole TLB here since it doesn't use a hash table. */
+ if (!mmu_has_feature(MMU_FTR_HPTE_TABLE))
+ _tlbia();
+}
+
+static inline void flush_range(struct mm_struct *mm, unsigned long start, unsigned long end)
+{
+ start &= PAGE_MASK;
+ if (mmu_has_feature(MMU_FTR_HPTE_TABLE))
+ hash__flush_range(mm, start, end);
+ else if (end - start <= PAGE_SIZE)
+ _tlbie(start);
+ else
+ _tlbia();
+}
+
+static inline void flush_tlb_mm(struct mm_struct *mm)
+{
+ if (mmu_has_feature(MMU_FTR_HPTE_TABLE))
+ hash__flush_tlb_mm(mm);
+ else
+ _tlbia();
+}
+
+static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
+{
+ if (mmu_has_feature(MMU_FTR_HPTE_TABLE))
+ hash__flush_tlb_page(vma, vmaddr);
+ else
+ _tlbie(vmaddr);
+}
+
+static inline void
+flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
+{
+ flush_range(vma->vm_mm, start, end);
+}
+
+static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+ flush_range(&init_mm, start, end);
+}
+
static inline void local_flush_tlb_page(struct vm_area_struct *vma,
unsigned long vmaddr)
{
@@ -22,4 +79,4 @@ static inline void local_flush_tlb_mm(struct mm_struct *mm)
flush_tlb_mm(mm);
}
-#endif /* _ASM_POWERPC_TLBFLUSH_H */
+#endif /* _ASM_POWERPC_BOOK3S_32_TLBFLUSH_H */
diff --git a/arch/powerpc/include/asm/book3s/64/hash-4k.h b/arch/powerpc/include/asm/book3s/64/hash-4k.h
index 8fd8599c9395..b6ac4f86c87b 100644
--- a/arch/powerpc/include/asm/book3s/64/hash-4k.h
+++ b/arch/powerpc/include/asm/book3s/64/hash-4k.h
@@ -13,20 +13,24 @@
*/
#define MAX_EA_BITS_PER_CONTEXT 46
-#define REGION_SHIFT (MAX_EA_BITS_PER_CONTEXT - 2)
/*
- * Our page table limit us to 64TB. Hence for the kernel mapping,
- * each MAP area is limited to 16 TB.
- * The four map areas are: linear mapping, vmap, IO and vmemmap
+ * Our page table limits us to 64TB. For 64TB physical memory, we only need 64GB
+ * of vmemmap space. To better support a sparse memory layout, we use a 61TB
+ * linear map range, 1TB of vmalloc, 1TB of I/O and 1TB of vmemmap.
*/
+#define REGION_SHIFT (40)
#define H_KERN_MAP_SIZE (ASM_CONST(1) << REGION_SHIFT)
/*
- * Define the address range of the kernel non-linear virtual area
- * 16TB
+ * Limits the linear mapping range
*/
-#define H_KERN_VIRT_START ASM_CONST(0xc000100000000000)
+#define H_MAX_PHYSMEM_BITS 46
+
+/*
+ * Define the address range of the kernel non-linear virtual area (61TB)
+ */
+#define H_KERN_VIRT_START ASM_CONST(0xc0003d0000000000)
#ifndef __ASSEMBLY__
#define H_PTE_TABLE_SIZE (sizeof(pte_t) << H_PTE_INDEX_SIZE)
@@ -34,11 +38,11 @@
#define H_PUD_TABLE_SIZE (sizeof(pud_t) << H_PUD_INDEX_SIZE)
#define H_PGD_TABLE_SIZE (sizeof(pgd_t) << H_PGD_INDEX_SIZE)
-#define H_PAGE_F_GIX_SHIFT 53
-#define H_PAGE_F_SECOND _RPAGE_RPN44 /* HPTE is in 2ndary HPTEG */
-#define H_PAGE_F_GIX (_RPAGE_RPN43 | _RPAGE_RPN42 | _RPAGE_RPN41)
-#define H_PAGE_BUSY _RPAGE_RSV1 /* software: PTE & hash are busy */
-#define H_PAGE_HASHPTE _RPAGE_RSV2 /* software: PTE & hash are busy */
+#define H_PAGE_F_GIX_SHIFT _PAGE_PA_MAX
+#define H_PAGE_F_SECOND _RPAGE_PKEY_BIT0 /* HPTE is in 2ndary HPTEG */
+#define H_PAGE_F_GIX (_RPAGE_RPN43 | _RPAGE_RPN42 | _RPAGE_RPN41)
+#define H_PAGE_BUSY _RPAGE_RSV1
+#define H_PAGE_HASHPTE _RPAGE_PKEY_BIT4
/* PTE flags to conserve for HPTE identification */
#define _PAGE_HPTEFLAGS (H_PAGE_BUSY | H_PAGE_HASHPTE | \
@@ -57,11 +61,12 @@
#define H_PMD_FRAG_NR (PAGE_SIZE >> H_PMD_FRAG_SIZE_SHIFT)
/* memory key bits, only 8 keys supported */
-#define H_PTE_PKEY_BIT0 0
-#define H_PTE_PKEY_BIT1 0
-#define H_PTE_PKEY_BIT2 _RPAGE_RSV3
-#define H_PTE_PKEY_BIT3 _RPAGE_RSV4
-#define H_PTE_PKEY_BIT4 _RPAGE_RSV5
+#define H_PTE_PKEY_BIT4 0
+#define H_PTE_PKEY_BIT3 0
+#define H_PTE_PKEY_BIT2 _RPAGE_PKEY_BIT3
+#define H_PTE_PKEY_BIT1 _RPAGE_PKEY_BIT2
+#define H_PTE_PKEY_BIT0 _RPAGE_PKEY_BIT1
+
/*
* On all 4K setups, remap_4k_pfn() equates to remap_pfn_range()
@@ -156,6 +161,12 @@ extern pmd_t hash__pmdp_huge_get_and_clear(struct mm_struct *mm,
extern int hash__has_transparent_hugepage(void);
#endif
+static inline pmd_t hash__pmd_mkdevmap(pmd_t pmd)
+{
+ BUG();
+ return pmd;
+}
+
#endif /* !__ASSEMBLY__ */
#endif /* _ASM_POWERPC_BOOK3S_64_HASH_4K_H */
diff --git a/arch/powerpc/include/asm/book3s/64/hash-64k.h b/arch/powerpc/include/asm/book3s/64/hash-64k.h
index d1d9177d9ebd..338e62fbea0b 100644
--- a/arch/powerpc/include/asm/book3s/64/hash-64k.h
+++ b/arch/powerpc/include/asm/book3s/64/hash-64k.h
@@ -7,6 +7,19 @@
#define H_PUD_INDEX_SIZE 10 // size: 8B << 10 = 8KB, maps 2^10 x 16GB = 16TB
#define H_PGD_INDEX_SIZE 8 // size: 8B << 8 = 2KB, maps 2^8 x 16TB = 4PB
+/*
+ * If we store section details in page->flags we can't increase MAX_PHYSMEM_BITS:
+ * if we increase SECTIONS_WIDTH we no longer store node details in page->flags,
+ * and page_to_nid() does a page->section->node lookup instead.
+ * Hence only increase it for VMEMMAP. Further, depending on SPARSEMEM_EXTREME,
+ * reduce the memory requirements when there is a large number of sections.
+ * 51 bits is the max physical real address on POWER9.
+ */
+#if defined(CONFIG_SPARSEMEM_VMEMMAP) && defined(CONFIG_SPARSEMEM_EXTREME)
+#define H_MAX_PHYSMEM_BITS 51
+#else
+#define H_MAX_PHYSMEM_BITS 46
+#endif
/*
* Each context is 512TB size. SLB miss for first context/default context
@@ -32,15 +45,15 @@
*/
#define H_PAGE_COMBO _RPAGE_RPN0 /* this is a combo 4k page */
#define H_PAGE_4K_PFN _RPAGE_RPN1 /* PFN is for a single 4k page */
-#define H_PAGE_BUSY _RPAGE_RPN44 /* software: PTE & hash are busy */
+#define H_PAGE_BUSY _RPAGE_RSV1 /* software: PTE & hash are busy */
#define H_PAGE_HASHPTE _RPAGE_RPN43 /* PTE has associated HPTE */
/* memory key bits. */
-#define H_PTE_PKEY_BIT0 _RPAGE_RSV1
-#define H_PTE_PKEY_BIT1 _RPAGE_RSV2
-#define H_PTE_PKEY_BIT2 _RPAGE_RSV3
-#define H_PTE_PKEY_BIT3 _RPAGE_RSV4
-#define H_PTE_PKEY_BIT4 _RPAGE_RSV5
+#define H_PTE_PKEY_BIT4 _RPAGE_PKEY_BIT4
+#define H_PTE_PKEY_BIT3 _RPAGE_PKEY_BIT3
+#define H_PTE_PKEY_BIT2 _RPAGE_PKEY_BIT2
+#define H_PTE_PKEY_BIT1 _RPAGE_PKEY_BIT1
+#define H_PTE_PKEY_BIT0 _RPAGE_PKEY_BIT0
/*
* We need to differentiate between explicit huge page and THP huge
@@ -246,7 +259,7 @@ static inline void mark_hpte_slot_valid(unsigned char *hpte_slot_array,
*/
static inline int hash__pmd_trans_huge(pmd_t pmd)
{
- return !!((pmd_val(pmd) & (_PAGE_PTE | H_PAGE_THP_HUGE)) ==
+ return !!((pmd_val(pmd) & (_PAGE_PTE | H_PAGE_THP_HUGE | _PAGE_DEVMAP)) ==
(_PAGE_PTE | H_PAGE_THP_HUGE));
}
@@ -272,6 +285,12 @@ extern pmd_t hash__pmdp_huge_get_and_clear(struct mm_struct *mm,
unsigned long addr, pmd_t *pmdp);
extern int hash__has_transparent_hugepage(void);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
+static inline pmd_t hash__pmd_mkdevmap(pmd_t pmd)
+{
+ return __pmd(pmd_val(pmd) | (_PAGE_PTE | H_PAGE_THP_HUGE | _PAGE_DEVMAP));
+}
+
#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_BOOK3S_64_HASH_64K_H */
diff --git a/arch/powerpc/include/asm/book3s/64/hash-pkey.h b/arch/powerpc/include/asm/book3s/64/hash-pkey.h
new file mode 100644
index 000000000000..f1e60d579f6c
--- /dev/null
+++ b/arch/powerpc/include/asm/book3s/64/hash-pkey.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_BOOK3S_64_HASH_PKEY_H
+#define _ASM_POWERPC_BOOK3S_64_HASH_PKEY_H
+
+/* We use key 3 for KERNEL */
+#define HASH_DEFAULT_KERNEL_KEY (HPTE_R_KEY_BIT0 | HPTE_R_KEY_BIT1)
+
+static inline u64 hash__vmflag_to_pte_pkey_bits(u64 vm_flags)
+{
+ return (((vm_flags & VM_PKEY_BIT0) ? H_PTE_PKEY_BIT0 : 0x0UL) |
+ ((vm_flags & VM_PKEY_BIT1) ? H_PTE_PKEY_BIT1 : 0x0UL) |
+ ((vm_flags & VM_PKEY_BIT2) ? H_PTE_PKEY_BIT2 : 0x0UL) |
+ ((vm_flags & VM_PKEY_BIT3) ? H_PTE_PKEY_BIT3 : 0x0UL) |
+ ((vm_flags & VM_PKEY_BIT4) ? H_PTE_PKEY_BIT4 : 0x0UL));
+}
+
+static inline u64 pte_to_hpte_pkey_bits(u64 pteflags, unsigned long flags)
+{
+ unsigned long pte_pkey;
+
+ pte_pkey = (((pteflags & H_PTE_PKEY_BIT4) ? HPTE_R_KEY_BIT4 : 0x0UL) |
+ ((pteflags & H_PTE_PKEY_BIT3) ? HPTE_R_KEY_BIT3 : 0x0UL) |
+ ((pteflags & H_PTE_PKEY_BIT2) ? HPTE_R_KEY_BIT2 : 0x0UL) |
+ ((pteflags & H_PTE_PKEY_BIT1) ? HPTE_R_KEY_BIT1 : 0x0UL) |
+ ((pteflags & H_PTE_PKEY_BIT0) ? HPTE_R_KEY_BIT0 : 0x0UL));
+
+ if (mmu_has_feature(MMU_FTR_BOOK3S_KUAP) ||
+ mmu_has_feature(MMU_FTR_BOOK3S_KUEP)) {
+ if ((pte_pkey == 0) && (flags & HPTE_USE_KERNEL_KEY))
+ return HASH_DEFAULT_KERNEL_KEY;
+ }
+
+ return pte_pkey;
+}
+
+static inline u16 hash__pte_to_pkey_bits(u64 pteflags)
+{
+ return (((pteflags & H_PTE_PKEY_BIT4) ? 0x10 : 0x0UL) |
+ ((pteflags & H_PTE_PKEY_BIT3) ? 0x8 : 0x0UL) |
+ ((pteflags & H_PTE_PKEY_BIT2) ? 0x4 : 0x0UL) |
+ ((pteflags & H_PTE_PKEY_BIT1) ? 0x2 : 0x0UL) |
+ ((pteflags & H_PTE_PKEY_BIT0) ? 0x1 : 0x0UL));
+}
+
+#endif
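
A standalone sketch of the bit re-packing hash-pkey.h performs, with made-up bit positions standing in for the real H_PTE_PKEY_BIT* constants: the same 5-bit protection key is scattered across reserved PTE bits and collapsed back into a plain key number by hash__pte_to_pkey_bits(); the HPTE direction works the same way with the HPTE_R_KEY_BIT* constants.

#include <stdio.h>
#include <stdint.h>

/* Illustrative stand-ins only; the real values live in book3s/64/pgtable.h. */
#define H_PTE_PKEY_BIT0 (1ULL << 57)
#define H_PTE_PKEY_BIT1 (1ULL << 58)
#define H_PTE_PKEY_BIT2 (1ULL << 59)
#define H_PTE_PKEY_BIT3 (1ULL << 60)
#define H_PTE_PKEY_BIT4 (1ULL << 61)

/* Same structure as hash__pte_to_pkey_bits(): collapse the scattered
 * PTE pkey bits back into a 0..31 key number. */
static unsigned int pte_to_pkey(uint64_t pteflags)
{
	return ((pteflags & H_PTE_PKEY_BIT4) ? 0x10 : 0) |
	       ((pteflags & H_PTE_PKEY_BIT3) ? 0x8  : 0) |
	       ((pteflags & H_PTE_PKEY_BIT2) ? 0x4  : 0) |
	       ((pteflags & H_PTE_PKEY_BIT1) ? 0x2  : 0) |
	       ((pteflags & H_PTE_PKEY_BIT0) ? 0x1  : 0);
}

int main(void)
{
	uint64_t pte = H_PTE_PKEY_BIT2 | H_PTE_PKEY_BIT0;	/* key 5 */

	printf("pkey = %u\n", pte_to_pkey(pte));
	return 0;
}
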
diff --git a/arch/powerpc/include/asm/book3s/64/hash.h b/arch/powerpc/include/asm/book3s/64/hash.h
index 2781ebf6add4..17e7a778c856 100644
--- a/arch/powerpc/include/asm/book3s/64/hash.h
+++ b/arch/powerpc/include/asm/book3s/64/hash.h
@@ -18,6 +18,10 @@
#include <asm/book3s/64/hash-4k.h>
#endif
+#define H_PTRS_PER_PTE (1 << H_PTE_INDEX_SIZE)
+#define H_PTRS_PER_PMD (1 << H_PMD_INDEX_SIZE)
+#define H_PTRS_PER_PUD (1 << H_PUD_INDEX_SIZE)
+
/* Bits to set in a PMD/PUD/PGD entry valid bit*/
#define HASH_PMD_VAL_BITS (0x8000000000000000UL)
#define HASH_PUD_VAL_BITS (0x8000000000000000UL)
@@ -99,10 +103,6 @@
* Defines the address of the vmemap area, in its own region on
* hash table CPUs.
*/
-#ifdef CONFIG_PPC_MM_SLICES
-#define HAVE_ARCH_UNMAPPED_AREA
-#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
-#endif /* CONFIG_PPC_MM_SLICES */
/* PTEIDX nibble */
#define _PTEIDX_SECONDARY 0x8
@@ -134,9 +134,9 @@ static inline int get_region_id(unsigned long ea)
#define hash__pmd_bad(pmd) (pmd_val(pmd) & H_PMD_BAD_BITS)
#define hash__pud_bad(pud) (pud_val(pud) & H_PUD_BAD_BITS)
-static inline int hash__pgd_bad(pgd_t pgd)
+static inline int hash__p4d_bad(p4d_t p4d)
{
- return (pgd_val(pgd) == 0);
+ return (p4d_val(p4d) == 0);
}
#ifdef CONFIG_STRICT_KERNEL_RWX
extern void hash__mark_rodata_ro(void);
@@ -145,7 +145,7 @@ extern void hash__mark_initmem_nx(void);
extern void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, unsigned long pte, int huge);
-extern unsigned long htab_convert_pte_flags(unsigned long pteflags);
+unsigned long htab_convert_pte_flags(unsigned long pteflags, unsigned long flags);
/* Atomic PTE updates */
static inline unsigned long hash__pte_update(struct mm_struct *mm,
unsigned long addr,
@@ -251,9 +251,12 @@ extern int __meminit hash__vmemmap_create_mapping(unsigned long start,
extern void hash__vmemmap_remove_mapping(unsigned long start,
unsigned long page_size);
-int hash__create_section_mapping(unsigned long start, unsigned long end, int nid);
+int hash__create_section_mapping(unsigned long start, unsigned long end,
+ int nid, pgprot_t prot);
int hash__remove_section_mapping(unsigned long start, unsigned long end);
+void hash__kernel_map_pages(struct page *page, int numpages, int enable);
+
#endif /* !__ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_BOOK3S_64_HASH_H */
diff --git a/arch/powerpc/include/asm/book3s/64/hugetlb.h b/arch/powerpc/include/asm/book3s/64/hugetlb.h
index 12e150e615b7..aa1c67c8bfc8 100644
--- a/arch/powerpc/include/asm/book3s/64/hugetlb.h
+++ b/arch/powerpc/include/asm/book3s/64/hugetlb.h
@@ -1,6 +1,9 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_BOOK3S_64_HUGETLB_H
#define _ASM_POWERPC_BOOK3S_64_HUGETLB_H
+
+#include <asm/firmware.h>
+
/*
* For radix we want generic code to handle hugetlb. But then if we want
* both hash and radix to be enabled together we need to workaround the
@@ -8,10 +11,6 @@
*/
void radix__flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
void radix__local_flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
-extern unsigned long
-radix__hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
- unsigned long len, unsigned long pgoff,
- unsigned long flags);
extern void radix__huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep,
diff --git a/arch/powerpc/include/asm/book3s/64/kexec.h b/arch/powerpc/include/asm/book3s/64/kexec.h
new file mode 100644
index 000000000000..d4b9d476ecba
--- /dev/null
+++ b/arch/powerpc/include/asm/book3s/64/kexec.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _ASM_POWERPC_BOOK3S_64_KEXEC_H_
+#define _ASM_POWERPC_BOOK3S_64_KEXEC_H_
+
+#include <asm/plpar_wrappers.h>
+
+#define reset_sprs reset_sprs
+static inline void reset_sprs(void)
+{
+ if (cpu_has_feature(CPU_FTR_ARCH_206)) {
+ mtspr(SPRN_AMR, 0);
+ mtspr(SPRN_UAMOR, 0);
+ }
+
+ if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
+ mtspr(SPRN_IAMR, 0);
+ if (cpu_has_feature(CPU_FTR_HVMODE))
+ mtspr(SPRN_CIABR, 0);
+ else
+ plpar_set_ciabr(0);
+ }
+
+ /* Do we need isync()? We are going via a kexec reset */
+ isync();
+}
+
+#endif
diff --git a/arch/powerpc/include/asm/book3s/64/kup-radix.h b/arch/powerpc/include/asm/book3s/64/kup-radix.h
deleted file mode 100644
index 90dd3a3fc8c7..000000000000
--- a/arch/powerpc/include/asm/book3s/64/kup-radix.h
+++ /dev/null
@@ -1,134 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_POWERPC_BOOK3S_64_KUP_RADIX_H
-#define _ASM_POWERPC_BOOK3S_64_KUP_RADIX_H
-
-#include <linux/const.h>
-
-#define AMR_KUAP_BLOCK_READ UL(0x4000000000000000)
-#define AMR_KUAP_BLOCK_WRITE UL(0x8000000000000000)
-#define AMR_KUAP_BLOCKED (AMR_KUAP_BLOCK_READ | AMR_KUAP_BLOCK_WRITE)
-#define AMR_KUAP_SHIFT 62
-
-#ifdef __ASSEMBLY__
-
-.macro kuap_restore_amr gpr
-#ifdef CONFIG_PPC_KUAP
- BEGIN_MMU_FTR_SECTION_NESTED(67)
- ld \gpr, STACK_REGS_KUAP(r1)
- mtspr SPRN_AMR, \gpr
- END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_RADIX_KUAP, 67)
-#endif
-.endm
-
-.macro kuap_check_amr gpr1, gpr2
-#ifdef CONFIG_PPC_KUAP_DEBUG
- BEGIN_MMU_FTR_SECTION_NESTED(67)
- mfspr \gpr1, SPRN_AMR
- li \gpr2, (AMR_KUAP_BLOCKED >> AMR_KUAP_SHIFT)
- sldi \gpr2, \gpr2, AMR_KUAP_SHIFT
-999: tdne \gpr1, \gpr2
- EMIT_BUG_ENTRY 999b, __FILE__, __LINE__, (BUGFLAG_WARNING | BUGFLAG_ONCE)
- END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_RADIX_KUAP, 67)
-#endif
-.endm
-
-.macro kuap_save_amr_and_lock gpr1, gpr2, use_cr, msr_pr_cr
-#ifdef CONFIG_PPC_KUAP
- BEGIN_MMU_FTR_SECTION_NESTED(67)
- .ifnb \msr_pr_cr
- bne \msr_pr_cr, 99f
- .endif
- mfspr \gpr1, SPRN_AMR
- std \gpr1, STACK_REGS_KUAP(r1)
- li \gpr2, (AMR_KUAP_BLOCKED >> AMR_KUAP_SHIFT)
- sldi \gpr2, \gpr2, AMR_KUAP_SHIFT
- cmpd \use_cr, \gpr1, \gpr2
- beq \use_cr, 99f
- // We don't isync here because we very recently entered via rfid
- mtspr SPRN_AMR, \gpr2
- isync
-99:
- END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_RADIX_KUAP, 67)
-#endif
-.endm
-
-#else /* !__ASSEMBLY__ */
-
-#ifdef CONFIG_PPC_KUAP
-
-#include <asm/reg.h>
-
-/*
- * We support individually allowing read or write, but we don't support nesting
- * because that would require an expensive read/modify write of the AMR.
- */
-
-static inline unsigned long get_kuap(void)
-{
- if (!early_mmu_has_feature(MMU_FTR_RADIX_KUAP))
- return 0;
-
- return mfspr(SPRN_AMR);
-}
-
-static inline void set_kuap(unsigned long value)
-{
- if (!early_mmu_has_feature(MMU_FTR_RADIX_KUAP))
- return;
-
- /*
- * ISA v3.0B says we need a CSI (Context Synchronising Instruction) both
- * before and after the move to AMR. See table 6 on page 1134.
- */
- isync();
- mtspr(SPRN_AMR, value);
- isync();
-}
-
-static __always_inline void allow_user_access(void __user *to, const void __user *from,
- unsigned long size, unsigned long dir)
-{
- // This is written so we can resolve to a single case at build time
- BUILD_BUG_ON(!__builtin_constant_p(dir));
- if (dir == KUAP_READ)
- set_kuap(AMR_KUAP_BLOCK_WRITE);
- else if (dir == KUAP_WRITE)
- set_kuap(AMR_KUAP_BLOCK_READ);
- else if (dir == KUAP_READ_WRITE)
- set_kuap(0);
- else
- BUILD_BUG();
-}
-
-static inline void prevent_user_access(void __user *to, const void __user *from,
- unsigned long size, unsigned long dir)
-{
- set_kuap(AMR_KUAP_BLOCKED);
-}
-
-static inline unsigned long prevent_user_access_return(void)
-{
- unsigned long flags = get_kuap();
-
- set_kuap(AMR_KUAP_BLOCKED);
-
- return flags;
-}
-
-static inline void restore_user_access(unsigned long flags)
-{
- set_kuap(flags);
-}
-
-static inline bool
-bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
-{
- return WARN(mmu_has_feature(MMU_FTR_RADIX_KUAP) &&
- (regs->kuap & (is_write ? AMR_KUAP_BLOCK_WRITE : AMR_KUAP_BLOCK_READ)),
- "Bug: %s fault blocked by AMR!", is_write ? "Write" : "Read");
-}
-#endif /* CONFIG_PPC_KUAP */
-
-#endif /* __ASSEMBLY__ */
-
-#endif /* _ASM_POWERPC_BOOK3S_64_KUP_RADIX_H */
diff --git a/arch/powerpc/include/asm/book3s/64/kup.h b/arch/powerpc/include/asm/book3s/64/kup.h
new file mode 100644
index 000000000000..54cf46808157
--- /dev/null
+++ b/arch/powerpc/include/asm/book3s/64/kup.h
@@ -0,0 +1,427 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_BOOK3S_64_KUP_H
+#define _ASM_POWERPC_BOOK3S_64_KUP_H
+
+#include <linux/const.h>
+#include <asm/reg.h>
+
+#define AMR_KUAP_BLOCK_READ UL(0x5455555555555555)
+#define AMR_KUAP_BLOCK_WRITE UL(0xa8aaaaaaaaaaaaaa)
+#define AMR_KUEP_BLOCKED UL(0x5455555555555555)
+#define AMR_KUAP_BLOCKED (AMR_KUAP_BLOCK_READ | AMR_KUAP_BLOCK_WRITE)
+
+#ifdef __ASSEMBLY__
+
+.macro kuap_user_restore gpr1, gpr2
+#if defined(CONFIG_PPC_PKEY)
+ BEGIN_MMU_FTR_SECTION_NESTED(67)
+ b 100f // skip_restore_amr
+ END_MMU_FTR_SECTION_NESTED_IFCLR(MMU_FTR_PKEY, 67)
+ /*
+ * AMR and IAMR are going to be different when
+ * returning to userspace.
+ */
+ ld \gpr1, STACK_REGS_AMR(r1)
+
+ /*
+ * If kuap feature is not enabled, do the mtspr
+ * only if AMR value is different.
+ */
+ BEGIN_MMU_FTR_SECTION_NESTED(68)
+ mfspr \gpr2, SPRN_AMR
+ cmpd \gpr1, \gpr2
+ beq 99f
+ END_MMU_FTR_SECTION_NESTED_IFCLR(MMU_FTR_BOOK3S_KUAP, 68)
+
+ isync
+ mtspr SPRN_AMR, \gpr1
+99:
+ /*
+ * Restore IAMR only when returning to userspace
+ */
+ ld \gpr1, STACK_REGS_IAMR(r1)
+
+ /*
+ * If kuep feature is not enabled, do the mtspr
+ * only if IAMR value is different.
+ */
+ BEGIN_MMU_FTR_SECTION_NESTED(69)
+ mfspr \gpr2, SPRN_IAMR
+ cmpd \gpr1, \gpr2
+ beq 100f
+ END_MMU_FTR_SECTION_NESTED_IFCLR(MMU_FTR_BOOK3S_KUEP, 69)
+
+ isync
+ mtspr SPRN_IAMR, \gpr1
+
+100: //skip_restore_amr
+ /* No isync required, see kuap_user_restore() */
+#endif
+.endm
+
+.macro kuap_kernel_restore gpr1, gpr2
+#if defined(CONFIG_PPC_PKEY)
+
+ BEGIN_MMU_FTR_SECTION_NESTED(67)
+ /*
+ * AMR is going to be mostly the same since we are
+ * returning to the kernel. Compare and do a mtspr.
+ */
+ ld \gpr2, STACK_REGS_AMR(r1)
+ mfspr \gpr1, SPRN_AMR
+ cmpd \gpr1, \gpr2
+ beq 100f
+ isync
+ mtspr SPRN_AMR, \gpr2
+ /*
+ * No isync required, see kuap_restore_amr()
+ * No need to restore IAMR when returning to kernel space.
+ */
+100:
+ END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_BOOK3S_KUAP, 67)
+#endif
+.endm
+
+#ifdef CONFIG_PPC_KUAP
+.macro kuap_check_amr gpr1, gpr2
+#ifdef CONFIG_PPC_KUAP_DEBUG
+ BEGIN_MMU_FTR_SECTION_NESTED(67)
+ mfspr \gpr1, SPRN_AMR
+ /* Prevent access to userspace using any key values */
+ LOAD_REG_IMMEDIATE(\gpr2, AMR_KUAP_BLOCKED)
+999: tdne \gpr1, \gpr2
+ EMIT_WARN_ENTRY 999b, __FILE__, __LINE__, (BUGFLAG_WARNING | BUGFLAG_ONCE)
+ END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_BOOK3S_KUAP, 67)
+#endif
+.endm
+#endif
+
+/*
+ * if (pkey) {
+ *
+ * save AMR -> stack;
+ * if (kuap) {
+ * if (AMR != BLOCKED)
+ * KUAP_BLOCKED -> AMR;
+ * }
+ * if (from_user) {
+ * save IAMR -> stack;
+ * if (kuep) {
+ * KUEP_BLOCKED ->IAMR
+ * }
+ * }
+ * return;
+ * }
+ *
+ * if (kuap) {
+ * if (from_kernel) {
+ * save AMR -> stack;
+ * if (AMR != BLOCKED)
+ * KUAP_BLOCKED -> AMR;
+ * }
+ *
+ * }
+ */
+.macro kuap_save_amr_and_lock gpr1, gpr2, use_cr, msr_pr_cr
+#if defined(CONFIG_PPC_PKEY)
+
+ /*
+ * if both pkey and kuap are disabled, nothing to do
+ */
+ BEGIN_MMU_FTR_SECTION_NESTED(68)
+ b 100f // skip_save_amr
+ END_MMU_FTR_SECTION_NESTED_IFCLR(MMU_FTR_PKEY | MMU_FTR_BOOK3S_KUAP, 68)
+
+ /*
+ * if pkey is disabled and we are entering from userspace
+ * don't do anything.
+ */
+ BEGIN_MMU_FTR_SECTION_NESTED(67)
+ .ifnb \msr_pr_cr
+ /*
+ * Without pkey we are not changing AMR outside the kernel
+ * hence skip this completely.
+ */
+ bne \msr_pr_cr, 100f // from userspace
+ .endif
+ END_MMU_FTR_SECTION_NESTED_IFCLR(MMU_FTR_PKEY, 67)
+
+ /*
+ * pkey is enabled or pkey is disabled but entering from kernel
+ */
+ mfspr \gpr1, SPRN_AMR
+ std \gpr1, STACK_REGS_AMR(r1)
+
+ /*
+ * update kernel AMR with AMR_KUAP_BLOCKED only
+ * if KUAP feature is enabled
+ */
+ BEGIN_MMU_FTR_SECTION_NESTED(69)
+ LOAD_REG_IMMEDIATE(\gpr2, AMR_KUAP_BLOCKED)
+ cmpd \use_cr, \gpr1, \gpr2
+ beq \use_cr, 102f
+ /*
+ * We don't isync here because we very recently entered via an interrupt
+ */
+ mtspr SPRN_AMR, \gpr2
+ isync
+102:
+ END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_BOOK3S_KUAP, 69)
+
+ /*
+ * if entering from kernel we don't need to save IAMR
+ */
+ .ifnb \msr_pr_cr
+ beq \msr_pr_cr, 100f // from kernel space
+ mfspr \gpr1, SPRN_IAMR
+ std \gpr1, STACK_REGS_IAMR(r1)
+
+ /*
+ * update kernel IAMR with AMR_KUEP_BLOCKED only
+ * if KUEP feature is enabled
+ */
+ BEGIN_MMU_FTR_SECTION_NESTED(70)
+ LOAD_REG_IMMEDIATE(\gpr2, AMR_KUEP_BLOCKED)
+ mtspr SPRN_IAMR, \gpr2
+ isync
+ END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_BOOK3S_KUEP, 70)
+ .endif
+
+100: // skip_save_amr
+#endif
+.endm
+
+#else /* !__ASSEMBLY__ */
+
+#include <linux/jump_label.h>
+
+DECLARE_STATIC_KEY_FALSE(uaccess_flush_key);
+
+#ifdef CONFIG_PPC_PKEY
+
+extern u64 __ro_after_init default_uamor;
+extern u64 __ro_after_init default_amr;
+extern u64 __ro_after_init default_iamr;
+
+#include <asm/mmu.h>
+#include <asm/ptrace.h>
+
+/*
+ * Users of kthread_use_mm() should inherit the AMR value of the
+ * address space they operate on. But the AMR value is thread-specific,
+ * and we inherit the address space rather than the thread's access
+ * restrictions. Because of this, ignore the AMR value when accessing
+ * userspace via a kernel thread.
+ */
+static inline u64 current_thread_amr(void)
+{
+ if (current->thread.regs)
+ return current->thread.regs->amr;
+ return default_amr;
+}
+
+static inline u64 current_thread_iamr(void)
+{
+ if (current->thread.regs)
+ return current->thread.regs->iamr;
+ return default_iamr;
+}
+#endif /* CONFIG_PPC_PKEY */
+
+#ifdef CONFIG_PPC_KUAP
+
+static __always_inline bool kuap_is_disabled(void)
+{
+ return !mmu_has_feature(MMU_FTR_BOOK3S_KUAP);
+}
+
+static inline void kuap_user_restore(struct pt_regs *regs)
+{
+ bool restore_amr = false, restore_iamr = false;
+ unsigned long amr, iamr;
+
+ if (!mmu_has_feature(MMU_FTR_PKEY))
+ return;
+
+ if (!mmu_has_feature(MMU_FTR_BOOK3S_KUAP)) {
+ amr = mfspr(SPRN_AMR);
+ if (amr != regs->amr)
+ restore_amr = true;
+ } else {
+ restore_amr = true;
+ }
+
+ if (!mmu_has_feature(MMU_FTR_BOOK3S_KUEP)) {
+ iamr = mfspr(SPRN_IAMR);
+ if (iamr != regs->iamr)
+ restore_iamr = true;
+ } else {
+ restore_iamr = true;
+ }
+
+
+ if (restore_amr || restore_iamr) {
+ isync();
+ if (restore_amr)
+ mtspr(SPRN_AMR, regs->amr);
+ if (restore_iamr)
+ mtspr(SPRN_IAMR, regs->iamr);
+ }
+ /*
+ * No isync required here because we are about to rfi
+ * back to previous context before any user accesses
+ * would be made, which is a CSI.
+ */
+}
+
+static inline void __kuap_kernel_restore(struct pt_regs *regs, unsigned long amr)
+{
+ if (likely(regs->amr == amr))
+ return;
+
+ isync();
+ mtspr(SPRN_AMR, regs->amr);
+ /*
+ * No isync required here because we are about to rfi
+ * back to previous context before any user accesses
+ * would be made, which is a CSI.
+ *
+ * No need to restore IAMR when returning to kernel space.
+ */
+}
+
+static inline unsigned long __kuap_get_and_assert_locked(void)
+{
+ unsigned long amr = mfspr(SPRN_AMR);
+
+ if (IS_ENABLED(CONFIG_PPC_KUAP_DEBUG)) /* kuap_check_amr() */
+ WARN_ON_ONCE(amr != AMR_KUAP_BLOCKED);
+ return amr;
+}
+
+/* Do nothing, book3s/64 does that in ASM */
+static inline void __kuap_lock(void)
+{
+}
+
+static inline void __kuap_save_and_lock(struct pt_regs *regs)
+{
+}
+
+/*
+ * We support individually allowing read or write, but we don't support nesting
+ * because that would require an expensive read/modify write of the AMR.
+ */
+
+static inline unsigned long get_kuap(void)
+{
+ /*
+ * We return AMR_KUAP_BLOCKED when we don't support KUAP because
+ * prevent_user_access_return needs to return AMR_KUAP_BLOCKED to
+ * cause restore_user_access to do a flush.
+ *
+ * This has no effect in terms of actually blocking things on hash,
+ * so it doesn't break anything.
+ */
+ if (!mmu_has_feature(MMU_FTR_BOOK3S_KUAP))
+ return AMR_KUAP_BLOCKED;
+
+ return mfspr(SPRN_AMR);
+}
+
+static __always_inline void set_kuap(unsigned long value)
+{
+ if (!mmu_has_feature(MMU_FTR_BOOK3S_KUAP))
+ return;
+
+ /*
+ * ISA v3.0B says we need a CSI (Context Synchronising Instruction) both
+ * before and after the move to AMR. See table 6 on page 1134.
+ */
+ isync();
+ mtspr(SPRN_AMR, value);
+ isync();
+}
+
+static inline bool __bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
+{
+ /*
+ * For radix this will be a storage protection fault (DSISR_PROTFAULT).
+ * For hash this will be a key fault (DSISR_KEYFAULT)
+ */
+ /*
+ * We do have an exception table entry, but accessing userspace
+ * results in a fault. This could be because we didn't unlock the
+ * AMR, or because access is denied by a userspace key value that
+ * blocks it. We are only interested in catching the case of
+ * accessing without unlocking the AMR, hence check BLOCK_WRITE/READ
+ * against the AMR.
+ */
+ if (is_write) {
+ return (regs->amr & AMR_KUAP_BLOCK_WRITE) == AMR_KUAP_BLOCK_WRITE;
+ }
+ return (regs->amr & AMR_KUAP_BLOCK_READ) == AMR_KUAP_BLOCK_READ;
+}
+
+static __always_inline void allow_user_access(void __user *to, const void __user *from,
+ unsigned long size, unsigned long dir)
+{
+ unsigned long thread_amr = 0;
+
+ // This is written so we can resolve to a single case at build time
+ BUILD_BUG_ON(!__builtin_constant_p(dir));
+
+ if (mmu_has_feature(MMU_FTR_PKEY))
+ thread_amr = current_thread_amr();
+
+ if (dir == KUAP_READ)
+ set_kuap(thread_amr | AMR_KUAP_BLOCK_WRITE);
+ else if (dir == KUAP_WRITE)
+ set_kuap(thread_amr | AMR_KUAP_BLOCK_READ);
+ else if (dir == KUAP_READ_WRITE)
+ set_kuap(thread_amr);
+ else
+ BUILD_BUG();
+}
+
+#else /* CONFIG_PPC_KUAP */
+
+static inline unsigned long get_kuap(void)
+{
+ return AMR_KUAP_BLOCKED;
+}
+
+static inline void set_kuap(unsigned long value) { }
+
+static __always_inline void allow_user_access(void __user *to, const void __user *from,
+ unsigned long size, unsigned long dir)
+{ }
+
+#endif /* !CONFIG_PPC_KUAP */
+
+static __always_inline void prevent_user_access(unsigned long dir)
+{
+ set_kuap(AMR_KUAP_BLOCKED);
+ if (static_branch_unlikely(&uaccess_flush_key))
+ do_uaccess_flush();
+}
+
+static inline unsigned long prevent_user_access_return(void)
+{
+ unsigned long flags = get_kuap();
+
+ set_kuap(AMR_KUAP_BLOCKED);
+ if (static_branch_unlikely(&uaccess_flush_key))
+ do_uaccess_flush();
+
+ return flags;
+}
+
+static inline void restore_user_access(unsigned long flags)
+{
+ set_kuap(flags);
+ if (static_branch_unlikely(&uaccess_flush_key) && flags == AMR_KUAP_BLOCKED)
+ do_uaccess_flush();
+}
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_POWERPC_BOOK3S_64_KUP_H */
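
The 64-bit KUAP above is driven entirely by block masks written into the AMR; below is a small host-side model of how allow_user_access() composes the thread's pkey AMR with the block mask for the opposite direction, using the AMR_KUAP_* constants from the header. The thread AMR value is only an example, and SPRN_AMR is modelled by a plain variable.

#include <stdio.h>
#include <stdint.h>

/* Constants copied from book3s/64/kup.h above. */
#define AMR_KUAP_BLOCK_READ  0x5455555555555555ULL
#define AMR_KUAP_BLOCK_WRITE 0xa8aaaaaaaaaaaaaaULL
#define AMR_KUAP_BLOCKED     (AMR_KUAP_BLOCK_READ | AMR_KUAP_BLOCK_WRITE)

enum { KUAP_READ = 1, KUAP_WRITE = 2, KUAP_READ_WRITE = 3 };

static uint64_t amr;	/* stands in for SPRN_AMR */

static void set_kuap(uint64_t value) { amr = value; }

/* Same composition as allow_user_access(): keep the thread's pkey
 * restrictions and only open the direction that was asked for. */
static void allow_user_access(uint64_t thread_amr, int dir)
{
	if (dir == KUAP_READ)
		set_kuap(thread_amr | AMR_KUAP_BLOCK_WRITE);
	else if (dir == KUAP_WRITE)
		set_kuap(thread_amr | AMR_KUAP_BLOCK_READ);
	else
		set_kuap(thread_amr);
}

int main(void)
{
	uint64_t thread_amr = 0;	/* example: no per-thread pkey restrictions */

	allow_user_access(thread_amr, KUAP_READ);
	printf("AMR after allow-read: %016llx (writes still blocked: %d)\n",
	       (unsigned long long)amr,
	       (amr & AMR_KUAP_BLOCK_WRITE) == AMR_KUAP_BLOCK_WRITE);

	set_kuap(AMR_KUAP_BLOCKED);	/* prevent_user_access() */
	printf("AMR after prevent:    %016llx\n", (unsigned long long)amr);
	return 0;
}
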
diff --git a/arch/powerpc/include/asm/book3s/64/mmu-hash.h b/arch/powerpc/include/asm/book3s/64/mmu-hash.h
index 3fa1b962dc27..1c4eebbc69c9 100644
--- a/arch/powerpc/include/asm/book3s/64/mmu-hash.h
+++ b/arch/powerpc/include/asm/book3s/64/mmu-hash.h
@@ -18,7 +18,7 @@
* complete pgtable.h but only a portion of it.
*/
#include <asm/book3s/64/pgtable.h>
-#include <asm/bug.h>
+#include <asm/book3s/64/slice.h>
#include <asm/task_size_64.h>
#include <asm/cpu_has_feature.h>
@@ -86,8 +86,8 @@
#define HPTE_R_PP0 ASM_CONST(0x8000000000000000)
#define HPTE_R_TS ASM_CONST(0x4000000000000000)
#define HPTE_R_KEY_HI ASM_CONST(0x3000000000000000)
-#define HPTE_R_KEY_BIT0 ASM_CONST(0x2000000000000000)
-#define HPTE_R_KEY_BIT1 ASM_CONST(0x1000000000000000)
+#define HPTE_R_KEY_BIT4 ASM_CONST(0x2000000000000000)
+#define HPTE_R_KEY_BIT3 ASM_CONST(0x1000000000000000)
#define HPTE_R_RPN_SHIFT 12
#define HPTE_R_RPN ASM_CONST(0x0ffffffffffff000)
#define HPTE_R_RPN_3_0 ASM_CONST(0x01fffffffffff000)
@@ -103,8 +103,8 @@
#define HPTE_R_R ASM_CONST(0x0000000000000100)
#define HPTE_R_KEY_LO ASM_CONST(0x0000000000000e00)
#define HPTE_R_KEY_BIT2 ASM_CONST(0x0000000000000800)
-#define HPTE_R_KEY_BIT3 ASM_CONST(0x0000000000000400)
-#define HPTE_R_KEY_BIT4 ASM_CONST(0x0000000000000200)
+#define HPTE_R_KEY_BIT1 ASM_CONST(0x0000000000000400)
+#define HPTE_R_KEY_BIT0 ASM_CONST(0x0000000000000200)
#define HPTE_R_KEY (HPTE_R_KEY_LO | HPTE_R_KEY_HI)
#define HPTE_V_1TB_SEG ASM_CONST(0x4000000000000000)
@@ -452,7 +452,10 @@ static inline unsigned long hpt_hash(unsigned long vpn,
#define HPTE_LOCAL_UPDATE 0x1
#define HPTE_NOHPTE_UPDATE 0x2
+#define HPTE_USE_KERNEL_KEY 0x4
+long hpte_insert_repeating(unsigned long hash, unsigned long vpn, unsigned long pa,
+ unsigned long rflags, unsigned long vflags, int psize, int ssize);
extern int __hash_page_4K(unsigned long ea, unsigned long access,
unsigned long vsid, pte_t *ptep, unsigned long trap,
unsigned long flags, int ssize, int subpage_prot);
@@ -466,6 +469,8 @@ extern int hash_page_mm(struct mm_struct *mm, unsigned long ea,
unsigned long flags);
extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap,
unsigned long dsisr);
+void low_hash_fault(struct pt_regs *regs, unsigned long address, int rc);
+int __hash_page(unsigned long trap, unsigned long ea, unsigned long dsisr, unsigned long msr);
int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
pte_t *ptep, unsigned long trap, unsigned long flags,
int ssize, unsigned int shift, unsigned int mmu_psize);
@@ -519,7 +524,14 @@ void slb_save_contents(struct slb_entry *slb_ptr);
void slb_dump_contents(struct slb_entry *slb_ptr);
extern void slb_vmalloc_update(void);
-extern void slb_set_size(u16 size);
+void preload_new_slb_context(unsigned long start, unsigned long sp);
+
+#ifdef CONFIG_PPC_64S_HASH_MMU
+void slb_set_size(u16 size);
+#else
+static inline void slb_set_size(u16 size) { }
+#endif
+
#endif /* __ASSEMBLY__ */
/*
@@ -577,8 +589,8 @@ extern void slb_set_size(u16 size);
* For vmalloc and memmap, we use just one context with 512TB. With 64 byte
 * struct page size, we need only 32 TB in memmap for 2PB (51 bits (MAX_PHYSMEM_BITS)).
*/
-#if (MAX_PHYSMEM_BITS > MAX_EA_BITS_PER_CONTEXT)
-#define MAX_KERNEL_CTX_CNT (1UL << (MAX_PHYSMEM_BITS - MAX_EA_BITS_PER_CONTEXT))
+#if (H_MAX_PHYSMEM_BITS > MAX_EA_BITS_PER_CONTEXT)
+#define MAX_KERNEL_CTX_CNT (1UL << (H_MAX_PHYSMEM_BITS - MAX_EA_BITS_PER_CONTEXT))
#else
#define MAX_KERNEL_CTX_CNT 1
#endif
@@ -793,7 +805,7 @@ static inline unsigned long get_vsid(unsigned long context, unsigned long ea,
}
/*
- * For kernel space, we use context ids as below
+ * For kernel space, we use context ids as
* below. Range is 512TB per context.
*
* 0x00001 - [ 0xc000000000000000 - 0xc001ffffffffffff]
@@ -842,6 +854,32 @@ static inline unsigned long get_kernel_vsid(unsigned long ea, int ssize)
unsigned htab_shift_for_mem_size(unsigned long mem_size);
-#endif /* __ASSEMBLY__ */
+enum slb_index {
+ LINEAR_INDEX = 0, /* Kernel linear map (0xc000000000000000) */
+ KSTACK_INDEX = 1, /* Kernel stack map */
+};
+
+#define slb_esid_mask(ssize) \
+ (((ssize) == MMU_SEGSIZE_256M) ? ESID_MASK : ESID_MASK_1T)
+
+static inline unsigned long mk_esid_data(unsigned long ea, int ssize,
+ enum slb_index index)
+{
+ return (ea & slb_esid_mask(ssize)) | SLB_ESID_V | index;
+}
+
+static inline unsigned long __mk_vsid_data(unsigned long vsid, int ssize,
+ unsigned long flags)
+{
+ return (vsid << slb_vsid_shift(ssize)) | flags |
+ ((unsigned long)ssize << SLB_VSID_SSIZE_SHIFT);
+}
+static inline unsigned long mk_vsid_data(unsigned long ea, int ssize,
+ unsigned long flags)
+{
+ return __mk_vsid_data(get_kernel_vsid(ea, ssize), ssize, flags);
+}
+
+#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_BOOK3S_64_MMU_HASH_H_ */
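
A quick arithmetic check of the MAX_KERNEL_CTX_CNT change above, using only values visible in this patch (MAX_EA_BITS_PER_CONTEXT = 46, H_MAX_PHYSMEM_BITS = 46 by default or 51 with SPARSEMEM_VMEMMAP + SPARSEMEM_EXTREME on 64K pages): each kernel context covers 2^46 bytes (64TB), so 51-bit physical addressing needs 32 contexts for the linear map while the default layout still needs just one.

#include <stdio.h>

#define MAX_EA_BITS_PER_CONTEXT 46	/* from hash-4k.h / hash-64k.h */

/* Same expression as the MAX_KERNEL_CTX_CNT definition in mmu-hash.h. */
static unsigned long max_kernel_ctx_cnt(int h_max_physmem_bits)
{
	if (h_max_physmem_bits > MAX_EA_BITS_PER_CONTEXT)
		return 1UL << (h_max_physmem_bits - MAX_EA_BITS_PER_CONTEXT);
	return 1;
}

int main(void)
{
	printf("46-bit physmem -> %lu kernel context(s)\n", max_kernel_ctx_cnt(46));
	printf("51-bit physmem -> %lu kernel context(s)\n", max_kernel_ctx_cnt(51));
	return 0;
}
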
diff --git a/arch/powerpc/include/asm/book3s/64/mmu.h b/arch/powerpc/include/asm/book3s/64/mmu.h
index bb3deb76c951..570a4960cf17 100644
--- a/arch/powerpc/include/asm/book3s/64/mmu.h
+++ b/arch/powerpc/include/asm/book3s/64/mmu.h
@@ -19,6 +19,7 @@ struct mmu_psize_def {
int penc[MMU_PAGE_COUNT]; /* HPTE encoding */
unsigned int tlbiel; /* tlbiel supported for that page size */
unsigned long avpnm; /* bits to mask out in AVPN in the HPTE */
+ unsigned long h_rpt_pgsize; /* H_RPT_INVALIDATE page size encoding */
union {
unsigned long sllp; /* SLB L||LP (exact mask to use in slbmte) */
unsigned long ap; /* Ap encoding used by PowerISA 3.0 */
@@ -27,21 +28,6 @@ struct mmu_psize_def {
extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
#endif /* __ASSEMBLY__ */
-/*
- * If we store section details in page->flags we can't increase the MAX_PHYSMEM_BITS
- * if we increase SECTIONS_WIDTH we will not store node details in page->flags and
- * page_to_nid does a page->section->node lookup
- * Hence only increase for VMEMMAP. Further depending on SPARSEMEM_EXTREME reduce
- * memory requirements with large number of sections.
- * 51 bits is the max physical real address on POWER9
- */
-#if defined(CONFIG_SPARSEMEM_VMEMMAP) && defined(CONFIG_SPARSEMEM_EXTREME) && \
- defined(CONFIG_PPC_64K_PAGES)
-#define MAX_PHYSMEM_BITS 51
-#else
-#define MAX_PHYSMEM_BITS 46
-#endif
-
/* 64-bit classic hash table MMU */
#include <asm/book3s/64/mmu-hash.h>
@@ -76,19 +62,25 @@ extern struct patb_entry *partition_tb;
#define PRTS_MASK 0x1f /* process table size field */
#define PRTB_MASK 0x0ffffffffffff000UL
+/* Number of supported LPID bits */
+extern unsigned int mmu_lpid_bits;
+
/* Number of supported PID bits */
extern unsigned int mmu_pid_bits;
/* Base PID to allocate from */
extern unsigned int mmu_base_pid;
+/*
+ * memory block size used with radix translation.
+ */
+extern unsigned long __ro_after_init radix_mem_block_size;
+
#define PRTB_SIZE_SHIFT (mmu_pid_bits + 4)
#define PRTB_ENTRIES (1ul << mmu_pid_bits)
-/*
- * Power9 currently only support 64K partition table size.
- */
-#define PATB_SIZE_SHIFT 16
+#define PATB_SIZE_SHIFT (mmu_lpid_bits + 4)
+#define PATB_ENTRIES (1ul << mmu_lpid_bits)
typedef unsigned long mm_context_id_t;
struct spinlock;
@@ -107,7 +99,9 @@ typedef struct {
* from EA and new context ids to build the new VAs.
*/
mm_context_id_t id;
+#ifdef CONFIG_PPC_64S_HASH_MMU
mm_context_id_t extended_id[TASK_SIZE_USER64/TASK_CONTEXT_SIZE];
+#endif
};
/* Number of bits in the mm_cpumask */
@@ -116,9 +110,14 @@ typedef struct {
/* Number of users of the external (Nest) MMU */
atomic_t copros;
+ /* Number of user space windows opened in process mm_context */
+ atomic_t vas_windows;
+
+#ifdef CONFIG_PPC_64S_HASH_MMU
struct hash_mm_context *hash_context;
+#endif
- unsigned long vdso_base;
+ void __user *vdso;
/*
* pagetable fragment support
*/
@@ -139,6 +138,7 @@ typedef struct {
#endif
} mm_context_t;
+#ifdef CONFIG_PPC_64S_HASH_MMU
static inline u16 mm_ctx_user_psize(mm_context_t *ctx)
{
return ctx->hash_context->user_psize;
@@ -196,19 +196,32 @@ static inline struct subpage_prot_table *mm_ctx_subpage_prot(mm_context_t *ctx)
/*
* The current system page and segment sizes
*/
-extern int mmu_linear_psize;
extern int mmu_virtual_psize;
extern int mmu_vmalloc_psize;
-extern int mmu_vmemmap_psize;
extern int mmu_io_psize;
+#else /* CONFIG_PPC_64S_HASH_MMU */
+#ifdef CONFIG_PPC_64K_PAGES
+#define mmu_virtual_psize MMU_PAGE_64K
+#else
+#define mmu_virtual_psize MMU_PAGE_4K
+#endif
+#endif
+extern int mmu_linear_psize;
+extern int mmu_vmemmap_psize;
/* MMU initialization */
void mmu_early_init_devtree(void);
void hash__early_init_devtree(void);
void radix__early_init_devtree(void);
+#ifdef CONFIG_PPC_PKEY
+void pkey_early_init_devtree(void);
+#else
+static inline void pkey_early_init_devtree(void) {}
+#endif
+
extern void hash__early_init_mmu(void);
extern void radix__early_init_mmu(void);
-static inline void early_init_mmu(void)
+static inline void __init early_init_mmu(void)
{
if (radix_enabled())
return radix__early_init_mmu();
@@ -225,24 +238,38 @@ static inline void early_init_mmu_secondary(void)
extern void hash__setup_initial_memory_limit(phys_addr_t first_memblock_base,
phys_addr_t first_memblock_size);
-extern void radix__setup_initial_memory_limit(phys_addr_t first_memblock_base,
- phys_addr_t first_memblock_size);
static inline void setup_initial_memory_limit(phys_addr_t first_memblock_base,
phys_addr_t first_memblock_size)
{
- if (early_radix_enabled())
- return radix__setup_initial_memory_limit(first_memblock_base,
- first_memblock_size);
- return hash__setup_initial_memory_limit(first_memblock_base,
- first_memblock_size);
+ /*
+ * Hash has stricter restrictions. At this point we don't
+ * know which translation we will pick, hence go with the hash
+ * restrictions.
+ */
+ if (!early_radix_enabled())
+ hash__setup_initial_memory_limit(first_memblock_base,
+ first_memblock_size);
}
#ifdef CONFIG_PPC_PSERIES
-extern void radix_init_pseries(void);
+void __init radix_init_pseries(void);
#else
-static inline void radix_init_pseries(void) { };
+static inline void radix_init_pseries(void) { }
#endif
+#ifdef CONFIG_HOTPLUG_CPU
+#define arch_clear_mm_cpumask_cpu(cpu, mm) \
+ do { \
+ if (cpumask_test_cpu(cpu, mm_cpumask(mm))) { \
+ atomic_dec(&(mm)->context.active_cpus); \
+ cpumask_clear_cpu(cpu, mm_cpumask(mm)); \
+ } \
+ } while (0)
+
+void cleanup_cpu_mmu_context(void);
+#endif
+
+#ifdef CONFIG_PPC_64S_HASH_MMU
static inline int get_user_context(mm_context_t *ctx, unsigned long ea)
{
int index = ea >> MAX_EA_BITS_PER_CONTEXT;
@@ -262,6 +289,7 @@ static inline unsigned long get_user_vsid(mm_context_t *ctx,
return get_vsid(context, ea, ssize);
}
+#endif
#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_BOOK3S_64_MMU_H_ */
diff --git a/arch/powerpc/include/asm/book3s/64/pgalloc.h b/arch/powerpc/include/asm/book3s/64/pgalloc.h
index a41e91bd0580..dd2cff53a111 100644
--- a/arch/powerpc/include/asm/book3s/64/pgalloc.h
+++ b/arch/powerpc/include/asm/book3s/64/pgalloc.h
@@ -85,9 +85,9 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
kmem_cache_free(PGT_CACHE(PGD_INDEX_SIZE), pgd);
}
-static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
+static inline void p4d_populate(struct mm_struct *mm, p4d_t *pgd, pud_t *pud)
{
- *pgd = __pgd(__pgtable_ptr_val(pud) | PGD_VAL_BITS);
+ *pgd = __p4d(__pgtable_ptr_val(pud) | PGD_VAL_BITS);
}
static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
@@ -107,9 +107,25 @@ static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
return pud;
}
+static inline void __pud_free(pud_t *pud)
+{
+ struct page *page = virt_to_page(pud);
+
+ /*
+ * Early pud pages allocated via the memblock allocator
+ * can't be directly freed to slab. KFENCE pages have
+ * both the reserved and slab flags set, so they need to
+ * be freed with kmem_cache_free().
+ */
+ if (PageReserved(page) && !PageSlab(page))
+ free_reserved_page(page);
+ else
+ kmem_cache_free(PGT_CACHE(PUD_CACHE_INDEX), pud);
+}
+
static inline void pud_free(struct mm_struct *mm, pud_t *pud)
{
- kmem_cache_free(PGT_CACHE(PUD_CACHE_INDEX), pud);
+ return __pud_free(pud);
}
static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable-4k.h b/arch/powerpc/include/asm/book3s/64/pgtable-4k.h
index 4e697bc2f4cd..48f21820afe2 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable-4k.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable-4k.h
@@ -26,16 +26,6 @@ static inline int pud_huge(pud_t pud)
return 0;
}
-static inline int pgd_huge(pgd_t pgd)
-{
- /*
- * leaf pte for huge page
- */
- if (radix_enabled())
- return !!(pgd_raw(pgd) & cpu_to_be64(_PAGE_PTE));
- return 0;
-}
-#define pgd_huge pgd_huge
/*
 * With radix, we have hugepage ptes in the pud and pmd entries. We don't
* need to setup hugepage directory for them. Our pte and page directory format
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable-64k.h b/arch/powerpc/include/asm/book3s/64/pgtable-64k.h
index 34d1018896b3..2fce3498b000 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable-64k.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable-64k.h
@@ -30,15 +30,6 @@ static inline int pud_huge(pud_t pud)
return !!(pud_raw(pud) & cpu_to_be64(_PAGE_PTE));
}
-static inline int pgd_huge(pgd_t pgd)
-{
- /*
- * leaf pte for huge page
- */
- return !!(pgd_raw(pgd) & cpu_to_be64(_PAGE_PTE));
-}
-#define pgd_huge pgd_huge
-
/*
* With 64k page size, we have hugepage ptes in the pgd and pmd entries. We don't
* need to setup hugepage directory for them. Our pte and page directory format
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index 201a69e6a355..c436d8422654 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -2,17 +2,17 @@
#ifndef _ASM_POWERPC_BOOK3S_64_PGTABLE_H_
#define _ASM_POWERPC_BOOK3S_64_PGTABLE_H_
-#include <asm-generic/5level-fixup.h>
+#include <asm-generic/pgtable-nop4d.h>
#ifndef __ASSEMBLY__
#include <linux/mmdebug.h>
#include <linux/bug.h>
+#include <linux/sizes.h>
#endif
/*
* Common bits between hash and Radix page table
*/
-#define _PAGE_BIT_SWAP_TYPE 0
#define _PAGE_EXEC 0x00001 /* execute permission */
#define _PAGE_WRITE 0x00002 /* write access allowed */
@@ -32,11 +32,13 @@
#define _RPAGE_SW1 0x00800
#define _RPAGE_SW2 0x00400
#define _RPAGE_SW3 0x00200
-#define _RPAGE_RSV1 0x1000000000000000UL
-#define _RPAGE_RSV2 0x0800000000000000UL
-#define _RPAGE_RSV3 0x0400000000000000UL
-#define _RPAGE_RSV4 0x0200000000000000UL
-#define _RPAGE_RSV5 0x00040UL
+#define _RPAGE_RSV1 0x00040UL
+
+#define _RPAGE_PKEY_BIT4 0x1000000000000000UL
+#define _RPAGE_PKEY_BIT3 0x0800000000000000UL
+#define _RPAGE_PKEY_BIT2 0x0400000000000000UL
+#define _RPAGE_PKEY_BIT1 0x0200000000000000UL
+#define _RPAGE_PKEY_BIT0 0x0100000000000000UL
#define _PAGE_PTE 0x4000000000000000UL /* distinguishes PTEs from pointers */
#define _PAGE_PRESENT 0x8000000000000000UL /* pte contains a translation */
@@ -58,13 +60,12 @@
*/
#define _RPAGE_RPN0 0x01000
#define _RPAGE_RPN1 0x02000
-#define _RPAGE_RPN44 0x0100000000000000UL
#define _RPAGE_RPN43 0x0080000000000000UL
#define _RPAGE_RPN42 0x0040000000000000UL
#define _RPAGE_RPN41 0x0020000000000000UL
/* Max physical address bit as per radix table */
-#define _RPAGE_PA_MAX 57
+#define _RPAGE_PA_MAX 56
/*
* Max physical address bit we will use for now.
@@ -115,8 +116,8 @@
*/
#define _PAGE_KERNEL_RW (_PAGE_PRIVILEGED | _PAGE_RW | _PAGE_DIRTY)
#define _PAGE_KERNEL_RO (_PAGE_PRIVILEGED | _PAGE_READ)
-#define _PAGE_KERNEL_RWX (_PAGE_PRIVILEGED | _PAGE_DIRTY | \
- _PAGE_RW | _PAGE_EXEC)
+#define _PAGE_KERNEL_ROX (_PAGE_PRIVILEGED | _PAGE_READ | _PAGE_EXEC)
+#define _PAGE_KERNEL_RWX (_PAGE_PRIVILEGED | _PAGE_DIRTY | _PAGE_RW | _PAGE_EXEC)
/*
* _PAGE_CHG_MASK masks of bits that are to be preserved across
* pgprot changes
@@ -125,8 +126,6 @@
_PAGE_ACCESSED | _PAGE_SPECIAL | _PAGE_PTE | \
_PAGE_SOFT_DIRTY | _PAGE_DEVMAP)
-#define H_PTE_PKEY (H_PTE_PKEY_BIT0 | H_PTE_PKEY_BIT1 | H_PTE_PKEY_BIT2 | \
- H_PTE_PKEY_BIT3 | H_PTE_PKEY_BIT4)
/*
* We define 2 sets of base prot bits, one for basic pages (ie,
* cacheable kernel and user pages) and one for non cacheable
@@ -151,33 +150,17 @@
#define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_EXEC)
#define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_READ)
#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_EXEC)
+/* Radix only, Hash uses PAGE_READONLY_X + execute-only pkey instead */
+#define PAGE_EXECONLY __pgprot(_PAGE_BASE | _PAGE_EXEC)
/* Permission masks used for kernel mappings */
#define PAGE_KERNEL __pgprot(_PAGE_BASE | _PAGE_KERNEL_RW)
-#define PAGE_KERNEL_NC __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | \
- _PAGE_TOLERANT)
-#define PAGE_KERNEL_NCG __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | \
- _PAGE_NON_IDEMPOTENT)
+#define PAGE_KERNEL_NC __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | _PAGE_TOLERANT)
+#define PAGE_KERNEL_NCG __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | _PAGE_NON_IDEMPOTENT)
#define PAGE_KERNEL_X __pgprot(_PAGE_BASE | _PAGE_KERNEL_RWX)
#define PAGE_KERNEL_RO __pgprot(_PAGE_BASE | _PAGE_KERNEL_RO)
#define PAGE_KERNEL_ROX __pgprot(_PAGE_BASE | _PAGE_KERNEL_ROX)
-/*
- * Protection used for kernel text. We want the debuggers to be able to
- * set breakpoints anywhere, so don't write protect the kernel text
- * on platforms where such control is possible.
- */
-#if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH) || \
- defined(CONFIG_KPROBES) || defined(CONFIG_DYNAMIC_FTRACE)
-#define PAGE_KERNEL_TEXT PAGE_KERNEL_X
-#else
-#define PAGE_KERNEL_TEXT PAGE_KERNEL_ROX
-#endif
-
-/* Make modules code happy. We don't set RO yet */
-#define PAGE_KERNEL_EXEC PAGE_KERNEL_X
-#define PAGE_AGP (PAGE_KERNEL_NC)
-
#ifndef __ASSEMBLY__
/*
* page table defines
@@ -231,6 +214,12 @@ extern unsigned long __pmd_frag_size_shift;
#define PTRS_PER_PUD (1 << PUD_INDEX_SIZE)
#define PTRS_PER_PGD (1 << PGD_INDEX_SIZE)
+#define MAX_PTRS_PER_PTE ((H_PTRS_PER_PTE > R_PTRS_PER_PTE) ? H_PTRS_PER_PTE : R_PTRS_PER_PTE)
+#define MAX_PTRS_PER_PMD ((H_PTRS_PER_PMD > R_PTRS_PER_PMD) ? H_PTRS_PER_PMD : R_PTRS_PER_PMD)
+#define MAX_PTRS_PER_PUD ((H_PTRS_PER_PUD > R_PTRS_PER_PUD) ? H_PTRS_PER_PUD : R_PTRS_PER_PUD)
+#define MAX_PTRS_PER_PGD (1 << (H_PGD_INDEX_SIZE > RADIX_PGD_INDEX_SIZE ? \
+ H_PGD_INDEX_SIZE : RADIX_PGD_INDEX_SIZE))
+
/* PMD_SHIFT determines what a second-level page table entry can map */
#define PMD_SHIFT (PAGE_SHIFT + PTE_INDEX_SIZE)
#define PMD_SIZE (1UL << PMD_SHIFT)
@@ -251,7 +240,7 @@ extern unsigned long __pmd_frag_size_shift;
/* Bits to mask out from a PUD to get to the PMD page */
#define PUD_MASKED_BITS 0xc0000000000000ffUL
/* Bits to mask out from a PGD to get to the PUD page */
-#define PGD_MASKED_BITS 0xc0000000000000ffUL
+#define P4D_MASKED_BITS 0xc0000000000000ffUL
/*
* Used as an indicator for rcu callback functions
@@ -295,6 +284,13 @@ extern unsigned long pci_io_base;
#include <asm/book3s/64/hash.h>
#include <asm/book3s/64/radix.h>
+#if H_MAX_PHYSMEM_BITS > R_MAX_PHYSMEM_BITS
+#define MAX_PHYSMEM_BITS H_MAX_PHYSMEM_BITS
+#else
+#define MAX_PHYSMEM_BITS R_MAX_PHYSMEM_BITS
+#endif
+
+
#ifdef CONFIG_PPC_64K_PAGES
#include <asm/book3s/64/pgtable-64k.h>
#else
@@ -317,10 +313,8 @@ extern unsigned long pci_io_base;
#define PHB_IO_END (KERN_IO_START + FULL_IO_SIZE)
#define IOREMAP_BASE (PHB_IO_END)
#define IOREMAP_START (ioremap_bot)
-#define IOREMAP_END (KERN_IO_END)
-
-/* Advertise special mapping type for AGP */
-#define HAVE_PAGE_AGP
+#define IOREMAP_END (KERN_IO_END - FIXADDR_SIZE)
+#define FIXADDR_SIZE SZ_32M
#ifndef __ASSEMBLY__
@@ -382,11 +376,31 @@ static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define ptep_test_and_clear_young(__vma, __addr, __ptep) \
({ \
- int __r; \
- __r = __ptep_test_and_clear_young((__vma)->vm_mm, __addr, __ptep); \
- __r; \
+ __ptep_test_and_clear_young((__vma)->vm_mm, __addr, __ptep); \
})
+/*
+ * On Book3S CPUs, clearing the accessed bit without a TLB flush
+ * doesn't cause data corruption. [ It could cause incorrect
+ * page aging and the (mistaken) reclaim of hot pages, but the
+ * chance of that should be relatively low. ]
+ *
+ * So as a performance optimization don't flush the TLB when
+ * clearing the accessed bit, it will eventually be flushed by
+ * a context switch or a VM operation anyway. [ In the rare
+ * event of it not getting flushed for a long time the delay
+ * shouldn't really matter because there's no real memory
+ * pressure for swapout to react to. ]
+ *
+ * Note: this optimisation also exists in pte_needs_flush() and
+ * huge_pmd_needs_flush().
+ */
+#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
+#define ptep_clear_flush_young ptep_test_and_clear_young
+
+#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
+#define pmdp_clear_flush_young pmdp_test_and_clear_young
+
static inline int __pte_write(pte_t pte)
{
return !!(pte_raw(pte) & cpu_to_be64(_PAGE_WRITE));
@@ -553,6 +567,12 @@ static inline pte_t pte_clear_savedwrite(pte_t pte)
}
#endif /* CONFIG_NUMA_BALANCING */
+static inline bool pte_hw_valid(pte_t pte)
+{
+ return (pte_raw(pte) & cpu_to_be64(_PAGE_PRESENT | _PAGE_PTE)) ==
+ cpu_to_be64(_PAGE_PRESENT | _PAGE_PTE);
+}
+
static inline int pte_present(pte_t pte)
{
/*
@@ -561,12 +581,11 @@ static inline int pte_present(pte_t pte)
* invalid during ptep_set_access_flags. Hence we look for _PAGE_INVALID
* if we find _PAGE_PRESENT cleared.
*/
- return !!(pte_raw(pte) & cpu_to_be64(_PAGE_PRESENT | _PAGE_INVALID));
-}
-static inline bool pte_hw_valid(pte_t pte)
-{
- return !!(pte_raw(pte) & cpu_to_be64(_PAGE_PRESENT));
+ if (pte_hw_valid(pte))
+ return true;
+ return (pte_raw(pte) & cpu_to_be64(_PAGE_INVALID | _PAGE_PTE)) ==
+ cpu_to_be64(_PAGE_INVALID | _PAGE_PTE);
}
#ifdef CONFIG_PPC_MEM_KEYS
@@ -611,7 +630,7 @@ static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
VM_BUG_ON(pfn >> (64 - PAGE_SHIFT));
VM_BUG_ON((pfn << PAGE_SHIFT) & ~PTE_RPN_MASK);
- return __pte(((pte_basic_t)pfn << PAGE_SHIFT) | pgprot_val(pgprot));
+ return __pte(((pte_basic_t)pfn << PAGE_SHIFT) | pgprot_val(pgprot) | _PAGE_PTE);
}
static inline unsigned long pte_pfn(pte_t pte)
@@ -647,11 +666,6 @@ static inline pte_t pte_mkexec(pte_t pte)
return __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_EXEC));
}
-static inline pte_t pte_mkpte(pte_t pte)
-{
- return __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_PTE));
-}
-
static inline pte_t pte_mkwrite(pte_t pte)
{
/*
@@ -722,17 +736,17 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
* Don't have overlapping bits with _PAGE_HPTEFLAGS \
* We filter HPTEFLAGS on set_pte. \
*/ \
- BUILD_BUG_ON(_PAGE_HPTEFLAGS & (0x1f << _PAGE_BIT_SWAP_TYPE)); \
+ BUILD_BUG_ON(_PAGE_HPTEFLAGS & SWP_TYPE_MASK); \
BUILD_BUG_ON(_PAGE_HPTEFLAGS & _PAGE_SWP_SOFT_DIRTY); \
+ BUILD_BUG_ON(_PAGE_HPTEFLAGS & _PAGE_SWP_EXCLUSIVE); \
} while (0)
#define SWP_TYPE_BITS 5
-#define __swp_type(x) (((x).val >> _PAGE_BIT_SWAP_TYPE) \
- & ((1UL << SWP_TYPE_BITS) - 1))
+#define SWP_TYPE_MASK ((1UL << SWP_TYPE_BITS) - 1)
+#define __swp_type(x) ((x).val & SWP_TYPE_MASK)
#define __swp_offset(x) (((x).val & PTE_RPN_MASK) >> PAGE_SHIFT)
#define __swp_entry(type, offset) ((swp_entry_t) { \
- ((type) << _PAGE_BIT_SWAP_TYPE) \
- | (((offset) << PAGE_SHIFT) & PTE_RPN_MASK)})
+ (type) | (((offset) << PAGE_SHIFT) & PTE_RPN_MASK)})
/*
* swp_entry_t must be independent of pte bits. We build a swp_entry_t from
* swap type and offset we get from swap and convert that to pte to find a
@@ -745,11 +759,13 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
#define __swp_entry_to_pmd(x) (pte_pmd(__swp_entry_to_pte(x)))
#ifdef CONFIG_MEM_SOFT_DIRTY
-#define _PAGE_SWP_SOFT_DIRTY (1UL << (SWP_TYPE_BITS + _PAGE_BIT_SWAP_TYPE))
+#define _PAGE_SWP_SOFT_DIRTY _PAGE_SOFT_DIRTY
#else
#define _PAGE_SWP_SOFT_DIRTY 0UL
#endif /* CONFIG_MEM_SOFT_DIRTY */
+#define _PAGE_SWP_EXCLUSIVE _PAGE_NON_IDEMPOTENT
+
#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
{
@@ -767,6 +783,22 @@ static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
}
#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */
+#define __HAVE_ARCH_PTE_SWP_EXCLUSIVE
+static inline pte_t pte_swp_mkexclusive(pte_t pte)
+{
+ return __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_SWP_EXCLUSIVE));
+}
+
+static inline int pte_swp_exclusive(pte_t pte)
+{
+ return !!(pte_raw(pte) & cpu_to_be64(_PAGE_SWP_EXCLUSIVE));
+}
+
+static inline pte_t pte_swp_clear_exclusive(pte_t pte)
+{
+ return __pte_raw(pte_raw(pte) & cpu_to_be64(~_PAGE_SWP_EXCLUSIVE));
+}
+
static inline bool check_pte_access(unsigned long access, unsigned long ptev)
{
/*
@@ -815,6 +847,14 @@ static inline int pte_none(pte_t pte)
static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte, int percpu)
{
+
+ VM_WARN_ON(!(pte_raw(pte) & cpu_to_be64(_PAGE_PTE)));
+ /*
+ * Keep the _PAGE_PTE added till we are sure we handle _PAGE_PTE
+	 * Keep adding _PAGE_PTE here until we are sure that all the
+	 * callers handle _PAGE_PTE themselves.
+ pte = __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_PTE));
+
if (radix_enabled())
return radix__set_pte_at(mm, addr, ptep, pte, percpu);
return hash__set_pte_at(mm, addr, ptep, pte, percpu);
@@ -862,6 +902,13 @@ static inline bool pte_ci(pte_t pte)
static inline void pmd_clear(pmd_t *pmdp)
{
+ if (IS_ENABLED(CONFIG_DEBUG_VM) && !radix_enabled()) {
+ /*
+ * Don't use this if we can possibly have a hash page table
+ * entry mapping this.
+ */
+ WARN_ON((pmd_val(*pmdp) & (H_PAGE_HASHPTE | _PAGE_PTE)) == (H_PAGE_HASHPTE | _PAGE_PTE));
+ }
*pmdp = __pmd(0);
}
@@ -910,6 +957,13 @@ static inline int pmd_bad(pmd_t pmd)
static inline void pud_clear(pud_t *pudp)
{
+ if (IS_ENABLED(CONFIG_DEBUG_VM) && !radix_enabled()) {
+ /*
+ * Don't use this if we can possibly have a hash page table
+ * entry mapping this.
+ */
+ WARN_ON((pud_val(*pudp) & (H_PAGE_HASHPTE | _PAGE_PTE)) == (H_PAGE_HASHPTE | _PAGE_PTE));
+ }
*pudp = __pud(0);
}
@@ -949,81 +1003,66 @@ static inline bool pud_access_permitted(pud_t pud, bool write)
return pte_access_permitted(pud_pte(pud), write);
}
-#define pgd_write(pgd) pte_write(pgd_pte(pgd))
+#define __p4d_raw(x) ((p4d_t) { __pgd_raw(x) })
+static inline __be64 p4d_raw(p4d_t x)
+{
+ return pgd_raw(x.pgd);
+}
+
+#define p4d_write(p4d) pte_write(p4d_pte(p4d))
-static inline void pgd_clear(pgd_t *pgdp)
+static inline void p4d_clear(p4d_t *p4dp)
{
- *pgdp = __pgd(0);
+ *p4dp = __p4d(0);
}
-static inline int pgd_none(pgd_t pgd)
+static inline int p4d_none(p4d_t p4d)
{
- return !pgd_raw(pgd);
+ return !p4d_raw(p4d);
}
-static inline int pgd_present(pgd_t pgd)
+static inline int p4d_present(p4d_t p4d)
{
- return !!(pgd_raw(pgd) & cpu_to_be64(_PAGE_PRESENT));
+ return !!(p4d_raw(p4d) & cpu_to_be64(_PAGE_PRESENT));
}
-static inline pte_t pgd_pte(pgd_t pgd)
+static inline pte_t p4d_pte(p4d_t p4d)
{
- return __pte_raw(pgd_raw(pgd));
+ return __pte_raw(p4d_raw(p4d));
}
-static inline pgd_t pte_pgd(pte_t pte)
+static inline p4d_t pte_p4d(pte_t pte)
{
- return __pgd_raw(pte_raw(pte));
+ return __p4d_raw(pte_raw(pte));
}
-static inline int pgd_bad(pgd_t pgd)
+static inline int p4d_bad(p4d_t p4d)
{
if (radix_enabled())
- return radix__pgd_bad(pgd);
- return hash__pgd_bad(pgd);
+ return radix__p4d_bad(p4d);
+ return hash__p4d_bad(p4d);
}
-#define pgd_access_permitted pgd_access_permitted
-static inline bool pgd_access_permitted(pgd_t pgd, bool write)
+#define p4d_access_permitted p4d_access_permitted
+static inline bool p4d_access_permitted(p4d_t p4d, bool write)
{
- return pte_access_permitted(pgd_pte(pgd), write);
+ return pte_access_permitted(p4d_pte(p4d), write);
}
-extern struct page *pgd_page(pgd_t pgd);
+extern struct page *p4d_page(p4d_t p4d);
/* Pointers in the page table tree are physical addresses */
#define __pgtable_ptr_val(ptr) __pa(ptr)
-#define pmd_page_vaddr(pmd) __va(pmd_val(pmd) & ~PMD_MASKED_BITS)
-#define pud_page_vaddr(pud) __va(pud_val(pud) & ~PUD_MASKED_BITS)
-#define pgd_page_vaddr(pgd) __va(pgd_val(pgd) & ~PGD_MASKED_BITS)
-
-#define pgd_index(address) (((address) >> (PGDIR_SHIFT)) & (PTRS_PER_PGD - 1))
-#define pud_index(address) (((address) >> (PUD_SHIFT)) & (PTRS_PER_PUD - 1))
-#define pmd_index(address) (((address) >> (PMD_SHIFT)) & (PTRS_PER_PMD - 1))
-#define pte_index(address) (((address) >> (PAGE_SHIFT)) & (PTRS_PER_PTE - 1))
-
-/*
- * Find an entry in a page-table-directory. We combine the address region
- * (the high order N bits) and the pgd portion of the address.
- */
-
-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
-
-#define pud_offset(pgdp, addr) \
- (((pud_t *) pgd_page_vaddr(*(pgdp))) + pud_index(addr))
-#define pmd_offset(pudp,addr) \
- (((pmd_t *) pud_page_vaddr(*(pudp))) + pmd_index(addr))
-#define pte_offset_kernel(dir,addr) \
- (((pte_t *) pmd_page_vaddr(*(dir))) + pte_index(addr))
-
-#define pte_offset_map(dir,addr) pte_offset_kernel((dir), (addr))
-
-static inline void pte_unmap(pte_t *pte) { }
+static inline pud_t *p4d_pgtable(p4d_t p4d)
+{
+ return (pud_t *)__va(p4d_val(p4d) & ~P4D_MASKED_BITS);
+}
-/* to find an entry in a kernel page-table-directory */
-/* This now only contains the vmalloc pages */
-#define pgd_offset_k(address) pgd_offset(&init_mm, address)
+static inline pmd_t *pud_pgtable(pud_t pud)
+{
+ return (pmd_t *)__va(pud_val(pud) & ~PUD_MASKED_BITS);
+}
#define pte_ERROR(e) \
pr_err("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
@@ -1046,6 +1085,8 @@ static inline int map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t p
return hash__map_kernel_page(ea, pa, prot);
}
+void unmap_kernel_page(unsigned long va);
+
static inline int __meminit vmemmap_create_mapping(unsigned long start,
unsigned long page_size,
unsigned long phys)
@@ -1065,6 +1106,16 @@ static inline void vmemmap_remove_mapping(unsigned long start,
}
#endif
+#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KFENCE)
+static inline void __kernel_map_pages(struct page *page, int numpages, int enable)
+{
+ if (radix_enabled())
+ radix__kernel_map_pages(page, numpages, enable);
+ else
+ hash__kernel_map_pages(page, numpages, enable);
+}
+#endif
+
static inline pte_t pmd_pte(pmd_t pmd)
{
return __pte_raw(pmd_raw(pmd));
@@ -1139,8 +1190,11 @@ extern pmd_t mk_pmd(struct page *page, pgprot_t pgprot);
extern pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot);
extern void set_pmd_at(struct mm_struct *mm, unsigned long addr,
pmd_t *pmdp, pmd_t pmd);
-extern void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
- pmd_t *pmd);
+static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
+ unsigned long addr, pmd_t *pmd)
+{
+}
+
extern int hash__has_transparent_hugepage(void);
static inline int has_transparent_hugepage(void)
{
@@ -1168,10 +1222,6 @@ static inline int pmd_large(pmd_t pmd)
return !!(pmd_raw(pmd) & cpu_to_be64(_PAGE_PTE));
}
-static inline pmd_t pmd_mknotpresent(pmd_t pmd)
-{
- return __pmd(pmd_val(pmd) & ~_PAGE_PRESENT);
-}
/*
* For radix we should always find H_PAGE_HASHPTE zero. Hence
* the below will work for radix too
@@ -1206,7 +1256,7 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm, unsigned long addr,
* should return true.
* We should not call this on a hugetlb entry. We should check for HugeTLB
* entry using vma->vm_flags
- * The page table walk rule is explained in Documentation/vm/transhuge.rst
+ * The page table walk rule is explained in Documentation/mm/transhuge.rst
*/
static inline int pmd_trans_huge(pmd_t pmd)
{
@@ -1226,13 +1276,28 @@ static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
return hash__pmd_same(pmd_a, pmd_b);
}
-static inline pmd_t pmd_mkhuge(pmd_t pmd)
+static inline pmd_t __pmd_mkhuge(pmd_t pmd)
{
if (radix_enabled())
return radix__pmd_mkhuge(pmd);
return hash__pmd_mkhuge(pmd);
}
+/*
+ * pfn_pmd() returns a pmd_t that can be used as a pmd pte entry.
+ */
+static inline pmd_t pmd_mkhuge(pmd_t pmd)
+{
+#ifdef CONFIG_DEBUG_VM
+ if (radix_enabled())
+ WARN_ON((pmd_raw(pmd) & cpu_to_be64(_PAGE_PTE)) == 0);
+ else
+ WARN_ON((pmd_raw(pmd) & cpu_to_be64(_PAGE_PTE | H_PAGE_THP_HUGE)) !=
+ cpu_to_be64(_PAGE_PTE | H_PAGE_THP_HUGE));
+#endif
+ return pmd;
+}
+
#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
extern int pmdp_set_access_flags(struct vm_area_struct *vma,
unsigned long address, pmd_t *pmdp,
@@ -1260,6 +1325,11 @@ static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
}
#define pmdp_collapse_flush pmdp_collapse_flush
+#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL
+pmd_t pmdp_huge_get_and_clear_full(struct vm_area_struct *vma,
+ unsigned long addr,
+ pmd_t *pmdp, int full);
+
#define __HAVE_ARCH_PGTABLE_DEPOSIT
static inline void pgtable_trans_huge_deposit(struct mm_struct *mm,
pmd_t *pmdp, pgtable_t pgtable)
@@ -1303,7 +1373,9 @@ extern void serialize_against_pte_lookup(struct mm_struct *mm);
static inline pmd_t pmd_mkdevmap(pmd_t pmd)
{
- return __pmd(pmd_val(pmd) | (_PAGE_PTE | _PAGE_DEVMAP));
+ if (radix_enabled())
+ return radix__pmd_mkdevmap(pmd);
+ return hash__pmd_mkdevmap(pmd);
}
static inline int pmd_devmap(pmd_t pmd)
@@ -1368,12 +1440,5 @@ static inline bool pud_is_leaf(pud_t pud)
return !!(pud_raw(pud) & cpu_to_be64(_PAGE_PTE));
}
-#define pgd_is_leaf pgd_is_leaf
-#define pgd_leaf pgd_is_leaf
-static inline bool pgd_is_leaf(pgd_t pgd)
-{
- return !!(pgd_raw(pgd) & cpu_to_be64(_PAGE_PTE));
-}
-
#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_BOOK3S_64_PGTABLE_H_ */
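The swap-entry hunk above stops shifting the swap type by _PAGE_BIT_SWAP_TYPE and instead keeps it in the low SWP_TYPE_BITS of the value, with the offset still held in the RPN field. A minimal user-space sketch of that packing; PAGE_SHIFT and the RPN mask here are assumed stand-ins, not the kernel's real constants:

/*
 * Stand-alone sketch of the reworked swap-entry layout: the swap type
 * lives in the low SWP_TYPE_BITS, the offset in the RPN field.
 */
#include <assert.h>
#include <stdio.h>

#define DEMO_PAGE_SHIFT   16                        /* assumed: 64K pages */
#define SWP_TYPE_BITS     5
#define SWP_TYPE_MASK     ((1ULL << SWP_TYPE_BITS) - 1)
#define DEMO_RPN_MASK     0x01fffffffffff000ULL     /* stand-in for PTE_RPN_MASK */

static unsigned long long demo_swp_entry(unsigned long long type,
					 unsigned long long offset)
{
	return type | ((offset << DEMO_PAGE_SHIFT) & DEMO_RPN_MASK);
}

static unsigned long long demo_swp_type(unsigned long long val)
{
	return val & SWP_TYPE_MASK;
}

static unsigned long long demo_swp_offset(unsigned long long val)
{
	return (val & DEMO_RPN_MASK) >> DEMO_PAGE_SHIFT;
}

int main(void)
{
	unsigned long long e = demo_swp_entry(3, 0x1234);

	assert(demo_swp_type(e) == 3);
	assert(demo_swp_offset(e) == 0x1234);
	printf("entry=%#llx type=%llu offset=%#llx\n",
	       e, demo_swp_type(e), demo_swp_offset(e));
	return 0;
}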
diff --git a/arch/powerpc/include/asm/book3s/64/pkeys.h b/arch/powerpc/include/asm/book3s/64/pkeys.h
new file mode 100644
index 000000000000..5b178139f3c0
--- /dev/null
+++ b/arch/powerpc/include/asm/book3s/64/pkeys.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef _ASM_POWERPC_BOOK3S_64_PKEYS_H
+#define _ASM_POWERPC_BOOK3S_64_PKEYS_H
+
+#include <asm/book3s/64/hash-pkey.h>
+
+static inline u64 vmflag_to_pte_pkey_bits(u64 vm_flags)
+{
+ if (!mmu_has_feature(MMU_FTR_PKEY))
+ return 0x0UL;
+
+ if (radix_enabled())
+ BUG();
+ return hash__vmflag_to_pte_pkey_bits(vm_flags);
+}
+
+static inline u16 pte_to_pkey_bits(u64 pteflags)
+{
+ if (radix_enabled())
+ BUG();
+ return hash__pte_to_pkey_bits(pteflags);
+}
+
+#endif /* _ASM_POWERPC_BOOK3S_64_PKEYS_H */
diff --git a/arch/powerpc/include/asm/book3s/64/radix-4k.h b/arch/powerpc/include/asm/book3s/64/radix-4k.h
index d5f5ab73dc7f..035ceecd6d67 100644
--- a/arch/powerpc/include/asm/book3s/64/radix-4k.h
+++ b/arch/powerpc/include/asm/book3s/64/radix-4k.h
@@ -11,7 +11,7 @@
#define RADIX_PGD_INDEX_SIZE 13 // size: 8B << 13 = 64KB, maps 2^13 x 512GB = 4PB
/*
- * One fragment per per page
+ * One fragment per page
*/
#define RADIX_PTE_FRAG_SIZE_SHIFT (RADIX_PTE_INDEX_SIZE + 3)
#define RADIX_PTE_FRAG_NR (PAGE_SIZE >> RADIX_PTE_FRAG_SIZE_SHIFT)
diff --git a/arch/powerpc/include/asm/book3s/64/radix.h b/arch/powerpc/include/asm/book3s/64/radix.h
index d97db3ad9aae..686001eda936 100644
--- a/arch/powerpc/include/asm/book3s/64/radix.h
+++ b/arch/powerpc/include/asm/book3s/64/radix.h
@@ -30,11 +30,16 @@
/* Don't have anything in the reserved bits and leaf bits */
#define RADIX_PMD_BAD_BITS 0x60000000000000e0UL
#define RADIX_PUD_BAD_BITS 0x60000000000000e0UL
-#define RADIX_PGD_BAD_BITS 0x60000000000000e0UL
+#define RADIX_P4D_BAD_BITS 0x60000000000000e0UL
#define RADIX_PMD_SHIFT (PAGE_SHIFT + RADIX_PTE_INDEX_SIZE)
#define RADIX_PUD_SHIFT (RADIX_PMD_SHIFT + RADIX_PMD_INDEX_SIZE)
#define RADIX_PGD_SHIFT (RADIX_PUD_SHIFT + RADIX_PUD_INDEX_SIZE)
+
+#define R_PTRS_PER_PTE (1 << RADIX_PTE_INDEX_SIZE)
+#define R_PTRS_PER_PMD (1 << RADIX_PMD_INDEX_SIZE)
+#define R_PTRS_PER_PUD (1 << RADIX_PUD_INDEX_SIZE)
+
/*
* Size of EA range mapped by our pagetables.
*/
@@ -68,11 +73,11 @@
*
*
* 3rd quadrant expanded:
- * +------------------------------+
+ * +------------------------------+ Highest address (0xc010000000000000)
+ * +------------------------------+ KASAN shadow end (0xc00fc00000000000)
* | |
* | |
- * | |
- * +------------------------------+ Kernel vmemmap end (0xc010000000000000)
+ * +------------------------------+ Kernel vmemmap end/shadow start (0xc00e000000000000)
* | |
* | 512TB |
* | |
@@ -91,6 +96,23 @@
* +------------------------------+ Kernel linear (0xc.....)
*/
+/* For the sizes of the shadow area, see kasan.h */
+
+/*
+ * If we store section details in page->flags, we can't increase MAX_PHYSMEM_BITS:
+ * increasing SECTIONS_WIDTH means node details no longer fit in page->flags and
+ * page_to_nid() has to do a page->section->node lookup instead.
+ * Hence only increase the limit for VMEMMAP; further, depending on SPARSEMEM_EXTREME,
+ * this reduces the memory requirements for a large number of sections.
+ * 51 bits is the max physical real address on POWER9.
+ */
+
+#if defined(CONFIG_SPARSEMEM_VMEMMAP) && defined(CONFIG_SPARSEMEM_EXTREME)
+#define R_MAX_PHYSMEM_BITS 51
+#else
+#define R_MAX_PHYSMEM_BITS 46
+#endif
+
#define RADIX_KERN_VIRT_START ASM_CONST(0xc008000000000000)
/*
* 49 = MAX_EA_BITS_PER_CONTEXT (hash specific). To make sure we pick
@@ -206,8 +228,10 @@ static inline void radix__set_pte_at(struct mm_struct *mm, unsigned long addr,
* from ptesync, it should probably go into update_mmu_cache, rather
* than set_pte_at (which is used to set ptes unrelated to faults).
*
- * Spurious faults to vmalloc region are not tolerated, so there is
- * a ptesync in flush_cache_vmap.
+ * Spurious faults from the kernel memory are not tolerated, so there
+ * is a ptesync in flush_cache_vmap, and __map_kernel_page() follows
+ * the pte update sequence from ISA Book III 6.10 Translation Table
+ * Update Synchronization Requirements.
*/
}
@@ -227,9 +251,9 @@ static inline int radix__pud_bad(pud_t pud)
}
-static inline int radix__pgd_bad(pgd_t pgd)
+static inline int radix__p4d_bad(p4d_t p4d)
{
- return !!(pgd_val(pgd) & RADIX_PGD_BAD_BITS);
+ return !!(p4d_val(p4d) & RADIX_P4D_BAD_BITS);
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -263,6 +287,11 @@ static inline int radix__has_transparent_hugepage(void)
}
#endif
+static inline pmd_t radix__pmd_mkdevmap(pmd_t pmd)
+{
+ return __pmd(pmd_val(pmd) | (_PAGE_PTE | _PAGE_DEVMAP));
+}
+
extern int __meminit radix__vmemmap_create_mapping(unsigned long start,
unsigned long page_size,
unsigned long phys);
@@ -289,8 +318,12 @@ static inline unsigned long radix__get_tree_size(void)
}
#ifdef CONFIG_MEMORY_HOTPLUG
-int radix__create_section_mapping(unsigned long start, unsigned long end, int nid);
+int radix__create_section_mapping(unsigned long start, unsigned long end,
+ int nid, pgprot_t prot);
int radix__remove_section_mapping(unsigned long start, unsigned long end);
#endif /* CONFIG_MEMORY_HOTPLUG */
+
+void radix__kernel_map_pages(struct page *page, int numpages, int enable);
+
#endif /* __ASSEMBLY__ */
#endif
diff --git a/arch/powerpc/include/asm/book3s/64/slice.h b/arch/powerpc/include/asm/book3s/64/slice.h
index f0d3194ba41b..5fbe18544cbd 100644
--- a/arch/powerpc/include/asm/book3s/64/slice.h
+++ b/arch/powerpc/include/asm/book3s/64/slice.h
@@ -2,6 +2,16 @@
#ifndef _ASM_POWERPC_BOOK3S_64_SLICE_H
#define _ASM_POWERPC_BOOK3S_64_SLICE_H
+#ifndef __ASSEMBLY__
+
+#ifdef CONFIG_PPC_64S_HASH_MMU
+#ifdef CONFIG_HUGETLB_PAGE
+#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
+#endif
+#define HAVE_ARCH_UNMAPPED_AREA
+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
+#endif
+
#define SLICE_LOW_SHIFT 28
#define SLICE_LOW_TOP (0x100000000ul)
#define SLICE_NUM_LOW (SLICE_LOW_TOP >> SLICE_LOW_SHIFT)
@@ -13,4 +23,20 @@
#define SLB_ADDR_LIMIT_DEFAULT DEFAULT_MAP_WINDOW_USER64
+struct mm_struct;
+
+unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
+ unsigned long flags, unsigned int psize,
+ int topdown);
+
+unsigned int get_slice_psize(struct mm_struct *mm, unsigned long addr);
+
+void slice_set_range_psize(struct mm_struct *mm, unsigned long start,
+ unsigned long len, unsigned int psize);
+
+void slice_init_new_context_exec(struct mm_struct *mm);
+void slice_setup_new_exec(void);
+
+#endif /* __ASSEMBLY__ */
+
#endif /* _ASM_POWERPC_BOOK3S_64_SLICE_H */
diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush-hash.h b/arch/powerpc/include/asm/book3s/64/tlbflush-hash.h
index 64d02a704bcb..751921f6db46 100644
--- a/arch/powerpc/include/asm/book3s/64/tlbflush-hash.h
+++ b/arch/powerpc/include/asm/book3s/64/tlbflush-hash.h
@@ -32,6 +32,11 @@ static inline void arch_enter_lazy_mmu_mode(void)
if (radix_enabled())
return;
+ /*
+	 * apply_to_page_range() can call us with preempt enabled when
+	 * operating on kernel page tables.
+ */
+ preempt_disable();
batch = this_cpu_ptr(&ppc64_tlb_batch);
batch->active = 1;
}
@@ -47,6 +52,7 @@ static inline void arch_leave_lazy_mmu_mode(void)
if (batch->index)
__flush_tlb_pending(batch);
batch->active = 0;
+ preempt_enable();
}
#define arch_flush_lazy_mmu_mode() do {} while (0)
@@ -112,9 +118,12 @@ static inline void hash__flush_tlb_kernel_range(unsigned long start,
struct mmu_gather;
extern void hash__tlb_flush(struct mmu_gather *tlb);
+
+#ifdef CONFIG_PPC_64S_HASH_MMU
/* Private function for use by PCI IO mapping code */
-extern void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
- unsigned long end);
-extern void flush_tlb_pmd_range(struct mm_struct *mm, pmd_t *pmd,
- unsigned long addr);
+extern void __flush_hash_table_range(unsigned long start, unsigned long end);
+void flush_hash_table_pmd_range(struct mm_struct *mm, pmd_t *pmd, unsigned long addr);
+#else
+static inline void __flush_hash_table_range(unsigned long start, unsigned long end) { }
+#endif
#endif /* _ASM_POWERPC_BOOK3S_64_TLBFLUSH_HASH_H */
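The preempt_disable()/preempt_enable() pair added to arch_enter/leave_lazy_mmu_mode() above keeps the per-CPU TLB batch stable while PTE updates are queued. A toy user-space model of that batch-then-flush pattern; all names and the batch size are invented for the demo:

/* Toy model of lazy-MMU batching: queue updates, flush once at the end. */
#include <stdio.h>

#define BATCH_MAX 4

struct tlb_batch {
	int active;
	int index;
	unsigned long addrs[BATCH_MAX];
};

static struct tlb_batch batch;	/* stands in for the per-CPU batch */
static int flushes;

static void flush_pending(struct tlb_batch *b)
{
	flushes++;
	b->index = 0;
}

static void enter_lazy_mmu(void)
{
	batch.active = 1;
}

static void leave_lazy_mmu(void)
{
	if (batch.index)
		flush_pending(&batch);
	batch.active = 0;
}

static void queue_update(unsigned long addr)
{
	if (!batch.active) {		/* no batching: flush immediately */
		flushes++;
		return;
	}
	batch.addrs[batch.index++] = addr;
	if (batch.index == BATCH_MAX)	/* batch full: flush now */
		flush_pending(&batch);
}

int main(void)
{
	enter_lazy_mmu();
	for (unsigned long a = 0; a < 6; a++)
		queue_update(a * 0x1000);
	leave_lazy_mmu();
	printf("flushes = %d (vs 6 unbatched)\n", flushes);	/* prints 2 */
	return 0;
}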
diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h b/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
index ca8db193ae38..77797a2a82eb 100644
--- a/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
+++ b/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
@@ -2,10 +2,29 @@
#ifndef _ASM_POWERPC_TLBFLUSH_RADIX_H
#define _ASM_POWERPC_TLBFLUSH_RADIX_H
+#include <asm/hvcall.h>
+
+#define RIC_FLUSH_TLB 0
+#define RIC_FLUSH_PWC 1
+#define RIC_FLUSH_ALL 2
+
struct vm_area_struct;
struct mm_struct;
struct mmu_gather;
+static inline u64 psize_to_rpti_pgsize(unsigned long psize)
+{
+ if (psize == MMU_PAGE_4K)
+ return H_RPTI_PAGE_4K;
+ if (psize == MMU_PAGE_64K)
+ return H_RPTI_PAGE_64K;
+ if (psize == MMU_PAGE_2M)
+ return H_RPTI_PAGE_2M;
+ if (psize == MMU_PAGE_1G)
+ return H_RPTI_PAGE_1G;
+ return H_RPTI_PAGE_ALL;
+}
+
static inline int mmu_get_ap(int psize)
{
return mmu_psize_defs[psize].ap;
@@ -20,7 +39,7 @@ extern void radix__flush_pwc_lpid(unsigned int lpid);
extern void radix__flush_all_lpid(unsigned int lpid);
extern void radix__flush_all_lpid_guest(unsigned int lpid);
#else
-static inline void radix__tlbiel_all(unsigned int action) { WARN_ON(1); };
+static inline void radix__tlbiel_all(unsigned int action) { WARN_ON(1); }
static inline void radix__flush_tlb_lpid_page(unsigned int lpid,
unsigned long addr,
unsigned long page_size)
@@ -45,6 +64,8 @@ extern void radix__flush_hugetlb_tlb_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end);
extern void radix__flush_tlb_range_psize(struct mm_struct *mm, unsigned long start,
unsigned long end, int psize);
+void radix__flush_tlb_pwc_range_psize(struct mm_struct *mm, unsigned long start,
+ unsigned long end, int psize);
extern void radix__flush_pmd_tlb_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end);
extern void radix__flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush.h b/arch/powerpc/include/asm/book3s/64/tlbflush.h
index dcb5c3839d2f..67655cd60545 100644
--- a/arch/powerpc/include/asm/book3s/64/tlbflush.h
+++ b/arch/powerpc/include/asm/book3s/64/tlbflush.h
@@ -14,7 +14,6 @@ enum {
TLB_INVAL_SCOPE_LPID = 1, /* invalidate TLBs for current LPID */
};
-#ifdef CONFIG_PPC_NATIVE
static inline void tlbiel_all(void)
{
/*
@@ -30,9 +29,6 @@ static inline void tlbiel_all(void)
else
hash__tlbiel_all(TLB_INVAL_SCOPE_GLOBAL);
}
-#else
-static inline void tlbiel_all(void) { BUG(); };
-#endif
static inline void tlbiel_all_lpid(bool radix)
{
@@ -142,10 +138,86 @@ static inline void flush_all_mm(struct mm_struct *mm)
static inline void flush_tlb_fix_spurious_fault(struct vm_area_struct *vma,
unsigned long address)
{
- /* See ptep_set_access_flags comment */
- if (atomic_read(&vma->vm_mm->context.copros) > 0)
- flush_tlb_page(vma, address);
+ /*
+ * Book3S 64 does not require spurious fault flushes because the PTE
+ * must be re-fetched in case of an access permission problem. So the
+ * only reason for a spurious fault should be concurrent modification
+ * to the PTE, in which case the PTE will eventually be re-fetched by
+ * the MMU when it attempts the access again.
+ *
+ * See: Power ISA Version 3.1B, 6.10.1.2 Modifying a Translation Table
+ * Entry, Setting a Reference or Change Bit or Upgrading Access
+ * Authority (PTE Subject to Atomic Hardware Updates):
+ *
+ * "If the only change being made to a valid PTE that is subject to
+ * atomic hardware updates is to set the Reference or Change bit to
+ * 1 or to upgrade access authority, a simpler sequence suffices
+ * because the translation hardware will refetch the PTE if an
+ * access is attempted for which the only problems were reference
+ * and/or change bits needing to be set or insufficient access
+ * authority."
+ *
+ * The nest MMU in POWER9 does not perform this PTE re-fetch, but
+ * it avoids the spurious fault problem by flushing the TLB before
+ * upgrading PTE permissions, see radix__ptep_set_access_flags.
+ */
+}
+
+static inline bool __pte_flags_need_flush(unsigned long oldval,
+ unsigned long newval)
+{
+ unsigned long delta = oldval ^ newval;
+
+ /*
+ * The return value of this function doesn't matter for hash,
+ * ptep_modify_prot_start() does a pte_update() which does or schedules
+ * any necessary hash table update and flush.
+ */
+ if (!radix_enabled())
+ return true;
+
+ /*
+ * We do not expect kernel mappings or non-PTEs or not-present PTEs.
+ */
+ VM_WARN_ON_ONCE(oldval & _PAGE_PRIVILEGED);
+ VM_WARN_ON_ONCE(newval & _PAGE_PRIVILEGED);
+ VM_WARN_ON_ONCE(!(oldval & _PAGE_PTE));
+ VM_WARN_ON_ONCE(!(newval & _PAGE_PTE));
+ VM_WARN_ON_ONCE(!(oldval & _PAGE_PRESENT));
+ VM_WARN_ON_ONCE(!(newval & _PAGE_PRESENT));
+
+ /*
+ * Must flush on any change except READ, WRITE, EXEC, DIRTY, ACCESSED.
+ *
+	 * In theory, some changed software bits could be tolerated; in
+	 * practice those should rarely if ever matter.
+ */
+
+ if (delta & ~(_PAGE_RWX | _PAGE_DIRTY | _PAGE_ACCESSED))
+ return true;
+
+ /*
+ * If any of the above was present in old but cleared in new, flush.
+ * With the exception of _PAGE_ACCESSED, don't worry about flushing
+ * if that was cleared (see the comment in ptep_clear_flush_young()).
+ */
+ if ((delta & ~_PAGE_ACCESSED) & oldval)
+ return true;
+
+ return false;
+}
+
+static inline bool pte_needs_flush(pte_t oldpte, pte_t newpte)
+{
+ return __pte_flags_need_flush(pte_val(oldpte), pte_val(newpte));
+}
+#define pte_needs_flush pte_needs_flush
+
+static inline bool huge_pmd_needs_flush(pmd_t oldpmd, pmd_t newpmd)
+{
+ return __pte_flags_need_flush(pmd_val(oldpmd), pmd_val(newpmd));
}
+#define huge_pmd_needs_flush huge_pmd_needs_flush
extern bool tlbie_capable;
extern bool tlbie_enabled;
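__pte_flags_need_flush() above boils down to a rule on the XOR of the old and new PTE flags: tolerate pure changes to READ/WRITE/EXEC/DIRTY/ACCESSED, but flush if anything else changed, or if one of those bits (other than ACCESSED) was cleared. A hedged stand-alone model of that rule, with made-up bit values rather than the real PTE flags:

/* Simplified model of the radix "does this PTE change need a flush" rule. */
#include <stdbool.h>
#include <stdio.h>

#define F_EXEC     0x01
#define F_WRITE    0x02
#define F_READ     0x04
#define F_DIRTY    0x08
#define F_ACCESSED 0x10
#define F_PRESENT  0x20	/* anything outside the tolerated set */

#define F_RWX (F_READ | F_WRITE | F_EXEC)

static bool needs_flush(unsigned long oldval, unsigned long newval)
{
	unsigned long delta = oldval ^ newval;

	/* A change to any bit outside RWX/DIRTY/ACCESSED always flushes. */
	if (delta & ~(F_RWX | F_DIRTY | F_ACCESSED))
		return true;

	/* Clearing a bit (other than ACCESSED) that was set in old flushes. */
	if ((delta & ~F_ACCESSED) & oldval)
		return true;

	return false;
}

int main(void)
{
	unsigned long ro = F_PRESENT | F_READ;
	unsigned long rw = F_PRESENT | F_READ | F_WRITE;

	printf("ro -> rw : %d\n", needs_flush(ro, rw));		/* 0: upgrade only */
	printf("rw -> ro : %d\n", needs_flush(rw, ro));		/* 1: WRITE cleared */
	printf("clear ACCESSED: %d\n",
	       needs_flush(rw | F_ACCESSED, rw));		/* 0: tolerated */
	return 0;
}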
diff --git a/arch/powerpc/include/asm/book3s/pgtable.h b/arch/powerpc/include/asm/book3s/pgtable.h
index 0e1263455d73..d18b748ea3ae 100644
--- a/arch/powerpc/include/asm/book3s/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/pgtable.h
@@ -8,7 +8,6 @@
#include <asm/book3s/32/pgtable.h>
#endif
-#define FIRST_USER_ADDRESS 0UL
#ifndef __ASSEMBLY__
/* Insert a PTE, top-level function is out of line. It uses an inline
* low level function in the respective pgtable-* files
@@ -26,6 +25,8 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
unsigned long size, pgprot_t vma_prot);
#define __HAVE_PHYS_MEM_ACCESS_PROT
+void __update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep);
+
/*
* This gets called at the end of handling a page fault, when
* the kernel has put a new PTE into the page table for the process.
@@ -35,7 +36,14 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
* corresponding HPTE into the hash table ahead of time, instead of
* waiting for the inevitable extra hash-table miss exception.
*/
-void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep);
+static inline void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
+{
+ if (IS_ENABLED(CONFIG_PPC32) && !mmu_has_feature(MMU_FTR_HPTE_TABLE))
+ return;
+ if (radix_enabled())
+ return;
+ __update_mmu_cache(vma, address, ptep);
+}
#endif /* __ASSEMBLY__ */
#endif
diff --git a/arch/powerpc/include/asm/bpf_perf_event.h b/arch/powerpc/include/asm/bpf_perf_event.h
new file mode 100644
index 000000000000..e8a7b4ffb58c
--- /dev/null
+++ b/arch/powerpc/include/asm/bpf_perf_event.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_BPF_PERF_EVENT_H
+#define _ASM_POWERPC_BPF_PERF_EVENT_H
+
+#include <asm/ptrace.h>
+
+typedef struct user_pt_regs bpf_user_pt_regs_t;
+
+#endif /* _ASM_POWERPC_BPF_PERF_EVENT_H */
diff --git a/arch/powerpc/include/asm/btext.h b/arch/powerpc/include/asm/btext.h
index 461b0f193864..860f8868f11e 100644
--- a/arch/powerpc/include/asm/btext.h
+++ b/arch/powerpc/include/asm/btext.h
@@ -23,12 +23,12 @@ extern void btext_unmap(void);
extern void btext_drawchar(char c);
extern void btext_drawstring(const char *str);
-extern void btext_drawhex(unsigned long v);
-extern void btext_drawtext(const char *c, unsigned int len);
+void __init btext_drawhex(unsigned long v);
+void __init btext_drawtext(const char *c, unsigned int len);
-extern void btext_clearscreen(void);
-extern void btext_flushscreen(void);
-extern void btext_flushline(void);
+void __init btext_clearscreen(void);
+void __init btext_flushscreen(void);
+void __init btext_flushline(void);
#endif /* __KERNEL__ */
#endif /* __PPC_BTEXT_H */
diff --git a/arch/powerpc/include/asm/bug.h b/arch/powerpc/include/asm/bug.h
index 338f36cd9934..61a4736355c2 100644
--- a/arch/powerpc/include/asm/bug.h
+++ b/arch/powerpc/include/asm/bug.h
@@ -4,15 +4,17 @@
#ifdef __KERNEL__
#include <asm/asm-compat.h>
+#include <asm/extable.h>
#ifdef CONFIG_BUG
#ifdef __ASSEMBLY__
#include <asm/asm-offsets.h>
#ifdef CONFIG_DEBUG_BUGVERBOSE
-.macro EMIT_BUG_ENTRY addr,file,line,flags
+.macro __EMIT_BUG_ENTRY addr,file,line,flags
.section __bug_table,"aw"
-5001: PPC_LONG \addr, 5002f
+5001: .4byte \addr - .
+ .4byte 5002f - .
.short \line, \flags
.org 5001b+BUG_ENTRY_SIZE
.previous
@@ -21,30 +23,43 @@
.previous
.endm
#else
-.macro EMIT_BUG_ENTRY addr,file,line,flags
+.macro __EMIT_BUG_ENTRY addr,file,line,flags
.section __bug_table,"aw"
-5001: PPC_LONG \addr
+5001: .4byte \addr - .
.short \flags
.org 5001b+BUG_ENTRY_SIZE
.previous
.endm
#endif /* verbose */
+.macro EMIT_WARN_ENTRY addr,file,line,flags
+ EX_TABLE(\addr,\addr+4)
+ __EMIT_BUG_ENTRY \addr,\file,\line,\flags
+.endm
+
+.macro EMIT_BUG_ENTRY addr,file,line,flags
+ .if \flags & 1 /* BUGFLAG_WARNING */
+ .err /* Use EMIT_WARN_ENTRY for warnings */
+ .endif
+ __EMIT_BUG_ENTRY \addr,\file,\line,\flags
+.endm
+
#else /* !__ASSEMBLY__ */
/* _EMIT_BUG_ENTRY expects args %0,%1,%2,%3 to be FILE, LINE, flags and
sizeof(struct bug_entry), respectively */
#ifdef CONFIG_DEBUG_BUGVERBOSE
#define _EMIT_BUG_ENTRY \
".section __bug_table,\"aw\"\n" \
- "2:\t" PPC_LONG "1b, %0\n" \
- "\t.short %1, %2\n" \
+ "2: .4byte 1b - .\n" \
+ " .4byte %0 - .\n" \
+ " .short %1, %2\n" \
".org 2b+%3\n" \
".previous\n"
#else
#define _EMIT_BUG_ENTRY \
".section __bug_table,\"aw\"\n" \
- "2:\t" PPC_LONG "1b\n" \
- "\t.short %2\n" \
+ "2: .4byte 1b - .\n" \
+ " .short %2\n" \
".org 2b+%3\n" \
".previous\n"
#endif
@@ -58,6 +73,16 @@
"i" (sizeof(struct bug_entry)), \
##__VA_ARGS__)
+#define WARN_ENTRY(insn, flags, label, ...) \
+ asm_volatile_goto( \
+ "1: " insn "\n" \
+ EX_TABLE(1b, %l[label]) \
+ _EMIT_BUG_ENTRY \
+ : : "i" (__FILE__), "i" (__LINE__), \
+ "i" (flags), \
+ "i" (sizeof(struct bug_entry)), \
+ ##__VA_ARGS__ : : label)
+
/*
* BUG_ON() and WARN_ON() do their best to cooperate with compile-time
* optimisations. However depending on the complexity of the condition
@@ -68,7 +93,19 @@
BUG_ENTRY("twi 31, 0, 0", 0); \
unreachable(); \
} while (0)
+#define HAVE_ARCH_BUG
+
+#define __WARN_FLAGS(flags) do { \
+ __label__ __label_warn_on; \
+ \
+ WARN_ENTRY("twi 31, 0, 0", BUGFLAG_WARNING | (flags), __label_warn_on); \
+ unreachable(); \
+ \
+__label_warn_on: \
+ break; \
+} while (0)
+#ifdef CONFIG_PPC64
#define BUG_ON(x) do { \
if (__builtin_constant_p(x)) { \
if (x) \
@@ -78,31 +115,43 @@
} \
} while (0)
-#define __WARN_FLAGS(flags) BUG_ENTRY("twi 31, 0, 0", BUGFLAG_WARNING | (flags))
-
#define WARN_ON(x) ({ \
- int __ret_warn_on = !!(x); \
- if (__builtin_constant_p(__ret_warn_on)) { \
- if (__ret_warn_on) \
+ bool __ret_warn_on = false; \
+ do { \
+ if (__builtin_constant_p((x))) { \
+ if (!(x)) \
+ break; \
__WARN(); \
- } else { \
- BUG_ENTRY(PPC_TLNEI " %4, 0", \
- BUGFLAG_WARNING | BUGFLAG_TAINT(TAINT_WARN), \
- "r" (__ret_warn_on)); \
- } \
+ __ret_warn_on = true; \
+ } else { \
+ __label__ __label_warn_on; \
+ \
+ WARN_ENTRY(PPC_TLNEI " %4, 0", \
+ BUGFLAG_WARNING | BUGFLAG_TAINT(TAINT_WARN), \
+ __label_warn_on, \
+ "r" ((__force long)(x))); \
+ break; \
+__label_warn_on: \
+ __ret_warn_on = true; \
+ } \
+ } while (0); \
unlikely(__ret_warn_on); \
})
-#define HAVE_ARCH_BUG
#define HAVE_ARCH_BUG_ON
#define HAVE_ARCH_WARN_ON
+#endif
+
#endif /* __ASSEMBLY __ */
#else
#ifdef __ASSEMBLY__
.macro EMIT_BUG_ENTRY addr,file,line,flags
.endm
+.macro EMIT_WARN_ENTRY addr,file,line,flags
+.endm
#else /* !__ASSEMBLY__ */
#define _EMIT_BUG_ENTRY
+#define _EMIT_WARN_ENTRY
#endif
#endif /* CONFIG_BUG */
@@ -111,11 +160,12 @@
#ifndef __ASSEMBLY__
struct pt_regs;
-extern int do_page_fault(struct pt_regs *, unsigned long, unsigned long);
-extern void bad_page_fault(struct pt_regs *, unsigned long, int);
+void hash__do_page_fault(struct pt_regs *);
+void bad_page_fault(struct pt_regs *, int);
extern void _exception(int, struct pt_regs *, int, unsigned long);
extern void _exception_pkey(struct pt_regs *, unsigned long, int);
extern void die(const char *, struct pt_regs *, long);
+void die_mce(const char *str, struct pt_regs *regs, long err);
extern bool die_will_crash(void);
extern void panic_flush_kmsg_start(void);
extern void panic_flush_kmsg_end(void);
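The PPC64 BUG_ON()/WARN_ON() rework above relies on __builtin_constant_p() so that a condition known false at compile time generates no code at all and one known true traps unconditionally, leaving the conditional trap instruction only for the genuine runtime case. A minimal user-space illustration of that split, with abort() standing in for the trap:

/* Sketch of the compile-time split used by BUG_ON(): constant conditions fold away. */
#include <stdio.h>
#include <stdlib.h>

#define MY_BUG_ON(x)						\
	do {							\
		if (__builtin_constant_p(x)) {			\
			if (x)					\
				abort();   /* folded trap */	\
		} else if (x) {					\
			abort();	   /* runtime check */	\
		}						\
	} while (0)

int main(int argc, char **argv)
{
	(void)argv;
	MY_BUG_ON(0);		/* constant false: compiles away completely */
	MY_BUG_ON(argc > 100);	/* non-constant: runtime test */
	printf("argc = %d\n", argc);
	return 0;
}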
diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
index 72b81015cebe..ae0a68a838e8 100644
--- a/arch/powerpc/include/asm/cache.h
+++ b/arch/powerpc/include/asm/cache.h
@@ -6,7 +6,7 @@
/* bytes per L1 cache line */
-#if defined(CONFIG_PPC_8xx) || defined(CONFIG_403GCX)
+#if defined(CONFIG_PPC_8xx)
#define L1_CACHE_SHIFT 4
#define MAX_COPY_PREFETCH 1
#define IFETCH_ALIGN_SHIFT 2
@@ -97,7 +97,7 @@ static inline u32 l1_icache_bytes(void)
#endif
-#define __read_mostly __attribute__((__section__(".data..read_mostly")))
+#define __read_mostly __section(".data..read_mostly")
#ifdef CONFIG_PPC_BOOK3S_32
extern long _get_L2CR(void);
diff --git a/arch/powerpc/include/asm/cacheflush.h b/arch/powerpc/include/asm/cacheflush.h
index 4a1c9f0200e1..7564dd4fd12b 100644
--- a/arch/powerpc/include/asm/cacheflush.h
+++ b/arch/powerpc/include/asm/cacheflush.h
@@ -4,22 +4,15 @@
#ifndef _ASM_POWERPC_CACHEFLUSH_H
#define _ASM_POWERPC_CACHEFLUSH_H
-#ifdef __KERNEL__
-
#include <linux/mm.h>
#include <asm/cputable.h>
+#include <asm/cpu_has_feature.h>
/*
- * No cache flushing is required when address mappings are changed,
- * because the caches on PowerPCs are physically addressed.
+ * This flag is used to indicate that the page pointed to by a pte is clean
+ * and does not require cleaning before returning it to the user.
*/
-#define flush_cache_all() do { } while (0)
-#define flush_cache_mm(mm) do { } while (0)
-#define flush_cache_dup_mm(mm) do { } while (0)
-#define flush_cache_range(vma, start, end) do { } while (0)
-#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
-#define flush_icache_page(vma, page) do { } while (0)
-#define flush_cache_vunmap(start, end) do { } while (0)
+#define PG_dcache_clean PG_arch_1
#ifdef CONFIG_PPC_BOOK3S_64
/*
@@ -33,21 +26,32 @@ static inline void flush_cache_vmap(unsigned long start, unsigned long end)
{
asm volatile("ptesync" ::: "memory");
}
-#else
-static inline void flush_cache_vmap(unsigned long start, unsigned long end) { }
-#endif
+#define flush_cache_vmap flush_cache_vmap
+#endif /* CONFIG_PPC_BOOK3S_64 */
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
-extern void flush_dcache_page(struct page *page);
-#define flush_dcache_mmap_lock(mapping) do { } while (0)
-#define flush_dcache_mmap_unlock(mapping) do { } while (0)
+/*
+ * This is called when a page has been modified by the kernel.
+ * It just marks the page as not i-cache clean. We do the i-cache
+ * flush later when the page is given to a user process, if necessary.
+ */
+static inline void flush_dcache_page(struct page *page)
+{
+ if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
+ return;
+ /* avoid an atomic op if possible */
+ if (test_bit(PG_dcache_clean, &page->flags))
+ clear_bit(PG_dcache_clean, &page->flags);
+}
void flush_icache_range(unsigned long start, unsigned long stop);
-extern void flush_icache_user_range(struct vm_area_struct *vma,
- struct page *page, unsigned long addr,
- int len);
-extern void flush_dcache_icache_page(struct page *page);
-void __flush_dcache_icache(void *page);
+#define flush_icache_range flush_icache_range
+
+void flush_icache_user_page(struct vm_area_struct *vma, struct page *page,
+ unsigned long addr, int len);
+#define flush_icache_user_page flush_icache_user_page
+
+void flush_dcache_icache_page(struct page *page);
/**
* flush_dcache_range(): Write any modified data cache blocks out to memory and
@@ -65,17 +69,13 @@ static inline void flush_dcache_range(unsigned long start, unsigned long stop)
unsigned long size = stop - (unsigned long)addr + (bytes - 1);
unsigned long i;
- if (IS_ENABLED(CONFIG_PPC64)) {
+ if (IS_ENABLED(CONFIG_PPC64))
mb(); /* sync */
- isync();
- }
for (i = 0; i < size >> shift; i++, addr += bytes)
dcbf(addr);
mb(); /* sync */
- if (IS_ENABLED(CONFIG_PPC64))
- isync();
}
/*
@@ -115,14 +115,16 @@ static inline void invalidate_dcache_range(unsigned long start,
mb(); /* sync */
}
-#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
- do { \
- memcpy(dst, src, len); \
- flush_icache_user_range(vma, page, vaddr, len); \
- } while (0)
-#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
- memcpy(dst, src, len)
+#ifdef CONFIG_4xx
+static inline void flush_instruction_cache(void)
+{
+ iccci((void *)KERNELBASE);
+ isync();
+}
+#else
+void flush_instruction_cache(void);
+#endif
-#endif /* __KERNEL__ */
+#include <asm-generic/cacheflush.h>
#endif /* _ASM_POWERPC_CACHEFLUSH_H */
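The new inline flush_dcache_page() above only clears PG_dcache_clean when it cannot prove the i-cache is coherent; the costly i-cache maintenance is deferred until the page is actually exposed to user space. A small invented model of that lazy-flush pattern (the struct and helpers are made up for the demo):

/* Model of deferred i-cache maintenance keyed off a per-page "clean" flag. */
#include <stdbool.h>
#include <stdio.h>

struct demo_page {
	bool dcache_clean;	/* stands in for PG_dcache_clean */
};

static int flushes;

static void expensive_icache_flush(struct demo_page *p)
{
	flushes++;
	p->dcache_clean = true;
}

/* Kernel modified the page: just mark it dirty for the i-cache. */
static void kernel_wrote(struct demo_page *p)
{
	p->dcache_clean = false;
}

/* Page is about to be mapped executable into a user process. */
static void give_to_user(struct demo_page *p)
{
	if (!p->dcache_clean)
		expensive_icache_flush(p);
}

int main(void)
{
	struct demo_page p = { .dcache_clean = true };

	kernel_wrote(&p);
	kernel_wrote(&p);	/* repeated writes don't flush */
	give_to_user(&p);	/* one flush here */
	give_to_user(&p);	/* already clean, no flush */
	printf("flushes = %d\n", flushes);	/* prints 1 */
	return 0;
}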
diff --git a/arch/powerpc/include/asm/checksum.h b/arch/powerpc/include/asm/checksum.h
index 9cce06194dcc..4b573a3b7e17 100644
--- a/arch/powerpc/include/asm/checksum.h
+++ b/arch/powerpc/include/asm/checksum.h
@@ -18,19 +18,18 @@
* Like csum_partial, this must be called with even lengths,
* except for the last fragment.
*/
-extern __wsum csum_partial_copy_generic(const void *src, void *dst,
- int len, __wsum sum,
- int *src_err, int *dst_err);
+extern __wsum csum_partial_copy_generic(const void *src, void *dst, int len);
#define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
extern __wsum csum_and_copy_from_user(const void __user *src, void *dst,
- int len, __wsum sum, int *err_ptr);
+ int len);
#define HAVE_CSUM_COPY_USER
extern __wsum csum_and_copy_to_user(const void *src, void __user *dst,
- int len, __wsum sum, int *err_ptr);
+ int len);
-#define csum_partial_copy_nocheck(src, dst, len, sum) \
- csum_partial_copy_generic((src), (dst), (len), (sum), NULL, NULL)
+#define _HAVE_ARCH_CSUM_AND_COPY
+#define csum_partial_copy_nocheck(src, dst, len) \
+ csum_partial_copy_generic((src), (dst), (len))
/*
@@ -39,14 +38,15 @@ extern __wsum csum_and_copy_to_user(const void *src, void __user *dst,
*/
static inline __sum16 csum_fold(__wsum sum)
{
- unsigned int tmp;
-
- /* swap the two 16-bit halves of sum */
- __asm__("rlwinm %0,%1,16,0,31" : "=r" (tmp) : "r" (sum));
- /* if there is a carry from adding the two 16-bit halves,
- it will carry from the lower half into the upper half,
- giving us the correct sum in the upper half. */
- return (__force __sum16)(~((__force u32)sum + tmp) >> 16);
+ u32 tmp = (__force u32)sum;
+
+ /*
+ * swap the two 16-bit halves of sum
+ * if there is a carry from adding the two 16-bit halves,
+ * it will carry from the lower half into the upper half,
+ * giving us the correct sum in the upper half.
+ */
+ return (__force __sum16)(~(tmp + rol32(tmp, 16)) >> 16);
}
static inline u32 from64to32(u64 x)
@@ -92,20 +92,19 @@ static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, __u32 len,
}
#define HAVE_ARCH_CSUM_ADD
-static inline __wsum csum_add(__wsum csum, __wsum addend)
+static __always_inline __wsum csum_add(__wsum csum, __wsum addend)
{
#ifdef __powerpc64__
u64 res = (__force u64)csum;
-#endif
+
+ res += (__force u64)addend;
+ return (__force __wsum)((u32)res + (res >> 32));
+#else
if (__builtin_constant_p(csum) && csum == 0)
return addend;
if (__builtin_constant_p(addend) && addend == 0)
return csum;
-#ifdef __powerpc64__
- res += (__force u64)addend;
- return (__force __wsum)((u32)res + (res >> 32));
-#else
asm("addc %0,%0,%1;"
"addze %0,%0;"
: "+r" (csum) : "r" (addend) : "xer");
@@ -113,6 +112,13 @@ static inline __wsum csum_add(__wsum csum, __wsum addend)
#endif
}
+#define HAVE_ARCH_CSUM_SHIFT
+static __always_inline __wsum csum_shift(__wsum sum, int offset)
+{
+ /* rotate sum to align it with a 16b boundary */
+ return (__force __wsum)rol32((__force u32)sum, (offset & 1) << 3);
+}
+
/*
* This is a version of ip_compute_csum() optimized for IP headers,
* which always checksum on 4 octet boundaries. ihl is the number
@@ -164,7 +170,7 @@ static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
*/
__wsum __csum_partial(const void *buff, int len, __wsum sum);
-static inline __wsum csum_partial(const void *buff, int len, __wsum sum)
+static __always_inline __wsum csum_partial(const void *buff, int len, __wsum sum)
{
if (__builtin_constant_p(len) && len <= 16 && (len & 1) == 0) {
if (len == 2)
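The csum_fold() rewrite above replaces the explicit rlwinm asm with rol32(): adding the 32-bit sum to a 16-bit rotation of itself pushes any carry from the low half into the high half, so the high 16 bits hold the folded one's-complement sum. A quick user-space check of that identity against a naive fold:

/* Verify the rotate-and-add checksum fold against a straightforward fold. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t rol32(uint32_t x, int n)
{
	return (x << n) | (x >> (32 - n));
}

/* The trick used in the patch. */
static uint16_t fold_rotate(uint32_t sum)
{
	return (uint16_t)(~(sum + rol32(sum, 16)) >> 16);
}

/* Reference: repeatedly add the carries back in, then complement. */
static uint16_t fold_naive(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

int main(void)
{
	uint32_t tests[] = { 0, 1, 0xffff, 0x10000, 0x1234abcd, 0xfffffffe };

	for (unsigned i = 0; i < sizeof(tests) / sizeof(tests[0]); i++)
		assert(fold_rotate(tests[i]) == fold_naive(tests[i]));
	printf("csum_fold rotate trick matches the naive fold\n");
	return 0;
}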
diff --git a/arch/powerpc/include/asm/clocksource.h b/arch/powerpc/include/asm/clocksource.h
new file mode 100644
index 000000000000..0a26ef13a34a
--- /dev/null
+++ b/arch/powerpc/include/asm/clocksource.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_CLOCKSOURCE_H
+#define _ASM_POWERPC_CLOCKSOURCE_H
+
+#include <asm/vdso/clocksource.h>
+
+#endif /* _ASM_POWERPC_CLOCKSOURCE_H */
diff --git a/arch/powerpc/include/asm/cmpxchg.h b/arch/powerpc/include/asm/cmpxchg.h
index 27183871eb3b..05f246c0e36e 100644
--- a/arch/powerpc/include/asm/cmpxchg.h
+++ b/arch/powerpc/include/asm/cmpxchg.h
@@ -6,7 +6,6 @@
#include <linux/compiler.h>
#include <asm/synch.h>
#include <linux/bug.h>
-#include <asm/asm-405.h>
#ifdef __BIG_ENDIAN
#define BITOFF_CAL(size, off) ((sizeof(u32) - size - off) * BITS_PER_BYTE)
@@ -29,7 +28,6 @@ static inline u32 __xchg_##type##sfx(volatile void *p, u32 val) \
"1: lwarx %0,0,%3\n" \
" andc %1,%0,%5\n" \
" or %1,%1,%4\n" \
- PPC405_ERR77(0,%3) \
" stwcx. %1,0,%3\n" \
" bne- 1b\n" \
: "=&r" (prev), "=&r" (tmp), "+m" (*(u32*)p) \
@@ -60,7 +58,6 @@ u32 __cmpxchg_##type##sfx(volatile void *p, u32 old, u32 new) \
" bne- 2f\n" \
" andc %1,%0,%6\n" \
" or %1,%1,%5\n" \
- PPC405_ERR77(0,%3) \
" stwcx. %1,0,%3\n" \
" bne- 1b\n" \
br2 \
@@ -92,7 +89,6 @@ __xchg_u32_local(volatile void *p, unsigned long val)
__asm__ __volatile__(
"1: lwarx %0,0,%2 \n"
- PPC405_ERR77(0,%2)
" stwcx. %3,0,%2 \n\
bne- 1b"
: "=&r" (prev), "+m" (*(volatile unsigned int *)p)
@@ -109,7 +105,6 @@ __xchg_u32_relaxed(u32 *p, unsigned long val)
__asm__ __volatile__(
"1: lwarx %0,0,%2\n"
- PPC405_ERR77(0, %2)
" stwcx. %3,0,%2\n"
" bne- 1b"
: "=&r" (prev), "+m" (*p)
@@ -127,7 +122,6 @@ __xchg_u64_local(volatile void *p, unsigned long val)
__asm__ __volatile__(
"1: ldarx %0,0,%2 \n"
- PPC405_ERR77(0,%2)
" stdcx. %3,0,%2 \n\
bne- 1b"
: "=&r" (prev), "+m" (*(volatile unsigned long *)p)
@@ -144,7 +138,6 @@ __xchg_u64_relaxed(u64 *p, unsigned long val)
__asm__ __volatile__(
"1: ldarx %0,0,%2\n"
- PPC405_ERR77(0, %2)
" stdcx. %3,0,%2\n"
" bne- 1b"
: "=&r" (prev), "+m" (*p)
@@ -192,14 +185,14 @@ __xchg_relaxed(void *ptr, unsigned long x, unsigned int size)
BUILD_BUG_ON_MSG(1, "Unsupported size for __xchg_local");
return x;
}
-#define xchg_local(ptr,x) \
+#define arch_xchg_local(ptr,x) \
({ \
__typeof__(*(ptr)) _x_ = (x); \
(__typeof__(*(ptr))) __xchg_local((ptr), \
(unsigned long)_x_, sizeof(*(ptr))); \
})
-#define xchg_relaxed(ptr, x) \
+#define arch_xchg_relaxed(ptr, x) \
({ \
__typeof__(*(ptr)) _x_ = (x); \
(__typeof__(*(ptr))) __xchg_relaxed((ptr), \
@@ -229,7 +222,6 @@ __cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new)
"1: lwarx %0,0,%2 # __cmpxchg_u32\n\
cmpw 0,%0,%3\n\
bne- 2f\n"
- PPC405_ERR77(0,%2)
" stwcx. %4,0,%2\n\
bne- 1b"
PPC_ATOMIC_EXIT_BARRIER
@@ -252,7 +244,6 @@ __cmpxchg_u32_local(volatile unsigned int *p, unsigned long old,
"1: lwarx %0,0,%2 # __cmpxchg_u32\n\
cmpw 0,%0,%3\n\
bne- 2f\n"
- PPC405_ERR77(0,%2)
" stwcx. %4,0,%2\n\
bne- 1b"
"\n\
@@ -273,7 +264,6 @@ __cmpxchg_u32_relaxed(u32 *p, unsigned long old, unsigned long new)
"1: lwarx %0,0,%2 # __cmpxchg_u32_relaxed\n"
" cmpw 0,%0,%3\n"
" bne- 2f\n"
- PPC405_ERR77(0, %2)
" stwcx. %4,0,%2\n"
" bne- 1b\n"
"2:"
@@ -301,7 +291,6 @@ __cmpxchg_u32_acquire(u32 *p, unsigned long old, unsigned long new)
"1: lwarx %0,0,%2 # __cmpxchg_u32_acquire\n"
" cmpw 0,%0,%3\n"
" bne- 2f\n"
- PPC405_ERR77(0, %2)
" stwcx. %4,0,%2\n"
" bne- 1b\n"
PPC_ACQUIRE_BARRIER
@@ -478,7 +467,7 @@ __cmpxchg_acquire(void *ptr, unsigned long old, unsigned long new,
BUILD_BUG_ON_MSG(1, "Unsupported size for __cmpxchg_acquire");
return old;
}
-#define cmpxchg(ptr, o, n) \
+#define arch_cmpxchg(ptr, o, n) \
({ \
__typeof__(*(ptr)) _o_ = (o); \
__typeof__(*(ptr)) _n_ = (n); \
@@ -487,7 +476,7 @@ __cmpxchg_acquire(void *ptr, unsigned long old, unsigned long new,
})
-#define cmpxchg_local(ptr, o, n) \
+#define arch_cmpxchg_local(ptr, o, n) \
({ \
__typeof__(*(ptr)) _o_ = (o); \
__typeof__(*(ptr)) _n_ = (n); \
@@ -495,7 +484,7 @@ __cmpxchg_acquire(void *ptr, unsigned long old, unsigned long new,
(unsigned long)_n_, sizeof(*(ptr))); \
})
-#define cmpxchg_relaxed(ptr, o, n) \
+#define arch_cmpxchg_relaxed(ptr, o, n) \
({ \
__typeof__(*(ptr)) _o_ = (o); \
__typeof__(*(ptr)) _n_ = (n); \
@@ -504,7 +493,7 @@ __cmpxchg_acquire(void *ptr, unsigned long old, unsigned long new,
sizeof(*(ptr))); \
})
-#define cmpxchg_acquire(ptr, o, n) \
+#define arch_cmpxchg_acquire(ptr, o, n) \
({ \
__typeof__(*(ptr)) _o_ = (o); \
__typeof__(*(ptr)) _n_ = (n); \
@@ -513,29 +502,29 @@ __cmpxchg_acquire(void *ptr, unsigned long old, unsigned long new,
sizeof(*(ptr))); \
})
#ifdef CONFIG_PPC64
-#define cmpxchg64(ptr, o, n) \
+#define arch_cmpxchg64(ptr, o, n) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
- cmpxchg((ptr), (o), (n)); \
+ arch_cmpxchg((ptr), (o), (n)); \
})
-#define cmpxchg64_local(ptr, o, n) \
+#define arch_cmpxchg64_local(ptr, o, n) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
- cmpxchg_local((ptr), (o), (n)); \
+ arch_cmpxchg_local((ptr), (o), (n)); \
})
-#define cmpxchg64_relaxed(ptr, o, n) \
+#define arch_cmpxchg64_relaxed(ptr, o, n) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
- cmpxchg_relaxed((ptr), (o), (n)); \
+ arch_cmpxchg_relaxed((ptr), (o), (n)); \
})
-#define cmpxchg64_acquire(ptr, o, n) \
+#define arch_cmpxchg64_acquire(ptr, o, n) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
- cmpxchg_acquire((ptr), (o), (n)); \
+ arch_cmpxchg_acquire((ptr), (o), (n)); \
})
#else
#include <asm-generic/cmpxchg-local.h>
-#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
+#define arch_cmpxchg64_local(ptr, o, n) __generic_cmpxchg64_local((ptr), (o), (n))
#endif
#endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/code-patching.h b/arch/powerpc/include/asm/code-patching.h
index 898b54262881..1c6316ec4b74 100644
--- a/arch/powerpc/include/asm/code-patching.h
+++ b/arch/powerpc/include/asm/code-patching.h
@@ -11,6 +11,7 @@
#include <linux/string.h>
#include <linux/kallsyms.h>
#include <asm/asm-compat.h>
+#include <asm/inst.h>
/* Flags for create_branch:
* "b" == create_branch(addr, target, 0);
@@ -21,34 +22,80 @@
#define BRANCH_SET_LINK 0x1
#define BRANCH_ABSOLUTE 0x2
-bool is_offset_in_branch_range(long offset);
-unsigned int create_branch(const unsigned int *addr,
- unsigned long target, int flags);
-unsigned int create_cond_branch(const unsigned int *addr,
- unsigned long target, int flags);
-int patch_branch(unsigned int *addr, unsigned long target, int flags);
-int patch_instruction(unsigned int *addr, unsigned int instr);
-int raw_patch_instruction(unsigned int *addr, unsigned int instr);
+DECLARE_STATIC_KEY_FALSE(init_mem_is_free);
+
+/*
+ * Powerpc branch instruction is :
+ *
+ * 0 6 30 31
+ * +---------+----------------+---+---+
+ * | opcode | LI |AA |LK |
+ * +---------+----------------+---+---+
+ * Where AA = 0 and LK = 0
+ *
+ * LI is a signed 24-bit integer. The real branch offset is computed
+ * by: imm32 = SignExtend(LI:'0b00', 32);
+ *
+ * So the maximum forward branch should be:
+ * (0x007fffff << 2) = 0x01fffffc = 0x1fffffc
+ * The maximum backward branch should be:
+ * (0xff800000 << 2) = 0xfe000000 = -0x2000000
+ */
+static inline bool is_offset_in_branch_range(long offset)
+{
+ return (offset >= -0x2000000 && offset <= 0x1fffffc && !(offset & 0x3));
+}
+
+static inline bool is_offset_in_cond_branch_range(long offset)
+{
+ return offset >= -0x8000 && offset <= 0x7fff && !(offset & 0x3);
+}
+
+static inline int create_branch(ppc_inst_t *instr, const u32 *addr,
+ unsigned long target, int flags)
+{
+ long offset;
+
+ *instr = ppc_inst(0);
+ offset = target;
+ if (! (flags & BRANCH_ABSOLUTE))
+ offset = offset - (unsigned long)addr;
+
+ /* Check we can represent the target in the instruction format */
+ if (!is_offset_in_branch_range(offset))
+ return 1;
+
+ /* Mask out the flags and target, so they don't step on each other. */
+ *instr = ppc_inst(0x48000000 | (flags & 0x3) | (offset & 0x03FFFFFC));
+
+ return 0;
+}
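Illustration only (not part of the patch): the same relative-offset check and I-form encoding that the inline create_branch() above performs, as a small stand-alone program. The addresses are made up, and only the relative (non-BRANCH_ABSOLUTE) case is shown.

```c
#include <stdint.h>
#include <stdio.h>

/* Mirrors create_branch() for the relative case. */
static int encode_branch(uint32_t *insn, unsigned long addr, unsigned long target)
{
        long offset = (long)(target - addr);

        if (offset < -0x2000000 || offset > 0x1fffffc || (offset & 0x3))
                return 1;               /* not reachable with an I-form branch */

        *insn = 0x48000000u | (offset & 0x03FFFFFC);
        return 0;
}

int main(void)
{
        uint32_t insn;

        if (!encode_branch(&insn, 0x1000, 0x2000))
                printf("b +0x1000 encodes as 0x%08x\n", insn);  /* 0x48001000 */
        return 0;
}
```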
+
+int create_cond_branch(ppc_inst_t *instr, const u32 *addr,
+ unsigned long target, int flags);
+int patch_branch(u32 *addr, unsigned long target, int flags);
+int patch_instruction(u32 *addr, ppc_inst_t instr);
+int raw_patch_instruction(u32 *addr, ppc_inst_t instr);
static inline unsigned long patch_site_addr(s32 *site)
{
return (unsigned long)site + *site;
}
-static inline int patch_instruction_site(s32 *site, unsigned int instr)
+static inline int patch_instruction_site(s32 *site, ppc_inst_t instr)
{
- return patch_instruction((unsigned int *)patch_site_addr(site), instr);
+ return patch_instruction((u32 *)patch_site_addr(site), instr);
}
static inline int patch_branch_site(s32 *site, unsigned long target, int flags)
{
- return patch_branch((unsigned int *)patch_site_addr(site), target, flags);
+ return patch_branch((u32 *)patch_site_addr(site), target, flags);
}
static inline int modify_instruction(unsigned int *addr, unsigned int clr,
unsigned int set)
{
- return patch_instruction(addr, (*addr & ~clr) | set);
+ return patch_instruction(addr, ppc_inst((*addr & ~clr) | set));
}
static inline int modify_instruction_site(s32 *site, unsigned int clr, unsigned int set)
@@ -56,29 +103,36 @@ static inline int modify_instruction_site(s32 *site, unsigned int clr, unsigned
return modify_instruction((unsigned int *)patch_site_addr(site), clr, set);
}
-int instr_is_relative_branch(unsigned int instr);
-int instr_is_relative_link_branch(unsigned int instr);
-int instr_is_branch_to_addr(const unsigned int *instr, unsigned long addr);
-unsigned long branch_target(const unsigned int *instr);
-unsigned int translate_branch(const unsigned int *dest,
- const unsigned int *src);
-extern bool is_conditional_branch(unsigned int instr);
-#ifdef CONFIG_PPC_BOOK3E_64
-void __patch_exception(int exc, unsigned long addr);
-#define patch_exception(exc, name) do { \
- extern unsigned int name; \
- __patch_exception((exc), (unsigned long)&name); \
-} while (0)
-#endif
+static inline unsigned int branch_opcode(ppc_inst_t instr)
+{
+ return ppc_inst_primary_opcode(instr) & 0x3F;
+}
+
+static inline int instr_is_branch_iform(ppc_inst_t instr)
+{
+ return branch_opcode(instr) == 18;
+}
+
+static inline int instr_is_branch_bform(ppc_inst_t instr)
+{
+ return branch_opcode(instr) == 16;
+}
+
+int instr_is_relative_branch(ppc_inst_t instr);
+int instr_is_relative_link_branch(ppc_inst_t instr);
+unsigned long branch_target(const u32 *instr);
+int translate_branch(ppc_inst_t *instr, const u32 *dest, const u32 *src);
+bool is_conditional_branch(ppc_inst_t instr);
#define OP_RT_RA_MASK 0xffff0000UL
-#define LIS_R2 0x3c020000UL
-#define ADDIS_R2_R12 0x3c4c0000UL
-#define ADDI_R2_R2 0x38420000UL
+#define LIS_R2 (PPC_RAW_LIS(_R2, 0))
+#define ADDIS_R2_R12 (PPC_RAW_ADDIS(_R2, _R12, 0))
+#define ADDI_R2_R2 (PPC_RAW_ADDI(_R2, _R2, 0))
+
static inline unsigned long ppc_function_entry(void *func)
{
-#ifdef PPC64_ELF_ABI_v2
+#ifdef CONFIG_PPC64_ELF_ABI_V2
u32 *insn = func;
/*
@@ -103,13 +157,13 @@ static inline unsigned long ppc_function_entry(void *func)
return (unsigned long)(insn + 2);
else
return (unsigned long)func;
-#elif defined(PPC64_ELF_ABI_v1)
+#elif defined(CONFIG_PPC64_ELF_ABI_V1)
/*
* On PPC64 ABIv1 the function pointer actually points to the
* function's descriptor. The first entry in the descriptor is the
* address of the function text.
*/
- return ((func_descr_t *)func)->entry;
+ return ((struct func_desc *)func)->addr;
#else
return (unsigned long)func;
#endif
@@ -117,7 +171,7 @@ static inline unsigned long ppc_function_entry(void *func)
static inline unsigned long ppc_global_function_entry(void *func)
{
-#ifdef PPC64_ELF_ABI_v2
+#ifdef CONFIG_PPC64_ELF_ABI_V2
/* PPC64 ABIv2 the global entry point is at the address */
return (unsigned long)func;
#else
@@ -134,7 +188,7 @@ static inline unsigned long ppc_global_function_entry(void *func)
static inline unsigned long ppc_kallsyms_lookup_name(const char *name)
{
unsigned long addr;
-#ifdef PPC64_ELF_ABI_v1
+#ifdef CONFIG_PPC64_ELF_ABI_V1
/* check for dot variant */
char dot_name[1 + KSYM_NAME_LEN];
bool dot_appended = false;
@@ -155,7 +209,7 @@ static inline unsigned long ppc_kallsyms_lookup_name(const char *name)
if (!addr && dot_appended)
/* Let's try the original non-dot symbol lookup */
addr = kallsyms_lookup_name(name);
-#elif defined(PPC64_ELF_ABI_v2)
+#elif defined(CONFIG_PPC64_ELF_ABI_V2)
addr = kallsyms_lookup_name(name);
if (addr)
addr = ppc_function_entry((void *)addr);
@@ -165,25 +219,21 @@ static inline unsigned long ppc_kallsyms_lookup_name(const char *name)
return addr;
}
-#ifdef CONFIG_PPC64
/*
* Some instruction encodings commonly used in dynamic ftracing
* and function live patching.
*/
/* This must match the definition of STK_GOT in <asm/ppc_asm.h> */
-#ifdef PPC64_ELF_ABI_v2
+#ifdef CONFIG_PPC64_ELF_ABI_V2
#define R2_STACK_OFFSET 24
#else
#define R2_STACK_OFFSET 40
#endif
-#define PPC_INST_LD_TOC (PPC_INST_LD | ___PPC_RT(__REG_R2) | \
- ___PPC_RA(__REG_R1) | R2_STACK_OFFSET)
+#define PPC_INST_LD_TOC PPC_RAW_LD(_R2, _R1, R2_STACK_OFFSET)
/* usually preceded by a mflr r0 */
-#define PPC_INST_STD_LR (PPC_INST_STD | ___PPC_RS(__REG_R0) | \
- ___PPC_RA(__REG_R1) | PPC_LR_STKOFF)
-#endif /* CONFIG_PPC64 */
+#define PPC_INST_STD_LR PPC_RAW_STD(_R0, _R1, PPC_LR_STKOFF)
#endif /* _ASM_POWERPC_CODE_PATCHING_H */
diff --git a/arch/powerpc/include/asm/compat.h b/arch/powerpc/include/asm/compat.h
index 3e3cdfaa76c6..dda4091fd012 100644
--- a/arch/powerpc/include/asm/compat.h
+++ b/arch/powerpc/include/asm/compat.h
@@ -8,27 +8,20 @@
#include <linux/types.h>
#include <linux/sched.h>
+#define compat_ipc_pid_t compat_ipc_pid_t
+typedef u16 compat_ipc_pid_t;
+
+#define compat_ipc64_perm compat_ipc64_perm
+
#include <asm-generic/compat.h>
-#define COMPAT_USER_HZ 100
#ifdef __BIG_ENDIAN__
#define COMPAT_UTS_MACHINE "ppc\0\0"
#else
#define COMPAT_UTS_MACHINE "ppcle\0\0"
#endif
-typedef u32 __compat_uid_t;
-typedef u32 __compat_gid_t;
-typedef u32 __compat_uid32_t;
-typedef u32 __compat_gid32_t;
-typedef u32 compat_mode_t;
-typedef u32 compat_dev_t;
typedef s16 compat_nlink_t;
-typedef u16 compat_ipc_pid_t;
-typedef u32 compat_caddr_t;
-typedef __kernel_fsid_t compat_fsid_t;
-typedef s64 compat_s64;
-typedef u64 compat_u64;
struct compat_stat {
compat_dev_t st_dev;
@@ -50,68 +43,6 @@ struct compat_stat {
u32 __unused4[2];
};
-struct compat_flock {
- short l_type;
- short l_whence;
- compat_off_t l_start;
- compat_off_t l_len;
- compat_pid_t l_pid;
-};
-
-#define F_GETLK64 12 /* using 'struct flock64' */
-#define F_SETLK64 13
-#define F_SETLKW64 14
-
-struct compat_flock64 {
- short l_type;
- short l_whence;
- compat_loff_t l_start;
- compat_loff_t l_len;
- compat_pid_t l_pid;
-};
-
-struct compat_statfs {
- int f_type;
- int f_bsize;
- int f_blocks;
- int f_bfree;
- int f_bavail;
- int f_files;
- int f_ffree;
- compat_fsid_t f_fsid;
- int f_namelen; /* SunOS ignores this field. */
- int f_frsize;
- int f_flags;
- int f_spare[4];
-};
-
-#define COMPAT_RLIM_INFINITY 0xffffffff
-
-typedef u32 compat_old_sigset_t;
-
-#define _COMPAT_NSIG 64
-#define _COMPAT_NSIG_BPW 32
-
-typedef u32 compat_sigset_word;
-
-#define COMPAT_OFF_T_MAX 0x7fffffff
-
-static inline void __user *arch_compat_alloc_user_space(long len)
-{
- struct pt_regs *regs = current->thread.regs;
- unsigned long usp = regs->gpr[1];
-
- /*
- * We can't access below the stack pointer in the 32bit ABI and
- * can access 288 bytes in the 64bit big-endian ABI,
- * or 512 bytes with the new ELFv2 little-endian ABI.
- */
- if (!is_32bit_task())
- usp -= USER_REDZONE_SIZE;
-
- return (void __user *) (usp - len);
-}
-
/*
* ipc64_perm is actually 32/64bit clean but since the compat layer refers to
* it we may as well define it.
diff --git a/arch/powerpc/include/asm/context_tracking.h b/arch/powerpc/include/asm/context_tracking.h
index f2682b28b050..4b63931c49e0 100644
--- a/arch/powerpc/include/asm/context_tracking.h
+++ b/arch/powerpc/include/asm/context_tracking.h
@@ -2,7 +2,7 @@
#ifndef _ASM_POWERPC_CONTEXT_TRACKING_H
#define _ASM_POWERPC_CONTEXT_TRACKING_H
-#ifdef CONFIG_CONTEXT_TRACKING
+#ifdef CONFIG_CONTEXT_TRACKING_USER
#define SCHEDULE_USER bl schedule_user
#else
#define SCHEDULE_USER bl schedule
diff --git a/arch/powerpc/include/asm/cpm1.h b/arch/powerpc/include/asm/cpm1.h
index a116fe931789..3bdd74739cb8 100644
--- a/arch/powerpc/include/asm/cpm1.h
+++ b/arch/powerpc/include/asm/cpm1.h
@@ -68,6 +68,7 @@ extern void cpm_reset(void);
#define PROFF_SPI ((uint)0x0180)
#define PROFF_SCC3 ((uint)0x0200)
#define PROFF_SMC1 ((uint)0x0280)
+#define PROFF_DSP1 ((uint)0x02c0)
#define PROFF_SCC4 ((uint)0x0300)
#define PROFF_SMC2 ((uint)0x0380)
diff --git a/arch/powerpc/include/asm/cpm2.h b/arch/powerpc/include/asm/cpm2.h
index 2211b934ecb4..9ee192a6c5d7 100644
--- a/arch/powerpc/include/asm/cpm2.h
+++ b/arch/powerpc/include/asm/cpm2.h
@@ -594,7 +594,7 @@ typedef struct fcc_enet {
uint fen_p256c; /* Total packets 256 < bytes <= 511 */
uint fen_p512c; /* Total packets 512 < bytes <= 1023 */
uint fen_p1024c; /* Total packets 1024 < bytes <= 1518 */
- uint fen_cambuf; /* Internal CAM buffer poiner */
+ uint fen_cambuf; /* Internal CAM buffer pointer */
ushort fen_rfthr; /* Received frames threshold */
ushort fen_rfcnt; /* Received frames count */
} fcc_enet_t;
@@ -1133,8 +1133,8 @@ enum cpm_clk {
CPM_CLK_DUMMY
};
-extern int cpm2_clk_setup(enum cpm_clk_target target, int clock, int mode);
-extern int cpm2_smc_clk_setup(enum cpm_clk_target target, int clock);
+int __init cpm2_clk_setup(enum cpm_clk_target target, int clock, int mode);
+int __init cpm2_smc_clk_setup(enum cpm_clk_target target, int clock);
#define CPM_PIN_INPUT 0
#define CPM_PIN_OUTPUT 1
@@ -1143,7 +1143,7 @@ extern int cpm2_smc_clk_setup(enum cpm_clk_target target, int clock);
#define CPM_PIN_GPIO 4
#define CPM_PIN_OPENDRAIN 8
-void cpm2_set_pin(int port, int pin, int flags);
+void __init cpm2_set_pin(int port, int pin, int flags);
#endif /* __CPM2__ */
#endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/cpu_has_feature.h b/arch/powerpc/include/asm/cpu_has_feature.h
index 7897d16e0990..727d4b321937 100644
--- a/arch/powerpc/include/asm/cpu_has_feature.h
+++ b/arch/powerpc/include/asm/cpu_has_feature.h
@@ -7,7 +7,7 @@
#include <linux/bug.h>
#include <asm/cputable.h>
-static inline bool early_cpu_has_feature(unsigned long feature)
+static __always_inline bool early_cpu_has_feature(unsigned long feature)
{
return !!((CPU_FTRS_ALWAYS & feature) ||
(CPU_FTRS_POSSIBLE & cur_cpu_spec->cpu_features & feature));
@@ -46,7 +46,7 @@ static __always_inline bool cpu_has_feature(unsigned long feature)
return static_branch_likely(&cpu_feature_keys[i]);
}
#else
-static inline bool cpu_has_feature(unsigned long feature)
+static __always_inline bool cpu_has_feature(unsigned long feature)
{
return early_cpu_has_feature(feature);
}
diff --git a/arch/powerpc/include/asm/cpu_setup.h b/arch/powerpc/include/asm/cpu_setup.h
new file mode 100644
index 000000000000..30e2fe389502
--- /dev/null
+++ b/arch/powerpc/include/asm/cpu_setup.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2020 IBM Corporation
+ */
+
+#ifndef _ASM_POWERPC_CPU_SETUP_H
+#define _ASM_POWERPC_CPU_SETUP_H
+void __setup_cpu_power7(unsigned long offset, struct cpu_spec *spec);
+void __setup_cpu_power8(unsigned long offset, struct cpu_spec *spec);
+void __setup_cpu_power9(unsigned long offset, struct cpu_spec *spec);
+void __setup_cpu_power10(unsigned long offset, struct cpu_spec *spec);
+void __restore_cpu_power7(void);
+void __restore_cpu_power8(void);
+void __restore_cpu_power9(void);
+void __restore_cpu_power10(void);
+
+void __setup_cpu_e500v1(unsigned long offset, struct cpu_spec *spec);
+void __setup_cpu_e500v2(unsigned long offset, struct cpu_spec *spec);
+void __setup_cpu_e500mc(unsigned long offset, struct cpu_spec *spec);
+void __setup_cpu_440ep(unsigned long offset, struct cpu_spec *spec);
+void __setup_cpu_440epx(unsigned long offset, struct cpu_spec *spec);
+void __setup_cpu_440gx(unsigned long offset, struct cpu_spec *spec);
+void __setup_cpu_440grx(unsigned long offset, struct cpu_spec *spec);
+void __setup_cpu_440spe(unsigned long offset, struct cpu_spec *spec);
+void __setup_cpu_440x5(unsigned long offset, struct cpu_spec *spec);
+void __setup_cpu_460ex(unsigned long offset, struct cpu_spec *spec);
+void __setup_cpu_460gt(unsigned long offset, struct cpu_spec *spec);
+void __setup_cpu_460sx(unsigned long offset, struct cpu_spec *spec);
+void __setup_cpu_apm821xx(unsigned long offset, struct cpu_spec *spec);
+void __setup_cpu_603(unsigned long offset, struct cpu_spec *spec);
+void __setup_cpu_604(unsigned long offset, struct cpu_spec *spec);
+void __setup_cpu_750(unsigned long offset, struct cpu_spec *spec);
+void __setup_cpu_750cx(unsigned long offset, struct cpu_spec *spec);
+void __setup_cpu_750fx(unsigned long offset, struct cpu_spec *spec);
+void __setup_cpu_7400(unsigned long offset, struct cpu_spec *spec);
+void __setup_cpu_7410(unsigned long offset, struct cpu_spec *spec);
+void __setup_cpu_745x(unsigned long offset, struct cpu_spec *spec);
+
+void __setup_cpu_ppc970(unsigned long offset, struct cpu_spec *spec);
+void __setup_cpu_ppc970MP(unsigned long offset, struct cpu_spec *spec);
+void __setup_cpu_pa6t(unsigned long offset, struct cpu_spec *spec);
+void __restore_cpu_pa6t(void);
+void __restore_cpu_ppc970(void);
+
+void __setup_cpu_e5500(unsigned long offset, struct cpu_spec *spec);
+void __setup_cpu_e6500(unsigned long offset, struct cpu_spec *spec);
+void __restore_cpu_e5500(void);
+void __restore_cpu_e6500(void);
+#endif /* _ASM_POWERPC_CPU_SETUP_H */
diff --git a/arch/powerpc/include/asm/cpuidle.h b/arch/powerpc/include/asm/cpuidle.h
index 9844b3ded187..0cce5dc7fb1c 100644
--- a/arch/powerpc/include/asm/cpuidle.h
+++ b/arch/powerpc/include/asm/cpuidle.h
@@ -85,7 +85,7 @@ extern struct pnv_idle_states_t *pnv_idle_states;
extern int nr_pnv_idle_states;
unsigned long pnv_cpu_offline(unsigned int cpu);
-int validate_psscr_val_mask(u64 *psscr_val, u64 *psscr_mask, u32 flags);
+int __init validate_psscr_val_mask(u64 *psscr_val, u64 *psscr_mask, u32 flags);
static inline void report_invalid_psscr_val(u64 psscr_val, int err)
{
switch (err) {
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index 40a4d3c6fd99..757dbded11dc 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -17,16 +17,6 @@ struct cpu_spec;
typedef void (*cpu_setup_t)(unsigned long offset, struct cpu_spec* spec);
typedef void (*cpu_restore_t)(void);
-enum powerpc_oprofile_type {
- PPC_OPROFILE_INVALID = 0,
- PPC_OPROFILE_RS64 = 1,
- PPC_OPROFILE_POWER4 = 2,
- PPC_OPROFILE_G4 = 3,
- PPC_OPROFILE_FSL_EMB = 4,
- PPC_OPROFILE_CELL = 5,
- PPC_OPROFILE_PA6T = 6,
-};
-
enum powerpc_pmc_type {
PPC_PMC_DEFAULT = 0,
PPC_PMC_IBM = 1,
@@ -41,7 +31,6 @@ extern int machine_check_4xx(struct pt_regs *regs);
extern int machine_check_440A(struct pt_regs *regs);
extern int machine_check_e500mc(struct pt_regs *regs);
extern int machine_check_e500(struct pt_regs *regs);
-extern int machine_check_e200(struct pt_regs *regs);
extern int machine_check_47x(struct pt_regs *regs);
int machine_check_8xx(struct pt_regs *regs);
int machine_check_83xx(struct pt_regs *regs);
@@ -81,19 +70,6 @@ struct cpu_spec {
/* Used to restore cpu setup on secondary processors and at resume */
cpu_restore_t cpu_restore;
- /* Used by oprofile userspace to select the right counters */
- char *oprofile_cpu_type;
-
- /* Processor specific oprofile operations */
- enum powerpc_oprofile_type oprofile_type;
-
- /* Bit locations inside the mmcra change */
- unsigned long oprofile_mmcra_sihv;
- unsigned long oprofile_mmcra_sipr;
-
- /* Bits to clear during an oprofile exception */
- unsigned long oprofile_mmcra_clear;
-
/* Name of processor class, for the ELF AT_PLATFORM entry */
char *platform;
@@ -137,7 +113,7 @@ static inline void cpu_feature_keys_init(void) { }
#define CPU_FTR_DBELL ASM_CONST(0x00000004)
#define CPU_FTR_CAN_NAP ASM_CONST(0x00000008)
#define CPU_FTR_DEBUG_LVL_EXC ASM_CONST(0x00000010)
-#define CPU_FTR_NODSISRALIGN ASM_CONST(0x00000020)
+// ASM_CONST(0x00000020) Free
#define CPU_FTR_FPU_UNAVAILABLE ASM_CONST(0x00000040)
#define CPU_FTR_LWSYNC ASM_CONST(0x00000080)
#define CPU_FTR_NOEXECUTE ASM_CONST(0x00000100)
@@ -165,6 +141,7 @@ static inline void cpu_feature_keys_init(void) { }
#else /* CONFIG_PPC32 */
/* Define these to 0 for the sake of tests in common code */
#define CPU_FTR_PPC_LE (0)
+#define CPU_FTR_SPE (0)
#endif
/*
@@ -198,7 +175,7 @@ static inline void cpu_feature_keys_init(void) { }
#define CPU_FTR_STCX_CHECKS_ADDRESS LONG_ASM_CONST(0x0000000080000000)
#define CPU_FTR_POPCNTB LONG_ASM_CONST(0x0000000100000000)
#define CPU_FTR_POPCNTD LONG_ASM_CONST(0x0000000200000000)
-#define CPU_FTR_PKEY LONG_ASM_CONST(0x0000000400000000)
+/* LONG_ASM_CONST(0x0000000400000000) Free */
#define CPU_FTR_VMX_COPY LONG_ASM_CONST(0x0000000800000000)
#define CPU_FTR_TM LONG_ASM_CONST(0x0000001000000000)
#define CPU_FTR_CFAR LONG_ASM_CONST(0x0000002000000000)
@@ -213,12 +190,12 @@ static inline void cpu_feature_keys_init(void) { }
#define CPU_FTR_P9_TIDR LONG_ASM_CONST(0x0000800000000000)
#define CPU_FTR_P9_TLBIE_ERAT_BUG LONG_ASM_CONST(0x0001000000000000)
#define CPU_FTR_P9_RADIX_PREFETCH_BUG LONG_ASM_CONST(0x0002000000000000)
+#define CPU_FTR_ARCH_31 LONG_ASM_CONST(0x0004000000000000)
+#define CPU_FTR_DAWR1 LONG_ASM_CONST(0x0008000000000000)
#ifndef __ASSEMBLY__
-#define CPU_FTR_PPCAS_ARCH_V2 (CPU_FTR_NOEXECUTE | CPU_FTR_NODSISRALIGN)
-
-#define MMU_FTR_PPCAS_ARCH_V2 (MMU_FTR_TLBIEL | MMU_FTR_16M_PAGE)
+#define CPU_FTR_PPCAS_ARCH_V2 (CPU_FTR_NOEXECUTE)
/* We only set the altivec features if the kernel was compiled with altivec
* support
@@ -292,8 +269,6 @@ static inline void cpu_feature_keys_init(void) { }
#define CPU_FTR_MAYBE_CAN_NAP 0
#endif
-#define CPU_FTRS_PPC601 (CPU_FTR_COMMON | \
- CPU_FTR_COHERENT_ICACHE)
#define CPU_FTRS_603 (CPU_FTR_COMMON | CPU_FTR_MAYBE_CAN_DOZE | \
CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_PPC_LE | CPU_FTR_NOEXECUTE)
#define CPU_FTRS_604 (CPU_FTR_COMMON | CPU_FTR_PPC_LE)
@@ -368,7 +343,7 @@ static inline void cpu_feature_keys_init(void) { }
CPU_FTR_PPC_LE | CPU_FTR_NEED_PAIRED_STWCX)
#define CPU_FTRS_82XX (CPU_FTR_COMMON | CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_NOEXECUTE)
#define CPU_FTRS_G2_LE (CPU_FTR_COMMON | CPU_FTR_MAYBE_CAN_DOZE | \
- CPU_FTR_MAYBE_CAN_NAP)
+ CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_NOEXECUTE)
#define CPU_FTRS_E300 (CPU_FTR_MAYBE_CAN_DOZE | \
CPU_FTR_MAYBE_CAN_NAP | \
CPU_FTR_COMMON | CPU_FTR_NOEXECUTE)
@@ -377,38 +352,33 @@ static inline void cpu_feature_keys_init(void) { }
CPU_FTR_COMMON | CPU_FTR_FPU_UNAVAILABLE | CPU_FTR_NOEXECUTE)
#define CPU_FTRS_CLASSIC32 (CPU_FTR_COMMON)
#define CPU_FTRS_8XX (CPU_FTR_NOEXECUTE)
-#define CPU_FTRS_40X (CPU_FTR_NODSISRALIGN | CPU_FTR_NOEXECUTE)
-#define CPU_FTRS_44X (CPU_FTR_NODSISRALIGN | CPU_FTR_NOEXECUTE)
-#define CPU_FTRS_440x6 (CPU_FTR_NODSISRALIGN | CPU_FTR_NOEXECUTE | \
+#define CPU_FTRS_40X (CPU_FTR_NOEXECUTE)
+#define CPU_FTRS_44X (CPU_FTR_NOEXECUTE)
+#define CPU_FTRS_440x6 (CPU_FTR_NOEXECUTE | \
CPU_FTR_INDEXED_DCR)
#define CPU_FTRS_47X (CPU_FTRS_440x6)
-#define CPU_FTRS_E200 (CPU_FTR_SPE_COMP | \
- CPU_FTR_NODSISRALIGN | CPU_FTR_COHERENT_ICACHE | \
- CPU_FTR_NOEXECUTE | \
- CPU_FTR_DEBUG_LVL_EXC)
#define CPU_FTRS_E500 (CPU_FTR_MAYBE_CAN_DOZE | \
- CPU_FTR_SPE_COMP | CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_NODSISRALIGN | \
+ CPU_FTR_SPE_COMP | CPU_FTR_MAYBE_CAN_NAP | \
CPU_FTR_NOEXECUTE)
#define CPU_FTRS_E500_2 (CPU_FTR_MAYBE_CAN_DOZE | \
CPU_FTR_SPE_COMP | CPU_FTR_MAYBE_CAN_NAP | \
- CPU_FTR_NODSISRALIGN | CPU_FTR_NOEXECUTE)
-#define CPU_FTRS_E500MC (CPU_FTR_NODSISRALIGN | \
+ CPU_FTR_NOEXECUTE)
+#define CPU_FTRS_E500MC ( \
CPU_FTR_LWSYNC | CPU_FTR_NOEXECUTE | \
CPU_FTR_DBELL | CPU_FTR_DEBUG_LVL_EXC | CPU_FTR_EMB_HV)
/*
* e5500/e6500 erratum A-006958 is a timebase bug that can use the
* same workaround as CPU_FTR_CELL_TB_BUG.
*/
-#define CPU_FTRS_E5500 (CPU_FTR_NODSISRALIGN | \
+#define CPU_FTRS_E5500 ( \
CPU_FTR_LWSYNC | CPU_FTR_NOEXECUTE | \
CPU_FTR_DBELL | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
CPU_FTR_DEBUG_LVL_EXC | CPU_FTR_EMB_HV | CPU_FTR_CELL_TB_BUG)
-#define CPU_FTRS_E6500 (CPU_FTR_NODSISRALIGN | \
+#define CPU_FTRS_E6500 ( \
CPU_FTR_LWSYNC | CPU_FTR_NOEXECUTE | \
CPU_FTR_DBELL | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
CPU_FTR_DEBUG_LVL_EXC | CPU_FTR_EMB_HV | CPU_FTR_ALTIVEC_COMP | \
CPU_FTR_CELL_TB_BUG | CPU_FTR_SMT)
-#define CPU_FTRS_GENERIC_32 (CPU_FTR_COMMON | CPU_FTR_NODSISRALIGN)
/* 64-bit CPUs */
#define CPU_FTRS_PPC970 (CPU_FTR_LWSYNC | \
@@ -437,7 +407,7 @@ static inline void cpu_feature_keys_init(void) { }
CPU_FTR_DSCR | CPU_FTR_SAO | CPU_FTR_ASYM_SMT | \
CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
CPU_FTR_CFAR | CPU_FTR_HVMODE | \
- CPU_FTR_VMX_COPY | CPU_FTR_HAS_PPR | CPU_FTR_DABRX | CPU_FTR_PKEY)
+ CPU_FTR_VMX_COPY | CPU_FTR_HAS_PPR | CPU_FTR_DABRX )
#define CPU_FTRS_POWER8 (CPU_FTR_LWSYNC | \
CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | CPU_FTR_ARCH_206 |\
CPU_FTR_MMCRA | CPU_FTR_SMT | \
@@ -447,7 +417,7 @@ static inline void cpu_feature_keys_init(void) { }
CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
CPU_FTR_CFAR | CPU_FTR_HVMODE | CPU_FTR_VMX_COPY | \
CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_DAWR | \
- CPU_FTR_ARCH_207S | CPU_FTR_TM_COMP | CPU_FTR_PKEY)
+ CPU_FTR_ARCH_207S | CPU_FTR_TM_COMP )
#define CPU_FTRS_POWER8E (CPU_FTRS_POWER8 | CPU_FTR_PMAO_BUG)
#define CPU_FTRS_POWER9 (CPU_FTR_LWSYNC | \
CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | CPU_FTR_ARCH_206 |\
@@ -458,8 +428,8 @@ static inline void cpu_feature_keys_init(void) { }
CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
CPU_FTR_CFAR | CPU_FTR_HVMODE | CPU_FTR_VMX_COPY | \
CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_ARCH_207S | \
- CPU_FTR_TM_COMP | CPU_FTR_ARCH_300 | CPU_FTR_PKEY | \
- CPU_FTR_P9_TLBIE_STQ_BUG | CPU_FTR_P9_TLBIE_ERAT_BUG | CPU_FTR_P9_TIDR)
+ CPU_FTR_TM_COMP | CPU_FTR_ARCH_300 | CPU_FTR_P9_TLBIE_STQ_BUG | \
+ CPU_FTR_P9_TLBIE_ERAT_BUG | CPU_FTR_P9_TIDR)
#define CPU_FTRS_POWER9_DD2_0 (CPU_FTRS_POWER9 | CPU_FTR_P9_RADIX_PREFETCH_BUG)
#define CPU_FTRS_POWER9_DD2_1 (CPU_FTRS_POWER9 | \
CPU_FTR_P9_RADIX_PREFETCH_BUG | \
@@ -467,6 +437,21 @@ static inline void cpu_feature_keys_init(void) { }
#define CPU_FTRS_POWER9_DD2_2 (CPU_FTRS_POWER9 | CPU_FTR_POWER9_DD2_1 | \
CPU_FTR_P9_TM_HV_ASSIST | \
CPU_FTR_P9_TM_XER_SO_BUG)
+#define CPU_FTRS_POWER9_DD2_3 (CPU_FTRS_POWER9 | CPU_FTR_POWER9_DD2_1 | \
+ CPU_FTR_P9_TM_HV_ASSIST | \
+ CPU_FTR_P9_TM_XER_SO_BUG | \
+ CPU_FTR_DAWR)
+#define CPU_FTRS_POWER10 (CPU_FTR_LWSYNC | \
+ CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | CPU_FTR_ARCH_206 |\
+ CPU_FTR_MMCRA | CPU_FTR_SMT | \
+ CPU_FTR_COHERENT_ICACHE | \
+ CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \
+ CPU_FTR_DSCR | CPU_FTR_SAO | \
+ CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
+ CPU_FTR_CFAR | CPU_FTR_HVMODE | CPU_FTR_VMX_COPY | \
+ CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_ARCH_207S | \
+ CPU_FTR_ARCH_300 | CPU_FTR_ARCH_31 | \
+ CPU_FTR_DAWR | CPU_FTR_DAWR1)
#define CPU_FTRS_CELL (CPU_FTR_LWSYNC | \
CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \
@@ -477,41 +462,42 @@ static inline void cpu_feature_keys_init(void) { }
CPU_FTR_PURR | CPU_FTR_REAL_LE | CPU_FTR_DABRX)
#define CPU_FTRS_COMPATIBLE (CPU_FTR_PPCAS_ARCH_V2)
-#ifdef __powerpc64__
-#ifdef CONFIG_PPC_BOOK3E
+#ifdef CONFIG_PPC64
+#ifdef CONFIG_PPC_BOOK3E_64
#define CPU_FTRS_POSSIBLE (CPU_FTRS_E6500 | CPU_FTRS_E5500)
#else
#ifdef CONFIG_CPU_LITTLE_ENDIAN
#define CPU_FTRS_POSSIBLE \
(CPU_FTRS_POWER7 | CPU_FTRS_POWER8E | CPU_FTRS_POWER8 | \
CPU_FTR_ALTIVEC_COMP | CPU_FTR_VSX_COMP | CPU_FTRS_POWER9 | \
- CPU_FTRS_POWER9_DD2_1 | CPU_FTRS_POWER9_DD2_2)
+ CPU_FTRS_POWER9_DD2_1 | CPU_FTRS_POWER9_DD2_2 | \
+ CPU_FTRS_POWER9_DD2_3 | CPU_FTRS_POWER10)
#else
#define CPU_FTRS_POSSIBLE \
(CPU_FTRS_PPC970 | CPU_FTRS_POWER5 | \
CPU_FTRS_POWER6 | CPU_FTRS_POWER7 | CPU_FTRS_POWER8E | \
CPU_FTRS_POWER8 | CPU_FTRS_CELL | CPU_FTRS_PA6T | \
CPU_FTR_VSX_COMP | CPU_FTR_ALTIVEC_COMP | CPU_FTRS_POWER9 | \
- CPU_FTRS_POWER9_DD2_1 | CPU_FTRS_POWER9_DD2_2)
+ CPU_FTRS_POWER9_DD2_1 | CPU_FTRS_POWER9_DD2_2 | \
+ CPU_FTRS_POWER9_DD2_3 | CPU_FTRS_POWER10)
#endif /* CONFIG_CPU_LITTLE_ENDIAN */
#endif
#else
enum {
CPU_FTRS_POSSIBLE =
-#ifdef CONFIG_PPC_BOOK3S_601
- CPU_FTRS_PPC601 |
-#elif defined(CONFIG_PPC_BOOK3S_32)
- CPU_FTRS_PPC601 | CPU_FTRS_603 | CPU_FTRS_604 | CPU_FTRS_740_NOTAU |
+#ifdef CONFIG_PPC_BOOK3S_604
+ CPU_FTRS_604 | CPU_FTRS_740_NOTAU |
CPU_FTRS_740 | CPU_FTRS_750 | CPU_FTRS_750FX1 |
CPU_FTRS_750FX2 | CPU_FTRS_750FX | CPU_FTRS_750GX |
CPU_FTRS_7400_NOTAU | CPU_FTRS_7400 | CPU_FTRS_7450_20 |
CPU_FTRS_7450_21 | CPU_FTRS_7450_23 | CPU_FTRS_7455_1 |
CPU_FTRS_7455_20 | CPU_FTRS_7455 | CPU_FTRS_7447_10 |
- CPU_FTRS_7447 | CPU_FTRS_7447A | CPU_FTRS_82XX |
- CPU_FTRS_G2_LE | CPU_FTRS_E300 | CPU_FTRS_E300C2 |
+ CPU_FTRS_7447 | CPU_FTRS_7447A |
CPU_FTRS_CLASSIC32 |
-#else
- CPU_FTRS_GENERIC_32 |
+#endif
+#ifdef CONFIG_PPC_BOOK3S_603
+ CPU_FTRS_603 | CPU_FTRS_82XX |
+ CPU_FTRS_G2_LE | CPU_FTRS_E300 | CPU_FTRS_E300C2 |
#endif
#ifdef CONFIG_PPC_8xx
CPU_FTRS_8XX |
@@ -519,16 +505,12 @@ enum {
#ifdef CONFIG_40x
CPU_FTRS_40X |
#endif
-#ifdef CONFIG_44x
- CPU_FTRS_44X | CPU_FTRS_440x6 |
-#endif
#ifdef CONFIG_PPC_47x
CPU_FTRS_47X | CPU_FTR_476_DD2 |
+#elif defined(CONFIG_44x)
+ CPU_FTRS_44X | CPU_FTRS_440x6 |
#endif
-#ifdef CONFIG_E200
- CPU_FTRS_E200 |
-#endif
-#ifdef CONFIG_E500
+#ifdef CONFIG_PPC_E500
CPU_FTRS_E500 | CPU_FTRS_E500_2 |
#endif
#ifdef CONFIG_PPC_E500MC
@@ -538,8 +520,8 @@ enum {
};
#endif /* __powerpc64__ */
-#ifdef __powerpc64__
-#ifdef CONFIG_PPC_BOOK3E
+#ifdef CONFIG_PPC64
+#ifdef CONFIG_PPC_BOOK3E_64
#define CPU_FTRS_ALWAYS (CPU_FTRS_E6500 & CPU_FTRS_E5500)
#else
@@ -547,7 +529,6 @@ enum {
#define CPU_FTRS_DT_CPU_BASE \
(CPU_FTR_LWSYNC | \
CPU_FTR_FPU_UNAVAILABLE | \
- CPU_FTR_NODSISRALIGN | \
CPU_FTR_NOEXECUTE | \
CPU_FTR_COHERENT_ICACHE | \
CPU_FTR_STCX_CHECKS_ADDRESS | \
@@ -563,33 +544,34 @@ enum {
#define CPU_FTRS_ALWAYS \
(CPU_FTRS_POSSIBLE & ~CPU_FTR_HVMODE & CPU_FTRS_POWER7 & \
CPU_FTRS_POWER8E & CPU_FTRS_POWER8 & CPU_FTRS_POWER9 & \
- CPU_FTRS_POWER9_DD2_1 & CPU_FTRS_DT_CPU_BASE)
+ CPU_FTRS_POWER9_DD2_1 & CPU_FTRS_POWER9_DD2_2 & \
+ CPU_FTRS_POWER10 & CPU_FTRS_DT_CPU_BASE)
#else
#define CPU_FTRS_ALWAYS \
(CPU_FTRS_PPC970 & CPU_FTRS_POWER5 & \
CPU_FTRS_POWER6 & CPU_FTRS_POWER7 & CPU_FTRS_CELL & \
CPU_FTRS_PA6T & CPU_FTRS_POWER8 & CPU_FTRS_POWER8E & \
~CPU_FTR_HVMODE & CPU_FTRS_POSSIBLE & CPU_FTRS_POWER9 & \
- CPU_FTRS_POWER9_DD2_1 & CPU_FTRS_DT_CPU_BASE)
+ CPU_FTRS_POWER9_DD2_1 & CPU_FTRS_POWER9_DD2_2 & \
+ CPU_FTRS_POWER10 & CPU_FTRS_DT_CPU_BASE)
#endif /* CONFIG_CPU_LITTLE_ENDIAN */
#endif
#else
enum {
CPU_FTRS_ALWAYS =
-#ifdef CONFIG_PPC_BOOK3S_601
- CPU_FTRS_PPC601 &
-#elif defined(CONFIG_PPC_BOOK3S_32)
- CPU_FTRS_603 & CPU_FTRS_604 & CPU_FTRS_740_NOTAU &
+#ifdef CONFIG_PPC_BOOK3S_604
+ CPU_FTRS_604 & CPU_FTRS_740_NOTAU &
CPU_FTRS_740 & CPU_FTRS_750 & CPU_FTRS_750FX1 &
CPU_FTRS_750FX2 & CPU_FTRS_750FX & CPU_FTRS_750GX &
CPU_FTRS_7400_NOTAU & CPU_FTRS_7400 & CPU_FTRS_7450_20 &
CPU_FTRS_7450_21 & CPU_FTRS_7450_23 & CPU_FTRS_7455_1 &
CPU_FTRS_7455_20 & CPU_FTRS_7455 & CPU_FTRS_7447_10 &
- CPU_FTRS_7447 & CPU_FTRS_7447A & CPU_FTRS_82XX &
- CPU_FTRS_G2_LE & CPU_FTRS_E300 & CPU_FTRS_E300C2 &
+ CPU_FTRS_7447 & CPU_FTRS_7447A &
CPU_FTRS_CLASSIC32 &
-#else
- CPU_FTRS_GENERIC_32 &
+#endif
+#ifdef CONFIG_PPC_BOOK3S_603
+ CPU_FTRS_603 & CPU_FTRS_82XX &
+ CPU_FTRS_G2_LE & CPU_FTRS_E300 & CPU_FTRS_E300C2 &
#endif
#ifdef CONFIG_PPC_8xx
CPU_FTRS_8XX &
@@ -597,13 +579,12 @@ enum {
#ifdef CONFIG_40x
CPU_FTRS_40X &
#endif
-#ifdef CONFIG_44x
+#ifdef CONFIG_PPC_47x
+ CPU_FTRS_47X &
+#elif defined(CONFIG_44x)
CPU_FTRS_44X & CPU_FTRS_440x6 &
#endif
-#ifdef CONFIG_E200
- CPU_FTRS_E200 &
-#endif
-#ifdef CONFIG_E500
+#ifdef CONFIG_PPC_E500
CPU_FTRS_E500 & CPU_FTRS_E500_2 &
#endif
#ifdef CONFIG_PPC_E500MC
@@ -614,7 +595,12 @@ enum {
};
#endif /* __powerpc64__ */
-#define HBP_NUM 1
+/*
+ * Maximum number of hw breakpoints supported on powerpc. The number of
+ * breakpoints supported by the actual hw might be less than this, which
+ * is decided at run time in nr_wp_slots().
+ */
+#define HBP_NUM_MAX 2
#endif /* !__ASSEMBLY__ */
diff --git a/arch/powerpc/include/asm/cputhreads.h b/arch/powerpc/include/asm/cputhreads.h
index deb99fd6e060..f26c430f3982 100644
--- a/arch/powerpc/include/asm/cputhreads.h
+++ b/arch/powerpc/include/asm/cputhreads.h
@@ -23,7 +23,6 @@
extern int threads_per_core;
extern int threads_per_subcore;
extern int threads_shift;
-extern bool has_big_cores;
extern cpumask_t threads_core_mask;
#else
#define threads_per_core 1
@@ -33,44 +32,11 @@ extern cpumask_t threads_core_mask;
#define threads_core_mask (*get_cpu_mask(0))
#endif
-/* cpu_thread_mask_to_cores - Return a cpumask of one per cores
- * hit by the argument
- *
- * @threads: a cpumask of online threads
- *
- * This function returns a cpumask which will have one online cpu's
- * bit set for each core that has at least one thread set in the argument.
- *
- * This can typically be used for things like IPI for tlb invalidations
- * since those need to be done only once per core/TLB
- */
-static inline cpumask_t cpu_thread_mask_to_cores(const struct cpumask *threads)
-{
- cpumask_t tmp, res;
- int i, cpu;
-
- cpumask_clear(&res);
- for (i = 0; i < NR_CPUS; i += threads_per_core) {
- cpumask_shift_left(&tmp, &threads_core_mask, i);
- if (cpumask_intersects(threads, &tmp)) {
- cpu = cpumask_next_and(-1, &tmp, cpu_online_mask);
- if (cpu < nr_cpu_ids)
- cpumask_set_cpu(cpu, &res);
- }
- }
- return res;
-}
-
static inline int cpu_nr_cores(void)
{
return nr_cpu_ids >> threads_shift;
}
-static inline cpumask_t cpu_online_cores_map(void)
-{
- return cpu_thread_mask_to_cores(cpu_online_mask);
-}
-
#ifdef CONFIG_SMP
int cpu_core_index_of_thread(int cpu);
int cpu_first_thread_of_core(int core);
@@ -99,6 +65,36 @@ static inline int cpu_last_thread_sibling(int cpu)
return cpu | (threads_per_core - 1);
}
+/*
+ * tlb_thread_siblings are siblings which share a TLB. This is not
+ * architected, is not something a hypervisor could emulate and a future
+ * CPU may change behaviour even in compat mode, so this should only be
+ * used on PowerNV, and only with care.
+ */
+static inline int cpu_first_tlb_thread_sibling(int cpu)
+{
+ if (cpu_has_feature(CPU_FTR_ARCH_300) && (threads_per_core == 8))
+ return cpu & ~0x6; /* Big Core */
+ else
+ return cpu_first_thread_sibling(cpu);
+}
+
+static inline int cpu_last_tlb_thread_sibling(int cpu)
+{
+ if (cpu_has_feature(CPU_FTR_ARCH_300) && (threads_per_core == 8))
+ return cpu | 0x6; /* Big Core */
+ else
+ return cpu_last_thread_sibling(cpu);
+}
+
+static inline int cpu_tlb_thread_sibling_step(void)
+{
+ if (cpu_has_feature(CPU_FTR_ARCH_300) && (threads_per_core == 8))
+ return 2; /* Big Core */
+ else
+ return 1;
+}
+
static inline u32 get_tensr(void)
{
#ifdef CONFIG_BOOKE
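A hedged, kernel-style sketch (the function name is made up) of how the new TLB-sibling helpers are meant to be used, e.g. when something needs doing once per TLB-sharing group rather than once per thread. On a POWER9 big core (8 threads per core) the step is 2, so the four threads of one small core are visited; elsewhere it degenerates to the ordinary thread-sibling walk.

```c
/* Sketch only: visit every thread that shares a TLB with @cpu. */
static void visit_tlb_siblings(int cpu)
{
        int t;

        for (t = cpu_first_tlb_thread_sibling(cpu);
             t <= cpu_last_tlb_thread_sibling(cpu);
             t += cpu_tlb_thread_sibling_step())
                pr_info("cpu %d shares a TLB with cpu %d\n", cpu, t);
}
```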
diff --git a/arch/powerpc/include/asm/cputime.h b/arch/powerpc/include/asm/cputime.h
index 2431b4ada2fa..431ae2343022 100644
--- a/arch/powerpc/include/asm/cputime.h
+++ b/arch/powerpc/include/asm/cputime.h
@@ -19,6 +19,7 @@
#include <asm/div64.h>
#include <asm/time.h>
#include <asm/param.h>
+#include <asm/firmware.h>
typedef u64 __nocast cputime_t;
typedef u64 __nocast cputime64_t;
@@ -36,6 +37,8 @@ static inline unsigned long cputime_to_usecs(const cputime_t ct)
return mulhdu((__force u64) ct, __cputime_usec_factor);
}
+#define cputime_to_nsecs(cputime) tb_to_ns((__force u64)cputime)
+
/*
* PPC64 uses PACA which is task independent for storing accounting data while
* PPC32 uses struct thread_info, therefore at task switch the accounting data
@@ -43,9 +46,12 @@ static inline unsigned long cputime_to_usecs(const cputime_t ct)
*/
#ifdef CONFIG_PPC64
#define get_accounting(tsk) (&get_paca()->accounting)
+#define raw_get_accounting(tsk) (&local_paca->accounting)
static inline void arch_vtime_task_switch(struct task_struct *tsk) { }
+
#else
#define get_accounting(tsk) (&task_thread_info(tsk)->accounting)
+#define raw_get_accounting(tsk) get_accounting(tsk)
/*
* Called from the context switch with interrupts disabled, to charge all
* accumulated times to the current process, and to prepare accounting on
@@ -60,6 +66,50 @@ static inline void arch_vtime_task_switch(struct task_struct *prev)
}
#endif
+/*
+ * account_cpu_user_entry/exit runs "unreconciled", so can't trace,
+ * can't use get_paca()
+ */
+static notrace inline void account_cpu_user_entry(void)
+{
+ unsigned long tb = mftb();
+ struct cpu_accounting_data *acct = raw_get_accounting(current);
+
+ acct->utime += (tb - acct->starttime_user);
+ acct->starttime = tb;
+}
+
+static notrace inline void account_cpu_user_exit(void)
+{
+ unsigned long tb = mftb();
+ struct cpu_accounting_data *acct = raw_get_accounting(current);
+
+ acct->stime += (tb - acct->starttime);
+ acct->starttime_user = tb;
+}
+
+static notrace inline void account_stolen_time(void)
+{
+#ifdef CONFIG_PPC_SPLPAR
+ if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
+ struct lppaca *lp = local_paca->lppaca_ptr;
+
+ if (unlikely(local_paca->dtl_ridx != be64_to_cpu(lp->dtl_idx)))
+ pseries_accumulate_stolen_time();
+ }
+#endif
+}
+
#endif /* __KERNEL__ */
+#else /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
+static inline void account_cpu_user_entry(void)
+{
+}
+static inline void account_cpu_user_exit(void)
+{
+}
+static notrace inline void account_stolen_time(void)
+{
+}
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
#endif /* __POWERPC_CPUTIME_H */
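A hedged sketch of how the two accounting helpers above pair up; the real call sites are in the interrupt/syscall entry and exit paths, and the function name here is made up. Time since the last return to user space is charged as utime on the way in, and kernel time is charged as stime on the way back out.

```c
/* Sketch only: illustrates the intended entry/exit pairing. */
static void example_user_entry_exit(void)
{
        account_cpu_user_entry();       /* user -> kernel transition */

        /* ... handle the syscall or interrupt ... */

        account_cpu_user_exit();        /* kernel -> user transition */
}
```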
diff --git a/arch/powerpc/include/asm/crashdump-ppc64.h b/arch/powerpc/include/asm/crashdump-ppc64.h
new file mode 100644
index 000000000000..68d9717cc5ee
--- /dev/null
+++ b/arch/powerpc/include/asm/crashdump-ppc64.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef _ASM_POWERPC_CRASHDUMP_PPC64_H
+#define _ASM_POWERPC_CRASHDUMP_PPC64_H
+
+/*
+ * Backup region - first 64KB of System RAM
+ *
+ * If ever the below macros are to be changed, please be judicious.
+ * The implicit assumptions are:
+ * - start, end & size are less than UINT32_MAX.
+ * - start & size are at least 8 byte aligned.
+ *
+ * For implementation details: arch/powerpc/purgatory/trampoline_64.S
+ */
+#define BACKUP_SRC_START 0
+#define BACKUP_SRC_END 0xffff
+#define BACKUP_SRC_SIZE (BACKUP_SRC_END - BACKUP_SRC_START + 1)
+
+#endif /* __ASM_POWERPC_CRASHDUMP_PPC64_H */
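A minimal stand-alone check of the assumptions stated in the comment above (this is an illustration with local copies of the macros, not kernel code): the region is exactly the first 64KB, start and size are 8-byte aligned, and everything fits comfortably in 32 bits.

```c
#include <assert.h>
#include <stdint.h>

/* Local copies of the macros above, purely to exercise the stated assumptions. */
#define BACKUP_SRC_START 0
#define BACKUP_SRC_END   0xffff
#define BACKUP_SRC_SIZE  (BACKUP_SRC_END - BACKUP_SRC_START + 1)

static_assert(BACKUP_SRC_SIZE == 0x10000, "backup region is the first 64KB");
static_assert((BACKUP_SRC_START % 8) == 0 && (BACKUP_SRC_SIZE % 8) == 0,
              "start and size are 8-byte aligned");
static_assert(BACKUP_SRC_END < UINT32_MAX, "end is well below UINT32_MAX");

int main(void) { return 0; }
```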
diff --git a/arch/powerpc/include/asm/dbell.h b/arch/powerpc/include/asm/dbell.h
index 4ce6808deed3..3e9da22a2779 100644
--- a/arch/powerpc/include/asm/dbell.h
+++ b/arch/powerpc/include/asm/dbell.h
@@ -11,8 +11,10 @@
#include <linux/smp.h>
#include <linux/threads.h>
+#include <asm/cputhreads.h>
#include <asm/ppc-opcode.h>
#include <asm/feature-fixups.h>
+#include <asm/kvm_ppc.h>
#define PPC_DBELL_MSG_BRDCAST (0x04000000)
#define PPC_DBELL_TYPE(x) (((x) & 0xf) << (63-36))
@@ -87,9 +89,6 @@ static inline void ppc_msgsync(void)
#endif /* CONFIG_PPC_BOOK3S */
-extern void doorbell_global_ipi(int cpu);
-extern void doorbell_core_ipi(int cpu);
-extern int doorbell_try_core_ipi(int cpu);
extern void doorbell_exception(struct pt_regs *regs);
static inline void ppc_msgsnd(enum ppc_dbell type, u32 flags, u32 tag)
@@ -100,4 +99,63 @@ static inline void ppc_msgsnd(enum ppc_dbell type, u32 flags, u32 tag)
_ppc_msgsnd(msg);
}
+#ifdef CONFIG_SMP
+
+/*
+ * Doorbells must only be used if CPU_FTR_DBELL is available.
+ * msgsnd is used in HV, and msgsndp is used in !HV.
+ *
+ * These should be used by platform code that is aware of restrictions.
+ * Other arch code should use ->cause_ipi.
+ *
+ * doorbell_global_ipi() sends a dbell to any target CPU.
+ * Must be used only by architectures that address msgsnd target
+ * by PIR/get_hard_smp_processor_id.
+ */
+static inline void doorbell_global_ipi(int cpu)
+{
+ u32 tag = get_hard_smp_processor_id(cpu);
+
+ kvmppc_set_host_ipi(cpu);
+ /* Order previous accesses vs. msgsnd, which is treated as a store */
+ ppc_msgsnd_sync();
+ ppc_msgsnd(PPC_DBELL_MSGTYPE, 0, tag);
+}
+
+/*
+ * doorbell_core_ipi() sends a dbell to a target CPU in the same core.
+ * Must be used only by architectures that address msgsnd target
+ * by TIR/cpu_thread_in_core.
+ */
+static inline void doorbell_core_ipi(int cpu)
+{
+ u32 tag = cpu_thread_in_core(cpu);
+
+ kvmppc_set_host_ipi(cpu);
+ /* Order previous accesses vs. msgsnd, which is treated as a store */
+ ppc_msgsnd_sync();
+ ppc_msgsnd(PPC_DBELL_MSGTYPE, 0, tag);
+}
+
+/*
+ * Attempt to cause a core doorbell if destination is on the same core.
+ * Returns 1 on success, 0 on failure.
+ */
+static inline int doorbell_try_core_ipi(int cpu)
+{
+ int this_cpu = get_cpu();
+ int ret = 0;
+
+ if (cpumask_test_cpu(cpu, cpu_sibling_mask(this_cpu))) {
+ doorbell_core_ipi(cpu);
+ ret = 1;
+ }
+
+ put_cpu();
+
+ return ret;
+}
+
+#endif /* CONFIG_SMP */
+
#endif /* _ASM_POWERPC_DBELL_H */
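A hypothetical sketch (not a real smp_ops implementation) of how platform code might combine the helpers above: try the cheap in-core msgsndp doorbell first and fall back to the global doorbell. It assumes CPU_FTR_DBELL has already been checked and that msgsnd is usable (hypervisor mode, PIR addressing), as the comments above require.

```c
/* Sketch only: prefer a core doorbell, fall back to a global one. */
static void example_cause_ipi(int cpu)
{
        if (doorbell_try_core_ipi(cpu))
                return;

        doorbell_global_ipi(cpu);
}
```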
diff --git a/arch/powerpc/include/asm/dcr-native.h b/arch/powerpc/include/asm/dcr-native.h
index 7141ccea8c94..a92059964579 100644
--- a/arch/powerpc/include/asm/dcr-native.h
+++ b/arch/powerpc/include/asm/dcr-native.h
@@ -53,8 +53,8 @@ static inline void mtdcrx(unsigned int reg, unsigned int val)
#define mfdcr(rn) \
({unsigned int rval; \
if (__builtin_constant_p(rn) && rn < 1024) \
- asm volatile("mfdcr %0," __stringify(rn) \
- : "=r" (rval)); \
+ asm volatile("mfdcr %0, %1" : "=r" (rval) \
+ : "n" (rn)); \
else if (likely(cpu_has_feature(CPU_FTR_INDEXED_DCR))) \
rval = mfdcrx(rn); \
else \
@@ -64,8 +64,8 @@ static inline void mtdcrx(unsigned int reg, unsigned int val)
#define mtdcr(rn, v) \
do { \
if (__builtin_constant_p(rn) && rn < 1024) \
- asm volatile("mtdcr " __stringify(rn) ",%0" \
- : : "r" (v)); \
+ asm volatile("mtdcr %0, %1" \
+ : : "n" (rn), "r" (v)); \
else if (likely(cpu_has_feature(CPU_FTR_INDEXED_DCR))) \
mtdcrx(rn, v); \
else \
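Side note on the constraint change above, for illustration only: with the "n" (immediate) constraint the compiler substitutes the constant DCR number directly into the mfdcr/mtdcr instruction, instead of relying on __stringify() pasting a bare literal. A hypothetical use, where 0xc4 stands in for some device's DCR number:

```c
/* 0xc4 is a made-up DCR number, purely for illustration. */
static inline unsigned int read_example_dcr(void)
{
        return mfdcr(0xc4);     /* constant, so the immediate form is emitted */
}

static inline void write_example_dcr(unsigned int val)
{
        mtdcr(0xc4, val);
}
```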
diff --git a/arch/powerpc/include/asm/debug.h b/arch/powerpc/include/asm/debug.h
index 7756026b95ca..86a14736c76c 100644
--- a/arch/powerpc/include/asm/debug.h
+++ b/arch/powerpc/include/asm/debug.h
@@ -45,15 +45,11 @@ static inline int debugger_break_match(struct pt_regs *regs) { return 0; }
static inline int debugger_fault_handler(struct pt_regs *regs) { return 0; }
#endif
-void __set_breakpoint(struct arch_hw_breakpoint *brk);
+void __set_breakpoint(int nr, struct arch_hw_breakpoint *brk);
bool ppc_breakpoint_available(void);
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
extern void do_send_trap(struct pt_regs *regs, unsigned long address,
unsigned long error_code, int brkpt);
-#else
-
-extern void do_break(struct pt_regs *regs, unsigned long address,
- unsigned long error_code);
#endif
#endif /* _ASM_POWERPC_DEBUG_H */
diff --git a/arch/powerpc/include/asm/debugfs.h b/arch/powerpc/include/asm/debugfs.h
deleted file mode 100644
index 2c5c48571d75..000000000000
--- a/arch/powerpc/include/asm/debugfs.h
+++ /dev/null
@@ -1,13 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-#ifndef _ASM_POWERPC_DEBUGFS_H
-#define _ASM_POWERPC_DEBUGFS_H
-
-/*
- * Copyright 2017, Michael Ellerman, IBM Corporation.
- */
-
-#include <linux/debugfs.h>
-
-extern struct dentry *powerpc_debugfs_root;
-
-#endif /* _ASM_POWERPC_DEBUGFS_H */
diff --git a/arch/powerpc/include/asm/delay.h b/arch/powerpc/include/asm/delay.h
index 66963f7d3e64..51bb8c1476c7 100644
--- a/arch/powerpc/include/asm/delay.h
+++ b/arch/powerpc/include/asm/delay.h
@@ -54,7 +54,7 @@ extern void udelay(unsigned long usecs);
({ \
typeof(condition) __ret; \
unsigned long __loops = tb_ticks_per_usec * timeout; \
- unsigned long __start = get_tbl(); \
+ unsigned long __start = mftb(); \
\
if (delay) { \
while (!(__ret = (condition)) && \
diff --git a/arch/powerpc/include/asm/device.h b/arch/powerpc/include/asm/device.h
index 266542769e4b..47ed639f3b8f 100644
--- a/arch/powerpc/include/asm/device.h
+++ b/arch/powerpc/include/asm/device.h
@@ -19,11 +19,6 @@ struct iommu_table;
*/
struct dev_archdata {
/*
- * Set to %true if the dma_iommu_ops are requested to use a direct
- * window instead of dynamically mapping memory.
- */
- bool iommu_bypass : 1;
- /*
* These two used to be a union. However, with the hybrid ops we need
* both so here we store both a DMA offset for direct mappings and
* an iommu_table for remapped DMA.
@@ -34,9 +29,6 @@ struct dev_archdata {
struct iommu_table *iommu_table_base;
#endif
-#ifdef CONFIG_IOMMU_API
- void *iommu_domain;
-#endif
#ifdef CONFIG_PPC64
struct pci_dn *pci_data;
#endif
@@ -49,10 +41,18 @@ struct dev_archdata {
#ifdef CONFIG_CXL_BASE
struct cxl_context *cxl_ctx;
#endif
+#ifdef CONFIG_PCI_IOV
+ void *iov_data;
+#endif
};
struct pdev_archdata {
u64 dma_mask;
+ /*
+ * Pointer to nvdimm_pmu structure, to handle the unregistering
+ * of pmu device
+ */
+ void *priv;
};
#endif /* _ASM_POWERPC_DEVICE_H */
diff --git a/arch/powerpc/include/asm/dma-direct.h b/arch/powerpc/include/asm/dma-direct.h
index abc154d784b0..128304cbee1d 100644
--- a/arch/powerpc/include/asm/dma-direct.h
+++ b/arch/powerpc/include/asm/dma-direct.h
@@ -2,12 +2,12 @@
#ifndef ASM_POWERPC_DMA_DIRECT_H
#define ASM_POWERPC_DMA_DIRECT_H 1
-static inline dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
+static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
return paddr + dev->archdata.dma_offset;
}
-static inline phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t daddr)
+static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
{
return daddr - dev->archdata.dma_offset;
}
diff --git a/arch/powerpc/include/asm/dma.h b/arch/powerpc/include/asm/dma.h
index 1b4f0254868f..d97c66d9ae34 100644
--- a/arch/powerpc/include/asm/dma.h
+++ b/arch/powerpc/include/asm/dma.h
@@ -151,10 +151,9 @@
#define DMA2_EXT_REG 0x4D6
#ifndef __powerpc64__
- /* in arch/ppc/kernel/setup.c -- Cort */
+ /* in arch/powerpc/kernel/setup_32.c -- Cort */
extern unsigned int DMA_MODE_WRITE;
extern unsigned int DMA_MODE_READ;
- extern unsigned long ISA_DMA_THRESHOLD;
#else
#define DMA_MODE_READ 0x44 /* I/O to memory, no autoinit, increment, single mode */
#define DMA_MODE_WRITE 0x48 /* memory to I/O, no autoinit, increment, single mode */
@@ -341,11 +340,5 @@ extern int request_dma(unsigned int dmanr, const char *device_id);
/* release it again */
extern void free_dma(unsigned int dmanr);
-#ifdef CONFIG_PCI
-extern int isa_dma_bridge_buggy;
-#else
-#define isa_dma_bridge_buggy (0)
-#endif
-
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_DMA_H */
diff --git a/arch/powerpc/include/asm/drmem.h b/arch/powerpc/include/asm/drmem.h
index 3d76e1c388c2..13bf6dee8e2d 100644
--- a/arch/powerpc/include/asm/drmem.h
+++ b/arch/powerpc/include/asm/drmem.h
@@ -8,31 +8,47 @@
#ifndef _ASM_POWERPC_LMB_H
#define _ASM_POWERPC_LMB_H
+#include <linux/sched.h>
+
struct drmem_lmb {
u64 base_addr;
u32 drc_index;
u32 aa_index;
u32 flags;
-#ifdef CONFIG_MEMORY_HOTPLUG
- int nid;
-#endif
};
struct drmem_lmb_info {
struct drmem_lmb *lmbs;
int n_lmbs;
- u32 lmb_size;
+ u64 lmb_size;
};
+struct device_node;
+struct property;
+
extern struct drmem_lmb_info *drmem_info;
+static inline struct drmem_lmb *drmem_lmb_next(struct drmem_lmb *lmb,
+ const struct drmem_lmb *start)
+{
+ /*
+ * DLPAR code paths can take several milliseconds per element
+ * when interacting with firmware. Ensure that we don't
+ * unfairly monopolize the CPU.
+ */
+ if (((++lmb - start) % 16) == 0)
+ cond_resched();
+
+ return lmb;
+}
+
#define for_each_drmem_lmb_in_range(lmb, start, end) \
- for ((lmb) = (start); (lmb) <= (end); (lmb)++)
+ for ((lmb) = (start); (lmb) < (end); lmb = drmem_lmb_next(lmb, start))
#define for_each_drmem_lmb(lmb) \
for_each_drmem_lmb_in_range((lmb), \
&drmem_info->lmbs[0], \
- &drmem_info->lmbs[drmem_info->n_lmbs - 1])
+ &drmem_info->lmbs[drmem_info->n_lmbs])
/*
* The of_drconf_cell_v1 struct defines the layout of the LMB data
@@ -65,8 +81,9 @@ struct of_drconf_cell_v2 {
#define DRCONF_MEM_ASSIGNED 0x00000008
#define DRCONF_MEM_AI_INVALID 0x00000040
#define DRCONF_MEM_RESERVED 0x00000080
+#define DRCONF_MEM_HOTREMOVABLE 0x00000100
-static inline u32 drmem_lmb_size(void)
+static inline u64 drmem_lmb_size(void)
{
return drmem_info->lmb_size;
}
@@ -89,13 +106,15 @@ static inline bool drmem_lmb_reserved(struct drmem_lmb *lmb)
}
u64 drmem_lmb_memory_max(void);
-void __init walk_drmem_lmbs(struct device_node *dn,
- void (*func)(struct drmem_lmb *, const __be32 **));
+int walk_drmem_lmbs(struct device_node *dn, void *data,
+ int (*func)(struct drmem_lmb *, const __be32 **, void *));
int drmem_update_dt(void);
#ifdef CONFIG_PPC_PSERIES
-void __init walk_drmem_lmbs_early(unsigned long node,
- void (*func)(struct drmem_lmb *, const __be32 **));
+int __init
+walk_drmem_lmbs_early(unsigned long node, void *data,
+ int (*func)(struct drmem_lmb *, const __be32 **, void *));
+void drmem_update_lmbs(struct property *prop);
#endif
static inline void invalidate_lmb_associativity_index(struct drmem_lmb *lmb)
@@ -103,22 +122,4 @@ static inline void invalidate_lmb_associativity_index(struct drmem_lmb *lmb)
lmb->aa_index = 0xffffffff;
}
-#ifdef CONFIG_MEMORY_HOTPLUG
-static inline void lmb_set_nid(struct drmem_lmb *lmb)
-{
- lmb->nid = memory_add_physaddr_to_nid(lmb->base_addr);
-}
-static inline void lmb_clear_nid(struct drmem_lmb *lmb)
-{
- lmb->nid = -1;
-}
-#else
-static inline void lmb_set_nid(struct drmem_lmb *lmb)
-{
-}
-static inline void lmb_clear_nid(struct drmem_lmb *lmb)
-{
-}
-#endif
-
#endif /* _ASM_POWERPC_LMB_H */
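A kernel-style sketch (hypothetical function name) of a walker built on the macros above; drmem_lmb_next() drops in a cond_resched() every 16 LMBs so that long scans of firmware-backed LMB lists don't monopolize the CPU.

```c
/* Sketch only: count LMBs that firmware has marked assigned. */
static void count_assigned_lmbs(void)
{
        struct drmem_lmb *lmb;
        unsigned int assigned = 0;

        for_each_drmem_lmb(lmb)
                if (lmb->flags & DRCONF_MEM_ASSIGNED)
                        assigned++;

        pr_info("%u of %d LMBs assigned (%llu bytes each)\n",
                assigned, drmem_info->n_lmbs, drmem_lmb_size());
}
```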
diff --git a/arch/powerpc/include/asm/dtl.h b/arch/powerpc/include/asm/dtl.h
new file mode 100644
index 000000000000..4bcb9f9ac764
--- /dev/null
+++ b/arch/powerpc/include/asm/dtl.h
@@ -0,0 +1,44 @@
+#ifndef _ASM_POWERPC_DTL_H
+#define _ASM_POWERPC_DTL_H
+
+#include <asm/lppaca.h>
+#include <linux/spinlock_types.h>
+
+/*
+ * Layout of entries in the hypervisor's dispatch trace log buffer.
+ */
+struct dtl_entry {
+ u8 dispatch_reason;
+ u8 preempt_reason;
+ __be16 processor_id;
+ __be32 enqueue_to_dispatch_time;
+ __be32 ready_to_enqueue_time;
+ __be32 waiting_to_ready_time;
+ __be64 timebase;
+ __be64 fault_addr;
+ __be64 srr0;
+ __be64 srr1;
+};
+
+#define DISPATCH_LOG_BYTES 4096 /* bytes per cpu */
+#define N_DISPATCH_LOG (DISPATCH_LOG_BYTES / sizeof(struct dtl_entry))
+
+/*
+ * Dispatch trace log event enable mask:
+ * 0x1: voluntary virtual processor waits
+ * 0x2: time-slice preempts
+ * 0x4: virtual partition memory page faults
+ */
+#define DTL_LOG_CEDE 0x1
+#define DTL_LOG_PREEMPT 0x2
+#define DTL_LOG_FAULT 0x4
+#define DTL_LOG_ALL (DTL_LOG_CEDE | DTL_LOG_PREEMPT | DTL_LOG_FAULT)
+
+extern struct kmem_cache *dtl_cache;
+extern rwlock_t dtl_access_lock;
+
+extern void register_dtl_buffer(int cpu);
+extern void alloc_dtl_buffers(unsigned long *time_limit);
+extern long hcall_vphn(unsigned long cpu, u64 flags, __be32 *associativity);
+
+#endif /* _ASM_POWERPC_DTL_H */
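For illustration, the arithmetic behind N_DISPATCH_LOG: each dtl_entry is 48 bytes, so the 4096-byte per-CPU buffer holds 85 entries. A stand-alone mirror of the layout (plain fixed-width types instead of __be types) that checks this:

```c
#include <stdint.h>
#include <stdio.h>

/* Stand-alone mirror of struct dtl_entry, just to check the sizes. */
struct dtl_entry {
        uint8_t  dispatch_reason;
        uint8_t  preempt_reason;
        uint16_t processor_id;
        uint32_t enqueue_to_dispatch_time;
        uint32_t ready_to_enqueue_time;
        uint32_t waiting_to_ready_time;
        uint64_t timebase;
        uint64_t fault_addr;
        uint64_t srr0;
        uint64_t srr1;
};

int main(void)
{
        /* Prints: entry=48 bytes, per-cpu log=85 entries */
        printf("entry=%zu bytes, per-cpu log=%zu entries\n",
               sizeof(struct dtl_entry), 4096 / sizeof(struct dtl_entry));
        return 0;
}
```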
diff --git a/arch/powerpc/include/asm/eeh.h b/arch/powerpc/include/asm/eeh.h
index 6f9b2a12540a..514dd056c2c8 100644
--- a/arch/powerpc/include/asm/eeh.h
+++ b/arch/powerpc/include/asm/eeh.h
@@ -27,7 +27,6 @@ struct pci_dn;
#define EEH_FORCE_DISABLED 0x02 /* EEH disabled */
#define EEH_PROBE_MODE_DEV 0x04 /* From PCI device */
#define EEH_PROBE_MODE_DEVTREE 0x08 /* From device tree */
-#define EEH_VALID_PE_ZERO 0x10 /* PE#0 is valid */
#define EEH_ENABLE_IO_FOR_LOG 0x20 /* Enable IO for log */
#define EEH_EARLY_DUMP_LOG 0x40 /* Dump log immediately */
@@ -74,7 +73,6 @@ struct pci_dn;
struct eeh_pe {
int type; /* PE type: PHB/Bus/Device */
int state; /* PE EEH dependent mode */
- int config_addr; /* Traditional PCI address */
int addr; /* PE configuration address */
struct pci_controller *phb; /* Associated PHB */
struct pci_bus *bus; /* Top PCI bus for bus PE */
@@ -133,7 +131,6 @@ static inline bool eeh_pe_passed(struct eeh_pe *pe)
struct eeh_dev {
int mode; /* EEH mode */
- int class_code; /* Class code of the device */
int bdfn; /* bdfn of device (for cfg ops) */
struct pci_controller *controller;
int pe_config_addr; /* PE config address */
@@ -148,7 +145,10 @@ struct eeh_dev {
struct pci_dn *pdn; /* Associated PCI device node */
struct pci_dev *pdev; /* Associated PCI device */
bool in_error; /* Error flag for edev */
+
+ /* VF specific properties */
struct pci_dev *physfn; /* Associated SRIOV PF */
+ int vf_index; /* Index of this VF */
};
/* "fmt" must be a simple literal string */
@@ -214,21 +214,19 @@ enum {
struct eeh_ops {
char *name;
- int (*init)(void);
- void* (*probe)(struct pci_dn *pdn, void *data);
+ struct eeh_dev *(*probe)(struct pci_dev *pdev);
int (*set_option)(struct eeh_pe *pe, int option);
- int (*get_pe_addr)(struct eeh_pe *pe);
int (*get_state)(struct eeh_pe *pe, int *delay);
int (*reset)(struct eeh_pe *pe, int option);
int (*get_log)(struct eeh_pe *pe, int severity, char *drv_log, unsigned long len);
int (*configure_bridge)(struct eeh_pe *pe);
int (*err_inject)(struct eeh_pe *pe, int type, int func,
unsigned long addr, unsigned long mask);
- int (*read_config)(struct pci_dn *pdn, int where, int size, u32 *val);
- int (*write_config)(struct pci_dn *pdn, int where, int size, u32 val);
+ int (*read_config)(struct eeh_dev *edev, int where, int size, u32 *val);
+ int (*write_config)(struct eeh_dev *edev, int where, int size, u32 val);
int (*next_error)(struct eeh_pe **pe);
- int (*restore_config)(struct pci_dn *pdn);
- int (*notify_resume)(struct pci_dn *pdn);
+ int (*restore_config)(struct eeh_dev *edev);
+ int (*notify_resume)(struct eeh_dev *edev);
};
extern int eeh_subsystem_flags;
@@ -280,10 +278,9 @@ int eeh_phb_pe_create(struct pci_controller *phb);
int eeh_wait_state(struct eeh_pe *pe, int max_wait);
struct eeh_pe *eeh_phb_pe_get(struct pci_controller *phb);
struct eeh_pe *eeh_pe_next(struct eeh_pe *pe, struct eeh_pe *root);
-struct eeh_pe *eeh_pe_get(struct pci_controller *phb,
- int pe_no, int config_addr);
-int eeh_add_to_parent_pe(struct eeh_dev *edev);
-int eeh_rmv_from_parent_pe(struct eeh_dev *edev);
+struct eeh_pe *eeh_pe_get(struct pci_controller *phb, int pe_no);
+int eeh_pe_tree_insert(struct eeh_dev *edev, struct eeh_pe *new_pe_parent);
+int eeh_pe_tree_remove(struct eeh_dev *edev);
void eeh_pe_update_time_stamp(struct eeh_pe *pe);
void *eeh_pe_traverse(struct eeh_pe *root,
eeh_pe_traverse_func fn, void *flag);
@@ -293,19 +290,12 @@ void eeh_pe_restore_bars(struct eeh_pe *pe);
const char *eeh_pe_loc_get(struct eeh_pe *pe);
struct pci_bus *eeh_pe_bus_get(struct eeh_pe *pe);
-struct eeh_dev *eeh_dev_init(struct pci_dn *pdn);
-void eeh_dev_phb_init_dynamic(struct pci_controller *phb);
void eeh_show_enabled(void);
-int __init eeh_ops_register(struct eeh_ops *ops);
-int __exit eeh_ops_unregister(const char *name);
+int __init eeh_init(struct eeh_ops *ops);
int eeh_check_failure(const volatile void __iomem *token);
int eeh_dev_check_failure(struct eeh_dev *edev);
void eeh_addr_cache_init(void);
-void eeh_add_device_early(struct pci_dn *);
-void eeh_add_device_tree_early(struct pci_dn *);
-void eeh_add_device_late(struct pci_dev *);
-void eeh_add_device_tree_late(struct pci_bus *);
-void eeh_add_sysfs_files(struct pci_bus *);
+void eeh_probe_device(struct pci_dev *pdev);
void eeh_remove_device(struct pci_dev *);
int eeh_unfreeze_pe(struct eeh_pe *pe);
int eeh_pe_reset_and_recover(struct eeh_pe *pe);
@@ -318,7 +308,6 @@ int eeh_pe_reset(struct eeh_pe *pe, int option, bool include_passed);
int eeh_pe_configure(struct eeh_pe *pe);
int eeh_pe_inject_err(struct eeh_pe *pe, int type, int func,
unsigned long addr, unsigned long mask);
-int eeh_restore_vf_config(struct pci_dn *pdn);
/**
* EEH_POSSIBLE_ERROR() -- test for possible MMIO failure.
@@ -344,13 +333,6 @@ static inline bool eeh_enabled(void)
static inline void eeh_show_enabled(void) { }
-static inline void *eeh_dev_init(struct pci_dn *pdn, void *data)
-{
- return NULL;
-}
-
-static inline void eeh_dev_phb_init_dynamic(struct pci_controller *phb) { }
-
static inline int eeh_check_failure(const volatile void __iomem *token)
{
return 0;
@@ -360,22 +342,19 @@ static inline int eeh_check_failure(const volatile void __iomem *token)
static inline void eeh_addr_cache_init(void) { }
-static inline void eeh_add_device_early(struct pci_dn *pdn) { }
-
-static inline void eeh_add_device_tree_early(struct pci_dn *pdn) { }
-
-static inline void eeh_add_device_late(struct pci_dev *dev) { }
-
-static inline void eeh_add_device_tree_late(struct pci_bus *bus) { }
-
-static inline void eeh_add_sysfs_files(struct pci_bus *bus) { }
+static inline void eeh_probe_device(struct pci_dev *dev) { }
static inline void eeh_remove_device(struct pci_dev *dev) { }
#define EEH_POSSIBLE_ERROR(val, type) (0)
#define EEH_IO_ERROR_VALUE(size) (-1UL)
+static inline int eeh_phb_pe_create(struct pci_controller *phb) { return 0; }
#endif /* CONFIG_EEH */
+#if defined(CONFIG_PPC_PSERIES) && defined(CONFIG_EEH)
+void pseries_eeh_init_edev_recursive(struct pci_dn *pdn);
+#endif
+
#ifdef CONFIG_PPC64
/*
* MMIO read/write operations with EEH support.
@@ -475,7 +454,7 @@ static inline void eeh_readsl(const volatile void __iomem *addr, void * buf,
}
-void eeh_cache_debugfs_init(void);
+void __init eeh_cache_debugfs_init(void);
#endif /* CONFIG_PPC64 */
#endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
index 57c229a86f08..79f1c480b5eb 100644
--- a/arch/powerpc/include/asm/elf.h
+++ b/arch/powerpc/include/asm/elf.h
@@ -53,8 +53,6 @@ static inline void ppc_elf_core_copy_regs(elf_gregset_t elf_regs,
}
#define ELF_CORE_COPY_REGS(gregs, regs) ppc_elf_core_copy_regs(gregs, regs);
-typedef elf_vrregset_t elf_fpxregset_t;
-
/* ELF_HWCAP yields a mask that user programs can use to figure out what
instruction set this cpu supports. This could be done in userspace,
but it's not easy, and we've already done it here. */
@@ -162,7 +160,7 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
* even if DLINFO_ARCH_ITEMS goes to zero or is undefined.
* update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT entries changes
*/
-#define ARCH_DLINFO \
+#define COMMON_ARCH_DLINFO \
do { \
/* Handle glibc compatibility. */ \
NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC); \
@@ -170,12 +168,30 @@ do { \
/* Cache size items */ \
NEW_AUX_ENT(AT_DCACHEBSIZE, dcache_bsize); \
NEW_AUX_ENT(AT_ICACHEBSIZE, icache_bsize); \
- NEW_AUX_ENT(AT_UCACHEBSIZE, ucache_bsize); \
- VDSO_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso_base); \
+ NEW_AUX_ENT(AT_UCACHEBSIZE, 0); \
+ VDSO_AUX_ENT(AT_SYSINFO_EHDR, (unsigned long)current->mm->context.vdso);\
ARCH_DLINFO_CACHE_GEOMETRY; \
} while (0)
+#define ARCH_DLINFO \
+do { \
+ COMMON_ARCH_DLINFO; \
+ NEW_AUX_ENT(AT_MINSIGSTKSZ, get_min_sigframe_size()); \
+} while (0)
+
+#define COMPAT_ARCH_DLINFO \
+do { \
+ COMMON_ARCH_DLINFO; \
+ NEW_AUX_ENT(AT_MINSIGSTKSZ, get_min_sigframe_size_compat()); \
+} while (0)
+
/* Relocate the kernel image to @final_address */
void relocate(unsigned long final_address);
+struct func_desc {
+ unsigned long addr;
+ unsigned long toc;
+ unsigned long env;
+};
+
#endif /* _ASM_POWERPC_ELF_H */
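For illustration: on ELFv1 a function pointer refers to an OPD descriptor laid out as struct func_desc above, so resolving the real entry point is a single load of the first field, which is what ppc_function_entry() in code-patching.h now does. A minimal, hypothetical helper:

```c
/* Hypothetical helper, ELFv1 only: the first descriptor word is the text address. */
static inline unsigned long entry_from_descriptor(void *func)
{
        return ((struct func_desc *)func)->addr;
}
```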
diff --git a/arch/powerpc/include/asm/epapr_hcalls.h b/arch/powerpc/include/asm/epapr_hcalls.h
index d3a7e36f1402..cdf3c6df5123 100644
--- a/arch/powerpc/include/asm/epapr_hcalls.h
+++ b/arch/powerpc/include/asm/epapr_hcalls.h
@@ -37,7 +37,7 @@
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-/* A "hypercall" is an "sc 1" instruction. This header file file provides C
+/* A "hypercall" is an "sc 1" instruction. This header file provides C
 * wrapper functions for the ePAPR hypervisor interface. It is intended
* for use by Linux device drivers and other operating systems.
*
@@ -65,7 +65,7 @@
* but the gcc inline assembly syntax does not allow us to specify registers
* on the clobber list that are also on the input/output list. Therefore,
* the lists of clobbered registers depends on the number of register
- * parmeters ("+r" and "=r") passed to the hypercall.
+ * parameters ("+r" and "=r") passed to the hypercall.
*
* Each assembly block should use one of the HCALL_CLOBBERSx macros. As a
* general rule, 'x' is the number of parameters passed to the assembly
@@ -246,7 +246,7 @@ static inline unsigned int ev_int_get_mask(unsigned int interrupt,
* ev_int_eoi - signal the end of interrupt processing
* @interrupt: the interrupt number
*
- * This function signals the end of processing for the the specified
+ * This function signals the end of processing for the specified
* interrupt, which must be the interrupt currently in service. By
* definition, this is also the highest-priority interrupt.
*
diff --git a/arch/powerpc/include/asm/exception-64e.h b/arch/powerpc/include/asm/exception-64e.h
index 54a98ef7d7fe..b1ef1e92c34a 100644
--- a/arch/powerpc/include/asm/exception-64e.h
+++ b/arch/powerpc/include/asm/exception-64e.h
@@ -66,14 +66,7 @@
#define EX_TLB_SRR0 (10 * 8)
#define EX_TLB_SRR1 (11 * 8)
#define EX_TLB_R7 (12 * 8)
-#ifdef CONFIG_BOOK3E_MMU_TLB_STATS
-#define EX_TLB_R8 (13 * 8)
-#define EX_TLB_R9 (14 * 8)
-#define EX_TLB_LR (15 * 8)
-#define EX_TLB_SIZE (16 * 8)
-#else
#define EX_TLB_SIZE (13 * 8)
-#endif
#define START_EXCEPTION(label) \
.globl exc_##label##_book3e; \
@@ -110,8 +103,7 @@ exc_##label##_book3e:
std r11,EX_TLB_R12(r12); \
mtspr SPRN_SPRG_TLB_EXFRAME,r14; \
std r15,EX_TLB_SRR1(r12); \
- std r16,EX_TLB_SRR0(r12); \
- TLB_MISS_PROLOG_STATS
+ std r16,EX_TLB_SRR0(r12);
/* And these are the matching epilogs that restores things
*
@@ -143,7 +135,6 @@ exc_##label##_book3e:
mtspr SPRN_SRR0,r15; \
ld r15,EX_TLB_R15(r12); \
mtspr SPRN_SRR1,r16; \
- TLB_MISS_RESTORE_STATS \
ld r16,EX_TLB_R16(r12); \
ld r12,EX_TLB_R12(r12); \
@@ -158,53 +149,19 @@ exc_##label##_book3e:
addi r11,r13,PACA_EXTLB; \
TLB_MISS_RESTORE(r11)
-#ifdef CONFIG_BOOK3E_MMU_TLB_STATS
-#define TLB_MISS_PROLOG_STATS \
- mflr r10; \
- std r8,EX_TLB_R8(r12); \
- std r9,EX_TLB_R9(r12); \
- std r10,EX_TLB_LR(r12);
-#define TLB_MISS_RESTORE_STATS \
- ld r16,EX_TLB_LR(r12); \
- ld r9,EX_TLB_R9(r12); \
- ld r8,EX_TLB_R8(r12); \
- mtlr r16;
-#define TLB_MISS_STATS_D(name) \
- addi r9,r13,MMSTAT_DSTATS+name; \
- bl tlb_stat_inc;
-#define TLB_MISS_STATS_I(name) \
- addi r9,r13,MMSTAT_ISTATS+name; \
- bl tlb_stat_inc;
-#define TLB_MISS_STATS_X(name) \
- ld r8,PACA_EXTLB+EX_TLB_ESR(r13); \
- cmpdi cr2,r8,-1; \
- beq cr2,61f; \
- addi r9,r13,MMSTAT_DSTATS+name; \
- b 62f; \
-61: addi r9,r13,MMSTAT_ISTATS+name; \
-62: bl tlb_stat_inc;
-#define TLB_MISS_STATS_SAVE_INFO \
- std r14,EX_TLB_ESR(r12); /* save ESR */
-#define TLB_MISS_STATS_SAVE_INFO_BOLTED \
- std r14,PACA_EXTLB+EX_TLB_ESR(r13); /* save ESR */
-#else
-#define TLB_MISS_PROLOG_STATS
-#define TLB_MISS_RESTORE_STATS
-#define TLB_MISS_PROLOG_STATS_BOLTED
-#define TLB_MISS_RESTORE_STATS_BOLTED
-#define TLB_MISS_STATS_D(name)
-#define TLB_MISS_STATS_I(name)
-#define TLB_MISS_STATS_X(name)
-#define TLB_MISS_STATS_Y(name)
-#define TLB_MISS_STATS_SAVE_INFO
-#define TLB_MISS_STATS_SAVE_INFO_BOLTED
+#ifndef __ASSEMBLY__
+extern unsigned int interrupt_base_book3e;
#endif
#define SET_IVOR(vector_number, vector_offset) \
LOAD_REG_ADDR(r3,interrupt_base_book3e);\
ori r3,r3,vector_offset@l; \
mtspr SPRN_IVOR##vector_number,r3;
-
+/*
+ * powerpc relies on return from interrupt/syscall being context synchronising
+ * (which rfi is) to support ARCH_HAS_MEMBARRIER_SYNC_CORE without additional
+ * synchronisation instructions.
+ */
#define RFI_TO_KERNEL \
rfi
diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
index 33f4f72eb035..bb6f78fcf981 100644
--- a/arch/powerpc/include/asm/exception-64s.h
+++ b/arch/powerpc/include/asm/exception-64s.h
@@ -33,11 +33,20 @@
#include <asm/feature-fixups.h>
/* PACA save area size in u64 units (exgen, exmc, etc) */
-#if defined(CONFIG_RELOCATABLE)
#define EX_SIZE 10
-#else
-#define EX_SIZE 9
-#endif
+
+/* PACA save area offsets */
+#define EX_R9 0
+#define EX_R10 8
+#define EX_R11 16
+#define EX_R12 24
+#define EX_R13 32
+#define EX_DAR 40
+#define EX_DSISR 48
+#define EX_CCR 52
+#define EX_CFAR 56
+#define EX_PPR 64
+#define EX_CTR 72
/*
* maximum recursive depth of MCE exceptions
@@ -61,17 +70,45 @@
nop; \
nop
+#define ENTRY_FLUSH_SLOT \
+ ENTRY_FLUSH_FIXUP_SECTION; \
+ nop; \
+ nop; \
+ nop;
+
+#define SCV_ENTRY_FLUSH_SLOT \
+ SCV_ENTRY_FLUSH_FIXUP_SECTION; \
+ nop; \
+ nop; \
+ nop;
+
/*
* r10 must be free to use, r13 must be paca
*/
#define INTERRUPT_TO_KERNEL \
- STF_ENTRY_BARRIER_SLOT
+ STF_ENTRY_BARRIER_SLOT; \
+ ENTRY_FLUSH_SLOT
+
+/*
+ * r10, ctr must be free to use, r13 must be paca
+ */
+#define SCV_INTERRUPT_TO_KERNEL \
+ STF_ENTRY_BARRIER_SLOT; \
+ SCV_ENTRY_FLUSH_SLOT
/*
* Macros for annotating the expected destination of (h)rfid
*
* The nop instructions allow us to insert one or more instructions to flush the
* L1-D cache when returning to userspace or a guest.
+ *
+ * powerpc relies on return from interrupt/syscall being context synchronising
+ * (which hrfid, rfid, and rfscv are) to support ARCH_HAS_MEMBARRIER_SYNC_CORE
+ * without additional synchronisation instructions.
+ *
+ * soft-masked interrupt replay does not include a context-synchronising rfid,
+ * but those always return to kernel, the sync is only required when returning
+ * to user.
*/
#define RFI_FLUSH_SLOT \
RFI_FLUSH_FIXUP_SECTION; \
@@ -127,6 +164,15 @@
hrfid; \
b hrfi_flush_fallback
+#define RFSCV_TO_USER \
+ STF_EXIT_BARRIER_SLOT; \
+ RFI_FLUSH_SLOT; \
+ RFSCV; \
+ b rfscv_flush_fallback
+
+#else /* __ASSEMBLY__ */
+/* Prototype for function defined in exceptions-64s.S */
+void do_uaccess_flush(void);
#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_EXCEPTION_H */
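The fixed EX_SIZE of 10 u64 slots has to cover the explicit byte offsets above; EX_CTR at 72 is the last full slot (72 + 8 = 80 = EX_SIZE * 8), and EX_DSISR/EX_CCR appear to share one slot since both are 32-bit values. A compile-time sketch of that relationship, assuming C11 _Static_assert is acceptable in this context:

    /* Sketch: the explicit offsets must stay inside the EX_SIZE * 8 byte area. */
    _Static_assert(EX_CTR + 8 <= EX_SIZE * 8, "EX_CTR outside PACA save area");
    _Static_assert(EX_PPR + 8 <= EX_SIZE * 8, "EX_PPR outside PACA save area");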
diff --git a/arch/powerpc/include/asm/extable.h b/arch/powerpc/include/asm/extable.h
index eb91b2d2935a..26ce2e5c0fa8 100644
--- a/arch/powerpc/include/asm/extable.h
+++ b/arch/powerpc/include/asm/extable.h
@@ -17,6 +17,8 @@
#define ARCH_HAS_RELATIVE_EXTABLE
+#ifndef __ASSEMBLY__
+
struct exception_table_entry {
int insn;
int fixup;
@@ -28,3 +30,15 @@ static inline unsigned long extable_fixup(const struct exception_table_entry *x)
}
#endif
+
+/*
+ * Helper macro for exception table entries
+ */
+#define EX_TABLE(_fault, _target) \
+ stringify_in_c(.section __ex_table,"a";)\
+ stringify_in_c(.balign 4;) \
+ stringify_in_c(.long (_fault) - . ;) \
+ stringify_in_c(.long (_target) - . ;) \
+ stringify_in_c(.previous)
+
+#endif
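EX_TABLE stores two 32-bit self-relative offsets, which is what ARCH_HAS_RELATIVE_EXTABLE implies; turning an entry back into absolute addresses is just pointer-plus-offset. A small sketch of the decode side, assuming the struct exception_table_entry layout above (extable_insn_addr/extable_fixup_addr are illustrative names, not kernel API):

    /* Sketch: recover absolute addresses from a self-relative extable entry. */
    static inline unsigned long extable_insn_addr(const struct exception_table_entry *x)
    {
        return (unsigned long)&x->insn + x->insn;
    }

    static inline unsigned long extable_fixup_addr(const struct exception_table_entry *x)
    {
        return (unsigned long)&x->fixup + x->fixup;
    }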
diff --git a/arch/powerpc/include/asm/fadump-internal.h b/arch/powerpc/include/asm/fadump-internal.h
index c814a2b55389..27f9e11eda28 100644
--- a/arch/powerpc/include/asm/fadump-internal.h
+++ b/arch/powerpc/include/asm/fadump-internal.h
@@ -19,11 +19,6 @@
#define memblock_num_regions(memblock_type) (memblock.memblock_type.cnt)
-/* Alignment per CMA requirement. */
-#define FADUMP_CMA_ALIGNMENT (PAGE_SIZE << \
- max_t(unsigned long, MAX_ORDER - 1, \
- pageblock_order))
-
/* FAD commands */
#define FADUMP_REGISTER 1
#define FADUMP_UNREGISTER 2
@@ -55,7 +50,7 @@ struct fadump_crash_info_header {
u64 elfcorehdr_addr;
u32 crashing_cpu;
struct pt_regs regs;
- struct cpumask online_mask;
+ struct cpumask cpu_mask;
};
struct fadump_memory_range {
@@ -64,12 +59,14 @@ struct fadump_memory_range {
};
/* fadump memory ranges info */
+#define RNG_NAME_SZ 16
struct fadump_mrange_info {
- char name[16];
+ char name[RNG_NAME_SZ];
struct fadump_memory_range *mem_ranges;
u32 mem_ranges_sz;
u32 mem_range_cnt;
u32 max_mem_ranges;
+ bool is_static;
};
/* Platform specific callback functions */
@@ -135,10 +132,10 @@ struct fadump_ops {
};
/* Helper functions */
-s32 fadump_setup_cpu_notes_buf(u32 num_cpus);
+s32 __init fadump_setup_cpu_notes_buf(u32 num_cpus);
void fadump_free_cpu_notes_buf(void);
-u32 *fadump_regs_to_elf_notes(u32 *buf, struct pt_regs *regs);
-void fadump_update_elfcore_header(char *bufp);
+u32 *__init fadump_regs_to_elf_notes(u32 *buf, struct pt_regs *regs);
+void __init fadump_update_elfcore_header(char *bufp);
bool is_fadump_boot_mem_contiguous(void);
bool is_fadump_reserved_mem_contiguous(void);
diff --git a/arch/powerpc/include/asm/feature-fixups.h b/arch/powerpc/include/asm/feature-fixups.h
index b0af97add751..ac605fc369c4 100644
--- a/arch/powerpc/include/asm/feature-fixups.h
+++ b/arch/powerpc/include/asm/feature-fixups.h
@@ -36,6 +36,24 @@ label##2: \
.align 2; \
label##3:
+
+#ifndef CONFIG_CC_IS_CLANG
+#define CHECK_ALT_SIZE(else_size, body_size) \
+ .ifgt (else_size) - (body_size); \
+ .error "Feature section else case larger than body"; \
+ .endif;
+#else
+/*
+ * If we use the ifgt syntax above, clang's assembler complains about the
+ * expression being non-absolute when the code appears in an inline assembly
+ * statement.
+ * As a workaround use an .org directive that has no effect if the else case
+ * instructions are smaller than the body, but fails otherwise.
+ */
+#define CHECK_ALT_SIZE(else_size, body_size) \
+ .org . + ((else_size) > (body_size));
+#endif
+
#define MAKE_FTR_SECTION_ENTRY(msk, val, label, sect) \
label##4: \
.popsection; \
@@ -48,9 +66,7 @@ label##5: \
FTR_ENTRY_OFFSET label##2b-label##5b; \
FTR_ENTRY_OFFSET label##3b-label##5b; \
FTR_ENTRY_OFFSET label##4b-label##5b; \
- .ifgt (label##4b- label##3b)-(label##2b- label##1b); \
- .error "Feature section else case larger than body"; \
- .endif; \
+ CHECK_ALT_SIZE((label##4b-label##3b), (label##2b-label##1b)); \
.popsection;
@@ -100,6 +116,9 @@ label##5: \
#define END_MMU_FTR_SECTION_NESTED_IFSET(msk, label) \
END_MMU_FTR_SECTION_NESTED((msk), (msk), label)
+#define END_MMU_FTR_SECTION_NESTED_IFCLR(msk, label) \
+ END_MMU_FTR_SECTION_NESTED((msk), 0, label)
+
#define END_MMU_FTR_SECTION_IFSET(msk) END_MMU_FTR_SECTION((msk), (msk))
#define END_MMU_FTR_SECTION_IFCLR(msk) END_MMU_FTR_SECTION((msk), 0)
@@ -205,6 +224,30 @@ label##3: \
FTR_ENTRY_OFFSET 955b-956b; \
.popsection;
+#define UACCESS_FLUSH_FIXUP_SECTION \
+959: \
+ .pushsection __uaccess_flush_fixup,"a"; \
+ .align 2; \
+960: \
+ FTR_ENTRY_OFFSET 959b-960b; \
+ .popsection;
+
+#define ENTRY_FLUSH_FIXUP_SECTION \
+957: \
+ .pushsection __entry_flush_fixup,"a"; \
+ .align 2; \
+958: \
+ FTR_ENTRY_OFFSET 957b-958b; \
+ .popsection;
+
+#define SCV_ENTRY_FLUSH_FIXUP_SECTION \
+957: \
+ .pushsection __scv_entry_flush_fixup,"a"; \
+ .align 2; \
+958: \
+ FTR_ENTRY_OFFSET 957b-958b; \
+ .popsection;
+
#define RFI_FLUSH_FIXUP_SECTION \
951: \
.pushsection __rfi_flush_fixup,"a"; \
@@ -237,8 +280,13 @@ label##3: \
#include <linux/types.h>
extern long stf_barrier_fallback;
+extern long entry_flush_fallback;
+extern long scv_entry_flush_fallback;
extern long __start___stf_entry_barrier_fixup, __stop___stf_entry_barrier_fixup;
extern long __start___stf_exit_barrier_fixup, __stop___stf_exit_barrier_fixup;
+extern long __start___uaccess_flush_fixup, __stop___uaccess_flush_fixup;
+extern long __start___entry_flush_fixup, __stop___entry_flush_fixup;
+extern long __start___scv_entry_flush_fixup, __stop___scv_entry_flush_fixup;
extern long __start___rfi_flush_fixup, __stop___rfi_flush_fixup;
extern long __start___barrier_nospec_fixup, __stop___barrier_nospec_fixup;
extern long __start__btb_flush_fixup, __stop__btb_flush_fixup;
diff --git a/arch/powerpc/include/asm/firmware.h b/arch/powerpc/include/asm/firmware.h
index ca33f4ef6cb4..ed6db13a1d7c 100644
--- a/arch/powerpc/include/asm/firmware.h
+++ b/arch/powerpc/include/asm/firmware.h
@@ -44,7 +44,7 @@
#define FW_FEATURE_OPAL ASM_CONST(0x0000000010000000)
#define FW_FEATURE_SET_MODE ASM_CONST(0x0000000040000000)
#define FW_FEATURE_BEST_ENERGY ASM_CONST(0x0000000080000000)
-#define FW_FEATURE_TYPE1_AFFINITY ASM_CONST(0x0000000100000000)
+#define FW_FEATURE_FORM1_AFFINITY ASM_CONST(0x0000000100000000)
#define FW_FEATURE_PRRN ASM_CONST(0x0000000200000000)
#define FW_FEATURE_DRMEM_V2 ASM_CONST(0x0000000400000000)
#define FW_FEATURE_DRC_INFO ASM_CONST(0x0000000800000000)
@@ -52,6 +52,10 @@
#define FW_FEATURE_PAPR_SCM ASM_CONST(0x0000002000000000)
#define FW_FEATURE_ULTRAVISOR ASM_CONST(0x0000004000000000)
#define FW_FEATURE_STUFF_TCE ASM_CONST(0x0000008000000000)
+#define FW_FEATURE_RPT_INVALIDATE ASM_CONST(0x0000010000000000)
+#define FW_FEATURE_FORM2_AFFINITY ASM_CONST(0x0000020000000000)
+#define FW_FEATURE_ENERGY_SCALE_INFO ASM_CONST(0x0000040000000000)
+#define FW_FEATURE_WATCHDOG ASM_CONST(0x0000080000000000)
#ifndef __ASSEMBLY__
@@ -68,10 +72,12 @@ enum {
FW_FEATURE_SPLPAR | FW_FEATURE_LPAR |
FW_FEATURE_CMO | FW_FEATURE_VPHN | FW_FEATURE_XCMO |
FW_FEATURE_SET_MODE | FW_FEATURE_BEST_ENERGY |
- FW_FEATURE_TYPE1_AFFINITY | FW_FEATURE_PRRN |
+ FW_FEATURE_FORM1_AFFINITY | FW_FEATURE_PRRN |
FW_FEATURE_HPT_RESIZE | FW_FEATURE_DRMEM_V2 |
FW_FEATURE_DRC_INFO | FW_FEATURE_BLOCK_REMOVE |
- FW_FEATURE_PAPR_SCM | FW_FEATURE_ULTRAVISOR,
+ FW_FEATURE_PAPR_SCM | FW_FEATURE_ULTRAVISOR |
+ FW_FEATURE_RPT_INVALIDATE | FW_FEATURE_FORM2_AFFINITY |
+ FW_FEATURE_ENERGY_SCALE_INFO | FW_FEATURE_WATCHDOG,
FW_FEATURE_PSERIES_ALWAYS = 0,
FW_FEATURE_POWERNV_POSSIBLE = FW_FEATURE_OPAL | FW_FEATURE_ULTRAVISOR,
FW_FEATURE_POWERNV_ALWAYS = 0,
@@ -89,7 +95,7 @@ enum {
#ifdef CONFIG_PPC_PS3
FW_FEATURE_PS3_POSSIBLE |
#endif
-#ifdef CONFIG_PPC_NATIVE
+#ifdef CONFIG_PPC_HASH_MMU_NATIVE
FW_FEATURE_NATIVE_ALWAYS |
#endif
0,
@@ -103,7 +109,7 @@ enum {
#ifdef CONFIG_PPC_PS3
FW_FEATURE_PS3_ALWAYS &
#endif
-#ifdef CONFIG_PPC_NATIVE
+#ifdef CONFIG_PPC_HASH_MMU_NATIVE
FW_FEATURE_NATIVE_ALWAYS &
#endif
FW_FEATURE_POSSIBLE,
@@ -128,13 +134,14 @@ extern void machine_check_fwnmi(void);
/* This is true if we are using the firmware NMI handler (typically LPAR) */
extern int fwnmi_active;
+extern int ibm_nmi_interlock_token;
extern unsigned int __start___fw_ftr_fixup, __stop___fw_ftr_fixup;
#ifdef CONFIG_PPC_PSERIES
void pseries_probe_fw_features(void);
#else
-static inline void pseries_probe_fw_features(void) { };
+static inline void pseries_probe_fw_features(void) { }
#endif
#endif /* __ASSEMBLY__ */
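The renamed FW_FEATURE_FORM1_AFFINITY and new FW_FEATURE_FORM2_AFFINITY bits are normally tested with firmware_has_feature(), declared elsewhere in this header. A hedged kernel-context sketch (pick_affinity_form() is illustrative only):

    /* Sketch: prefer FORM2 affinity when the firmware advertises it. */
    static void __init pick_affinity_form(void)
    {
        if (firmware_has_feature(FW_FEATURE_FORM2_AFFINITY))
            pr_info("using FORM2 associativity\n");
        else if (firmware_has_feature(FW_FEATURE_FORM1_AFFINITY))
            pr_info("using FORM1 associativity\n");
        else
            pr_info("no associativity form advertised\n");
    }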
diff --git a/arch/powerpc/include/asm/fixmap.h b/arch/powerpc/include/asm/fixmap.h
index 2ef155a3c821..a832aeafe560 100644
--- a/arch/powerpc/include/asm/fixmap.h
+++ b/arch/powerpc/include/asm/fixmap.h
@@ -16,19 +16,24 @@
#ifndef __ASSEMBLY__
#include <linux/sizes.h>
+#include <linux/pgtable.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#ifdef CONFIG_HIGHMEM
#include <linux/threads.h>
-#include <asm/kmap_types.h>
+#include <asm/kmap_size.h>
#endif
+#ifdef CONFIG_PPC64
+#define FIXADDR_TOP (IOREMAP_END + FIXADDR_SIZE)
+#else
+#define FIXADDR_SIZE 0
#ifdef CONFIG_KASAN
#include <asm/kasan.h>
#define FIXADDR_TOP (KASAN_SHADOW_START - PAGE_SIZE)
#else
#define FIXADDR_TOP ((unsigned long)(-PAGE_SIZE))
#endif
+#endif
/*
* Here we define all the compile-time 'special' virtual
@@ -50,12 +55,13 @@
*/
enum fixed_addresses {
FIX_HOLE,
+#ifdef CONFIG_PPC32
/* reserve the top 128K for early debugging purposes */
FIX_EARLY_DEBUG_TOP = FIX_HOLE,
- FIX_EARLY_DEBUG_BASE = FIX_EARLY_DEBUG_TOP+((128*1024)/PAGE_SIZE)-1,
+ FIX_EARLY_DEBUG_BASE = FIX_EARLY_DEBUG_TOP+(ALIGN(SZ_128K, PAGE_SIZE)/PAGE_SIZE)-1,
#ifdef CONFIG_HIGHMEM
FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */
- FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
+ FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_MAX_IDX * NR_CPUS) - 1,
#endif
#ifdef CONFIG_PPC_8xx
/* For IMMR we need an aligned 512K area */
@@ -72,6 +78,7 @@ enum fixed_addresses {
FIX_IMMR_SIZE,
#endif
/* FIX_PCIE_MCFG, */
+#endif /* CONFIG_PPC32 */
__end_of_permanent_fixed_addresses,
#define NR_FIX_BTMAPS (SZ_256K / PAGE_SIZE)
@@ -86,6 +93,10 @@ enum fixed_addresses {
#define __FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
#define FIXADDR_START (FIXADDR_TOP - __FIXADDR_SIZE)
+#define FIXMAP_ALIGNED_SIZE (ALIGN(FIXADDR_TOP, PGDIR_SIZE) - \
+ ALIGN_DOWN(FIXADDR_START, PGDIR_SIZE))
+#define FIXMAP_PTE_SIZE (FIXMAP_ALIGNED_SIZE / PGDIR_SIZE * PTE_TABLE_SIZE)
+
#define FIXMAP_PAGE_NOCACHE PAGE_KERNEL_NCG
#define FIXMAP_PAGE_IO PAGE_KERNEL_NCG
@@ -94,12 +105,16 @@ enum fixed_addresses {
static inline void __set_fixmap(enum fixed_addresses idx,
phys_addr_t phys, pgprot_t flags)
{
+ BUILD_BUG_ON(IS_ENABLED(CONFIG_PPC64) && __FIXADDR_SIZE > FIXADDR_SIZE);
+
if (__builtin_constant_p(idx))
BUILD_BUG_ON(idx >= __end_of_fixed_addresses);
else if (WARN_ON(idx >= __end_of_fixed_addresses))
return;
-
- map_kernel_page(__fix_to_virt(idx), phys, flags);
+ if (pgprot_val(flags))
+ map_kernel_page(__fix_to_virt(idx), phys, flags);
+ else
+ unmap_kernel_page(__fix_to_virt(idx));
}
#define __early_set_fixmap __set_fixmap
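With the pgprot_val() check above, a fixmap slot is torn down by passing an empty pgprot rather than by mapping PFN 0. A hedged usage sketch on a 32-bit config (FIX_EARLY_DEBUG_BASE and FIXMAP_PAGE_IO come from this header, __fix_to_virt() from asm-generic/fixmap.h; the UART use is illustrative):

    /* Sketch: map an MMIO page at a fixed slot, use it, then tear it down. */
    static void __init early_uart_fixmap_example(phys_addr_t uart_phys)
    {
        void __iomem *va;

        __set_fixmap(FIX_EARLY_DEBUG_BASE, uart_phys, FIXMAP_PAGE_IO);
        va = (void __iomem *)__fix_to_virt(FIX_EARLY_DEBUG_BASE);
        writeb('!', va);                                /* poke the device */
        __set_fixmap(FIX_EARLY_DEBUG_BASE, 0, __pgprot(0));    /* unmaps now */
    }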
diff --git a/arch/powerpc/include/asm/floppy.h b/arch/powerpc/include/asm/floppy.h
index 167c44b58848..f8ce178b43b7 100644
--- a/arch/powerpc/include/asm/floppy.h
+++ b/arch/powerpc/include/asm/floppy.h
@@ -13,8 +13,8 @@
#include <asm/machdep.h>
-#define fd_inb(port) inb_p(port)
-#define fd_outb(value,port) outb_p(value,port)
+#define fd_inb(base, reg) inb_p((base) + (reg))
+#define fd_outb(value, base, reg) outb_p(value, (base) + (reg))
#define fd_enable_dma() enable_dma(FLOPPY_DMA)
#define fd_disable_dma() fd_ops->_disable_dma(FLOPPY_DMA)
@@ -61,21 +61,22 @@ static irqreturn_t floppy_hardint(int irq, void *dev_id)
st = 1;
for (lcount=virtual_dma_count, lptr=virtual_dma_addr;
lcount; lcount--, lptr++) {
- st=inb(virtual_dma_port+4) & 0xa0 ;
- if (st != 0xa0)
+ st = inb(virtual_dma_port + FD_STATUS);
+ st &= STATUS_DMA | STATUS_READY;
+ if (st != (STATUS_DMA | STATUS_READY))
break;
if (virtual_dma_mode)
- outb_p(*lptr, virtual_dma_port+5);
+ outb_p(*lptr, virtual_dma_port + FD_DATA);
else
- *lptr = inb_p(virtual_dma_port+5);
+ *lptr = inb_p(virtual_dma_port + FD_DATA);
}
virtual_dma_count = lcount;
virtual_dma_addr = lptr;
- st = inb(virtual_dma_port+4);
+ st = inb(virtual_dma_port + FD_STATUS);
- if (st == 0x20)
+ if (st == STATUS_DMA)
return IRQ_HANDLED;
- if (!(st & 0x20)) {
+ if (!(st & STATUS_DMA)) {
virtual_dma_residue += virtual_dma_count;
virtual_dma_count=0;
doing_vdma = 0;
@@ -133,17 +134,19 @@ static int hard_dma_setup(char *addr, unsigned long size, int mode, int io)
int dir;
doing_vdma = 0;
- dir = (mode == DMA_MODE_READ) ? PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE;
+ dir = (mode == DMA_MODE_READ) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
if (bus_addr
&& (addr != prev_addr || size != prev_size || dir != prev_dir)) {
/* different from last time -- unmap prev */
- pci_unmap_single(isa_bridge_pcidev, bus_addr, prev_size, prev_dir);
+ dma_unmap_single(&isa_bridge_pcidev->dev, bus_addr, prev_size,
+ prev_dir);
bus_addr = 0;
}
if (!bus_addr) /* need to map it */
- bus_addr = pci_map_single(isa_bridge_pcidev, addr, size, dir);
+ bus_addr = dma_map_single(&isa_bridge_pcidev->dev, addr, size,
+ dir);
/* remember this one as prev */
prev_addr = addr;
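The hunk above swaps the deprecated pci_map_single()/PCI_DMA_* helpers for the generic DMA API on the ISA bridge device. A self-contained sketch of the same "remap only when parameters change" pattern, assuming <linux/dma-mapping.h> (error handling with dma_mapping_error() is omitted for brevity):

    /* Sketch: streaming DMA mapping refreshed only when the buffer changes. */
    static dma_addr_t remap_if_changed(struct device *dev, void *addr, size_t size,
                                       enum dma_data_direction dir)
    {
        static void *prev_addr;
        static size_t prev_size;
        static enum dma_data_direction prev_dir;
        static dma_addr_t bus_addr;

        if (bus_addr && (addr != prev_addr || size != prev_size || dir != prev_dir)) {
            dma_unmap_single(dev, bus_addr, prev_size, prev_dir);
            bus_addr = 0;
        }
        if (!bus_addr)
            bus_addr = dma_map_single(dev, addr, size, dir);

        prev_addr = addr;
        prev_size = size;
        prev_dir = dir;
        return bus_addr;
    }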
diff --git a/arch/powerpc/include/asm/fsl_85xx_cache_sram.h b/arch/powerpc/include/asm/fsl_85xx_cache_sram.h
deleted file mode 100644
index 0235a0447baa..000000000000
--- a/arch/powerpc/include/asm/fsl_85xx_cache_sram.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright 2009 Freescale Semiconductor, Inc.
- *
- * Cache SRAM handling for QorIQ platform
- *
- * Author: Vivek Mahajan <vivek.mahajan@freescale.com>
-
- * This file is derived from the original work done
- * by Sylvain Munaut for the Bestcomm SRAM allocator.
- */
-
-#ifndef __ASM_POWERPC_FSL_85XX_CACHE_SRAM_H__
-#define __ASM_POWERPC_FSL_85XX_CACHE_SRAM_H__
-
-#include <asm/rheap.h>
-#include <linux/spinlock.h>
-
-/*
- * Cache-SRAM
- */
-
-struct mpc85xx_cache_sram {
- phys_addr_t base_phys;
- void *base_virt;
- unsigned int size;
- rh_info_t *rh;
- spinlock_t lock;
-};
-
-extern void mpc85xx_cache_sram_free(void *ptr);
-extern void *mpc85xx_cache_sram_alloc(unsigned int size,
- phys_addr_t *phys, unsigned int align);
-
-#endif /* __AMS_POWERPC_FSL_85XX_CACHE_SRAM_H__ */
diff --git a/arch/powerpc/include/asm/fsl_pamu_stash.h b/arch/powerpc/include/asm/fsl_pamu_stash.h
index 30a31ad2123d..c0fbadb70b5d 100644
--- a/arch/powerpc/include/asm/fsl_pamu_stash.h
+++ b/arch/powerpc/include/asm/fsl_pamu_stash.h
@@ -7,6 +7,8 @@
#ifndef __FSL_PAMU_STASH_H
#define __FSL_PAMU_STASH_H
+struct iommu_domain;
+
/* cache stash targets */
enum pamu_stash_target {
PAMU_ATTR_CACHE_L1 = 1,
@@ -14,14 +16,6 @@ enum pamu_stash_target {
PAMU_ATTR_CACHE_L3,
};
-/*
- * This attribute allows configuring stashig specific parameters
- * in the PAMU hardware.
- */
-
-struct pamu_stash_attribute {
- u32 cpu; /* cpu number */
- u32 cache; /* cache to stash to: L1,L2,L3 */
-};
+int fsl_pamu_configure_l1_stash(struct iommu_domain *domain, u32 cpu);
#endif /* __FSL_PAMU_STASH_H */
diff --git a/arch/powerpc/include/asm/ftrace.h b/arch/powerpc/include/asm/ftrace.h
index f54a08a2cd70..3cee7115441b 100644
--- a/arch/powerpc/include/asm/ftrace.h
+++ b/arch/powerpc/include/asm/ftrace.h
@@ -10,55 +10,45 @@
#define HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
-#ifdef __ASSEMBLY__
-
-/* Based off of objdump optput from glibc */
-
-#define MCOUNT_SAVE_FRAME \
- stwu r1,-48(r1); \
- stw r3, 12(r1); \
- stw r4, 16(r1); \
- stw r5, 20(r1); \
- stw r6, 24(r1); \
- mflr r3; \
- lwz r4, 52(r1); \
- mfcr r5; \
- stw r7, 28(r1); \
- stw r8, 32(r1); \
- stw r9, 36(r1); \
- stw r10,40(r1); \
- stw r3, 44(r1); \
- stw r5, 8(r1)
-
-#define MCOUNT_RESTORE_FRAME \
- lwz r6, 8(r1); \
- lwz r0, 44(r1); \
- lwz r3, 12(r1); \
- mtctr r0; \
- lwz r4, 16(r1); \
- mtcr r6; \
- lwz r5, 20(r1); \
- lwz r6, 24(r1); \
- lwz r0, 52(r1); \
- lwz r7, 28(r1); \
- lwz r8, 32(r1); \
- mtlr r0; \
- lwz r9, 36(r1); \
- lwz r10,40(r1); \
- addi r1, r1, 48
-
-#else /* !__ASSEMBLY__ */
+#ifndef __ASSEMBLY__
extern void _mcount(void);
static inline unsigned long ftrace_call_adjust(unsigned long addr)
{
- /* reloction of mcount call site is the same as the address */
+ /* relocation of mcount call site is the same as the address */
return addr;
}
+unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip,
+ unsigned long sp);
+
struct dyn_arch_ftrace {
struct module *mod;
};
+
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_ARGS
+struct ftrace_regs {
+ struct pt_regs regs;
+};
+
+static __always_inline struct pt_regs *arch_ftrace_get_regs(struct ftrace_regs *fregs)
+{
+ /* We clear regs.msr in ftrace_call */
+ return fregs->regs.msr ? &fregs->regs : NULL;
+}
+
+static __always_inline void ftrace_instruction_pointer_set(struct ftrace_regs *fregs,
+ unsigned long ip)
+{
+ regs_set_return_ip(&fregs->regs, ip);
+}
+
+struct ftrace_ops;
+
+#define ftrace_graph_func ftrace_graph_func
+void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
+ struct ftrace_ops *op, struct ftrace_regs *fregs);
+#endif
#endif /* __ASSEMBLY__ */
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
@@ -74,7 +64,7 @@ struct dyn_arch_ftrace {
* those.
*/
#define ARCH_HAS_SYSCALL_MATCH_SYM_NAME
-#ifdef PPC64_ELF_ABI_v1
+#ifdef CONFIG_PPC64_ELF_ABI_V1
static inline bool arch_syscall_match_sym_name(const char *sym, const char *name)
{
/* We need to skip past the initial dot, and the __se_sys alias */
@@ -93,10 +83,10 @@ static inline bool arch_syscall_match_sym_name(const char *sym, const char *name
(!strncmp(sym, "ppc32_", 6) && !strcmp(sym + 6, name + 4)) ||
(!strncmp(sym, "ppc64_", 6) && !strcmp(sym + 6, name + 4));
}
-#endif /* PPC64_ELF_ABI_v1 */
+#endif /* CONFIG_PPC64_ELF_ABI_V1 */
#endif /* CONFIG_FTRACE_SYSCALLS */
-#ifdef CONFIG_PPC64
+#if defined(CONFIG_PPC64) && defined(CONFIG_FUNCTION_TRACER)
#include <asm/paca.h>
static inline void this_cpu_disable_ftrace(void)
@@ -108,9 +98,25 @@ static inline void this_cpu_enable_ftrace(void)
{
get_paca()->ftrace_enabled = 1;
}
+
+/* Disable ftrace on this CPU if possible (may not be implemented) */
+static inline void this_cpu_set_ftrace_enabled(u8 ftrace_enabled)
+{
+ get_paca()->ftrace_enabled = ftrace_enabled;
+}
+
+static inline u8 this_cpu_get_ftrace_enabled(void)
+{
+ return get_paca()->ftrace_enabled;
+}
+
+void ftrace_free_init_tramp(void);
#else /* CONFIG_PPC64 */
static inline void this_cpu_disable_ftrace(void) { }
static inline void this_cpu_enable_ftrace(void) { }
+static inline void this_cpu_set_ftrace_enabled(u8 ftrace_enabled) { }
+static inline u8 this_cpu_get_ftrace_enabled(void) { return 1; }
+static inline void ftrace_free_init_tramp(void) { }
#endif /* CONFIG_PPC64 */
#endif /* !__ASSEMBLY__ */
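With CONFIG_DYNAMIC_FTRACE_WITH_ARGS, a callback only sees full registers when the trampoline saved them; arch_ftrace_get_regs() above returns NULL otherwise because regs.msr is cleared in ftrace_call. A hedged sketch of a callback using the generic ftrace_get_regs() wrapper (my_trace_func/my_ops are illustrative):

    /* Sketch: an ftrace_ops callback that only acts when full regs were saved. */
    static void my_trace_func(unsigned long ip, unsigned long parent_ip,
                              struct ftrace_ops *op, struct ftrace_regs *fregs)
    {
        struct pt_regs *regs = ftrace_get_regs(fregs);  /* NULL without full regs */

        if (!regs)
            return;
        pr_debug("traced %pS from %pS, nip=%lx\n",
                 (void *)ip, (void *)parent_ip, regs->nip);
    }

    static struct ftrace_ops my_ops = {
        .func  = my_trace_func,
        .flags = FTRACE_OPS_FL_SAVE_REGS,       /* request a full pt_regs */
    };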
diff --git a/arch/powerpc/include/asm/futex.h b/arch/powerpc/include/asm/futex.h
index bc7d9d06a6d9..b3001f8b2c1e 100644
--- a/arch/powerpc/include/asm/futex.h
+++ b/arch/powerpc/include/asm/futex.h
@@ -8,14 +8,12 @@
#include <linux/uaccess.h>
#include <asm/errno.h>
#include <asm/synch.h>
-#include <asm/asm-405.h>
#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \
__asm__ __volatile ( \
PPC_ATOMIC_ENTRY_BARRIER \
"1: lwarx %0,0,%2\n" \
insn \
- PPC405_ERR77(0, %2) \
"2: stwcx. %1,0,%2\n" \
"bne- 1b\n" \
PPC_ATOMIC_EXIT_BARRIER \
@@ -35,8 +33,8 @@ static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
{
int oldval = 0, ret;
- allow_read_write_user(uaddr, uaddr, sizeof(*uaddr));
- pagefault_disable();
+ if (!user_access_begin(uaddr, sizeof(u32)))
+ return -EFAULT;
switch (op) {
case FUTEX_OP_SET:
@@ -57,12 +55,10 @@ static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
default:
ret = -ENOSYS;
}
-
- pagefault_enable();
+ user_access_end();
*oval = oldval;
- prevent_read_write_user(uaddr, uaddr, sizeof(*uaddr));
return ret;
}
@@ -73,17 +69,14 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
int ret = 0;
u32 prev;
- if (!access_ok(uaddr, sizeof(u32)))
+ if (!user_access_begin(uaddr, sizeof(u32)))
return -EFAULT;
- allow_read_write_user(uaddr, uaddr, sizeof(*uaddr));
-
__asm__ __volatile__ (
PPC_ATOMIC_ENTRY_BARRIER
"1: lwarx %1,0,%3 # futex_atomic_cmpxchg_inatomic\n\
cmpw 0,%1,%4\n\
bne- 3f\n"
- PPC405_ERR77(0,%3)
"2: stwcx. %5,0,%3\n\
bne- 1b\n"
PPC_ATOMIC_EXIT_BARRIER
@@ -97,8 +90,9 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
: "r" (uaddr), "r" (oldval), "r" (newval), "i" (-EFAULT)
: "cc", "memory");
+ user_access_end();
+
*uval = prev;
- prevent_read_write_user(uaddr, uaddr, sizeof(*uaddr));
return ret;
}
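The futex helpers above now bracket user memory accesses with a single user_access_begin()/user_access_end() window instead of pagefault_disable() plus the KUAP open/close helpers. A hedged sketch of that window pattern for one word, using unsafe_get_user()/unsafe_put_user(); unlike the futex code it is neither atomic nor pagefault-disabled, it only illustrates the bracketing:

    /* Sketch: read-modify-write one user word inside a user-access window. */
    static int add_to_user_word(u32 __user *uaddr, u32 inc)
    {
        u32 val;

        if (!user_access_begin(uaddr, sizeof(*uaddr)))
            return -EFAULT;
        unsafe_get_user(val, uaddr, Efault);
        unsafe_put_user(val + inc, uaddr, Efault);
        user_access_end();
        return 0;

    Efault:
        user_access_end();
        return -EFAULT;
    }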
diff --git a/arch/powerpc/include/asm/hardirq.h b/arch/powerpc/include/asm/hardirq.h
index f1e9067bd5ac..f133b5930ae1 100644
--- a/arch/powerpc/include/asm/hardirq.h
+++ b/arch/powerpc/include/asm/hardirq.h
@@ -13,7 +13,6 @@ typedef struct {
unsigned int pmu_irqs;
unsigned int mce_exceptions;
unsigned int spurious_irqs;
- unsigned int hmi_exceptions;
unsigned int sreset_irqs;
#ifdef CONFIG_PPC_WATCHDOG
unsigned int soft_nmi_irqs;
diff --git a/arch/powerpc/include/asm/head-64.h b/arch/powerpc/include/asm/head-64.h
index 2dabcf668292..d73153b0275d 100644
--- a/arch/powerpc/include/asm/head-64.h
+++ b/arch/powerpc/include/asm/head-64.h
@@ -16,7 +16,7 @@
.section ".head.data.\name\()","a",@progbits
.endm
.macro use_ftsec name
- .section ".head.text.\name\()"
+ .section ".head.text.\name\()","ax",@progbits
.endm
/*
@@ -98,13 +98,9 @@ start_text:
. = sname##_len;
#define USE_FIXED_SECTION(sname) \
- fs_label = start_##sname; \
- fs_start = sname##_start; \
use_ftsec sname;
#define USE_TEXT_SECTION() \
- fs_label = start_text; \
- fs_start = text_start; \
.text
#define CLOSE_FIXED_SECTION(sname) \
@@ -128,7 +124,7 @@ name:
.if ((start) % (size) != 0); \
.error "Fixed section exception vector misalignment"; \
.endif; \
- .if ((size) != 0x20) && ((size) != 0x80) && ((size) != 0x100); \
+ .if ((size) != 0x20) && ((size) != 0x80) && ((size) != 0x100) && ((size) != 0x1000); \
.error "Fixed section exception vector bad size"; \
.endif; \
.if (start) < sname##_start; \
@@ -161,13 +157,15 @@ name:
* - ABS_ADDR is used to find the absolute address of any symbol, from within
* a fixed section.
*/
-#define DEFINE_FIXED_SYMBOL(label) \
- label##_absolute = (label - fs_label + fs_start)
+// define label as being _in_ sname
+#define DEFINE_FIXED_SYMBOL(label, sname) \
+ label##_absolute = (label - start_ ## sname + sname ## _start)
#define FIXED_SYMBOL_ABS_ADDR(label) \
(label##_absolute)
-#define ABS_ADDR(label) (label - fs_label + fs_start)
+// find label from _within_ sname
+#define ABS_ADDR(label, sname) (label - start_ ## sname + sname ## _start)
#endif /* __ASSEMBLY__ */
diff --git a/arch/powerpc/include/asm/highmem.h b/arch/powerpc/include/asm/highmem.h
index a4b65b186ec6..c0fcd1bbdba9 100644
--- a/arch/powerpc/include/asm/highmem.h
+++ b/arch/powerpc/include/asm/highmem.h
@@ -24,13 +24,10 @@
#ifdef __KERNEL__
#include <linux/interrupt.h>
-#include <asm/kmap_types.h>
#include <asm/cacheflush.h>
#include <asm/page.h>
#include <asm/fixmap.h>
-extern pte_t *kmap_pte;
-extern pgprot_t kmap_prot;
extern pte_t *pkmap_page_table;
/*
@@ -59,35 +56,15 @@ extern pte_t *pkmap_page_table;
#define PKMAP_NR(virt) ((virt-PKMAP_BASE) >> PAGE_SHIFT)
#define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
-extern void *kmap_high(struct page *page);
-extern void kunmap_high(struct page *page);
-extern void *kmap_atomic_prot(struct page *page, pgprot_t prot);
-extern void __kunmap_atomic(void *kvaddr);
-
-static inline void *kmap(struct page *page)
-{
- might_sleep();
- if (!PageHighMem(page))
- return page_address(page);
- return kmap_high(page);
-}
-
-static inline void kunmap(struct page *page)
-{
- BUG_ON(in_interrupt());
- if (!PageHighMem(page))
- return;
- kunmap_high(page);
-}
-
-static inline void *kmap_atomic(struct page *page)
-{
- return kmap_atomic_prot(page, kmap_prot);
-}
-
-
#define flush_cache_kmaps() flush_cache_all()
+#define arch_kmap_local_set_pte(mm, vaddr, ptep, ptev) \
+ __set_pte_at(mm, vaddr, ptep, ptev, 1)
+#define arch_kmap_local_post_map(vaddr, pteval) \
+ local_flush_tlb_page(NULL, vaddr)
+#define arch_kmap_local_post_unmap(vaddr) \
+ local_flush_tlb_page(NULL, vaddr)
+
#endif /* __KERNEL__ */
#endif /* _ASM_HIGHMEM_H */
diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h
index bd6504c28c2f..ea71f7245a63 100644
--- a/arch/powerpc/include/asm/hugetlb.h
+++ b/arch/powerpc/include/asm/hugetlb.h
@@ -7,17 +7,15 @@
#ifdef CONFIG_PPC_BOOK3S_64
#include <asm/book3s/64/hugetlb.h>
-#elif defined(CONFIG_PPC_FSL_BOOK3E)
-#include <asm/nohash/hugetlb-book3e.h>
+#elif defined(CONFIG_PPC_E500)
+#include <asm/nohash/hugetlb-e500.h>
#elif defined(CONFIG_PPC_8xx)
#include <asm/nohash/32/hugetlb-8xx.h>
#endif /* CONFIG_PPC_BOOK3S_64 */
extern bool hugetlb_disabled;
-void hugetlbpage_init_default(void);
-
-void flush_dcache_icache_hugepage(struct page *page);
+void __init hugetlbpage_init_defaultsize(void);
int slice_is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
unsigned long len);
@@ -26,10 +24,11 @@ static inline int is_hugepage_only_range(struct mm_struct *mm,
unsigned long addr,
unsigned long len)
{
- if (IS_ENABLED(CONFIG_PPC_MM_SLICES) && !radix_enabled())
+ if (IS_ENABLED(CONFIG_PPC_64S_HASH_MMU) && !radix_enabled())
return slice_is_hugepage_only_range(mm, addr, len);
return 0;
}
+#define is_hugepage_only_range is_hugepage_only_range
#define __HAVE_ARCH_HUGETLB_FREE_PGD_RANGE
void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
@@ -40,19 +39,18 @@ void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
-#ifdef CONFIG_PPC64
return __pte(pte_update(mm, addr, ptep, ~0UL, 0, 1));
-#else
- return __pte(pte_update(ptep, ~0UL, 0));
-#endif
}
#define __HAVE_ARCH_HUGE_PTEP_CLEAR_FLUSH
-static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
- unsigned long addr, pte_t *ptep)
+static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep)
{
- huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
+ pte_t pte;
+
+ pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
flush_hugetlb_page(vma, addr);
+ return pte;
}
#define __HAVE_ARCH_HUGE_PTEP_SET_ACCESS_FLAGS
@@ -60,10 +58,7 @@ int huge_ptep_set_access_flags(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep,
pte_t pte, int dirty);
-static inline void arch_clear_hugepage_flags(struct page *page)
-{
-}
-
+void gigantic_hugetlb_cma_reserve(void) __init;
#include <asm-generic/hugetlb.h>
#else /* ! CONFIG_HUGETLB_PAGE */
@@ -78,6 +73,15 @@ static inline pte_t *hugepte_offset(hugepd_t hpd, unsigned long addr,
{
return NULL;
}
+
+
+static inline void __init gigantic_hugetlb_cma_reserve(void)
+{
+}
+
+static inline void __init hugetlbpage_init_defaultsize(void)
+{
+}
#endif /* CONFIG_HUGETLB_PAGE */
#endif /* _ASM_POWERPC_HUGETLB_H */
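huge_ptep_clear_flush() now returns the PTE it cleared, so callers can inspect the old mapping after the flush. A brief caller sketch (clear_and_report() is illustrative):

    /* Sketch: clear a huge PTE and act on the value it used to hold. */
    static void clear_and_report(struct vm_area_struct *vma, unsigned long addr,
                                 pte_t *ptep)
    {
        pte_t old = huge_ptep_clear_flush(vma, addr, ptep);

        if (pte_dirty(old))
            pr_debug("huge PTE at %lx was dirty\n", addr);
    }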
diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
index e90c073e437e..8abae463f6c1 100644
--- a/arch/powerpc/include/asm/hvcall.h
+++ b/arch/powerpc/include/asm/hvcall.h
@@ -79,6 +79,7 @@
#define H_NOT_ENOUGH_RESOURCES -44
#define H_R_STATE -45
#define H_RESCINDED -46
+#define H_P1 -54
#define H_P2 -55
#define H_P3 -56
#define H_P4 -57
@@ -87,6 +88,7 @@
#define H_P7 -60
#define H_P8 -61
#define H_P9 -62
+#define H_NOOP -63
#define H_TOO_BIG -64
#define H_UNSUPPORTED -67
#define H_OVERLAP -68
@@ -97,6 +99,8 @@
#define H_OP_MODE -73
#define H_COP_HW -74
#define H_STATE -75
+#define H_IN_USE -77
+#define H_ABORTED -78
#define H_UNSUPPORTED_FLAG_START -256
#define H_UNSUPPORTED_FLAG_END -511
#define H_MULTI_THREADS_ACTIVE -9005
@@ -155,6 +159,14 @@
#define H_VASI_RESUMED 5
#define H_VASI_COMPLETED 6
+/* VASI signal codes. Only the Cancel code is valid for H_VASI_SIGNAL. */
+#define H_VASI_SIGNAL_CANCEL 1
+#define H_VASI_SIGNAL_ABORT 2
+#define H_VASI_SIGNAL_SUSPEND 3
+#define H_VASI_SIGNAL_COMPLETE 4
+#define H_VASI_SIGNAL_ENABLE 5
+#define H_VASI_SIGNAL_FAILOVER 6
+
/* Each control block has to be on a 4K boundary */
#define H_CB_ALIGNMENT 4096
@@ -237,7 +249,7 @@
#define H_CREATE_RPT 0x1A4
#define H_REMOVE_RPT 0x1A8
#define H_REGISTER_RPAGES 0x1AC
-#define H_DISABLE_AND_GETC 0x1B0
+#define H_DISABLE_AND_GET 0x1B0
#define H_ERROR_DATA 0x1B4
#define H_GET_HCA_INFO 0x1B8
#define H_GET_PERF_COUNT 0x1BC
@@ -261,6 +273,7 @@
#define H_ADD_CONN 0x284
#define H_DEL_CONN 0x288
#define H_JOIN 0x298
+#define H_VASI_SIGNAL 0x2A0
#define H_VASI_STATE 0x2A4
#define H_VIOCTL 0x2A8
#define H_ENABLE_CRQ 0x2B0
@@ -285,6 +298,13 @@
#define H_RESIZE_HPT_COMMIT 0x370
#define H_REGISTER_PROC_TBL 0x37C
#define H_SIGNAL_SYS_RESET 0x380
+#define H_ALLOCATE_VAS_WINDOW 0x388
+#define H_MODIFY_VAS_WINDOW 0x38C
+#define H_DEALLOCATE_VAS_WINDOW 0x390
+#define H_QUERY_VAS_WINDOW 0x394
+#define H_QUERY_VAS_CAPABILITIES 0x398
+#define H_QUERY_NX_CAPABILITIES 0x39C
+#define H_GET_NX_FAULT 0x3A0
#define H_INT_GET_SOURCE_INFO 0x3A8
#define H_INT_SET_SOURCE_CONFIG 0x3AC
#define H_INT_GET_SOURCE_CONFIG 0x3B0
@@ -305,7 +325,19 @@
#define H_SCM_UNBIND_ALL 0x3FC
#define H_SCM_HEALTH 0x400
#define H_SCM_PERFORMANCE_STATS 0x418
-#define MAX_HCALL_OPCODE H_SCM_PERFORMANCE_STATS
+#define H_PKS_GET_CONFIG 0x41C
+#define H_PKS_SET_PASSWORD 0x420
+#define H_PKS_GEN_PASSWORD 0x424
+#define H_PKS_WRITE_OBJECT 0x42C
+#define H_PKS_GEN_KEY 0x430
+#define H_PKS_READ_OBJECT 0x434
+#define H_PKS_REMOVE_OBJECT 0x438
+#define H_PKS_CONFIRM_OBJECT_FLUSHED 0x43C
+#define H_RPT_INVALIDATE 0x448
+#define H_SCM_FLUSH 0x44C
+#define H_GET_ENERGY_SCALE_INFO 0x450
+#define H_WATCHDOG 0x45C
+#define MAX_HCALL_OPCODE H_WATCHDOG
/* Scope args for H_SCM_UNBIND_ALL */
#define H_UNBIND_SCOPE_ALL (0x1)
@@ -331,6 +363,14 @@
/* Platform specific hcalls, used by KVM */
#define H_RTAS 0xf000
+/*
+ * Platform specific hcalls, used by QEMU/SLOF. These are ignored by
+ * KVM and only kept here so we can identify them during tracing.
+ */
+#define H_LOGICAL_MEMOP 0xF001
+#define H_CAS 0XF002
+#define H_UPDATE_DT 0XF003
+
/* "Platform specific hcalls", provided by PHYP */
#define H_GET_24X7_CATALOG_PAGE 0xF078
#define H_GET_24X7_DATA 0xF07C
@@ -354,9 +394,10 @@
/* Values for 2nd argument to H_SET_MODE */
#define H_SET_MODE_RESOURCE_SET_CIABR 1
-#define H_SET_MODE_RESOURCE_SET_DAWR 2
+#define H_SET_MODE_RESOURCE_SET_DAWR0 2
#define H_SET_MODE_RESOURCE_ADDR_TRANS_MODE 3
#define H_SET_MODE_RESOURCE_LE 4
+#define H_SET_MODE_RESOURCE_SET_DAWR1 5
/* Values for argument to H_SIGNAL_SYS_RESET */
#define H_SIGNAL_SYS_RESET_ALL -1
@@ -373,11 +414,17 @@
#define H_CPU_CHAR_THREAD_RECONFIG_CTRL (1ull << 57) // IBM bit 6
#define H_CPU_CHAR_COUNT_CACHE_DISABLED (1ull << 56) // IBM bit 7
#define H_CPU_CHAR_BCCTR_FLUSH_ASSIST (1ull << 54) // IBM bit 9
+#define H_CPU_CHAR_BCCTR_LINK_FLUSH_ASSIST (1ull << 52) // IBM bit 11
#define H_CPU_BEHAV_FAVOUR_SECURITY (1ull << 63) // IBM bit 0
#define H_CPU_BEHAV_L1D_FLUSH_PR (1ull << 62) // IBM bit 1
#define H_CPU_BEHAV_BNDS_CHK_SPEC_BAR (1ull << 61) // IBM bit 2
+#define H_CPU_BEHAV_FAVOUR_SECURITY_H (1ull << 60) // IBM bit 3
#define H_CPU_BEHAV_FLUSH_COUNT_CACHE (1ull << 58) // IBM bit 5
+#define H_CPU_BEHAV_FLUSH_LINK_STACK (1ull << 57) // IBM bit 6
+#define H_CPU_BEHAV_NO_L1D_FLUSH_ENTRY (1ull << 56) // IBM bit 7
+#define H_CPU_BEHAV_NO_L1D_FLUSH_UACCESS (1ull << 55) // IBM bit 8
+#define H_CPU_BEHAV_NO_STF_BARRIER (1ull << 54) // IBM bit 9
/* Flag values used in H_REGISTER_PROC_TBL hcall */
#define PROC_TABLE_OP_MASK 0x18
@@ -389,6 +436,37 @@
#define PROC_TABLE_RADIX 0x04
#define PROC_TABLE_GTSE 0x01
+/*
+ * Defines for
+ * H_RPT_INVALIDATE - Invalidate RPT translation lookaside information.
+ */
+
+/* Type of translation to invalidate (type) */
+#define H_RPTI_TYPE_NESTED 0x0001 /* Invalidate nested guest partition-scope */
+#define H_RPTI_TYPE_TLB 0x0002 /* Invalidate TLB */
+#define H_RPTI_TYPE_PWC 0x0004 /* Invalidate Page Walk Cache */
+/* Invalidate caching of Process Table Entries if H_RPTI_TYPE_NESTED is clear */
+#define H_RPTI_TYPE_PRT 0x0008
+/* Invalidate caching of Partition Table Entries if H_RPTI_TYPE_NESTED is set */
+#define H_RPTI_TYPE_PAT 0x0008
+#define H_RPTI_TYPE_ALL (H_RPTI_TYPE_TLB | H_RPTI_TYPE_PWC | \
+ H_RPTI_TYPE_PRT)
+#define H_RPTI_TYPE_NESTED_ALL (H_RPTI_TYPE_TLB | H_RPTI_TYPE_PWC | \
+ H_RPTI_TYPE_PAT)
+
+/* Invalidation targets (target) */
+#define H_RPTI_TARGET_CMMU 0x01 /* All virtual processors in the partition */
+#define H_RPTI_TARGET_CMMU_LOCAL 0x02 /* Current virtual processor */
+/* All nest/accelerator agents in use by the partition */
+#define H_RPTI_TARGET_NMMU 0x04
+
+/* Page size mask (page sizes) */
+#define H_RPTI_PAGE_4K 0x01
+#define H_RPTI_PAGE_64K 0x02
+#define H_RPTI_PAGE_2M 0x04
+#define H_RPTI_PAGE_1G 0x08
+#define H_RPTI_PAGE_ALL (-1UL)
+
#ifndef __ASSEMBLY__
#include <linux/types.h>
@@ -402,6 +480,9 @@
*/
long plpar_hcall_norets(unsigned long opcode, ...);
+/* Variant which does not do hcall tracing */
+long plpar_hcall_norets_notrace(unsigned long opcode, ...);
+
/**
* plpar_hcall: - Make a pseries hypervisor call
* @opcode: The hypervisor call to make.
@@ -441,6 +522,11 @@ long plpar_hcall_raw(unsigned long opcode, unsigned long *retbuf, ...);
long plpar_hcall9(unsigned long opcode, unsigned long *retbuf, ...);
long plpar_hcall9_raw(unsigned long opcode, unsigned long *retbuf, ...);
+/* pseries hcall tracing */
+extern struct static_key hcall_tracepoint_key;
+void __trace_hcall_entry(unsigned long opcode, unsigned long *args);
+void __trace_hcall_exit(long opcode, long retval, unsigned long *retbuf);
+
struct hvcall_mpp_data {
unsigned long entitled_mem;
unsigned long mapped_mem;
@@ -491,9 +577,12 @@ struct h_cpu_char_result {
u64 behaviour;
};
-/* Register state for entering a nested guest with H_ENTER_NESTED */
+/*
+ * Register state for entering a nested guest with H_ENTER_NESTED.
+ * New member must be added at the end.
+ */
struct hv_guest_state {
- u64 version; /* version of this structure layout */
+ u64 version; /* version of this structure layout, must be first */
u32 lpid;
u32 vcpu_token;
/* These registers are hypervisor privileged (at least for writing) */
@@ -522,10 +611,62 @@ struct hv_guest_state {
u64 pidr;
u64 cfar;
u64 ppr;
+ /* Version 1 ends here */
+ u64 dawr1;
+ u64 dawrx1;
+ /* Version 2 ends here */
};
/* Latest version of hv_guest_state structure */
-#define HV_GUEST_STATE_VERSION 1
+#define HV_GUEST_STATE_VERSION 2
+
+static inline int hv_guest_state_size(unsigned int version)
+{
+ switch (version) {
+ case 1:
+ return offsetofend(struct hv_guest_state, ppr);
+ case 2:
+ return offsetofend(struct hv_guest_state, dawrx1);
+ default:
+ return -1;
+ }
+}
+
+/*
+ * From the document "H_GetPerformanceCounterInfo Interface" v1.07
+ *
+ * H_GET_PERF_COUNTER_INFO argument
+ */
+struct hv_get_perf_counter_info_params {
+ __be32 counter_request; /* I */
+ __be32 starting_index; /* IO */
+ __be16 secondary_index; /* IO */
+ __be16 returned_values; /* O */
+ __be32 detail_rc; /* O, only needed when called via *_norets() */
+
+ /*
+ * O, size each of counter_value element in bytes, only set for version
+ * >= 0x3
+ */
+ __be16 cv_element_size;
+
+ /* I, 0 (zero) for versions < 0x3 */
+ __u8 counter_info_version_in;
+
+ /* O, 0 (zero) if version < 0x3. Must be set to 0 when making hcall */
+ __u8 counter_info_version_out;
+ __u8 reserved[0xC];
+ __u8 counter_value[];
+} __packed;
+
+#define HGPCI_REQ_BUFFER_SIZE 4096
+#define HGPCI_MAX_DATA_BYTES \
+ (HGPCI_REQ_BUFFER_SIZE - sizeof(struct hv_get_perf_counter_info_params))
+
+struct hv_gpci_request_buffer {
+ struct hv_get_perf_counter_info_params params;
+ uint8_t bytes[HGPCI_MAX_DATA_BYTES];
+} __packed;
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
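plpar_hcall() returns up to PLPAR_HCALL_BUFSIZE values through retbuf, and the VASI calls above follow that convention. A hedged kernel-context sketch querying a VASI stream state (query_vasi_state() is illustrative; the real users live in the pseries platform code):

    /* Sketch: query a VASI stream state and map a couple of return codes. */
    static int query_vasi_state(u64 handle, unsigned long *state)
    {
        unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
        long rc;

        rc = plpar_hcall(H_VASI_STATE, retbuf, handle);
        if (rc == H_SUCCESS) {
            *state = retbuf[0];
            return 0;
        }
        return rc == H_PARAMETER ? -EINVAL : -EIO;
    }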
diff --git a/arch/powerpc/include/asm/hvconsole.h b/arch/powerpc/include/asm/hvconsole.h
index 999ed5ac9053..ccb2034506f0 100644
--- a/arch/powerpc/include/asm/hvconsole.h
+++ b/arch/powerpc/include/asm/hvconsole.h
@@ -24,5 +24,8 @@
extern int hvc_get_chars(uint32_t vtermno, char *buf, int count);
extern int hvc_put_chars(uint32_t vtermno, const char *buf, int count);
+/* Provided by HVC VIO */
+void hvc_vio_init_early(void);
+
#endif /* __KERNEL__ */
#endif /* _PPC64_HVCONSOLE_H */
diff --git a/arch/powerpc/include/asm/hw_breakpoint.h b/arch/powerpc/include/asm/hw_breakpoint.h
index f2f8d8aa8e3b..84d39fd42f71 100644
--- a/arch/powerpc/include/asm/hw_breakpoint.h
+++ b/arch/powerpc/include/asm/hw_breakpoint.h
@@ -9,15 +9,18 @@
#ifndef _PPC_BOOK3S_64_HW_BREAKPOINT_H
#define _PPC_BOOK3S_64_HW_BREAKPOINT_H
+#include <asm/cpu_has_feature.h>
+
#ifdef __KERNEL__
struct arch_hw_breakpoint {
unsigned long address;
u16 type;
u16 len; /* length of the target data symbol */
u16 hw_len; /* length programmed in hw */
+ u8 flags;
};
-/* Note: Don't change the the first 6 bits below as they are in the same order
+/* Note: Don't change the first 6 bits below as they are in the same order
* as the dabr and dabrx.
*/
#define HW_BRK_TYPE_READ 0x01
@@ -34,15 +37,31 @@ struct arch_hw_breakpoint {
#define HW_BRK_TYPE_PRIV_ALL (HW_BRK_TYPE_USER | HW_BRK_TYPE_KERNEL | \
HW_BRK_TYPE_HYP)
+#define HW_BRK_FLAG_DISABLED 0x1
+
+/* Minimum granularity */
#ifdef CONFIG_PPC_8xx
-#define HW_BREAKPOINT_ALIGN 0x3
+#define HW_BREAKPOINT_SIZE 0x4
#else
-#define HW_BREAKPOINT_ALIGN 0x7
+#define HW_BREAKPOINT_SIZE 0x8
#endif
+#define HW_BREAKPOINT_SIZE_QUADWORD 0x10
#define DABR_MAX_LEN 8
#define DAWR_MAX_LEN 512
+static inline int nr_wp_slots(void)
+{
+ return cpu_has_feature(CPU_FTR_DAWR1) ? 2 : 1;
+}
+
+bool wp_check_constraints(struct pt_regs *regs, ppc_inst_t instr,
+ unsigned long ea, int type, int size,
+ struct arch_hw_breakpoint *info);
+
+void wp_get_instr_detail(struct pt_regs *regs, ppc_inst_t *instr,
+ int *type, int *size, unsigned long *ea);
+
#ifdef CONFIG_HAVE_HW_BREAKPOINT
#include <linux/kdebug.h>
#include <asm/reg.h>
@@ -64,7 +83,6 @@ extern int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
unsigned long val, void *data);
int arch_install_hw_breakpoint(struct perf_event *bp);
void arch_uninstall_hw_breakpoint(struct perf_event *bp);
-void arch_unregister_hw_breakpoint(struct perf_event *bp);
void hw_breakpoint_pmu_read(struct perf_event *bp);
extern void flush_ptrace_hw_breakpoint(struct task_struct *tsk);
@@ -73,14 +91,14 @@ extern void ptrace_triggered(struct perf_event *bp,
struct perf_sample_data *data, struct pt_regs *regs);
static inline void hw_breakpoint_disable(void)
{
- struct arch_hw_breakpoint brk;
-
- brk.address = 0;
- brk.type = 0;
- brk.len = 0;
- brk.hw_len = 0;
- if (ppc_breakpoint_available())
- __set_breakpoint(&brk);
+ int i;
+ struct arch_hw_breakpoint null_brk = {0};
+
+ if (!ppc_breakpoint_available())
+ return;
+
+ for (i = 0; i < nr_wp_slots(); i++)
+ __set_breakpoint(i, &null_brk);
}
extern void thread_change_pc(struct task_struct *tsk, struct pt_regs *regs);
int hw_breakpoint_handler(struct die_args *args);
@@ -99,10 +117,10 @@ static inline bool dawr_enabled(void)
{
return dawr_force_enable;
}
-int set_dawr(struct arch_hw_breakpoint *brk);
+int set_dawr(int nr, struct arch_hw_breakpoint *brk);
#else
static inline bool dawr_enabled(void) { return false; }
-static inline int set_dawr(struct arch_hw_breakpoint *brk) { return -1; }
+static inline int set_dawr(int nr, struct arch_hw_breakpoint *brk) { return -1; }
#endif
#endif /* __KERNEL__ */
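HW_BREAKPOINT_SIZE above is the minimum watch granule (4 bytes on 8xx, 8 elsewhere), so a requested range must be widened to granule boundaries before being programmed. A small sketch of that arithmetic, assuming ALIGN()/ALIGN_DOWN() from <linux/align.h>:

    /* Sketch: widen a requested watch range to hardware granule boundaries. */
    static void align_watch_range(unsigned long addr, u16 len,
                                  unsigned long *hw_addr, u16 *hw_len)
    {
        unsigned long start = ALIGN_DOWN(addr, HW_BREAKPOINT_SIZE);
        unsigned long end = ALIGN(addr + len, HW_BREAKPOINT_SIZE);

        *hw_addr = start;
        *hw_len = end - start;          /* multiple of HW_BREAKPOINT_SIZE */
    }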
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index e3a905e3d573..77fa88c2aed0 100644
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -18,16 +18,24 @@
* PACA flags in paca->irq_happened.
*
* This bits are set when interrupts occur while soft-disabled
- * and allow a proper replay. Additionally, PACA_IRQ_HARD_DIS
- * is set whenever we manually hard disable.
+ * and allow a proper replay.
+ *
+ * The PACA_IRQ_HARD_DIS is set whenever we hard disable. It is almost
+ * always in synch with the MSR[EE] state, except:
+ * - A window in interrupt entry, where hardware disables MSR[EE] and that
+ * must be "reconciled" with the soft mask state.
+ * - NMI interrupts that hit in awkward places, until they fix the state.
+ * - When local irqs are being enabled and state is being fixed up.
+ * - When returning from an interrupt there are some windows where this
+ * can become out of synch, but gets fixed before the RFI or before
+ * executing the next user instruction (see arch/powerpc/kernel/interrupt.c).
*/
#define PACA_IRQ_HARD_DIS 0x01
#define PACA_IRQ_DBELL 0x02
#define PACA_IRQ_EE 0x04
#define PACA_IRQ_DEC 0x08 /* Or FIT */
-#define PACA_IRQ_EE_EDGE 0x10 /* BookE only */
-#define PACA_IRQ_HMI 0x20
-#define PACA_IRQ_PMI 0x40
+#define PACA_IRQ_HMI 0x10
+#define PACA_IRQ_PMI 0x20
/*
* Some soft-masked interrupts must be hard masked until they are replayed
@@ -39,6 +47,8 @@
#define PACA_IRQ_MUST_HARD_MASK (PACA_IRQ_EE)
#endif
+#endif /* CONFIG_PPC64 */
+
/*
* flags for paca->irq_soft_mask
*/
@@ -47,18 +57,56 @@
#define IRQS_PMI_DISABLED 2
#define IRQS_ALL_DISABLED (IRQS_DISABLED | IRQS_PMI_DISABLED)
-#endif /* CONFIG_PPC64 */
-
#ifndef __ASSEMBLY__
-extern void replay_system_reset(void);
-extern void __replay_interrupt(unsigned int vector);
+static inline void __hard_irq_enable(void)
+{
+ if (IS_ENABLED(CONFIG_BOOKE_OR_40x))
+ wrtee(MSR_EE);
+ else if (IS_ENABLED(CONFIG_PPC_8xx))
+ wrtspr(SPRN_EIE);
+ else if (IS_ENABLED(CONFIG_PPC_BOOK3S_64))
+ __mtmsrd(MSR_EE | MSR_RI, 1);
+ else
+ mtmsr(mfmsr() | MSR_EE);
+}
+
+static inline void __hard_irq_disable(void)
+{
+ if (IS_ENABLED(CONFIG_BOOKE_OR_40x))
+ wrtee(0);
+ else if (IS_ENABLED(CONFIG_PPC_8xx))
+ wrtspr(SPRN_EID);
+ else if (IS_ENABLED(CONFIG_PPC_BOOK3S_64))
+ __mtmsrd(MSR_RI, 1);
+ else
+ mtmsr(mfmsr() & ~MSR_EE);
+}
+
+static inline void __hard_EE_RI_disable(void)
+{
+ if (IS_ENABLED(CONFIG_BOOKE_OR_40x))
+ wrtee(0);
+ else if (IS_ENABLED(CONFIG_PPC_8xx))
+ wrtspr(SPRN_NRI);
+ else if (IS_ENABLED(CONFIG_PPC_BOOK3S_64))
+ __mtmsrd(0, 1);
+ else
+ mtmsr(mfmsr() & ~(MSR_EE | MSR_RI));
+}
+
+static inline void __hard_RI_enable(void)
+{
+ if (IS_ENABLED(CONFIG_BOOKE_OR_40x))
+ return;
-extern void timer_interrupt(struct pt_regs *);
-extern void timer_broadcast_interrupt(void);
-extern void performance_monitor_exception(struct pt_regs *regs);
-extern void WatchdogException(struct pt_regs *regs);
-extern void unknown_exception(struct pt_regs *regs);
+ if (IS_ENABLED(CONFIG_PPC_8xx))
+ wrtspr(SPRN_EID);
+ else if (IS_ENABLED(CONFIG_PPC_BOOK3S_64))
+ __mtmsrd(MSR_RI, 1);
+ else
+ mtmsr(mfmsr() | MSR_RI);
+}
#ifdef CONFIG_PPC64
#include <asm/paca.h>
@@ -82,7 +130,6 @@ static inline notrace unsigned long irq_soft_mask_return(void)
*/
static inline notrace void irq_soft_mask_set(unsigned long mask)
{
-#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
/*
* The irq mask must always include the STD bit if any are set.
*
@@ -97,8 +144,8 @@ static inline notrace void irq_soft_mask_set(unsigned long mask)
* unmasks to be replayed, among other things. For now, take
* the simple approach.
*/
- WARN_ON(mask && !(mask & IRQS_DISABLED));
-#endif
+ if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
+ WARN_ON(mask && !(mask & IRQS_DISABLED));
asm volatile(
"stb %0,%1(13)"
@@ -110,36 +157,18 @@ static inline notrace void irq_soft_mask_set(unsigned long mask)
static inline notrace unsigned long irq_soft_mask_set_return(unsigned long mask)
{
- unsigned long flags;
-
-#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
- WARN_ON(mask && !(mask & IRQS_DISABLED));
-#endif
+ unsigned long flags = irq_soft_mask_return();
- asm volatile(
- "lbz %0,%1(13); stb %2,%1(13)"
- : "=&r" (flags)
- : "i" (offsetof(struct paca_struct, irq_soft_mask)),
- "r" (mask)
- : "memory");
+ irq_soft_mask_set(mask);
return flags;
}
static inline notrace unsigned long irq_soft_mask_or_return(unsigned long mask)
{
- unsigned long flags, tmp;
-
- asm volatile(
- "lbz %0,%2(13); or %1,%0,%3; stb %1,%2(13)"
- : "=&r" (flags), "=r" (tmp)
- : "i" (offsetof(struct paca_struct, irq_soft_mask)),
- "r" (mask)
- : "memory");
+ unsigned long flags = irq_soft_mask_return();
-#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
- WARN_ON((mask | flags) && !((mask | flags) & IRQS_DISABLED));
-#endif
+ irq_soft_mask_set(flags | mask);
return flags;
}
@@ -176,6 +205,42 @@ static inline bool arch_irqs_disabled(void)
return arch_irqs_disabled_flags(arch_local_save_flags());
}
+static inline void set_pmi_irq_pending(void)
+{
+ /*
+ * Invoked from PMU callback functions to set PMI bit in the paca.
+ * This has to be called with irq's disabled (via hard_irq_disable()).
+ */
+ if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
+ WARN_ON_ONCE(mfmsr() & MSR_EE);
+
+ get_paca()->irq_happened |= PACA_IRQ_PMI;
+}
+
+static inline void clear_pmi_irq_pending(void)
+{
+ /*
+ * Invoked from PMU callback functions to clear the pending PMI bit
+ * in the paca.
+ */
+ if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
+ WARN_ON_ONCE(mfmsr() & MSR_EE);
+
+ get_paca()->irq_happened &= ~PACA_IRQ_PMI;
+}
+
+static inline bool pmi_irq_pending(void)
+{
+ /*
+ * Invoked from PMU callback functions to check if there is a pending
+ * PMI bit in the paca.
+ */
+ if (get_paca()->irq_happened & PACA_IRQ_PMI)
+ return true;
+
+ return false;
+}
+
#ifdef CONFIG_PPC_BOOK3S
/*
* To support disabling and enabling of irq with PMI, set of
@@ -200,17 +265,14 @@ static inline bool arch_irqs_disabled(void)
#define powerpc_local_irq_pmu_save(flags) \
do { \
raw_local_irq_pmu_save(flags); \
- trace_hardirqs_off(); \
+ if (!raw_irqs_disabled_flags(flags)) \
+ trace_hardirqs_off(); \
} while(0)
#define powerpc_local_irq_pmu_restore(flags) \
do { \
- if (raw_irqs_disabled_flags(flags)) { \
- raw_local_irq_pmu_restore(flags); \
- trace_hardirqs_off(); \
- } else { \
+ if (!raw_irqs_disabled_flags(flags)) \
trace_hardirqs_on(); \
- raw_local_irq_pmu_restore(flags); \
- } \
+ raw_local_irq_pmu_restore(flags); \
} while(0)
#else
#define powerpc_local_irq_pmu_save(flags) \
@@ -225,44 +287,91 @@ static inline bool arch_irqs_disabled(void)
#endif /* CONFIG_PPC_BOOK3S */
-#ifdef CONFIG_PPC_BOOK3E
-#define __hard_irq_enable() wrtee(MSR_EE)
-#define __hard_irq_disable() wrtee(0)
-#else
-#define __hard_irq_enable() __mtmsrd(MSR_EE|MSR_RI, 1)
-#define __hard_irq_disable() __mtmsrd(MSR_RI, 1)
-#endif
-
#define hard_irq_disable() do { \
unsigned long flags; \
__hard_irq_disable(); \
flags = irq_soft_mask_set_return(IRQS_ALL_DISABLED); \
local_paca->irq_happened |= PACA_IRQ_HARD_DIS; \
if (!arch_irqs_disabled_flags(flags)) { \
- asm ("stdx %%r1, 0, %1 ;" \
- : "=m" (local_paca->saved_r1) \
- : "b" (&local_paca->saved_r1)); \
+ asm volatile("std%X0 %1,%0" : "=m" (local_paca->saved_r1) \
+ : "r" (current_stack_pointer)); \
trace_hardirqs_off(); \
} \
} while(0)
+static inline bool __lazy_irq_pending(u8 irq_happened)
+{
+ return !!(irq_happened & ~PACA_IRQ_HARD_DIS);
+}
+
+/*
+ * Check if a lazy IRQ is pending. Should be called with IRQs hard disabled.
+ */
static inline bool lazy_irq_pending(void)
{
- return !!(get_paca()->irq_happened & ~PACA_IRQ_HARD_DIS);
+ return __lazy_irq_pending(get_paca()->irq_happened);
+}
+
+/*
+ * Check if a lazy IRQ is pending, with no debugging checks.
+ * Should be called with IRQs hard disabled.
+ * For use in RI disabled code or other constrained situations.
+ */
+static inline bool lazy_irq_pending_nocheck(void)
+{
+ return __lazy_irq_pending(local_paca->irq_happened);
+}
+
+bool power_pmu_wants_prompt_pmi(void);
+
+/*
+ * This is called by asynchronous interrupts to check whether to
+ * conditionally re-enable hard interrupts after having cleared
+ * the source of the interrupt. They are kept disabled if there
+ * is a different soft-masked interrupt pending that requires hard
+ * masking.
+ */
+static inline bool should_hard_irq_enable(void)
+{
+ if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) {
+ WARN_ON(irq_soft_mask_return() == IRQS_ENABLED);
+ WARN_ON(mfmsr() & MSR_EE);
+ }
+
+ if (!IS_ENABLED(CONFIG_PERF_EVENTS))
+ return false;
+ /*
+ * If the PMU is not running, there is not much reason to enable
+ * MSR[EE] in irq handlers because any interrupts would just be
+ * soft-masked.
+ *
+ * TODO: Add test for 64e
+ */
+ if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && !power_pmu_wants_prompt_pmi())
+ return false;
+
+ if (get_paca()->irq_happened & PACA_IRQ_MUST_HARD_MASK)
+ return false;
+
+ return true;
}
/*
- * This is called by asynchronous interrupts to conditionally
- * re-enable hard interrupts after having cleared the source
- * of the interrupt. They are kept disabled if there is a different
- * soft-masked interrupt pending that requires hard masking.
+ * Do the hard enabling, only call this if should_hard_irq_enable is true.
*/
-static inline void may_hard_irq_enable(void)
+static inline void do_hard_irq_enable(void)
{
- if (!(get_paca()->irq_happened & PACA_IRQ_MUST_HARD_MASK)) {
- get_paca()->irq_happened &= ~PACA_IRQ_HARD_DIS;
- __hard_irq_enable();
+ if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) {
+ WARN_ON(irq_soft_mask_return() == IRQS_ENABLED);
+ WARN_ON(get_paca()->irq_happened & PACA_IRQ_MUST_HARD_MASK);
+ WARN_ON(mfmsr() & MSR_EE);
}
+ /*
+ * This allows PMI interrupts (and watchdog soft-NMIs) through.
+ * There is no other reason to enable this way.
+ */
+ get_paca()->irq_happened &= ~PACA_IRQ_HARD_DIS;
+ __hard_irq_enable();
}
static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
@@ -278,8 +387,17 @@ extern void irq_set_pending_from_srr1(unsigned long srr1);
extern void force_external_irq_replay(void);
+static inline void irq_soft_mask_regs_set_state(struct pt_regs *regs, unsigned long val)
+{
+ regs->softe = val;
+}
#else /* CONFIG_PPC64 */
+static inline notrace unsigned long irq_soft_mask_return(void)
+{
+ return 0;
+}
+
static inline unsigned long arch_local_save_flags(void)
{
return mfmsr();
@@ -309,22 +427,12 @@ static inline unsigned long arch_local_irq_save(void)
static inline void arch_local_irq_disable(void)
{
- if (IS_ENABLED(CONFIG_BOOKE))
- wrtee(0);
- else if (IS_ENABLED(CONFIG_PPC_8xx))
- wrtspr(SPRN_EID);
- else
- mtmsr(mfmsr() & ~MSR_EE);
+ __hard_irq_disable();
}
static inline void arch_local_irq_enable(void)
{
- if (IS_ENABLED(CONFIG_BOOKE))
- wrtee(MSR_EE);
- else if (IS_ENABLED(CONFIG_PPC_8xx))
- wrtspr(SPRN_EIE);
- else
- mtmsr(mfmsr() | MSR_EE);
+ __hard_irq_enable();
}
static inline bool arch_irqs_disabled_flags(unsigned long flags)
@@ -344,17 +452,50 @@ static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
return !(regs->msr & MSR_EE);
}
-static inline void may_hard_irq_enable(void) { }
+static __always_inline bool should_hard_irq_enable(void)
+{
+ return false;
+}
+static inline void do_hard_irq_enable(void)
+{
+ BUILD_BUG();
+}
+
+static inline void clear_pmi_irq_pending(void) { }
+static inline void set_pmi_irq_pending(void) { }
+static inline bool pmi_irq_pending(void) { return false; }
+
+static inline void irq_soft_mask_regs_set_state(struct pt_regs *regs, unsigned long val)
+{
+}
#endif /* CONFIG_PPC64 */
-#define ARCH_IRQ_INIT_FLAGS IRQ_NOREQUEST
+static inline unsigned long mtmsr_isync_irqsafe(unsigned long msr)
+{
+#ifdef CONFIG_PPC64
+ if (arch_irqs_disabled()) {
+ /*
+ * With soft-masking, MSR[EE] can change from 1 to 0
+ * asynchronously when irqs are disabled, and we don't want to
+ * set MSR[EE] back to 1 here if that has happened. A race-free
+ * way to do this is to ensure EE is already 0. Another way it
+ * could be done is with a RESTART_TABLE handler, but that's
+ * probably overkill here.
+ */
+ msr &= ~MSR_EE;
+ mtmsr_isync(msr);
+ irq_soft_mask_set(IRQS_ALL_DISABLED);
+ local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
+ } else
+#endif
+ mtmsr_isync(msr);
-/*
- * interrupt-retrigger: should we handle this via lost interrupts and IPIs
- * or should we not care like we do now ? --BenH.
- */
-struct irq_chip;
+ return msr;
+}
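+
+/*
+ * Illustrative usage sketch (hypothetical caller; MSR_FP is just an
+ * example bit): write an updated MSR value and keep the returned value,
+ * which is what was actually written and may have had MSR[EE] cleared
+ * as described above.
+ *
+ *	unsigned long newmsr = mfmsr() | MSR_FP;
+ *
+ *	newmsr = mtmsr_isync_irqsafe(newmsr);
+ */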
+
+
+#define ARCH_IRQ_INIT_FLAGS IRQ_NOREQUEST
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/hydra.h b/arch/powerpc/include/asm/hydra.h
index b3b0f2d020f0..d024447283a0 100644
--- a/arch/powerpc/include/asm/hydra.h
+++ b/arch/powerpc/include/asm/hydra.h
@@ -10,7 +10,7 @@
*
* © Copyright 1995 Apple Computer, Inc. All rights reserved.
*
- * It's available online from http://www.cpu.lu/~mlan/ftp/MacTech.pdf
+ * It's available online from https://www.cpu.lu/~mlan/ftp/MacTech.pdf
* You can obtain paper copies of this book from computer bookstores or by
* writing Morgan Kaufmann Publishers, Inc., 340 Pine Street, Sixth Floor, San
* Francisco, CA 94104. Reference ISBN 1-55860-393-X.
@@ -94,8 +94,6 @@ extern volatile struct Hydra __iomem *Hydra;
#define HYDRA_INT_EXT7 18 /* Power Off Request */
#define HYDRA_INT_SPARE 19
-extern int hydra_init(void);
-
#endif /* __KERNEL__ */
#endif /* _ASMPPC_HYDRA_H */
diff --git a/arch/powerpc/include/asm/i8259.h b/arch/powerpc/include/asm/i8259.h
index d7f08ae49e12..75481d363cd8 100644
--- a/arch/powerpc/include/asm/i8259.h
+++ b/arch/powerpc/include/asm/i8259.h
@@ -7,7 +7,7 @@
extern void i8259_init(struct device_node *node, unsigned long intack_addr);
extern unsigned int i8259_irq(void);
-extern struct irq_domain *i8259_get_host(void);
+struct irq_domain *__init i8259_get_host(void);
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_I8259_H */
diff --git a/arch/powerpc/include/asm/icswx.h b/arch/powerpc/include/asm/icswx.h
index 9872f85d356f..f6599ccb3012 100644
--- a/arch/powerpc/include/asm/icswx.h
+++ b/arch/powerpc/include/asm/icswx.h
@@ -77,6 +77,8 @@ struct coprocessor_completion_block {
#define CSB_CC_CHAIN (37)
#define CSB_CC_SEQUENCE (38)
#define CSB_CC_HW (39)
+/* P9 DD2 NX Workbook 3.2 (Table 4-36): Address translation fault */
+#define CSB_CC_FAULT_ADDRESS (250)
#define CSB_SIZE (0x10)
#define CSB_ALIGN CSB_SIZE
@@ -108,6 +110,17 @@ struct data_descriptor_entry {
__be64 address;
} __packed __aligned(DDE_ALIGN);
+/* 4.3.2 NX-stamped Fault CRB */
+
+#define NX_STAMP_ALIGN (0x10)
+
+struct nx_fault_stamp {
+ __be64 fault_storage_addr;
+ __be16 reserved;
+ __u8 flags;
+ __u8 fault_status;
+ __be32 pswid;
+} __packed __aligned(NX_STAMP_ALIGN);
/* Chapter 6.5.2 Coprocessor-Request Block (CRB) */
@@ -135,11 +148,15 @@ struct coprocessor_request_block {
struct coprocessor_completion_block ccb;
- u8 reserved[48];
+ union {
+ struct nx_fault_stamp nx;
+ u8 reserved[16];
+ } stamp;
- struct coprocessor_status_block csb;
-} __packed __aligned(CRB_ALIGN);
+ u8 reserved[32];
+ struct coprocessor_status_block csb;
+} __aligned(128);
/* RFC02167 Initiate Coprocessor Instructions document
* Chapter 8.2.1.1.1 RS
@@ -170,6 +187,9 @@ static inline int icswx(__be32 ccw, struct coprocessor_request_block *crb)
__be64 ccw_reg = ccw;
u32 cr;
+ /* NB: the same structures are used by VAS-NX */
+ BUILD_BUG_ON(sizeof(*crb) != 128);
+
__asm__ __volatile__(
PPC_ICSWX(%1,0,%2) "\n"
"mfcr %0\n"
diff --git a/arch/powerpc/include/asm/idle.h b/arch/powerpc/include/asm/idle.h
new file mode 100644
index 000000000000..accd1f50085a
--- /dev/null
+++ b/arch/powerpc/include/asm/idle.h
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _ASM_POWERPC_IDLE_H
+#define _ASM_POWERPC_IDLE_H
+#include <asm/runlatch.h>
+#include <asm/paca.h>
+
+#ifdef CONFIG_PPC_PSERIES
+DECLARE_PER_CPU(u64, idle_spurr_cycles);
+DECLARE_PER_CPU(u64, idle_entry_purr_snap);
+DECLARE_PER_CPU(u64, idle_entry_spurr_snap);
+
+static inline void snapshot_purr_idle_entry(void)
+{
+ *this_cpu_ptr(&idle_entry_purr_snap) = mfspr(SPRN_PURR);
+}
+
+static inline void snapshot_spurr_idle_entry(void)
+{
+ *this_cpu_ptr(&idle_entry_spurr_snap) = mfspr(SPRN_SPURR);
+}
+
+static inline void update_idle_purr_accounting(void)
+{
+ u64 wait_cycles;
+ u64 in_purr = *this_cpu_ptr(&idle_entry_purr_snap);
+
+ wait_cycles = be64_to_cpu(get_lppaca()->wait_state_cycles);
+ wait_cycles += mfspr(SPRN_PURR) - in_purr;
+ get_lppaca()->wait_state_cycles = cpu_to_be64(wait_cycles);
+}
+
+static inline void update_idle_spurr_accounting(void)
+{
+ u64 *idle_spurr_cycles_ptr = this_cpu_ptr(&idle_spurr_cycles);
+ u64 in_spurr = *this_cpu_ptr(&idle_entry_spurr_snap);
+
+ *idle_spurr_cycles_ptr += mfspr(SPRN_SPURR) - in_spurr;
+}
+
+static inline void pseries_idle_prolog(void)
+{
+ ppc64_runlatch_off();
+ snapshot_purr_idle_entry();
+ snapshot_spurr_idle_entry();
+ /*
+ * Indicate to the HV that we are idle. Now would be
+ * a good time to find other work to dispatch.
+ */
+ get_lppaca()->idle = 1;
+}
+
+static inline void pseries_idle_epilog(void)
+{
+ update_idle_purr_accounting();
+ update_idle_spurr_accounting();
+ get_lppaca()->idle = 0;
+ ppc64_runlatch_on();
+}
+
+static inline u64 read_this_idle_purr(void)
+{
+ /*
+ * If we are reading from an idle context, update the
+ * idle-purr cycles corresponding to the last idle period.
+ * Since the idle context is not yet over, take a fresh
+ * snapshot of the idle-purr.
+ */
+ if (unlikely(get_lppaca()->idle == 1)) {
+ update_idle_purr_accounting();
+ snapshot_purr_idle_entry();
+ }
+
+ return be64_to_cpu(get_lppaca()->wait_state_cycles);
+}
+
+static inline u64 read_this_idle_spurr(void)
+{
+ /*
+ * If we are reading from an idle context, update the
+ * idle-spurr cycles corresponding to the last idle period.
+ * Since the idle context is not yet over, take a fresh
+ * snapshot of the idle-spurr.
+ */
+ if (get_lppaca()->idle == 1) {
+ update_idle_spurr_accounting();
+ snapshot_spurr_idle_entry();
+ }
+
+ return *this_cpu_ptr(&idle_spurr_cycles);
+}
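+
+/*
+ * Illustrative usage sketch (hypothetical idle loop): the prolog/epilog
+ * pair brackets the low-power wait so the PURR/SPURR accounting above
+ * stays consistent.
+ *
+ *	pseries_idle_prolog();
+ *	... cede to the hypervisor / wait for wakeup ...
+ *	pseries_idle_epilog();
+ */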
+
+#endif /* CONFIG_PPC_PSERIES */
+#endif
diff --git a/arch/powerpc/include/asm/ima.h b/arch/powerpc/include/asm/ima.h
deleted file mode 100644
index ead488cf3981..000000000000
--- a/arch/powerpc/include/asm/ima.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_POWERPC_IMA_H
-#define _ASM_POWERPC_IMA_H
-
-struct kimage;
-
-int ima_get_kexec_buffer(void **addr, size_t *size);
-int ima_free_kexec_buffer(void);
-
-#ifdef CONFIG_IMA
-void remove_ima_buffer(void *fdt, int chosen_node);
-#else
-static inline void remove_ima_buffer(void *fdt, int chosen_node) {}
-#endif
-
-#ifdef CONFIG_IMA_KEXEC
-int arch_ima_add_kexec_buffer(struct kimage *image, unsigned long load_addr,
- size_t size);
-
-int setup_ima_buffer(const struct kimage *image, void *fdt, int chosen_node);
-#else
-static inline int setup_ima_buffer(const struct kimage *image, void *fdt,
- int chosen_node)
-{
- remove_ima_buffer(fdt, chosen_node);
- return 0;
-}
-#endif /* CONFIG_IMA_KEXEC */
-
-#endif /* _ASM_POWERPC_IMA_H */
diff --git a/arch/powerpc/include/asm/imc-pmu.h b/arch/powerpc/include/asm/imc-pmu.h
index 4da4fcba0684..4f897993b710 100644
--- a/arch/powerpc/include/asm/imc-pmu.h
+++ b/arch/powerpc/include/asm/imc-pmu.h
@@ -99,6 +99,11 @@ struct trace_imc_data {
*/
#define IMC_TRACE_RECORD_TB1_MASK 0x3ffffffffffULL
+/*
+ * Bit 0:1 in third DW of IMC trace record
+ * specifies the MSR[HV PR] values.
+ */
+#define IMC_TRACE_RECORD_VAL_HVPR(x) ((x) >> 62)
/*
* Device tree parser code detects IMC pmu support and
diff --git a/arch/powerpc/include/asm/inst.h b/arch/powerpc/include/asm/inst.h
new file mode 100644
index 000000000000..684d3f453282
--- /dev/null
+++ b/arch/powerpc/include/asm/inst.h
@@ -0,0 +1,170 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _ASM_POWERPC_INST_H
+#define _ASM_POWERPC_INST_H
+
+#include <asm/ppc-opcode.h>
+#include <asm/reg.h>
+#include <asm/disassemble.h>
+#include <asm/uaccess.h>
+
+#define ___get_user_instr(gu_op, dest, ptr) \
+({ \
+ long __gui_ret; \
+ u32 __user *__gui_ptr = (u32 __user *)ptr; \
+ ppc_inst_t __gui_inst; \
+ unsigned int __prefix, __suffix; \
+ \
+ __chk_user_ptr(ptr); \
+ __gui_ret = gu_op(__prefix, __gui_ptr); \
+ if (__gui_ret == 0) { \
+ if (IS_ENABLED(CONFIG_PPC64) && (__prefix >> 26) == OP_PREFIX) { \
+ __gui_ret = gu_op(__suffix, __gui_ptr + 1); \
+ __gui_inst = ppc_inst_prefix(__prefix, __suffix); \
+ } else { \
+ __gui_inst = ppc_inst(__prefix); \
+ } \
+ if (__gui_ret == 0) \
+ (dest) = __gui_inst; \
+ } \
+ __gui_ret; \
+})
+
+#define get_user_instr(x, ptr) ___get_user_instr(get_user, x, ptr)
+
+#define __get_user_instr(x, ptr) ___get_user_instr(__get_user, x, ptr)
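+
+/*
+ * Illustrative usage sketch (hypothetical caller): fetch the faulting
+ * user instruction, which may be prefixed on PPC64.
+ *
+ *	ppc_inst_t insn;
+ *
+ *	if (get_user_instr(insn, (u32 __user *)regs->nip))
+ *		return -EFAULT;
+ */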
+
+/*
+ * Instruction data type for POWER
+ */
+
+#if defined(CONFIG_PPC64) || defined(__CHECKER__)
+static inline u32 ppc_inst_val(ppc_inst_t x)
+{
+ return x.val;
+}
+
+#define ppc_inst(x) ((ppc_inst_t){ .val = (x) })
+
+#else
+static inline u32 ppc_inst_val(ppc_inst_t x)
+{
+ return x;
+}
+#define ppc_inst(x) (x)
+#endif
+
+static inline int ppc_inst_primary_opcode(ppc_inst_t x)
+{
+ return ppc_inst_val(x) >> 26;
+}
+
+#ifdef CONFIG_PPC64
+#define ppc_inst_prefix(x, y) ((ppc_inst_t){ .val = (x), .suffix = (y) })
+
+static inline u32 ppc_inst_suffix(ppc_inst_t x)
+{
+ return x.suffix;
+}
+
+#else
+#define ppc_inst_prefix(x, y) ((void)y, ppc_inst(x))
+
+static inline u32 ppc_inst_suffix(ppc_inst_t x)
+{
+ return 0;
+}
+
+#endif /* CONFIG_PPC64 */
+
+static inline ppc_inst_t ppc_inst_read(const u32 *ptr)
+{
+ if (IS_ENABLED(CONFIG_PPC64) && (*ptr >> 26) == OP_PREFIX)
+ return ppc_inst_prefix(*ptr, *(ptr + 1));
+ else
+ return ppc_inst(*ptr);
+}
+
+static inline bool ppc_inst_prefixed(ppc_inst_t x)
+{
+ return IS_ENABLED(CONFIG_PPC64) && ppc_inst_primary_opcode(x) == OP_PREFIX;
+}
+
+static inline ppc_inst_t ppc_inst_swab(ppc_inst_t x)
+{
+ return ppc_inst_prefix(swab32(ppc_inst_val(x)), swab32(ppc_inst_suffix(x)));
+}
+
+static inline bool ppc_inst_equal(ppc_inst_t x, ppc_inst_t y)
+{
+ if (ppc_inst_val(x) != ppc_inst_val(y))
+ return false;
+ if (!ppc_inst_prefixed(x))
+ return true;
+ return ppc_inst_suffix(x) == ppc_inst_suffix(y);
+}
+
+static inline int ppc_inst_len(ppc_inst_t x)
+{
+ return ppc_inst_prefixed(x) ? 8 : 4;
+}
+
+/*
+ * Return the address of the next instruction, if the instruction @value was
+ * located at @location.
+ */
+static inline u32 *ppc_inst_next(u32 *location, u32 *value)
+{
+ ppc_inst_t tmp;
+
+ tmp = ppc_inst_read(value);
+
+ return (void *)location + ppc_inst_len(tmp);
+}
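+
+/*
+ * Illustrative usage sketch (hypothetical loop over [start, end)): step
+ * through instructions one at a time, honouring prefixed instruction
+ * lengths.
+ *
+ *	u32 *p;
+ *
+ *	for (p = start; p < end; p = ppc_inst_next(p, p))
+ *		...
+ */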
+
+static inline unsigned long ppc_inst_as_ulong(ppc_inst_t x)
+{
+ if (IS_ENABLED(CONFIG_PPC32))
+ return ppc_inst_val(x);
+ else if (IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN))
+ return (u64)ppc_inst_suffix(x) << 32 | ppc_inst_val(x);
+ else
+ return (u64)ppc_inst_val(x) << 32 | ppc_inst_suffix(x);
+}
+
+static inline void ppc_inst_write(u32 *ptr, ppc_inst_t x)
+{
+ if (!ppc_inst_prefixed(x))
+ *ptr = ppc_inst_val(x);
+ else
+ *(u64 *)ptr = ppc_inst_as_ulong(x);
+}
+
+static inline int __copy_inst_from_kernel_nofault(ppc_inst_t *inst, u32 *src)
+{
+ unsigned int val, suffix;
+
+/* See https://github.com/ClangBuiltLinux/linux/issues/1521 */
+#if defined(CONFIG_CC_IS_CLANG) && CONFIG_CLANG_VERSION < 140000
+ val = suffix = 0;
+#endif
+ __get_kernel_nofault(&val, src, u32, Efault);
+ if (IS_ENABLED(CONFIG_PPC64) && get_op(val) == OP_PREFIX) {
+ __get_kernel_nofault(&suffix, src + 1, u32, Efault);
+ *inst = ppc_inst_prefix(val, suffix);
+ } else {
+ *inst = ppc_inst(val);
+ }
+ return 0;
+Efault:
+ return -EFAULT;
+}
+
+static inline int copy_inst_from_kernel_nofault(ppc_inst_t *inst, u32 *src)
+{
+ if (unlikely(!is_kernel_addr((unsigned long)src)))
+ return -ERANGE;
+
+ return __copy_inst_from_kernel_nofault(inst, src);
+}
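+
+/*
+ * Illustrative usage sketch (hypothetical caller): read an instruction
+ * from kernel text without risking a fault being taken.
+ *
+ *	ppc_inst_t insn;
+ *
+ *	if (copy_inst_from_kernel_nofault(&insn, (u32 *)addr))
+ *		return;
+ */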
+
+#endif /* _ASM_POWERPC_INST_H */
diff --git a/arch/powerpc/include/asm/interrupt.h b/arch/powerpc/include/asm/interrupt.h
new file mode 100644
index 000000000000..4745bb9998bd
--- /dev/null
+++ b/arch/powerpc/include/asm/interrupt.h
@@ -0,0 +1,688 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _ASM_POWERPC_INTERRUPT_H
+#define _ASM_POWERPC_INTERRUPT_H
+
+/* BookE/4xx */
+#define INTERRUPT_CRITICAL_INPUT 0x100
+
+/* BookE */
+#define INTERRUPT_DEBUG 0xd00
+#ifdef CONFIG_BOOKE
+#define INTERRUPT_PERFMON 0x260
+#define INTERRUPT_DOORBELL 0x280
+#endif
+
+/* BookS/4xx/8xx */
+#define INTERRUPT_MACHINE_CHECK 0x200
+
+/* BookS/8xx */
+#define INTERRUPT_SYSTEM_RESET 0x100
+
+/* BookS */
+#define INTERRUPT_DATA_SEGMENT 0x380
+#define INTERRUPT_INST_SEGMENT 0x480
+#define INTERRUPT_TRACE 0xd00
+#define INTERRUPT_H_DATA_STORAGE 0xe00
+#define INTERRUPT_HMI 0xe60
+#define INTERRUPT_H_FAC_UNAVAIL 0xf80
+#ifdef CONFIG_PPC_BOOK3S
+#define INTERRUPT_DOORBELL 0xa00
+#define INTERRUPT_PERFMON 0xf00
+#define INTERRUPT_ALTIVEC_UNAVAIL 0xf20
+#endif
+
+/* BookE/BookS/4xx/8xx */
+#define INTERRUPT_DATA_STORAGE 0x300
+#define INTERRUPT_INST_STORAGE 0x400
+#define INTERRUPT_EXTERNAL 0x500
+#define INTERRUPT_ALIGNMENT 0x600
+#define INTERRUPT_PROGRAM 0x700
+#define INTERRUPT_SYSCALL 0xc00
+#define INTERRUPT_TRACE 0xd00
+
+/* BookE/BookS/44x */
+#define INTERRUPT_FP_UNAVAIL 0x800
+
+/* BookE/BookS/44x/8xx */
+#define INTERRUPT_DECREMENTER 0x900
+
+#ifndef INTERRUPT_PERFMON
+#define INTERRUPT_PERFMON 0x0
+#endif
+
+/* 8xx */
+#define INTERRUPT_SOFT_EMU_8xx 0x1000
+#define INTERRUPT_INST_TLB_MISS_8xx 0x1100
+#define INTERRUPT_DATA_TLB_MISS_8xx 0x1200
+#define INTERRUPT_INST_TLB_ERROR_8xx 0x1300
+#define INTERRUPT_DATA_TLB_ERROR_8xx 0x1400
+#define INTERRUPT_DATA_BREAKPOINT_8xx 0x1c00
+#define INTERRUPT_INST_BREAKPOINT_8xx 0x1d00
+
+/* 603 */
+#define INTERRUPT_INST_TLB_MISS_603 0x1000
+#define INTERRUPT_DATA_LOAD_TLB_MISS_603 0x1100
+#define INTERRUPT_DATA_STORE_TLB_MISS_603 0x1200
+
+#ifndef __ASSEMBLY__
+
+#include <linux/context_tracking.h>
+#include <linux/hardirq.h>
+#include <asm/cputime.h>
+#include <asm/firmware.h>
+#include <asm/ftrace.h>
+#include <asm/kprobes.h>
+#include <asm/runlatch.h>
+
+#ifdef CONFIG_PPC64
+/*
+ * WARN/BUG is handled with a program interrupt so minimise checks here to
+ * avoid recursion and maximise the chance of getting the first oops handled.
+ */
+#define INT_SOFT_MASK_BUG_ON(regs, cond) \
+do { \
+ if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG) && \
+ (user_mode(regs) || (TRAP(regs) != INTERRUPT_PROGRAM))) \
+ BUG_ON(cond); \
+} while (0)
+#endif
+
+#ifdef CONFIG_PPC_BOOK3S_64
+extern char __end_soft_masked[];
+bool search_kernel_soft_mask_table(unsigned long addr);
+unsigned long search_kernel_restart_table(unsigned long addr);
+
+DECLARE_STATIC_KEY_FALSE(interrupt_exit_not_reentrant);
+
+static inline bool is_implicit_soft_masked(struct pt_regs *regs)
+{
+ if (regs->msr & MSR_PR)
+ return false;
+
+ if (regs->nip >= (unsigned long)__end_soft_masked)
+ return false;
+
+ return search_kernel_soft_mask_table(regs->nip);
+}
+
+static inline void srr_regs_clobbered(void)
+{
+ local_paca->srr_valid = 0;
+ local_paca->hsrr_valid = 0;
+}
+#else
+static inline unsigned long search_kernel_restart_table(unsigned long addr)
+{
+ return 0;
+}
+
+static inline bool is_implicit_soft_masked(struct pt_regs *regs)
+{
+ return false;
+}
+
+static inline void srr_regs_clobbered(void)
+{
+}
+#endif
+
+static inline void nap_adjust_return(struct pt_regs *regs)
+{
+#ifdef CONFIG_PPC_970_NAP
+ if (unlikely(test_thread_local_flags(_TLF_NAPPING))) {
+ /* Can avoid a test-and-clear because NMIs do not call this */
+ clear_thread_local_flags(_TLF_NAPPING);
+ regs_set_return_ip(regs, (unsigned long)power4_idle_nap_return);
+ }
+#endif
+}
+
+static inline void booke_restore_dbcr0(void)
+{
+#ifdef CONFIG_PPC_ADV_DEBUG_REGS
+ unsigned long dbcr0 = current->thread.debug.dbcr0;
+
+ if (IS_ENABLED(CONFIG_PPC32) && unlikely(dbcr0 & DBCR0_IDM)) {
+ mtspr(SPRN_DBSR, -1);
+ mtspr(SPRN_DBCR0, global_dbcr0[smp_processor_id()]);
+ }
+#endif
+}
+
+static inline void interrupt_enter_prepare(struct pt_regs *regs)
+{
+#ifdef CONFIG_PPC32
+ if (!arch_irq_disabled_regs(regs))
+ trace_hardirqs_off();
+
+ if (user_mode(regs))
+ kuap_lock();
+ else
+ kuap_save_and_lock(regs);
+
+ if (user_mode(regs))
+ account_cpu_user_entry();
+#endif
+
+#ifdef CONFIG_PPC64
+ bool trace_enable = false;
+
+ if (IS_ENABLED(CONFIG_TRACE_IRQFLAGS)) {
+ if (irq_soft_mask_set_return(IRQS_ALL_DISABLED) == IRQS_ENABLED)
+ trace_enable = true;
+ } else {
+ irq_soft_mask_set(IRQS_ALL_DISABLED);
+ }
+
+ /*
+ * If the interrupt was taken with HARD_DIS clear, then enable MSR[EE].
+ * Asynchronous interrupts get here with HARD_DIS set (see below), so
+ * this enables MSR[EE] for synchronous interrupts. IRQs remain
+ * soft-masked. The interrupt handler may later call
+ * interrupt_cond_local_irq_enable() to achieve a regular process
+ * context.
+ */
+ if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS)) {
+ INT_SOFT_MASK_BUG_ON(regs, !(regs->msr & MSR_EE));
+ __hard_irq_enable();
+ } else {
+ __hard_RI_enable();
+ }
+
+ /* Do this when RI=1 because it can cause SLB faults */
+ if (trace_enable)
+ trace_hardirqs_off();
+
+ if (user_mode(regs)) {
+ kuap_lock();
+ CT_WARN_ON(ct_state() != CONTEXT_USER);
+ user_exit_irqoff();
+
+ account_cpu_user_entry();
+ account_stolen_time();
+ } else {
+ kuap_save_and_lock(regs);
+ /*
+ * CT_WARN_ON comes here via program_check_exception,
+ * so avoid recursion.
+ */
+ if (TRAP(regs) != INTERRUPT_PROGRAM)
+ CT_WARN_ON(ct_state() != CONTEXT_KERNEL &&
+ ct_state() != CONTEXT_IDLE);
+ INT_SOFT_MASK_BUG_ON(regs, is_implicit_soft_masked(regs));
+ INT_SOFT_MASK_BUG_ON(regs, arch_irq_disabled_regs(regs) &&
+ search_kernel_restart_table(regs->nip));
+ }
+ INT_SOFT_MASK_BUG_ON(regs, !arch_irq_disabled_regs(regs) &&
+ !(regs->msr & MSR_EE));
+#endif
+
+ booke_restore_dbcr0();
+}
+
+/*
+ * Care should be taken to note that interrupt_exit_prepare and
+ * interrupt_async_exit_prepare do not necessarily return immediately to
+ * regs context (e.g., if regs is usermode, we don't necessarily return to
+ * user mode). Other interrupts might be taken between here and return,
+ * context switch / preemption may occur in the exit path after this, or a
+ * signal may be delivered, etc.
+ *
+ * The real interrupt exit code is platform specific, e.g.,
+ * interrupt_exit_user_prepare / interrupt_exit_kernel_prepare for 64s.
+ *
+ * However interrupt_nmi_exit_prepare does return directly to regs, because
+ * NMIs do not do "exit work" or replay soft-masked interrupts.
+ */
+static inline void interrupt_exit_prepare(struct pt_regs *regs)
+{
+}
+
+static inline void interrupt_async_enter_prepare(struct pt_regs *regs)
+{
+#ifdef CONFIG_PPC64
+ /* Ensure interrupt_enter_prepare does not enable MSR[EE] */
+ local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
+#endif
+ interrupt_enter_prepare(regs);
+#ifdef CONFIG_PPC_BOOK3S_64
+ /*
+ * RI=1 is set by interrupt_enter_prepare, so this thread flags access
+ * has to come afterward (it can cause SLB faults).
+ */
+ if (cpu_has_feature(CPU_FTR_CTRL) &&
+ !test_thread_local_flags(_TLF_RUNLATCH))
+ __ppc64_runlatch_on();
+#endif
+ irq_enter();
+}
+
+static inline void interrupt_async_exit_prepare(struct pt_regs *regs)
+{
+ /*
+ * Adjust at exit so the main handler sees the true NIA. This must
+ * come before irq_exit() because irq_exit can enable interrupts, and
+ * if another interrupt is taken before nap_adjust_return has run
+ * here, then that interrupt would return directly to idle nap return.
+ */
+ nap_adjust_return(regs);
+
+ irq_exit();
+ interrupt_exit_prepare(regs);
+}
+
+struct interrupt_nmi_state {
+#ifdef CONFIG_PPC64
+ u8 irq_soft_mask;
+ u8 irq_happened;
+ u8 ftrace_enabled;
+ u64 softe;
+#endif
+};
+
+static inline bool nmi_disables_ftrace(struct pt_regs *regs)
+{
+ /* Allow DEC and PMI to be traced when they are soft-NMI */
+ if (IS_ENABLED(CONFIG_PPC_BOOK3S_64)) {
+ if (TRAP(regs) == INTERRUPT_DECREMENTER)
+ return false;
+ if (TRAP(regs) == INTERRUPT_PERFMON)
+ return false;
+ }
+ if (IS_ENABLED(CONFIG_PPC_BOOK3E_64)) {
+ if (TRAP(regs) == INTERRUPT_PERFMON)
+ return false;
+ }
+
+ return true;
+}
+
+static inline void interrupt_nmi_enter_prepare(struct pt_regs *regs, struct interrupt_nmi_state *state)
+{
+#ifdef CONFIG_PPC64
+ state->irq_soft_mask = local_paca->irq_soft_mask;
+ state->irq_happened = local_paca->irq_happened;
+ state->softe = regs->softe;
+
+ /*
+ * Set IRQS_ALL_DISABLED unconditionally so irqs_disabled() does
+ * the right thing, and set IRQ_HARD_DIS. We do not want to reconcile
+ * because that goes through irq tracing which we don't want in NMI.
+ */
+ local_paca->irq_soft_mask = IRQS_ALL_DISABLED;
+ local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
+
+ if (!(regs->msr & MSR_EE) || is_implicit_soft_masked(regs)) {
+ /*
+ * Adjust regs->softe to be soft-masked if it had not been
+ * reconciled (e.g., interrupt entry with MSR[EE]=0 but softe
+ * not yet set to disabled), or if it was in an implicit
+ * soft-masked state. This makes arch_irq_disabled_regs(regs)
+ * behave as expected.
+ */
+ regs->softe = IRQS_ALL_DISABLED;
+ }
+
+ __hard_RI_enable();
+
+ /* Don't do any per-CPU operations until interrupt state is fixed */
+
+ if (nmi_disables_ftrace(regs)) {
+ state->ftrace_enabled = this_cpu_get_ftrace_enabled();
+ this_cpu_set_ftrace_enabled(0);
+ }
+#endif
+
+ /* If data relocations are enabled, it's safe to use nmi_enter() */
+ if (mfmsr() & MSR_DR) {
+ nmi_enter();
+ return;
+ }
+
+ /*
+ * But do not use nmi_enter() for a pseries hash guest taking a real-mode
+ * NMI because not everything it touches is within the RMA limit.
+ */
+ if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) &&
+ firmware_has_feature(FW_FEATURE_LPAR) &&
+ !radix_enabled())
+ return;
+
+ /*
+ * Likewise, don't use it if we have some form of instrumentation (like
+ * KASAN shadow) that is not safe to access in real mode (even on radix)
+ */
+ if (IS_ENABLED(CONFIG_KASAN))
+ return;
+
+ /* Otherwise, it should be safe to call it */
+ nmi_enter();
+}
+
+static inline void interrupt_nmi_exit_prepare(struct pt_regs *regs, struct interrupt_nmi_state *state)
+{
+ if (mfmsr() & MSR_DR) {
+ // nmi_exit if relocations are on
+ nmi_exit();
+ } else if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) &&
+ firmware_has_feature(FW_FEATURE_LPAR) &&
+ !radix_enabled()) {
+ // no nmi_exit for a pseries hash guest taking a real mode exception
+ } else if (IS_ENABLED(CONFIG_KASAN)) {
+ // no nmi_exit for KASAN in real mode
+ } else {
+ nmi_exit();
+ }
+
+ /*
+ * nmi does not call nap_adjust_return because nmi should not create
+ * new work to do (must use irq_work for that).
+ */
+
+#ifdef CONFIG_PPC64
+#ifdef CONFIG_PPC_BOOK3S
+ if (arch_irq_disabled_regs(regs)) {
+ unsigned long rst = search_kernel_restart_table(regs->nip);
+ if (rst)
+ regs_set_return_ip(regs, rst);
+ }
+#endif
+
+ if (nmi_disables_ftrace(regs))
+ this_cpu_set_ftrace_enabled(state->ftrace_enabled);
+
+ /* Check we didn't change the pending interrupt mask. */
+ WARN_ON_ONCE((state->irq_happened | PACA_IRQ_HARD_DIS) != local_paca->irq_happened);
+ regs->softe = state->softe;
+ local_paca->irq_happened = state->irq_happened;
+ local_paca->irq_soft_mask = state->irq_soft_mask;
+#endif
+}
+
+/*
+ * Don't use noinstr here like x86, but rather add NOKPROBE_SYMBOL to each
+ * function definition. The reason for this is the noinstr section is placed
+ * after the main text section, i.e., very far away from the interrupt entry
+ * asm. That creates problems with fitting linker stubs when building large
+ * kernels.
+ */
+#define interrupt_handler __visible noinline notrace __no_kcsan __no_sanitize_address
+
+/**
+ * DECLARE_INTERRUPT_HANDLER_RAW - Declare raw interrupt handler function
+ * @func: Function name of the entry point
+ * @returns: Returns a value back to asm caller
+ */
+#define DECLARE_INTERRUPT_HANDLER_RAW(func) \
+ __visible long func(struct pt_regs *regs)
+
+/**
+ * DEFINE_INTERRUPT_HANDLER_RAW - Define raw interrupt handler function
+ * @func: Function name of the entry point
+ * @returns: Returns a value back to asm caller
+ *
+ * @func is called from ASM entry code.
+ *
+ * This is a plain function which does no tracing, reconciling, etc.
+ * The macro is written so it acts as a function definition. Append the
+ * body with a pair of curly brackets.
+ *
+ * Raw interrupt handlers must not enable or disable interrupts or
+ * schedule. Tracing and instrumentation (ftrace, lockdep, etc.) are
+ * not advisable either, although they may be possible in a pinch; at
+ * the least, the trace will look odd.
+ *
+ * A raw handler may call one of the other interrupt handler functions
+ * to be converted into that interrupt context without these restrictions.
+ *
+ * On PPC64, _RAW handlers may return with fast_interrupt_return.
+ *
+ * Specific handlers may have additional restrictions.
+ */
+#define DEFINE_INTERRUPT_HANDLER_RAW(func) \
+static __always_inline __no_sanitize_address __no_kcsan long \
+____##func(struct pt_regs *regs); \
+ \
+interrupt_handler long func(struct pt_regs *regs) \
+{ \
+ long ret; \
+ \
+ __hard_RI_enable(); \
+ \
+ ret = ____##func (regs); \
+ \
+ return ret; \
+} \
+NOKPROBE_SYMBOL(func); \
+ \
+static __always_inline __no_sanitize_address __no_kcsan long \
+____##func(struct pt_regs *regs)
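+
+/*
+ * Illustrative usage sketch (hypothetical handler name): the DEFINE_*
+ * macros act as the function definition, with the body appended in a
+ * pair of curly brackets.
+ *
+ *	DEFINE_INTERRUPT_HANDLER_RAW(example_raw_interrupt)
+ *	{
+ *		return 0;
+ *	}
+ */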
+
+/**
+ * DECLARE_INTERRUPT_HANDLER - Declare synchronous interrupt handler function
+ * @func: Function name of the entry point
+ */
+#define DECLARE_INTERRUPT_HANDLER(func) \
+ __visible void func(struct pt_regs *regs)
+
+/**
+ * DEFINE_INTERRUPT_HANDLER - Define synchronous interrupt handler function
+ * @func: Function name of the entry point
+ *
+ * @func is called from ASM entry code.
+ *
+ * The macro is written so it acts as a function definition. Append the
+ * body with a pair of curly brackets.
+ */
+#define DEFINE_INTERRUPT_HANDLER(func) \
+static __always_inline void ____##func(struct pt_regs *regs); \
+ \
+interrupt_handler void func(struct pt_regs *regs) \
+{ \
+ interrupt_enter_prepare(regs); \
+ \
+ ____##func (regs); \
+ \
+ interrupt_exit_prepare(regs); \
+} \
+NOKPROBE_SYMBOL(func); \
+ \
+static __always_inline void ____##func(struct pt_regs *regs)
+
+/**
+ * DECLARE_INTERRUPT_HANDLER_RET - Declare synchronous interrupt handler function
+ * @func: Function name of the entry point
+ * @returns: Returns a value back to asm caller
+ */
+#define DECLARE_INTERRUPT_HANDLER_RET(func) \
+ __visible long func(struct pt_regs *regs)
+
+/**
+ * DEFINE_INTERRUPT_HANDLER_RET - Define synchronous interrupt handler function
+ * @func: Function name of the entry point
+ * @returns: Returns a value back to asm caller
+ *
+ * @func is called from ASM entry code.
+ *
+ * The macro is written so it acts as a function definition. Append the
+ * body with a pair of curly brackets.
+ */
+#define DEFINE_INTERRUPT_HANDLER_RET(func) \
+static __always_inline long ____##func(struct pt_regs *regs); \
+ \
+interrupt_handler long func(struct pt_regs *regs) \
+{ \
+ long ret; \
+ \
+ interrupt_enter_prepare(regs); \
+ \
+ ret = ____##func (regs); \
+ \
+ interrupt_exit_prepare(regs); \
+ \
+ return ret; \
+} \
+NOKPROBE_SYMBOL(func); \
+ \
+static __always_inline long ____##func(struct pt_regs *regs)
+
+/**
+ * DECLARE_INTERRUPT_HANDLER_ASYNC - Declare asynchronous interrupt handler function
+ * @func: Function name of the entry point
+ */
+#define DECLARE_INTERRUPT_HANDLER_ASYNC(func) \
+ __visible void func(struct pt_regs *regs)
+
+/**
+ * DEFINE_INTERRUPT_HANDLER_ASYNC - Define asynchronous interrupt handler function
+ * @func: Function name of the entry point
+ *
+ * @func is called from ASM entry code.
+ *
+ * The macro is written so it acts as a function definition. Append the
+ * body with a pair of curly brackets.
+ */
+#define DEFINE_INTERRUPT_HANDLER_ASYNC(func) \
+static __always_inline void ____##func(struct pt_regs *regs); \
+ \
+interrupt_handler void func(struct pt_regs *regs) \
+{ \
+ interrupt_async_enter_prepare(regs); \
+ \
+ ____##func (regs); \
+ \
+ interrupt_async_exit_prepare(regs); \
+} \
+NOKPROBE_SYMBOL(func); \
+ \
+static __always_inline void ____##func(struct pt_regs *regs)
+
+/**
+ * DECLARE_INTERRUPT_HANDLER_NMI - Declare NMI interrupt handler function
+ * @func: Function name of the entry point
+ * @returns: Returns a value back to asm caller
+ */
+#define DECLARE_INTERRUPT_HANDLER_NMI(func) \
+ __visible long func(struct pt_regs *regs)
+
+/**
+ * DEFINE_INTERRUPT_HANDLER_NMI - Define NMI interrupt handler function
+ * @func: Function name of the entry point
+ * @returns: Returns a value back to asm caller
+ *
+ * @func is called from ASM entry code.
+ *
+ * The macro is written so it acts as a function definition. Append the
+ * body with a pair of curly brackets.
+ */
+#define DEFINE_INTERRUPT_HANDLER_NMI(func) \
+static __always_inline __no_sanitize_address __no_kcsan long \
+____##func(struct pt_regs *regs); \
+ \
+interrupt_handler long func(struct pt_regs *regs) \
+{ \
+ struct interrupt_nmi_state state; \
+ long ret; \
+ \
+ interrupt_nmi_enter_prepare(regs, &state); \
+ \
+ ret = ____##func (regs); \
+ \
+ interrupt_nmi_exit_prepare(regs, &state); \
+ \
+ return ret; \
+} \
+NOKPROBE_SYMBOL(func); \
+ \
+static __always_inline __no_sanitize_address __no_kcsan long \
+____##func(struct pt_regs *regs)
+
+
+/* Interrupt handlers */
+/* kernel/traps.c */
+DECLARE_INTERRUPT_HANDLER_NMI(system_reset_exception);
+#ifdef CONFIG_PPC_BOOK3S_64
+DECLARE_INTERRUPT_HANDLER_ASYNC(machine_check_exception_async);
+#endif
+DECLARE_INTERRUPT_HANDLER_NMI(machine_check_exception);
+DECLARE_INTERRUPT_HANDLER(SMIException);
+DECLARE_INTERRUPT_HANDLER(handle_hmi_exception);
+DECLARE_INTERRUPT_HANDLER(unknown_exception);
+DECLARE_INTERRUPT_HANDLER_ASYNC(unknown_async_exception);
+DECLARE_INTERRUPT_HANDLER_NMI(unknown_nmi_exception);
+DECLARE_INTERRUPT_HANDLER(instruction_breakpoint_exception);
+DECLARE_INTERRUPT_HANDLER(RunModeException);
+DECLARE_INTERRUPT_HANDLER(single_step_exception);
+DECLARE_INTERRUPT_HANDLER(program_check_exception);
+DECLARE_INTERRUPT_HANDLER(emulation_assist_interrupt);
+DECLARE_INTERRUPT_HANDLER(alignment_exception);
+DECLARE_INTERRUPT_HANDLER(StackOverflow);
+DECLARE_INTERRUPT_HANDLER(stack_overflow_exception);
+DECLARE_INTERRUPT_HANDLER(kernel_fp_unavailable_exception);
+DECLARE_INTERRUPT_HANDLER(altivec_unavailable_exception);
+DECLARE_INTERRUPT_HANDLER(vsx_unavailable_exception);
+DECLARE_INTERRUPT_HANDLER(facility_unavailable_exception);
+DECLARE_INTERRUPT_HANDLER(fp_unavailable_tm);
+DECLARE_INTERRUPT_HANDLER(altivec_unavailable_tm);
+DECLARE_INTERRUPT_HANDLER(vsx_unavailable_tm);
+DECLARE_INTERRUPT_HANDLER_NMI(performance_monitor_exception_nmi);
+DECLARE_INTERRUPT_HANDLER_ASYNC(performance_monitor_exception_async);
+DECLARE_INTERRUPT_HANDLER_RAW(performance_monitor_exception);
+DECLARE_INTERRUPT_HANDLER(DebugException);
+DECLARE_INTERRUPT_HANDLER(altivec_assist_exception);
+DECLARE_INTERRUPT_HANDLER(CacheLockingException);
+DECLARE_INTERRUPT_HANDLER(SPEFloatingPointException);
+DECLARE_INTERRUPT_HANDLER(SPEFloatingPointRoundException);
+DECLARE_INTERRUPT_HANDLER_NMI(WatchdogException);
+DECLARE_INTERRUPT_HANDLER(kernel_bad_stack);
+
+/* slb.c */
+DECLARE_INTERRUPT_HANDLER_RAW(do_slb_fault);
+DECLARE_INTERRUPT_HANDLER(do_bad_segment_interrupt);
+
+/* hash_utils.c */
+DECLARE_INTERRUPT_HANDLER(do_hash_fault);
+
+/* fault.c */
+DECLARE_INTERRUPT_HANDLER(do_page_fault);
+DECLARE_INTERRUPT_HANDLER(do_bad_page_fault_segv);
+
+/* process.c */
+DECLARE_INTERRUPT_HANDLER(do_break);
+
+/* time.c */
+DECLARE_INTERRUPT_HANDLER_ASYNC(timer_interrupt);
+
+/* mce.c */
+DECLARE_INTERRUPT_HANDLER_NMI(machine_check_early);
+DECLARE_INTERRUPT_HANDLER_NMI(hmi_exception_realmode);
+
+DECLARE_INTERRUPT_HANDLER_ASYNC(TAUException);
+
+/* irq.c */
+DECLARE_INTERRUPT_HANDLER_ASYNC(do_IRQ);
+
+void __noreturn unrecoverable_exception(struct pt_regs *regs);
+
+void replay_system_reset(void);
+void replay_soft_interrupts(void);
+
+static inline void interrupt_cond_local_irq_enable(struct pt_regs *regs)
+{
+ if (!arch_irq_disabled_regs(regs))
+ local_irq_enable();
+}
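+
+/*
+ * Illustrative usage sketch (hypothetical synchronous handler body): a
+ * handler that may sleep re-enables interrupts only when the interrupted
+ * context had them enabled.
+ *
+ *	interrupt_cond_local_irq_enable(regs);
+ *	... handle the fault, possibly sleeping ...
+ */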
+
+long system_call_exception(struct pt_regs *regs, unsigned long r0);
+notrace unsigned long syscall_exit_prepare(unsigned long r3, struct pt_regs *regs, long scv);
+notrace unsigned long interrupt_exit_user_prepare(struct pt_regs *regs);
+notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs);
+#ifdef CONFIG_PPC64
+unsigned long syscall_exit_restart(unsigned long r3, struct pt_regs *regs);
+unsigned long interrupt_exit_user_restart(struct pt_regs *regs);
+unsigned long interrupt_exit_kernel_restart(struct pt_regs *regs);
+#endif
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_POWERPC_INTERRUPT_H */
diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h
index 635969b5b58e..fc112a91d0c2 100644
--- a/arch/powerpc/include/asm/io.h
+++ b/arch/powerpc/include/asm/io.h
@@ -33,14 +33,10 @@ extern struct pci_dev *isa_bridge_pcidev;
#include <asm/delay.h>
#include <asm/mmiowb.h>
#include <asm/mmu.h>
-#include <asm/ppc_asm.h>
-#include <asm/pgtable.h>
#define SIO_CONFIG_RA 0x398
#define SIO_CONFIG_RD 0x399
-#define SLOW_DOWN_IO
-
/* 32-bit uses slightly different variables for the various IO
* bases. Most of this file only uses _IO_BASE though, which we
* define properly based on the platform.
@@ -123,7 +119,7 @@ static inline u##size name(const volatile u##size __iomem *addr) \
{ \
u##size ret; \
__asm__ __volatile__("sync;"#insn"%U1%X1 %0,%1;twi 0,%0,0;isync"\
- : "=r" (ret) : "m" (*addr) : "memory"); \
+ : "=r" (ret) : "m<>" (*addr) : "memory"); \
return ret; \
}
@@ -131,7 +127,7 @@ static inline u##size name(const volatile u##size __iomem *addr) \
static inline void name(volatile u##size __iomem *addr, u##size val) \
{ \
__asm__ __volatile__("sync;"#insn"%U0%X0 %1,%0" \
- : "=m" (*addr) : "r" (val) : "memory"); \
+ : "=m<>" (*addr) : "r" (val) : "memory"); \
mmiowb_set_pending(); \
}
@@ -303,41 +299,56 @@ static inline unsigned char __raw_readb(const volatile void __iomem *addr)
{
return *(volatile unsigned char __force *)PCI_FIX_ADDR(addr);
}
+#define __raw_readb __raw_readb
+
static inline unsigned short __raw_readw(const volatile void __iomem *addr)
{
return *(volatile unsigned short __force *)PCI_FIX_ADDR(addr);
}
+#define __raw_readw __raw_readw
+
static inline unsigned int __raw_readl(const volatile void __iomem *addr)
{
return *(volatile unsigned int __force *)PCI_FIX_ADDR(addr);
}
+#define __raw_readl __raw_readl
+
static inline void __raw_writeb(unsigned char v, volatile void __iomem *addr)
{
*(volatile unsigned char __force *)PCI_FIX_ADDR(addr) = v;
}
+#define __raw_writeb __raw_writeb
+
static inline void __raw_writew(unsigned short v, volatile void __iomem *addr)
{
*(volatile unsigned short __force *)PCI_FIX_ADDR(addr) = v;
}
+#define __raw_writew __raw_writew
+
static inline void __raw_writel(unsigned int v, volatile void __iomem *addr)
{
*(volatile unsigned int __force *)PCI_FIX_ADDR(addr) = v;
}
+#define __raw_writel __raw_writel
#ifdef __powerpc64__
static inline unsigned long __raw_readq(const volatile void __iomem *addr)
{
return *(volatile unsigned long __force *)PCI_FIX_ADDR(addr);
}
+#define __raw_readq __raw_readq
+
static inline void __raw_writeq(unsigned long v, volatile void __iomem *addr)
{
*(volatile unsigned long __force *)PCI_FIX_ADDR(addr) = v;
}
+#define __raw_writeq __raw_writeq
static inline void __raw_writeq_be(unsigned long v, volatile void __iomem *addr)
{
__raw_writeq((__force unsigned long)cpu_to_be64(v), addr);
}
+#define __raw_writeq_be __raw_writeq_be
/*
* Real mode versions of the above. Those instructions are only supposed
@@ -345,25 +356,37 @@ static inline void __raw_writeq_be(unsigned long v, volatile void __iomem *addr)
*/
static inline void __raw_rm_writeb(u8 val, volatile void __iomem *paddr)
{
- __asm__ __volatile__("stbcix %0,0,%1"
+ __asm__ __volatile__(".machine push; \
+ .machine power6; \
+ stbcix %0,0,%1; \
+ .machine pop;"
: : "r" (val), "r" (paddr) : "memory");
}
static inline void __raw_rm_writew(u16 val, volatile void __iomem *paddr)
{
- __asm__ __volatile__("sthcix %0,0,%1"
+ __asm__ __volatile__(".machine push; \
+ .machine power6; \
+ sthcix %0,0,%1; \
+ .machine pop;"
: : "r" (val), "r" (paddr) : "memory");
}
static inline void __raw_rm_writel(u32 val, volatile void __iomem *paddr)
{
- __asm__ __volatile__("stwcix %0,0,%1"
+ __asm__ __volatile__(".machine push; \
+ .machine power6; \
+ stwcix %0,0,%1; \
+ .machine pop;"
: : "r" (val), "r" (paddr) : "memory");
}
static inline void __raw_rm_writeq(u64 val, volatile void __iomem *paddr)
{
- __asm__ __volatile__("stdcix %0,0,%1"
+ __asm__ __volatile__(".machine push; \
+ .machine power6; \
+ stdcix %0,0,%1; \
+ .machine pop;"
: : "r" (val), "r" (paddr) : "memory");
}
@@ -375,7 +398,10 @@ static inline void __raw_rm_writeq_be(u64 val, volatile void __iomem *paddr)
static inline u8 __raw_rm_readb(volatile void __iomem *paddr)
{
u8 ret;
- __asm__ __volatile__("lbzcix %0,0, %1"
+ __asm__ __volatile__(".machine push; \
+ .machine power6; \
+ lbzcix %0,0, %1; \
+ .machine pop;"
: "=r" (ret) : "r" (paddr) : "memory");
return ret;
}
@@ -383,7 +409,10 @@ static inline u8 __raw_rm_readb(volatile void __iomem *paddr)
static inline u16 __raw_rm_readw(volatile void __iomem *paddr)
{
u16 ret;
- __asm__ __volatile__("lhzcix %0,0, %1"
+ __asm__ __volatile__(".machine push; \
+ .machine power6; \
+ lhzcix %0,0, %1; \
+ .machine pop;"
: "=r" (ret) : "r" (paddr) : "memory");
return ret;
}
@@ -391,7 +420,10 @@ static inline u16 __raw_rm_readw(volatile void __iomem *paddr)
static inline u32 __raw_rm_readl(volatile void __iomem *paddr)
{
u32 ret;
- __asm__ __volatile__("lwzcix %0,0, %1"
+ __asm__ __volatile__(".machine push; \
+ .machine power6; \
+ lwzcix %0,0, %1; \
+ .machine pop;"
: "=r" (ret) : "r" (paddr) : "memory");
return ret;
}
@@ -399,7 +431,10 @@ static inline u32 __raw_rm_readl(volatile void __iomem *paddr)
static inline u64 __raw_rm_readq(volatile void __iomem *paddr)
{
u64 ret;
- __asm__ __volatile__("ldcix %0,0, %1"
+ __asm__ __volatile__(".machine push; \
+ .machine power6; \
+ ldcix %0,0, %1; \
+ .machine pop;"
: "=r" (ret) : "r" (paddr) : "memory");
return ret;
}
@@ -610,10 +645,37 @@ static inline void name at \
/* Some drivers check for the presence of readq & writeq with
* a #ifdef, so we make them happy here.
*/
+#define readb readb
+#define readw readw
+#define readl readl
+#define writeb writeb
+#define writew writew
+#define writel writel
+#define readsb readsb
+#define readsw readsw
+#define readsl readsl
+#define writesb writesb
+#define writesw writesw
+#define writesl writesl
+#define inb inb
+#define inw inw
+#define inl inl
+#define outb outb
+#define outw outw
+#define outl outl
+#define insb insb
+#define insw insw
+#define insl insl
+#define outsb outsb
+#define outsw outsw
+#define outsl outsl
#ifdef __powerpc64__
#define readq readq
#define writeq writeq
#endif
+#define memset_io memset_io
+#define memcpy_fromio memcpy_fromio
+#define memcpy_toio memcpy_toio
/*
* Convert a physical pointer to a virtual kernel pointer for /dev/mem
@@ -622,11 +684,6 @@ static inline void name at \
#define xlate_dev_mem_ptr(p) __va(p)
/*
- * Convert a virtual cached pointer to an uncached pointer
- */
-#define xlate_dev_kmem_ptr(p) p
-
-/*
* We don't do relaxed operations yet, at least not with this semantic
*/
#define readb_relaxed(addr) readb(addr)
@@ -638,7 +695,106 @@ static inline void name at \
#define writel_relaxed(v, addr) writel(v, addr)
#define writeq_relaxed(v, addr) writeq(v, addr)
+#ifdef CONFIG_GENERIC_IOMAP
#include <asm-generic/iomap.h>
+#else
+/*
+ * Here comes the implementation of the IOMAP interfaces.
+ */
+static inline unsigned int ioread16be(const void __iomem *addr)
+{
+ return readw_be(addr);
+}
+#define ioread16be ioread16be
+
+static inline unsigned int ioread32be(const void __iomem *addr)
+{
+ return readl_be(addr);
+}
+#define ioread32be ioread32be
+
+#ifdef __powerpc64__
+static inline u64 ioread64_lo_hi(const void __iomem *addr)
+{
+ return readq(addr);
+}
+#define ioread64_lo_hi ioread64_lo_hi
+
+static inline u64 ioread64_hi_lo(const void __iomem *addr)
+{
+ return readq(addr);
+}
+#define ioread64_hi_lo ioread64_hi_lo
+
+static inline u64 ioread64be(const void __iomem *addr)
+{
+ return readq_be(addr);
+}
+#define ioread64be ioread64be
+
+static inline u64 ioread64be_lo_hi(const void __iomem *addr)
+{
+ return readq_be(addr);
+}
+#define ioread64be_lo_hi ioread64be_lo_hi
+
+static inline u64 ioread64be_hi_lo(const void __iomem *addr)
+{
+ return readq_be(addr);
+}
+#define ioread64be_hi_lo ioread64be_hi_lo
+#endif /* __powerpc64__ */
+
+static inline void iowrite16be(u16 val, void __iomem *addr)
+{
+ writew_be(val, addr);
+}
+#define iowrite16be iowrite16be
+
+static inline void iowrite32be(u32 val, void __iomem *addr)
+{
+ writel_be(val, addr);
+}
+#define iowrite32be iowrite32be
+
+#ifdef __powerpc64__
+static inline void iowrite64_lo_hi(u64 val, void __iomem *addr)
+{
+ writeq(val, addr);
+}
+#define iowrite64_lo_hi iowrite64_lo_hi
+
+static inline void iowrite64_hi_lo(u64 val, void __iomem *addr)
+{
+ writeq(val, addr);
+}
+#define iowrite64_hi_lo iowrite64_hi_lo
+
+static inline void iowrite64be(u64 val, void __iomem *addr)
+{
+ writeq_be(val, addr);
+}
+#define iowrite64be iowrite64be
+
+static inline void iowrite64be_lo_hi(u64 val, void __iomem *addr)
+{
+ writeq_be(val, addr);
+}
+#define iowrite64be_lo_hi iowrite64be_lo_hi
+
+static inline void iowrite64be_hi_lo(u64 val, void __iomem *addr)
+{
+ writeq_be(val, addr);
+}
+#define iowrite64be_hi_lo iowrite64be_hi_lo
+#endif /* __powerpc64__ */
+
+struct pci_dev;
+void pci_iounmap(struct pci_dev *dev, void __iomem *addr);
+#define pci_iounmap pci_iounmap
+void __iomem *ioport_map(unsigned long port, unsigned int len);
+#define ioport_map ioport_map
+#endif
static inline void iosync(void)
{
@@ -671,7 +827,6 @@ static inline void iosync(void)
#define IO_SPACE_LIMIT ~(0UL)
-
/**
* ioremap - map bus memory into CPU space
* @address: bus address of the memory
@@ -699,10 +854,6 @@ static inline void iosync(void)
*
* * iounmap undoes such a mapping and can be hooked
*
- * * __ioremap_at (and the pending __iounmap_at) are low level functions to
- * create hand-made mappings for use only by the PCI code and cannot
- * currently be hooked. Must be page aligned.
- *
* * __ioremap_caller is the same as above but takes an explicit caller
* reference rather than using __builtin_return_address(0)
*
@@ -711,7 +862,13 @@ extern void __iomem *ioremap(phys_addr_t address, unsigned long size);
extern void __iomem *ioremap_prot(phys_addr_t address, unsigned long size,
unsigned long flags);
extern void __iomem *ioremap_wc(phys_addr_t address, unsigned long size);
+#define ioremap_wc ioremap_wc
+
+#ifdef CONFIG_PPC32
void __iomem *ioremap_wt(phys_addr_t address, unsigned long size);
+#define ioremap_wt ioremap_wt
+#endif
+
void __iomem *ioremap_coherent(phys_addr_t address, unsigned long size);
#define ioremap_uc(addr, size) ioremap((addr), (size))
#define ioremap_cache(addr, size) \
@@ -719,6 +876,8 @@ void __iomem *ioremap_coherent(phys_addr_t address, unsigned long size);
extern void iounmap(volatile void __iomem *addr);
+void __iomem *ioremap_phb(phys_addr_t paddr, unsigned long size);
+
int early_ioremap_range(unsigned long ea, phys_addr_t pa,
unsigned long size, pgprot_t prot);
void __iomem *do_ioremap(phys_addr_t pa, phys_addr_t offset, unsigned long size,
@@ -727,10 +886,6 @@ void __iomem *do_ioremap(phys_addr_t pa, phys_addr_t offset, unsigned long size,
extern void __iomem *__ioremap_caller(phys_addr_t, unsigned long size,
pgprot_t prot, void *caller);
-extern void __iomem * __ioremap_at(phys_addr_t pa, void *ea,
- unsigned long size, pgprot_t prot);
-extern void __iounmap_at(void *ea, unsigned long size);
-
/*
* When CONFIG_PPC_INDIRECT_PIO is set, we use the generic iomap implementation
* which needs some additional definitions here. They basically allow PIO
@@ -773,6 +928,7 @@ static inline unsigned long virt_to_phys(volatile void * address)
return __pa((unsigned long)address);
}
+#define virt_to_phys virt_to_phys
/**
* phys_to_virt - map physical address to virtual
@@ -790,6 +946,7 @@ static inline void * phys_to_virt(unsigned long address)
{
return (void *)__va(address);
}
+#define phys_to_virt phys_to_virt
/*
* Change "struct page" to physical address.
@@ -817,6 +974,7 @@ static inline unsigned long virt_to_bus(volatile void * address)
return 0;
return __pa(address) + PCI_DRAM_OFFSET;
}
+#define virt_to_bus virt_to_bus
static inline void * bus_to_virt(unsigned long address)
{
@@ -824,8 +982,7 @@ static inline void * bus_to_virt(unsigned long address)
return NULL;
return __va(address - PCI_DRAM_OFFSET);
}
-
-#define page_to_bus(page) (page_to_phys(page) + PCI_DRAM_OFFSET)
+#define bus_to_virt bus_to_virt
#endif /* CONFIG_PPC32 */
@@ -862,6 +1019,8 @@ static inline void * bus_to_virt(unsigned long address)
#define clrsetbits_8(addr, clear, set) clrsetbits(8, addr, clear, set)
+#include <asm-generic/io.h>
+
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_IO_H */
diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h
index 350101e11ddb..7e29c73e3dd4 100644
--- a/arch/powerpc/include/asm/iommu.h
+++ b/arch/powerpc/include/asm/iommu.h
@@ -12,7 +12,7 @@
#include <linux/compiler.h>
#include <linux/spinlock.h>
#include <linux/device.h>
-#include <linux/dma-mapping.h>
+#include <linux/dma-map-ops.h>
#include <linux/bitops.h>
#include <asm/machdep.h>
#include <asm/types.h>
@@ -22,11 +22,11 @@
#define IOMMU_PAGE_SHIFT_4K 12
#define IOMMU_PAGE_SIZE_4K (ASM_CONST(1) << IOMMU_PAGE_SHIFT_4K)
#define IOMMU_PAGE_MASK_4K (~((1 << IOMMU_PAGE_SHIFT_4K) - 1))
-#define IOMMU_PAGE_ALIGN_4K(addr) _ALIGN_UP(addr, IOMMU_PAGE_SIZE_4K)
+#define IOMMU_PAGE_ALIGN_4K(addr) ALIGN(addr, IOMMU_PAGE_SIZE_4K)
#define IOMMU_PAGE_SIZE(tblptr) (ASM_CONST(1) << (tblptr)->it_page_shift)
#define IOMMU_PAGE_MASK(tblptr) (~((1 << (tblptr)->it_page_shift) - 1))
-#define IOMMU_PAGE_ALIGN(addr, tblptr) _ALIGN_UP(addr, IOMMU_PAGE_SIZE(tblptr))
+#define IOMMU_PAGE_ALIGN(addr, tblptr) ALIGN(addr, IOMMU_PAGE_SIZE(tblptr))
/* Boot time flags */
extern int iommu_is_off;
@@ -51,13 +51,11 @@ struct iommu_table_ops {
int (*xchg_no_kill)(struct iommu_table *tbl,
long index,
unsigned long *hpa,
- enum dma_data_direction *direction,
- bool realmode);
+ enum dma_data_direction *direction);
void (*tce_kill)(struct iommu_table *tbl,
unsigned long index,
- unsigned long pages,
- bool realmode);
+ unsigned long pages);
__be64 *(*useraddrptr)(struct iommu_table *tbl, long index, bool alloc);
#endif
@@ -154,6 +152,7 @@ extern int iommu_tce_table_put(struct iommu_table *tbl);
*/
extern struct iommu_table *iommu_init_table(struct iommu_table *tbl,
int nid, unsigned long res_start, unsigned long res_end);
+bool iommu_table_in_use(struct iommu_table *tbl);
#define IOMMU_TABLE_GROUP_MAX_TABLES 2
@@ -274,17 +273,11 @@ extern void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle,
size_t size, enum dma_data_direction direction,
unsigned long attrs);
-extern void iommu_init_early_pSeries(void);
+void __init iommu_init_early_pSeries(void);
extern void iommu_init_early_dart(struct pci_controller_ops *controller_ops);
extern void iommu_init_early_pasemi(void);
#if defined(CONFIG_PPC64) && defined(CONFIG_PM)
-static inline void iommu_save(void)
-{
- if (ppc_md.iommu_save)
- ppc_md.iommu_save();
-}
-
static inline void iommu_restore(void)
{
if (ppc_md.iommu_restore)
diff --git a/arch/powerpc/include/asm/ipic.h b/arch/powerpc/include/asm/ipic.h
index 0524df31a7e6..b47ca7dc7199 100644
--- a/arch/powerpc/include/asm/ipic.h
+++ b/arch/powerpc/include/asm/ipic.h
@@ -65,7 +65,7 @@ enum ipic_mcp_irq {
IPIC_MCP_MU = 7,
};
-extern void ipic_set_default_priority(void);
+void __init ipic_set_default_priority(void);
extern u32 ipic_get_mcp_status(void);
extern void ipic_clear_mcp_status(u32 mask);
diff --git a/arch/powerpc/include/asm/irq.h b/arch/powerpc/include/asm/irq.h
index 814dfab7e392..5c1516a5ba8f 100644
--- a/arch/powerpc/include/asm/irq.h
+++ b/arch/powerpc/include/asm/irq.h
@@ -6,7 +6,6 @@
/*
*/
-#include <linux/irqdomain.h>
#include <linux/threads.h>
#include <linux/list.h>
#include <linux/radix-tree.h>
@@ -23,8 +22,8 @@ extern atomic_t ppc_n_lost_interrupts;
/* Total number of virq in the platform */
#define NR_IRQS CONFIG_NR_IRQS
-/* Same thing, used by the generic IRQ code */
-#define NR_IRQS_LEGACY NUM_ISA_INTERRUPTS
+/* Number of irqs reserved for a legacy isa controller */
+#define NR_IRQS_LEGACY 16
extern irq_hw_number_t virq_to_hw(unsigned int virq);
@@ -35,12 +34,9 @@ static __inline__ int irq_canonicalize(int irq)
extern int distribute_irqs;
-struct irqaction;
struct pt_regs;
-#define __ARCH_HAS_DO_SOFTIRQ
-
-#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
+#ifdef CONFIG_BOOKE_OR_40x
/*
* Per-cpu stacks for handling critical, debug and machine check
* level interrupts.
@@ -56,11 +52,8 @@ extern void *mcheckirq_ctx[NR_CPUS];
extern void *hardirq_ctx[NR_CPUS];
extern void *softirq_ctx[NR_CPUS];
-void call_do_softirq(void *sp);
-void call_do_irq(struct pt_regs *regs, void *sp);
-extern void do_IRQ(struct pt_regs *regs);
+void __do_IRQ(struct pt_regs *regs);
extern void __init init_IRQ(void);
-extern void __do_irq(struct pt_regs *regs);
int irq_choose_cpu(const struct cpumask *mask);
diff --git a/arch/powerpc/include/asm/jump_label.h b/arch/powerpc/include/asm/jump_label.h
index 09297ec9fa52..93ce3ec25387 100644
--- a/arch/powerpc/include/asm/jump_label.h
+++ b/arch/powerpc/include/asm/jump_label.h
@@ -20,7 +20,8 @@ static __always_inline bool arch_static_branch(struct static_key *key, bool bran
asm_volatile_goto("1:\n\t"
"nop # arch_static_branch\n\t"
".pushsection __jump_table, \"aw\"\n\t"
- JUMP_ENTRY_TYPE "1b, %l[l_yes], %c0\n\t"
+ ".long 1b - ., %l[l_yes] - .\n\t"
+ JUMP_ENTRY_TYPE "%c0 - .\n\t"
".popsection \n\t"
: : "i" (&((char *)key)[branch]) : : l_yes);
@@ -34,7 +35,8 @@ static __always_inline bool arch_static_branch_jump(struct static_key *key, bool
asm_volatile_goto("1:\n\t"
"b %l[l_yes] # arch_static_branch_jump\n\t"
".pushsection __jump_table, \"aw\"\n\t"
- JUMP_ENTRY_TYPE "1b, %l[l_yes], %c0\n\t"
+ ".long 1b - ., %l[l_yes] - .\n\t"
+ JUMP_ENTRY_TYPE "%c0 - .\n\t"
".popsection \n\t"
: : "i" (&((char *)key)[branch]) : : l_yes);
@@ -43,23 +45,12 @@ l_yes:
return true;
}
-#ifdef CONFIG_PPC64
-typedef u64 jump_label_t;
-#else
-typedef u32 jump_label_t;
-#endif
-
-struct jump_entry {
- jump_label_t code;
- jump_label_t target;
- jump_label_t key;
-};
-
#else
#define ARCH_STATIC_BRANCH(LABEL, KEY) \
1098: nop; \
.pushsection __jump_table, "aw"; \
- FTR_ENTRY_LONG 1098b, LABEL, KEY; \
+ .long 1098b - ., LABEL - .; \
+ FTR_ENTRY_LONG KEY - .; \
.popsection
#endif
diff --git a/arch/powerpc/include/asm/kasan.h b/arch/powerpc/include/asm/kasan.h
index fbff9ff9032e..92a968202ba7 100644
--- a/arch/powerpc/include/asm/kasan.h
+++ b/arch/powerpc/include/asm/kasan.h
@@ -15,19 +15,57 @@
#ifndef __ASSEMBLY__
#include <asm/page.h>
+#include <linux/sizes.h>
#define KASAN_SHADOW_SCALE_SHIFT 3
+#if defined(CONFIG_MODULES) && defined(CONFIG_PPC32)
+#define KASAN_KERN_START ALIGN_DOWN(PAGE_OFFSET - SZ_256M, SZ_256M)
+#else
+#define KASAN_KERN_START PAGE_OFFSET
+#endif
+
#define KASAN_SHADOW_START (KASAN_SHADOW_OFFSET + \
- (PAGE_OFFSET >> KASAN_SHADOW_SCALE_SHIFT))
+ (KASAN_KERN_START >> KASAN_SHADOW_SCALE_SHIFT))
#define KASAN_SHADOW_OFFSET ASM_CONST(CONFIG_KASAN_SHADOW_OFFSET)
-#define KASAN_SHADOW_END 0UL
+#ifdef CONFIG_PPC32
+#define KASAN_SHADOW_END (-(-KASAN_SHADOW_START >> KASAN_SHADOW_SCALE_SHIFT))
+#elif defined(CONFIG_PPC_BOOK3S_64)
+/*
+ * The shadow ends before the highest accessible address
+ * because we don't need a shadow for the shadow. Instead:
+ * (c00e000000000000 >> 3) + a80e000000000000 = c00fc00000000000
+ */
+#define KASAN_SHADOW_END 0xc00fc00000000000UL
+
+#else
+
+/*
+ * The shadow ends before the highest accessible address
+ * because we don't need a shadow for the shadow.
+ * But it doesn't hurt to have a shadow for the shadow,
+ * keep shadow end aligned eases things.
+ */
+#define KASAN_SHADOW_END 0xc000200000000000UL
-#define KASAN_SHADOW_SIZE (KASAN_SHADOW_END - KASAN_SHADOW_START)
+#endif
#ifdef CONFIG_KASAN
+#ifdef CONFIG_PPC_BOOK3S_64
+DECLARE_STATIC_KEY_FALSE(powerpc_kasan_enabled_key);
+
+static __always_inline bool kasan_arch_is_ready(void)
+{
+ if (static_branch_likely(&powerpc_kasan_enabled_key))
+ return true;
+ return false;
+}
+
+#define kasan_arch_is_ready kasan_arch_is_ready
+#endif
+
void kasan_early_init(void);
void kasan_mmu_init(void);
void kasan_init(void);
@@ -38,5 +76,9 @@ static inline void kasan_mmu_init(void) { }
static inline void kasan_late_init(void) { }
#endif
+void kasan_update_early_region(unsigned long k_start, unsigned long k_end, pte_t pte);
+int kasan_init_shadow_page_tables(unsigned long k_start, unsigned long k_end);
+int kasan_init_region(void *start, size_t size);
+
#endif /* __ASSEMBLY */
#endif
diff --git a/arch/powerpc/include/asm/kexec.h b/arch/powerpc/include/asm/kexec.h
index c68476818753..a1ddba01e7d1 100644
--- a/arch/powerpc/include/asm/kexec.h
+++ b/arch/powerpc/include/asm/kexec.h
@@ -3,7 +3,7 @@
#define _ASM_POWERPC_KEXEC_H
#ifdef __KERNEL__
-#if defined(CONFIG_FSL_BOOKE) || defined(CONFIG_44x)
+#if defined(CONFIG_PPC_85xx) || defined(CONFIG_44x)
/*
* On FSL-BookE we setup a 1:1 mapping which covers the first 2GiB of memory
@@ -79,13 +79,13 @@ extern int crash_wake_offline;
struct kimage;
struct pt_regs;
extern void default_machine_kexec(struct kimage *image);
-extern int default_machine_kexec_prepare(struct kimage *image);
extern void default_machine_crash_shutdown(struct pt_regs *regs);
extern int crash_shutdown_register(crash_shutdown_t handler);
extern int crash_shutdown_unregister(crash_shutdown_t handler);
+extern void crash_kexec_prepare(void);
extern void crash_kexec_secondary(struct pt_regs *regs);
-extern int overlaps_crashkernel(unsigned long start, unsigned long size);
+int __init overlaps_crashkernel(unsigned long start, unsigned long size);
extern void reserve_crashkernel(void);
extern void machine_kexec_mask_interrupts(void);
@@ -97,25 +97,55 @@ static inline bool kdump_in_progress(void)
void relocate_new_kernel(unsigned long indirection_page, unsigned long reboot_code_buffer,
unsigned long start_address) __noreturn;
+void kexec_copy_flush(struct kimage *image);
+
+#if defined(CONFIG_CRASH_DUMP) && defined(CONFIG_PPC_RTAS)
+void crash_free_reserved_phys_range(unsigned long begin, unsigned long end);
+#define crash_free_reserved_phys_range crash_free_reserved_phys_range
+#endif
+
#ifdef CONFIG_KEXEC_FILE
extern const struct kexec_file_ops kexec_elf64_ops;
-#ifdef CONFIG_IMA_KEXEC
#define ARCH_HAS_KIMAGE_ARCH
struct kimage_arch {
- phys_addr_t ima_buffer_addr;
- size_t ima_buffer_size;
+ struct crash_mem *exclude_ranges;
+
+ unsigned long backup_start;
+ void *backup_buf;
+ void *fdt;
};
-#endif
+char *setup_kdump_cmdline(struct kimage *image, char *cmdline,
+ unsigned long cmdline_len);
int setup_purgatory(struct kimage *image, const void *slave_code,
const void *fdt, unsigned long kernel_load_addr,
unsigned long fdt_load_addr);
-int setup_new_fdt(const struct kimage *image, void *fdt,
- unsigned long initrd_load_addr, unsigned long initrd_len,
- const char *cmdline);
-int delete_fdt_mem_rsv(void *fdt, unsigned long start, unsigned long size);
+
+#ifdef CONFIG_PPC64
+struct kexec_buf;
+
+int arch_kexec_kernel_image_probe(struct kimage *image, void *buf, unsigned long buf_len);
+#define arch_kexec_kernel_image_probe arch_kexec_kernel_image_probe
+
+int arch_kimage_file_post_load_cleanup(struct kimage *image);
+#define arch_kimage_file_post_load_cleanup arch_kimage_file_post_load_cleanup
+
+int arch_kexec_locate_mem_hole(struct kexec_buf *kbuf);
+#define arch_kexec_locate_mem_hole arch_kexec_locate_mem_hole
+
+int load_crashdump_segments_ppc64(struct kimage *image,
+ struct kexec_buf *kbuf);
+int setup_purgatory_ppc64(struct kimage *image, const void *slave_code,
+ const void *fdt, unsigned long kernel_load_addr,
+ unsigned long fdt_load_addr);
+unsigned int kexec_extra_fdt_size_ppc64(struct kimage *image);
+int setup_new_fdt_ppc64(const struct kimage *image, void *fdt,
+ unsigned long initrd_load_addr,
+ unsigned long initrd_len, const char *cmdline);
+#endif /* CONFIG_PPC64 */
+
#endif /* CONFIG_KEXEC_FILE */
#else /* !CONFIG_KEXEC_CORE */
@@ -150,6 +180,18 @@ static inline void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
}
#endif /* CONFIG_KEXEC_CORE */
+
+#ifdef CONFIG_PPC_BOOK3S_64
+#include <asm/book3s/64/kexec.h>
+#endif
+
+#ifndef reset_sprs
+#define reset_sprs reset_sprs
+static inline void reset_sprs(void)
+{
+}
+#endif
+
#endif /* ! __ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_KEXEC_H */
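The reset_sprs() fallback added at the bottom of kexec.h uses the usual "#ifndef + self-#define" override idiom: a platform header pulled in just above (asm/book3s/64/kexec.h on Book3S-64) can supply its own implementation, and the empty stub is then compiled out. A minimal sketch of such an override, assuming mtspr()/SPRN_AMR/SPRN_IAMR and cpu_has_feature() as found elsewhere in the tree; the exact SPR list is illustrative, not taken from this patch:

/* Hypothetical platform-specific override of the generic stub above. */
#define reset_sprs reset_sprs
static inline void reset_sprs(void)
{
	if (cpu_has_feature(CPU_FTR_ARCH_206)) {
		mtspr(SPRN_AMR, 0);
		mtspr(SPRN_IAMR, 0);
	}
}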
diff --git a/arch/powerpc/include/asm/kexec_ranges.h b/arch/powerpc/include/asm/kexec_ranges.h
new file mode 100644
index 000000000000..f83866a19e87
--- /dev/null
+++ b/arch/powerpc/include/asm/kexec_ranges.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef _ASM_POWERPC_KEXEC_RANGES_H
+#define _ASM_POWERPC_KEXEC_RANGES_H
+
+#define MEM_RANGE_CHUNK_SZ 2048 /* Memory ranges size chunk */
+
+void sort_memory_ranges(struct crash_mem *mrngs, bool merge);
+struct crash_mem *realloc_mem_ranges(struct crash_mem **mem_ranges);
+int add_mem_range(struct crash_mem **mem_ranges, u64 base, u64 size);
+int add_tce_mem_ranges(struct crash_mem **mem_ranges);
+int add_initrd_mem_range(struct crash_mem **mem_ranges);
+#ifdef CONFIG_PPC_64S_HASH_MMU
+int add_htab_mem_range(struct crash_mem **mem_ranges);
+#else
+static inline int add_htab_mem_range(struct crash_mem **mem_ranges)
+{
+ return 0;
+}
+#endif
+int add_kernel_mem_range(struct crash_mem **mem_ranges);
+int add_rtas_mem_range(struct crash_mem **mem_ranges);
+int add_opal_mem_range(struct crash_mem **mem_ranges);
+int add_reserved_mem_ranges(struct crash_mem **mem_ranges);
+
+#endif /* _ASM_POWERPC_KEXEC_RANGES_H */
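The new kexec_ranges.h only declares helpers; as a rough sketch (not part of this patch) of how a kexec_file_load() backend might chain them to build the set of regions a crashdump kernel must avoid, with the function name and error handling invented for illustration:

/* Illustrative only: accumulate ranges to exclude, then sort/merge them. */
static int build_exclude_ranges(struct crash_mem **mem_ranges)
{
	int ret;

	ret = add_tce_mem_ranges(mem_ranges);
	if (!ret)
		ret = add_initrd_mem_range(mem_ranges);
	if (!ret)
		ret = add_htab_mem_range(mem_ranges);	/* no-op without the hash MMU */
	if (!ret)
		ret = add_rtas_mem_range(mem_ranges);
	if (!ret)
		ret = add_opal_mem_range(mem_ranges);
	if (!ret)
		ret = add_reserved_mem_ranges(mem_ranges);
	if (!ret)
		sort_memory_ranges(*mem_ranges, true);
	return ret;
}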
diff --git a/arch/powerpc/include/asm/kfence.h b/arch/powerpc/include/asm/kfence.h
new file mode 100644
index 000000000000..6fd2b4d486c5
--- /dev/null
+++ b/arch/powerpc/include/asm/kfence.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * powerpc KFENCE support.
+ *
+ * Copyright (C) 2020 CS GROUP France
+ */
+
+#ifndef __ASM_POWERPC_KFENCE_H
+#define __ASM_POWERPC_KFENCE_H
+
+#include <linux/mm.h>
+#include <asm/pgtable.h>
+
+#ifdef CONFIG_PPC64_ELF_ABI_V1
+#define ARCH_FUNC_PREFIX "."
+#endif
+
+static inline bool arch_kfence_init_pool(void)
+{
+ return true;
+}
+
+#ifdef CONFIG_PPC64
+static inline bool kfence_protect_page(unsigned long addr, bool protect)
+{
+ struct page *page = virt_to_page(addr);
+
+ __kernel_map_pages(page, 1, !protect);
+
+ return true;
+}
+#else
+static inline bool kfence_protect_page(unsigned long addr, bool protect)
+{
+ pte_t *kpte = virt_to_kpte(addr);
+
+ if (protect) {
+ pte_update(&init_mm, addr, kpte, _PAGE_PRESENT, 0, 0);
+ flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
+ } else {
+ pte_update(&init_mm, addr, kpte, 0, _PAGE_PRESENT, 0);
+ }
+
+ return true;
+}
+#endif
+
+#endif /* __ASM_POWERPC_KFENCE_H */
diff --git a/arch/powerpc/include/asm/kgdb.h b/arch/powerpc/include/asm/kgdb.h
index a9e098a3b881..715c18b75334 100644
--- a/arch/powerpc/include/asm/kgdb.h
+++ b/arch/powerpc/include/asm/kgdb.h
@@ -52,7 +52,7 @@ static inline void arch_kgdb_breakpoint(void)
/* On non-E500 family PPC32 we determine the size by picking the last
* register we need, but on E500 we skip sections so we list what we
* need to store, and add it up. */
-#ifndef CONFIG_E500
+#ifndef CONFIG_PPC_E500
#define MAXREG (PT_FPSCR+1)
#else
/* 32 GPRs (8 bytes), nip, msr, ccr, link, ctr, xer, acc (8 bytes), spefscr*/
diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
deleted file mode 100644
index c8fa182d48c8..000000000000
--- a/arch/powerpc/include/asm/kmap_types.h
+++ /dev/null
@@ -1,13 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-#ifndef _ASM_POWERPC_KMAP_TYPES_H
-#define _ASM_POWERPC_KMAP_TYPES_H
-
-#ifdef __KERNEL__
-
-/*
- */
-
-#define KM_TYPE_NR 16
-
-#endif /* __KERNEL__ */
-#endif /* _ASM_POWERPC_KMAP_TYPES_H */
diff --git a/arch/powerpc/include/asm/kprobes.h b/arch/powerpc/include/asm/kprobes.h
index 66b3f2983b22..c8e4b4fd4e33 100644
--- a/arch/powerpc/include/asm/kprobes.h
+++ b/arch/powerpc/include/asm/kprobes.h
@@ -29,7 +29,7 @@
struct pt_regs;
struct kprobe;
-typedef ppc_opcode_t kprobe_opcode_t;
+typedef u32 kprobe_opcode_t;
extern kprobe_opcode_t optinsn_slot;
@@ -43,7 +43,7 @@ extern kprobe_opcode_t optprobe_template_ret[];
extern kprobe_opcode_t optprobe_template_end[];
/* Fixed instruction size for powerpc */
-#define MAX_INSN_SIZE 1
+#define MAX_INSN_SIZE 2
#define MAX_OPTIMIZED_LENGTH sizeof(kprobe_opcode_t) /* 4 bytes */
#define MAX_OPTINSN_SIZE (optprobe_template_end - optprobe_template_entry)
#define RELATIVEJUMP_SIZE sizeof(kprobe_opcode_t) /* 4 bytes */
@@ -51,7 +51,7 @@ extern kprobe_opcode_t optprobe_template_end[];
#define flush_insn_slot(p) do { } while (0)
#define kretprobe_blacklist_size 0
-void kretprobe_trampoline(void);
+void __kretprobe_trampoline(void);
extern void arch_remove_kprobe(struct kprobe *p);
/* Architecture specific copy of original instruction */
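For context on the MAX_INSN_SIZE change above: kprobe_opcode_t is now a plain u32, so bumping MAX_INSN_SIZE to 2 makes a copied-instruction slot 8 bytes, which is what a prefixed (two-word) Power ISA v3.1 instruction needs. A back-of-the-envelope check, not code from this patch:

/* Illustrative arithmetic only: 2 * sizeof(u32) == 8 bytes per slot,
 * enough to hold a prefixed instruction plus its suffix word. */
#define EXAMPLE_INSN_SLOT_BYTES (MAX_INSN_SIZE * sizeof(kprobe_opcode_t))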
diff --git a/arch/powerpc/include/asm/kup.h b/arch/powerpc/include/asm/kup.h
index 92bcd1a26d73..d751ddd08110 100644
--- a/arch/powerpc/include/asm/kup.h
+++ b/arch/powerpc/include/asm/kup.h
@@ -5,100 +5,209 @@
#define KUAP_READ 1
#define KUAP_WRITE 2
#define KUAP_READ_WRITE (KUAP_READ | KUAP_WRITE)
-/*
- * For prevent_user_access() only.
- * Use the current saved situation instead of the to/from/size params.
- * Used on book3s/32
- */
-#define KUAP_CURRENT 4
-#ifdef CONFIG_PPC64
-#include <asm/book3s/64/kup-radix.h>
+#ifdef CONFIG_PPC_BOOK3S_64
+#include <asm/book3s/64/kup.h>
#endif
+
#ifdef CONFIG_PPC_8xx
#include <asm/nohash/32/kup-8xx.h>
#endif
+
+#ifdef CONFIG_BOOKE_OR_40x
+#include <asm/nohash/kup-booke.h>
+#endif
+
#ifdef CONFIG_PPC_BOOK3S_32
#include <asm/book3s/32/kup.h>
#endif
#ifdef __ASSEMBLY__
#ifndef CONFIG_PPC_KUAP
-.macro kuap_save_and_lock sp, thread, gpr1, gpr2, gpr3
-.endm
-
-.macro kuap_restore sp, current, gpr1, gpr2, gpr3
-.endm
-
-.macro kuap_check current, gpr
+.macro kuap_check_amr gpr1, gpr2
.endm
#endif
#else /* !__ASSEMBLY__ */
-#include <asm/pgtable.h>
+extern bool disable_kuep;
+extern bool disable_kuap;
-void setup_kup(void);
+#include <linux/pgtable.h>
-#ifdef CONFIG_PPC_KUEP
+void setup_kup(void);
void setup_kuep(bool disabled);
-#else
-static inline void setup_kuep(bool disabled) { }
-#endif /* CONFIG_PPC_KUEP */
#ifdef CONFIG_PPC_KUAP
void setup_kuap(bool disabled);
#else
static inline void setup_kuap(bool disabled) { }
-static inline void allow_user_access(void __user *to, const void __user *from,
- unsigned long size, unsigned long dir) { }
-static inline void prevent_user_access(void __user *to, const void __user *from,
- unsigned long size, unsigned long dir) { }
-static inline unsigned long prevent_user_access_return(void) { return 0UL; }
-static inline void restore_user_access(unsigned long flags) { }
+
+static __always_inline bool kuap_is_disabled(void) { return true; }
+
static inline bool
-bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
+__bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
{
return false;
}
+
+static inline void __kuap_lock(void) { }
+static inline void __kuap_save_and_lock(struct pt_regs *regs) { }
+static inline void kuap_user_restore(struct pt_regs *regs) { }
+static inline void __kuap_kernel_restore(struct pt_regs *regs, unsigned long amr) { }
+
+static inline unsigned long __kuap_get_and_assert_locked(void)
+{
+ return 0;
+}
+
+/*
+ * book3s/64/kup-radix.h defines these functions for the !KUAP case to flush
+ * the L1D cache after user accesses. Only include the empty stubs for other
+ * platforms.
+ */
+#ifndef CONFIG_PPC_BOOK3S_64
+static inline void __allow_user_access(void __user *to, const void __user *from,
+ unsigned long size, unsigned long dir) { }
+static inline void __prevent_user_access(unsigned long dir) { }
+static inline unsigned long __prevent_user_access_return(void) { return 0UL; }
+static inline void __restore_user_access(unsigned long flags) { }
+#endif /* CONFIG_PPC_BOOK3S_64 */
#endif /* CONFIG_PPC_KUAP */
-static inline void allow_read_from_user(const void __user *from, unsigned long size)
+static __always_inline bool
+bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
+{
+ if (kuap_is_disabled())
+ return false;
+
+ return __bad_kuap_fault(regs, address, is_write);
+}
+
+static __always_inline void kuap_assert_locked(void)
+{
+ if (kuap_is_disabled())
+ return;
+
+ if (IS_ENABLED(CONFIG_PPC_KUAP_DEBUG))
+ __kuap_get_and_assert_locked();
+}
+
+static __always_inline void kuap_lock(void)
+{
+ if (kuap_is_disabled())
+ return;
+
+ __kuap_lock();
+}
+
+static __always_inline void kuap_save_and_lock(struct pt_regs *regs)
+{
+ if (kuap_is_disabled())
+ return;
+
+ __kuap_save_and_lock(regs);
+}
+
+static __always_inline void kuap_kernel_restore(struct pt_regs *regs, unsigned long amr)
+{
+ if (kuap_is_disabled())
+ return;
+
+ __kuap_kernel_restore(regs, amr);
+}
+
+static __always_inline unsigned long kuap_get_and_assert_locked(void)
+{
+ if (kuap_is_disabled())
+ return 0;
+
+ return __kuap_get_and_assert_locked();
+}
+
+#ifndef CONFIG_PPC_BOOK3S_64
+static __always_inline void allow_user_access(void __user *to, const void __user *from,
+ unsigned long size, unsigned long dir)
+{
+ if (kuap_is_disabled())
+ return;
+
+ __allow_user_access(to, from, size, dir);
+}
+
+static __always_inline void prevent_user_access(unsigned long dir)
{
+ if (kuap_is_disabled())
+ return;
+
+ __prevent_user_access(dir);
+}
+
+static __always_inline unsigned long prevent_user_access_return(void)
+{
+ if (kuap_is_disabled())
+ return 0;
+
+ return __prevent_user_access_return();
+}
+
+static __always_inline void restore_user_access(unsigned long flags)
+{
+ if (kuap_is_disabled())
+ return;
+
+ __restore_user_access(flags);
+}
+#endif /* CONFIG_PPC_BOOK3S_64 */
+
+static __always_inline void allow_read_from_user(const void __user *from, unsigned long size)
+{
+ barrier_nospec();
allow_user_access(NULL, from, size, KUAP_READ);
}
-static inline void allow_write_to_user(void __user *to, unsigned long size)
+static __always_inline void allow_write_to_user(void __user *to, unsigned long size)
{
allow_user_access(to, NULL, size, KUAP_WRITE);
}
-static inline void allow_read_write_user(void __user *to, const void __user *from,
- unsigned long size)
+static __always_inline void allow_read_write_user(void __user *to, const void __user *from,
+ unsigned long size)
{
+ barrier_nospec();
allow_user_access(to, from, size, KUAP_READ_WRITE);
}
-static inline void prevent_read_from_user(const void __user *from, unsigned long size)
+static __always_inline void prevent_read_from_user(const void __user *from, unsigned long size)
+{
+ prevent_user_access(KUAP_READ);
+}
+
+static __always_inline void prevent_write_to_user(void __user *to, unsigned long size)
+{
+ prevent_user_access(KUAP_WRITE);
+}
+
+static __always_inline void prevent_read_write_user(void __user *to, const void __user *from,
+ unsigned long size)
{
- prevent_user_access(NULL, from, size, KUAP_READ);
+ prevent_user_access(KUAP_READ_WRITE);
}
-static inline void prevent_write_to_user(void __user *to, unsigned long size)
+static __always_inline void prevent_current_access_user(void)
{
- prevent_user_access(to, NULL, size, KUAP_WRITE);
+ prevent_user_access(KUAP_READ_WRITE);
}
-static inline void prevent_read_write_user(void __user *to, const void __user *from,
- unsigned long size)
+static __always_inline void prevent_current_read_from_user(void)
{
- prevent_user_access(to, from, size, KUAP_READ_WRITE);
+ prevent_user_access(KUAP_READ);
}
-static inline void prevent_current_access_user(void)
+static __always_inline void prevent_current_write_to_user(void)
{
- prevent_user_access(NULL, NULL, ~0UL, KUAP_CURRENT);
+ prevent_user_access(KUAP_WRITE);
}
#endif /* !__ASSEMBLY__ */
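The reworked kup.h keeps the public helpers (allow_*/prevent_*) as thin __always_inline wrappers that bail out early via kuap_is_disabled() and otherwise call the platform's double-underscore hooks. A hedged sketch of how a caller typically brackets a user access with these helpers; __put_user() is the standard uaccess primitive and the function itself is invented for illustration:

/* Illustrative only: open a write window, do the access, close it again. */
static int example_store_u32_user(u32 __user *to, u32 val)
{
	int err = 0;

	allow_write_to_user(to, sizeof(*to));
	if (__put_user(val, to))
		err = -EFAULT;
	prevent_write_to_user(to, sizeof(*to));

	return err;
}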
diff --git a/arch/powerpc/include/asm/kvm_asm.h b/arch/powerpc/include/asm/kvm_asm.h
index 635fb154b33f..d68d71987d5c 100644
--- a/arch/powerpc/include/asm/kvm_asm.h
+++ b/arch/powerpc/include/asm/kvm_asm.h
@@ -79,6 +79,7 @@
#define BOOK3S_INTERRUPT_FP_UNAVAIL 0x800
#define BOOK3S_INTERRUPT_DECREMENTER 0x900
#define BOOK3S_INTERRUPT_HV_DECREMENTER 0x980
+#define BOOK3S_INTERRUPT_NESTED_HV_DECREMENTER 0x1980
#define BOOK3S_INTERRUPT_DOORBELL 0xa00
#define BOOK3S_INTERRUPT_SYSCALL 0xc00
#define BOOK3S_INTERRUPT_TRACE 0xd00
@@ -147,7 +148,11 @@
#define KVM_GUEST_MODE_SKIP 2
#define KVM_GUEST_MODE_GUEST_HV 3
#define KVM_GUEST_MODE_HOST_HV 4
+#define KVM_GUEST_MODE_HV_P9 5 /* ISA >= v3.0 path */
#define KVM_INST_FETCH_FAILED -1
+/* Extract PO and XOP opcode fields */
+#define PO_XOP_OPCODE_MASK 0xfc0007fe
+
#endif /* __POWERPC_KVM_ASM_H__ */
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index 506e4df2d730..bbf5e2c5fe09 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -78,7 +78,7 @@ struct kvmppc_vcore {
struct kvm_vcpu *runnable_threads[MAX_SMT_THREADS];
struct list_head preempt_list;
spinlock_t lock;
- struct swait_queue_head wq;
+ struct rcuwait wait;
spinlock_t stoltb_lock; /* protects stolen_tb and preempt_tb */
u64 stolen_tb;
u64 preempt_tb;
@@ -155,12 +155,11 @@ extern void kvmppc_mmu_unmap_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
extern int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr);
extern void kvmppc_mmu_flush_segment(struct kvm_vcpu *vcpu, ulong eaddr, ulong seg_size);
extern void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu);
-extern int kvmppc_book3s_hv_page_fault(struct kvm_run *run,
- struct kvm_vcpu *vcpu, unsigned long addr,
- unsigned long status);
+extern int kvmppc_book3s_hv_page_fault(struct kvm_vcpu *vcpu,
+ unsigned long addr, unsigned long status);
extern long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr,
unsigned long slb_v, unsigned long valid);
-extern int kvmppc_hv_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu,
+extern int kvmppc_hv_emulate_mmio(struct kvm_vcpu *vcpu,
unsigned long gpa, gva_t ea, int is_store);
extern void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte);
@@ -174,8 +173,7 @@ extern void kvmppc_mmu_hpte_sysexit(void);
extern int kvmppc_mmu_hv_init(void);
extern int kvmppc_book3s_hcall_implemented(struct kvm *kvm, unsigned long hc);
-extern int kvmppc_book3s_radix_page_fault(struct kvm_run *run,
- struct kvm_vcpu *vcpu,
+extern int kvmppc_book3s_radix_page_fault(struct kvm_vcpu *vcpu,
unsigned long ea, unsigned long dsisr);
extern unsigned long __kvmhv_copy_tofrom_guest_radix(int lpid, int pid,
gva_t eaddr, void *to, void *from,
@@ -198,7 +196,7 @@ extern void kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte, unsigned long gpa,
unsigned int shift,
const struct kvm_memory_slot *memslot,
unsigned int lpid);
-extern bool kvmppc_hv_handle_set_rc(struct kvm *kvm, pgd_t *pgtable,
+extern bool kvmppc_hv_handle_set_rc(struct kvm *kvm, bool nested,
bool writing, unsigned long gpa,
unsigned int lpid);
extern int kvmppc_book3s_instantiate_page(struct kvm_vcpu *vcpu,
@@ -212,12 +210,12 @@ extern void kvmppc_free_pgtable_radix(struct kvm *kvm, pgd_t *pgd,
unsigned int lpid);
extern int kvmppc_radix_init(void);
extern void kvmppc_radix_exit(void);
-extern int kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
- unsigned long gfn);
-extern int kvm_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
- unsigned long gfn);
-extern int kvm_test_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
- unsigned long gfn);
+extern void kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
+ unsigned long gfn);
+extern bool kvm_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
+ unsigned long gfn);
+extern bool kvm_test_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
+ unsigned long gfn);
extern long kvmppc_hv_get_dirty_log_radix(struct kvm *kvm,
struct kvm_memory_slot *memslot, unsigned long *map);
extern void kvmppc_radix_flush_memslot(struct kvm *kvm,
@@ -234,7 +232,7 @@ extern void kvmppc_trigger_fac_interrupt(struct kvm_vcpu *vcpu, ulong fac);
extern void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat,
bool upper, u32 val);
extern void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr);
-extern int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu);
+extern int kvmppc_emulate_paired_single(struct kvm_vcpu *vcpu);
extern kvm_pfn_t kvmppc_gpa_to_pfn(struct kvm_vcpu *vcpu, gpa_t gpa,
bool writing, bool *writable);
extern void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev,
@@ -260,6 +258,8 @@ extern long kvmppc_hv_get_dirty_log_hpt(struct kvm *kvm,
extern void kvmppc_harvest_vpa_dirty(struct kvmppc_vpa *vpa,
struct kvm_memory_slot *memslot,
unsigned long *map);
+extern unsigned long kvmppc_filter_lpcr_hv(struct kvm *kvm,
+ unsigned long lpcr);
extern void kvmppc_update_lpcr(struct kvm *kvm, unsigned long lpcr,
unsigned long mask);
extern void kvmppc_set_fscr(struct kvm_vcpu *vcpu, u64 fscr);
@@ -279,6 +279,10 @@ extern int kvmppc_hcall_impl_hv_realmode(unsigned long cmd);
extern void kvmppc_copy_to_svcpu(struct kvm_vcpu *vcpu);
extern void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu);
+long kvmppc_read_intr(void);
+void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr);
+void kvmppc_inject_interrupt_hv(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags);
+
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
void kvmppc_save_tm_pr(struct kvm_vcpu *vcpu);
void kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu);
@@ -300,12 +304,15 @@ void kvmhv_set_ptbl_entry(unsigned int lpid, u64 dw0, u64 dw1);
void kvmhv_release_all_nested(struct kvm *kvm);
long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu);
long kvmhv_do_nested_tlbie(struct kvm_vcpu *vcpu);
-int kvmhv_run_single_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu,
+long do_h_rpt_invalidate_pat(struct kvm_vcpu *vcpu, unsigned long lpid,
+ unsigned long type, unsigned long pg_sizes,
+ unsigned long start, unsigned long end);
+int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu,
u64 time_limit, unsigned long lpcr);
void kvmhv_save_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr);
void kvmhv_restore_hv_return_state(struct kvm_vcpu *vcpu,
struct hv_guest_state *hr);
-long int kvmhv_nested_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu);
+long int kvmhv_nested_page_fault(struct kvm_vcpu *vcpu);
void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac);
@@ -396,6 +403,12 @@ static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
return vcpu->arch.fault_dar;
}
+/* Expiry time of vcpu DEC relative to host TB */
+static inline u64 kvmppc_dec_expires_host_tb(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.dec_expires - vcpu->arch.vcore->tb_offset;
+}
+
static inline bool is_kvmppc_resume_guest(int r)
{
return (r == RESUME_GUEST || r == RESUME_GUEST_NV);
@@ -424,7 +437,7 @@ extern int kvmppc_h_logical_ci_store(struct kvm_vcpu *vcpu);
#define SPLIT_HACK_OFFS 0xfb000000
/*
- * This packs a VCPU ID from the [0..KVM_MAX_VCPU_ID) space down to the
+ * This packs a VCPU ID from the [0..KVM_MAX_VCPU_IDS) space down to the
* [0..KVM_MAX_VCPUS) space, using knowledge of the guest's core stride
* (but not its actual threading mode, which is not available) to avoid
* collisions.
diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h
index 04b2b927bb5a..d49065af08e9 100644
--- a/arch/powerpc/include/asm/kvm_book3s_64.h
+++ b/arch/powerpc/include/asm/kvm_book3s_64.h
@@ -14,18 +14,7 @@
#include <asm/book3s/64/mmu-hash.h>
#include <asm/cpu_has_feature.h>
#include <asm/ppc-opcode.h>
-
-#ifdef CONFIG_PPC_PSERIES
-static inline bool kvmhv_on_pseries(void)
-{
- return !cpu_has_feature(CPU_FTR_HVMODE);
-}
-#else
-static inline bool kvmhv_on_pseries(void)
-{
- return false;
-}
-#endif
+#include <asm/pte-walk.h>
/*
* Structure for a nested guest, that is, for a guest that is managed by
@@ -42,7 +31,6 @@ struct kvm_nested_guest {
struct mutex tlb_lock; /* serialize page faults and tlbies */
struct kvm_nested_guest *next;
cpumask_t need_tlb_flush;
- cpumask_t cpu_in_guest;
short prev_cpu[NR_CPUS];
u8 radix; /* is this nested guest radix */
};
@@ -152,10 +140,20 @@ static inline bool kvmhv_vcpu_is_radix(struct kvm_vcpu *vcpu)
return radix;
}
+unsigned long kvmppc_msr_hard_disable_set_facilities(struct kvm_vcpu *vcpu, unsigned long msr);
+
+int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpcr, u64 *tb);
+
#define KVM_DEFAULT_HPT_ORDER 24 /* 16MB HPT by default */
#endif
/*
+ * Invalid HDSISR value which is used to indicate when HW has not set the reg.
+ * Used to work around an errata.
+ */
+#define HDSISR_CANARY 0x7fff
+
+/*
* We use a lock bit in HPTE dword 0 to synchronize updates and
* accesses to each HPTE, and another bit to indicate non-present
* HPTEs.
@@ -368,6 +366,10 @@ static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,
rb |= 1; /* L field */
rb |= r & 0xff000 & ((1ul << a_pgshift) - 1); /* LP field */
}
+ /*
+ * This sets both bits of the B field in the PTE. 0b1x values are
+ * reserved, but those will have been filtered by kvmppc_do_h_enter.
+ */
rb |= (v >> HPTE_V_SSIZE_SHIFT) << 8; /* B field */
return rb;
}
@@ -434,7 +436,7 @@ static inline pte_t kvmppc_read_update_linux_pte(pte_t *ptep, int writing)
continue;
}
/* If pte is not present return None */
- if (unlikely(!(pte_val(old_pte) & _PAGE_PRESENT)))
+ if (unlikely(!pte_present(old_pte)))
return __pte(0);
new_pte = pte_mkyoung(old_pte);
@@ -634,6 +636,47 @@ extern void kvmhv_remove_nest_rmap_range(struct kvm *kvm,
unsigned long gpa, unsigned long hpa,
unsigned long nbytes);
+static inline pte_t *
+find_kvm_secondary_pte_unlocked(struct kvm *kvm, unsigned long ea,
+ unsigned *hshift)
+{
+ pte_t *pte;
+
+ pte = __find_linux_pte(kvm->arch.pgtable, ea, NULL, hshift);
+ return pte;
+}
+
+static inline pte_t *find_kvm_secondary_pte(struct kvm *kvm, unsigned long ea,
+ unsigned *hshift)
+{
+ pte_t *pte;
+
+ VM_WARN(!spin_is_locked(&kvm->mmu_lock),
+ "%s called with kvm mmu_lock not held \n", __func__);
+ pte = __find_linux_pte(kvm->arch.pgtable, ea, NULL, hshift);
+
+ return pte;
+}
+
+static inline pte_t *find_kvm_host_pte(struct kvm *kvm, unsigned long mmu_seq,
+ unsigned long ea, unsigned *hshift)
+{
+ pte_t *pte;
+
+ VM_WARN(!spin_is_locked(&kvm->mmu_lock),
+ "%s called with kvm mmu_lock not held \n", __func__);
+
+ if (mmu_invalidate_retry(kvm, mmu_seq))
+ return NULL;
+
+ pte = __find_linux_pte(kvm->mm->pgd, ea, NULL, hshift);
+
+ return pte;
+}
+
+extern pte_t *find_kvm_nested_guest_pte(struct kvm *kvm, unsigned long lpid,
+ unsigned long ea, unsigned *hshift);
+
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
#endif /* __ASM_KVM_BOOK3S_64_H__ */
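find_kvm_host_pte() added above is only meaningful when the caller samples the MMU-notifier sequence count before the walk and holds kvm->mmu_lock around it, so the mmu_invalidate_retry() check can detect a racing invalidation. A sketch of that calling pattern; the sequence-count field name is assumed from the generic KVM code, not taken from this patch:

/* Illustrative only: snapshot the invalidate sequence, then walk under
 * kvm->mmu_lock so a concurrent invalidation makes the walk return NULL. */
static pte_t *example_host_pte_lookup(struct kvm *kvm, unsigned long ea)
{
	unsigned long mmu_seq = kvm->mmu_invalidate_seq;	/* assumed field name */
	unsigned int shift;
	pte_t *ptep;

	smp_rmb();

	spin_lock(&kvm->mmu_lock);
	ptep = find_kvm_host_pte(kvm, mmu_seq, ea, &shift);
	spin_unlock(&kvm->mmu_lock);

	return ptep;
}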
diff --git a/arch/powerpc/include/asm/kvm_book3s_asm.h b/arch/powerpc/include/asm/kvm_book3s_asm.h
index 45704f2716e2..c8882d9b86c2 100644
--- a/arch/powerpc/include/asm/kvm_book3s_asm.h
+++ b/arch/powerpc/include/asm/kvm_book3s_asm.h
@@ -14,9 +14,6 @@
#define XICS_MFRR 0xc
#define XICS_IPI 2 /* interrupt source # for IPIs */
-/* LPIDs we support with this build -- runtime limit may be lower */
-#define KVMPPC_NR_LPIDS (LPID_RSVD + 1)
-
/* Maximum number of threads per physical core */
#define MAX_SMT_THREADS 8
@@ -74,16 +71,6 @@ struct kvm_split_mode {
u8 do_nap;
u8 napped[MAX_SMT_THREADS];
struct kvmppc_vcore *vc[MAX_SUBCORES];
- /* Bits for changing lpcr on P9 */
- unsigned long lpcr_req;
- unsigned long lpidr_req;
- unsigned long host_lpcr;
- u32 do_set;
- u32 do_restore;
- union {
- u32 allphases;
- u8 phase[4];
- } lpcr_sync;
};
/*
@@ -110,7 +97,6 @@ struct kvmppc_host_state {
u8 hwthread_state;
u8 host_ipi;
u8 ptid; /* thread number within subcore when split */
- u8 tid; /* thread number within whole core */
u8 fake_suspend;
struct kvm_vcpu *kvm_vcpu;
struct kvmppc_vcore *kvm_vcore;
@@ -119,7 +105,7 @@ struct kvmppc_host_state {
void __iomem *xive_tima_virt;
u32 saved_xirr;
u64 dabr;
- u64 host_mmcr[7]; /* MMCR 0,1,A, SIAR, SDAR, MMCR2, SIER */
+ u64 host_mmcr[10]; /* MMCR 0,1,A, SIAR, SDAR, MMCR2, SIER, MMCR3, SIER2/3 */
u32 host_pmc[8];
u64 host_purr;
u64 host_spurr;
diff --git a/arch/powerpc/include/asm/kvm_book3s_uvmem.h b/arch/powerpc/include/asm/kvm_book3s_uvmem.h
index 5a9834e0e2d1..0a6319448cb6 100644
--- a/arch/powerpc/include/asm/kvm_book3s_uvmem.h
+++ b/arch/powerpc/include/asm/kvm_book3s_uvmem.h
@@ -5,6 +5,7 @@
#ifdef CONFIG_PPC_UV
int kvmppc_uvmem_init(void);
void kvmppc_uvmem_free(void);
+bool kvmppc_uvmem_available(void);
int kvmppc_uvmem_slot_init(struct kvm *kvm, const struct kvm_memory_slot *slot);
void kvmppc_uvmem_slot_free(struct kvm *kvm,
const struct kvm_memory_slot *slot);
@@ -22,6 +23,10 @@ int kvmppc_send_page_to_uv(struct kvm *kvm, unsigned long gfn);
unsigned long kvmppc_h_svm_init_abort(struct kvm *kvm);
void kvmppc_uvmem_drop_pages(const struct kvm_memory_slot *free,
struct kvm *kvm, bool skip_page_out);
+int kvmppc_uvmem_memslot_create(struct kvm *kvm,
+ const struct kvm_memory_slot *new);
+void kvmppc_uvmem_memslot_delete(struct kvm *kvm,
+ const struct kvm_memory_slot *old);
#else
static inline int kvmppc_uvmem_init(void)
{
@@ -30,6 +35,11 @@ static inline int kvmppc_uvmem_init(void)
static inline void kvmppc_uvmem_free(void) { }
+static inline bool kvmppc_uvmem_available(void)
+{
+ return false;
+}
+
static inline int
kvmppc_uvmem_slot_init(struct kvm *kvm, const struct kvm_memory_slot *slot)
{
@@ -76,5 +86,15 @@ static inline int kvmppc_send_page_to_uv(struct kvm *kvm, unsigned long gfn)
static inline void
kvmppc_uvmem_drop_pages(const struct kvm_memory_slot *free,
struct kvm *kvm, bool skip_page_out) { }
+
+static inline int kvmppc_uvmem_memslot_create(struct kvm *kvm,
+ const struct kvm_memory_slot *new)
+{
+ return H_UNSUPPORTED;
+}
+
+static inline void kvmppc_uvmem_memslot_delete(struct kvm *kvm,
+ const struct kvm_memory_slot *old) { }
+
#endif /* CONFIG_PPC_UV */
#endif /* __ASM_KVM_BOOK3S_UVMEM_H__ */
diff --git a/arch/powerpc/include/asm/kvm_booke.h b/arch/powerpc/include/asm/kvm_booke.h
index 310ba48d13f0..0c3401b2e19e 100644
--- a/arch/powerpc/include/asm/kvm_booke.h
+++ b/arch/powerpc/include/asm/kvm_booke.h
@@ -89,10 +89,12 @@ static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu)
return vcpu->arch.regs.nip;
}
+#ifdef CONFIG_BOOKE
static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
{
return vcpu->arch.fault_dear;
}
+#endif
static inline bool kvmppc_supports_magic_page(struct kvm_vcpu *vcpu)
{
diff --git a/arch/powerpc/include/asm/kvm_guest.h b/arch/powerpc/include/asm/kvm_guest.h
new file mode 100644
index 000000000000..68e499abdb24
--- /dev/null
+++ b/arch/powerpc/include/asm/kvm_guest.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 IBM Corporation
+ */
+
+#ifndef _ASM_POWERPC_KVM_GUEST_H_
+#define _ASM_POWERPC_KVM_GUEST_H_
+
+#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_KVM_GUEST)
+#include <linux/jump_label.h>
+
+DECLARE_STATIC_KEY_FALSE(kvm_guest);
+
+static inline bool is_kvm_guest(void)
+{
+ return static_branch_unlikely(&kvm_guest);
+}
+
+int __init check_kvm_guest(void);
+#else
+static inline bool is_kvm_guest(void) { return false; }
+static inline int check_kvm_guest(void) { return 0; }
+#endif
+
+#endif /* _ASM_POWERPC_KVM_GUEST_H_ */
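The new kvm_guest.h turns the "am I running under KVM?" test into a static branch, so hot paths can use it at essentially zero cost on bare metal. A small invented example of a caller:

/* Illustrative only: the static key keeps this check free on bare metal. */
static void example_report_virt(void)
{
	if (is_kvm_guest())
		pr_info("running as a KVM guest\n");
}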
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 6e8b8ffd06ad..caea15dcb91d 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -26,19 +26,25 @@
#include <asm/hvcall.h>
#include <asm/mce.h>
+#define __KVM_HAVE_ARCH_VCPU_DEBUGFS
+
#define KVM_MAX_VCPUS NR_CPUS
#define KVM_MAX_VCORES NR_CPUS
-#define KVM_USER_MEM_SLOTS 512
#include <asm/cputhreads.h>
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
#include <asm/kvm_book3s_asm.h> /* for MAX_SMT_THREADS */
-#define KVM_MAX_VCPU_ID (MAX_SMT_THREADS * KVM_MAX_VCORES)
-#define KVM_MAX_NESTED_GUESTS KVMPPC_NR_LPIDS
+#define KVM_MAX_VCPU_IDS (MAX_SMT_THREADS * KVM_MAX_VCORES)
+
+/*
+ * Limit the nested partition table to 4096 entries (because that's what
+ * hardware supports). Both guest and host use this value.
+ */
+#define KVM_MAX_NESTED_GUESTS_SHIFT 12
#else
-#define KVM_MAX_VCPU_ID KVM_MAX_VCPUS
+#define KVM_MAX_VCPU_IDS KVM_MAX_VCPUS
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
#define __KVM_HAVE_ARCH_INTC_INITIALIZED
@@ -52,17 +58,12 @@
/* PPC-specific vcpu->requests bit members */
#define KVM_REQ_WATCHDOG KVM_ARCH_REQ(0)
#define KVM_REQ_EPR_EXIT KVM_ARCH_REQ(1)
+#define KVM_REQ_PENDING_TIMER KVM_ARCH_REQ(2)
#include <linux/mmu_notifier.h>
#define KVM_ARCH_WANT_MMU_NOTIFIER
-extern int kvm_unmap_hva_range(struct kvm *kvm,
- unsigned long start, unsigned long end);
-extern int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
-extern int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
-extern int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
-
#define HPTEG_CACHE_NUM (1 << 15)
#define HPTEG_HASH_BITS_PTE 13
#define HPTEG_HASH_BITS_PTE_LONG 12
@@ -87,12 +88,13 @@ struct kvmppc_book3s_shadow_vcpu;
struct kvm_nested_guest;
struct kvm_vm_stat {
- ulong remote_tlb_flush;
- ulong num_2M_pages;
- ulong num_1G_pages;
+ struct kvm_vm_stat_generic generic;
+ u64 num_2M_pages;
+ u64 num_1G_pages;
};
struct kvm_vcpu_stat {
+ struct kvm_vcpu_stat_generic generic;
u64 sum_exits;
u64 mmio_exits;
u64 signal_exits;
@@ -108,14 +110,7 @@ struct kvm_vcpu_stat {
u64 emulated_inst_exits;
u64 dec_exits;
u64 ext_intr_exits;
- u64 halt_poll_success_ns;
- u64 halt_poll_fail_ns;
- u64 halt_wait_ns;
- u64 halt_successful_poll;
- u64 halt_attempted_poll;
u64 halt_successful_wait;
- u64 halt_poll_invalid;
- u64 halt_wakeup;
u64 dbell_exits;
u64 gdbell_exits;
u64 ld;
@@ -202,7 +197,7 @@ struct kvmppc_spapr_tce_table {
u64 size; /* window size in pages */
struct list_head iommu_tables;
struct mutex alloc_lock;
- struct page *pages[0];
+ struct page *pages[];
};
/* XICS components, defined in book3s_xics.c */
@@ -299,17 +294,14 @@ struct kvm_arch {
u32 online_vcores;
atomic_t hpte_mod_interest;
cpumask_t need_tlb_flush;
- cpumask_t cpu_in_guest;
u8 radix;
u8 fwnmi_enabled;
u8 secure_guest;
- bool threads_indep;
+ u8 svm_enabled;
bool nested_enable;
+ bool dawr1_enabled;
pgd_t *pgtable;
u64 process_table;
- struct dentry *debugfs_dir;
- struct dentry *htab_dentry;
- struct dentry *radix_dentry;
struct kvm_resize_hpt *resize_hpt; /* protected by kvm->lock */
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
@@ -326,6 +318,7 @@ struct kvm_arch {
#endif
#ifdef CONFIG_KVM_XICS
struct kvmppc_xics *xics;
+ struct kvmppc_xics *xics_device;
struct kvmppc_xive *xive; /* Current XIVE device in use */
struct {
struct kvmppc_xive *native;
@@ -339,8 +332,7 @@ struct kvm_arch {
struct list_head uvmem_pfns;
struct mutex mmu_setup_lock; /* nests inside vcpu mutexes */
u64 l1_ptcr;
- int max_nested_lpid;
- struct kvm_nested_guest *nested_guests[KVM_MAX_NESTED_GUESTS];
+ struct idr kvm_nested_guest_idr;
/* This array can grow quite large, keep it at the end */
struct kvmppc_vcore *vcores[KVM_MAX_VCORES];
#endif
@@ -451,7 +443,7 @@ struct kvmppc_passthru_irqmap {
};
#endif
-# ifdef CONFIG_PPC_FSL_BOOK3E
+# ifdef CONFIG_PPC_E500
#define KVMPPC_BOOKE_IAC_NUM 2
#define KVMPPC_BOOKE_DAC_NUM 2
# else
@@ -531,7 +523,11 @@ struct kvm_vcpu_arch {
struct kvmppc_book3s_shadow_vcpu *shadow_vcpu;
#endif
- struct pt_regs regs;
+ /*
+ * This is passed along to the HV via H_ENTER_NESTED. Align to
+ * prevent it crossing a real 4K page.
+ */
+ struct pt_regs regs __aligned(512);
struct thread_fp_state fp;
@@ -583,12 +579,18 @@ struct kvm_vcpu_arch {
u32 ctrl;
u32 dabrx;
ulong dabr;
- ulong dawr;
- ulong dawrx;
+ ulong dawr0;
+ ulong dawrx0;
+ ulong dawr1;
+ ulong dawrx1;
ulong ciabr;
ulong cfar;
ulong ppr;
u32 pspb;
+ u8 load_ebb;
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ u8 load_tm;
+#endif
ulong fscr;
ulong shadow_fscr;
ulong ebbhr;
@@ -638,12 +640,14 @@ struct kvm_vcpu_arch {
u32 ccr1;
u32 dbsr;
- u64 mmcr[5];
+ u64 mmcr[4]; /* MMCR0, MMCR1, MMCR2, MMCR3 */
+ u64 mmcra;
+ u64 mmcrs;
u32 pmc[8];
u32 spmc[2];
u64 siar;
u64 sdar;
- u64 sier;
+ u64 sier[3];
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
u64 tfhar;
u64 texasr;
@@ -678,14 +682,18 @@ struct kvm_vcpu_arch {
u64 timing_min_duration[__NUMBER_OF_KVM_EXIT_TYPES];
u64 timing_max_duration[__NUMBER_OF_KVM_EXIT_TYPES];
u64 timing_last_exit;
- struct dentry *debugfs_exit_timing;
#endif
#ifdef CONFIG_PPC_BOOK3S
ulong fault_dar;
u32 fault_dsisr;
unsigned long intr_msr;
- ulong fault_gpa; /* guest real address of page fault (POWER9) */
+ /*
+ * POWER9 and later: fault_gpa contains the guest real address of page
+ * fault for a radix guest, or segment descriptor (equivalent to result
+ * from slbmfev of SLB entry that translated the EA) for hash guests.
+ */
+ ulong fault_gpa;
#endif
#ifdef CONFIG_BOOKE
@@ -744,7 +752,7 @@ struct kvm_vcpu_arch {
struct hrtimer dec_timer;
u64 dec_jiffies;
- u64 dec_expires;
+ u64 dec_expires; /* Relative to guest timebase. */
unsigned long pending_exceptions;
u8 ceded;
u8 prodded;
@@ -752,7 +760,8 @@ struct kvm_vcpu_arch {
u8 irq_pending; /* Used by XIVE to signal pending guest irqs */
u32 last_inst;
- struct swait_queue_head *wqp;
+ struct rcuwait wait;
+ struct rcuwait *waitp;
struct kvmppc_vcore *vcore;
int ret;
int trap;
@@ -796,7 +805,6 @@ struct kvm_vcpu_arch {
struct mmio_hpte_cache_entry *pgfault_cache;
struct task_struct *run_task;
- struct kvm_run *kvm_run;
spinlock_t vpa_update_lock;
struct kvmppc_vpa vpa;
@@ -814,8 +822,11 @@ struct kvm_vcpu_arch {
u32 online;
+ u64 hfscr_permitted; /* A mask of permitted HFSCR facilities */
+
/* For support of nested guests */
struct kvm_nested_guest *nested;
+ u64 nested_hfscr; /* HFSCR that the L1 requested for the nested guest */
u32 nested_vcpu_id;
gpa_t nested_io_gpr;
#endif
@@ -823,14 +834,21 @@ struct kvm_vcpu_arch {
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
struct kvmhv_tb_accumulator *cur_activity; /* What we're timing */
u64 cur_tb_start; /* when it started */
+#ifdef CONFIG_KVM_BOOK3S_HV_P9_TIMING
+ struct kvmhv_tb_accumulator vcpu_entry;
+ struct kvmhv_tb_accumulator vcpu_exit;
+ struct kvmhv_tb_accumulator in_guest;
+ struct kvmhv_tb_accumulator hcall;
+ struct kvmhv_tb_accumulator pg_fault;
+ struct kvmhv_tb_accumulator guest_entry;
+ struct kvmhv_tb_accumulator guest_exit;
+#else
struct kvmhv_tb_accumulator rm_entry; /* real-mode entry code */
struct kvmhv_tb_accumulator rm_intr; /* real-mode intr handling */
struct kvmhv_tb_accumulator rm_exit; /* real-mode exit code */
struct kvmhv_tb_accumulator guest_time; /* guest execution */
struct kvmhv_tb_accumulator cede_time; /* time napping inside guest */
-
- struct dentry *debugfs_dir;
- struct dentry *debugfs_timings;
+#endif
#endif /* CONFIG_KVM_BOOK3S_HV_EXIT_TIMING */
};
@@ -867,6 +885,5 @@ static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
static inline void kvm_arch_exit(void) {}
static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
-static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}
#endif /* __POWERPC_KVM_HOST_H__ */
diff --git a/arch/powerpc/include/asm/kvm_para.h b/arch/powerpc/include/asm/kvm_para.h
index 9c1f6b4b9bbf..abe1b5e82547 100644
--- a/arch/powerpc/include/asm/kvm_para.h
+++ b/arch/powerpc/include/asm/kvm_para.h
@@ -8,35 +8,15 @@
#ifndef __POWERPC_KVM_PARA_H__
#define __POWERPC_KVM_PARA_H__
-#include <uapi/asm/kvm_para.h>
-
-#ifdef CONFIG_KVM_GUEST
-
-#include <linux/of.h>
-
-static inline int kvm_para_available(void)
-{
- struct device_node *hyper_node;
-
- hyper_node = of_find_node_by_path("/hypervisor");
- if (!hyper_node)
- return 0;
+#include <asm/kvm_guest.h>
- if (!of_device_is_compatible(hyper_node, "linux,kvm"))
- return 0;
-
- return 1;
-}
-
-#else
+#include <uapi/asm/kvm_para.h>
static inline int kvm_para_available(void)
{
- return 0;
+ return IS_ENABLED(CONFIG_KVM_GUEST) && is_kvm_guest();
}
-#endif
-
static inline unsigned int kvm_arch_para_features(void)
{
unsigned long r;
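After this rework kvm_para_available() folds to a compile-time 0 when CONFIG_KVM_GUEST is disabled, so callers no longer need their own #ifdefs. A hedged example of the resulting call-site shape; the function is invented for illustration:

/* Illustrative only: dead-code-eliminated entirely when CONFIG_KVM_GUEST=n. */
static void example_probe_para_features(void)
{
	if (kvm_para_available())
		pr_info("KVM para features: 0x%x\n", kvm_arch_para_features());
}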
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index bc2494e5710a..bfacf12784dd 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -58,28 +58,28 @@ enum xlate_readwrite {
XLATE_WRITE /* check for write permissions */
};
-extern int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
-extern int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
+extern int kvmppc_vcpu_run(struct kvm_vcpu *vcpu);
+extern int __kvmppc_vcpu_run(struct kvm_vcpu *vcpu);
extern void kvmppc_handler_highmem(void);
extern void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu);
-extern int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
+extern int kvmppc_handle_load(struct kvm_vcpu *vcpu,
unsigned int rt, unsigned int bytes,
int is_default_endian);
-extern int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
+extern int kvmppc_handle_loads(struct kvm_vcpu *vcpu,
unsigned int rt, unsigned int bytes,
int is_default_endian);
-extern int kvmppc_handle_vsx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
+extern int kvmppc_handle_vsx_load(struct kvm_vcpu *vcpu,
unsigned int rt, unsigned int bytes,
int is_default_endian, int mmio_sign_extend);
-extern int kvmppc_handle_vmx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
+extern int kvmppc_handle_vmx_load(struct kvm_vcpu *vcpu,
unsigned int rt, unsigned int bytes, int is_default_endian);
-extern int kvmppc_handle_vmx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
+extern int kvmppc_handle_vmx_store(struct kvm_vcpu *vcpu,
unsigned int rs, unsigned int bytes, int is_default_endian);
-extern int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
+extern int kvmppc_handle_store(struct kvm_vcpu *vcpu,
u64 val, unsigned int bytes,
int is_default_endian);
-extern int kvmppc_handle_vsx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
+extern int kvmppc_handle_vsx_store(struct kvm_vcpu *vcpu,
int rs, unsigned int bytes,
int is_default_endian);
@@ -90,10 +90,9 @@ extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
bool data);
extern int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
bool data);
-extern int kvmppc_emulate_instruction(struct kvm_run *run,
- struct kvm_vcpu *vcpu);
+extern int kvmppc_emulate_instruction(struct kvm_vcpu *vcpu);
extern int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu);
-extern int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu);
+extern int kvmppc_emulate_mmio(struct kvm_vcpu *vcpu);
extern void kvmppc_emulate_dec(struct kvm_vcpu *vcpu);
extern u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb);
extern void kvmppc_decrementer_func(struct kvm_vcpu *vcpu);
@@ -105,10 +104,7 @@ extern void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr,
unsigned int gtlb_idx);
-extern void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode);
extern void kvmppc_mmu_switch_pid(struct kvm_vcpu *vcpu, u32 pid);
-extern void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu);
-extern int kvmppc_mmu_init(struct kvm_vcpu *vcpu);
extern int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
extern int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
extern gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int gtlb_index,
@@ -132,6 +128,7 @@ extern void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu);
extern int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu);
extern int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_machine_check(struct kvm_vcpu *vcpu, ulong flags);
+extern void kvmppc_core_queue_syscall(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags);
extern void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu);
@@ -155,7 +152,6 @@ extern int kvmppc_core_check_requests(struct kvm_vcpu *vcpu);
extern int kvmppc_booke_init(void);
extern void kvmppc_booke_exit(void);
-extern void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu);
extern int kvmppc_kvm_pv(struct kvm_vcpu *vcpu);
extern void kvmppc_map_magic(struct kvm_vcpu *vcpu);
@@ -164,8 +160,6 @@ extern void kvmppc_set_hpt(struct kvm *kvm, struct kvm_hpt_info *info);
extern long kvmppc_alloc_reset_hpt(struct kvm *kvm, int order);
extern void kvmppc_free_hpt(struct kvm_hpt_info *info);
extern void kvmppc_rmap_reset(struct kvm *kvm);
-extern long kvmppc_prepare_vrma(struct kvm *kvm,
- struct kvm_userspace_memory_region *mem);
extern void kvmppc_map_vrma(struct kvm_vcpu *vcpu,
struct kvm_memory_slot *memslot, unsigned long porder);
extern int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu);
@@ -179,8 +173,6 @@ extern void kvmppc_setup_partition_table(struct kvm *kvm);
extern long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
struct kvm_create_spapr_tce_64 *args);
-extern struct kvmppc_spapr_tce_table *kvmppc_find_table(
- struct kvm *kvm, unsigned long liobn);
#define kvmppc_ioba_validate(stt, ioba, npages) \
(iommu_tce_check_ioba((stt)->page_shift, (stt)->offset, \
(stt)->size, (ioba), (npages)) ? \
@@ -200,17 +192,13 @@ extern void kvm_free_hpt_cma(struct page *page, unsigned long nr_pages);
extern int kvmppc_core_init_vm(struct kvm *kvm);
extern void kvmppc_core_destroy_vm(struct kvm *kvm);
extern void kvmppc_core_free_memslot(struct kvm *kvm,
- struct kvm_memory_slot *free,
- struct kvm_memory_slot *dont);
-extern int kvmppc_core_create_memslot(struct kvm *kvm,
- struct kvm_memory_slot *slot,
- unsigned long npages);
+ struct kvm_memory_slot *slot);
extern int kvmppc_core_prepare_memory_region(struct kvm *kvm,
- struct kvm_memory_slot *memslot,
- const struct kvm_userspace_memory_region *mem);
-extern void kvmppc_core_commit_memory_region(struct kvm *kvm,
- const struct kvm_userspace_memory_region *mem,
const struct kvm_memory_slot *old,
+ struct kvm_memory_slot *new,
+ enum kvm_mr_change change);
+extern void kvmppc_core_commit_memory_region(struct kvm *kvm,
+ struct kvm_memory_slot *old,
const struct kvm_memory_slot *new,
enum kvm_mr_change change);
extern int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm,
@@ -272,34 +260,29 @@ struct kvmppc_ops {
void (*vcpu_put)(struct kvm_vcpu *vcpu);
void (*inject_interrupt)(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags);
void (*set_msr)(struct kvm_vcpu *vcpu, u64 msr);
- int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);
+ int (*vcpu_run)(struct kvm_vcpu *vcpu);
int (*vcpu_create)(struct kvm_vcpu *vcpu);
void (*vcpu_free)(struct kvm_vcpu *vcpu);
int (*check_requests)(struct kvm_vcpu *vcpu);
int (*get_dirty_log)(struct kvm *kvm, struct kvm_dirty_log *log);
void (*flush_memslot)(struct kvm *kvm, struct kvm_memory_slot *memslot);
int (*prepare_memory_region)(struct kvm *kvm,
- struct kvm_memory_slot *memslot,
- const struct kvm_userspace_memory_region *mem);
- void (*commit_memory_region)(struct kvm *kvm,
- const struct kvm_userspace_memory_region *mem,
const struct kvm_memory_slot *old,
+ struct kvm_memory_slot *new,
+ enum kvm_mr_change change);
+ void (*commit_memory_region)(struct kvm *kvm,
+ struct kvm_memory_slot *old,
const struct kvm_memory_slot *new,
enum kvm_mr_change change);
- int (*unmap_hva_range)(struct kvm *kvm, unsigned long start,
- unsigned long end);
- int (*age_hva)(struct kvm *kvm, unsigned long start, unsigned long end);
- int (*test_age_hva)(struct kvm *kvm, unsigned long hva);
- void (*set_spte_hva)(struct kvm *kvm, unsigned long hva, pte_t pte);
- void (*mmu_destroy)(struct kvm_vcpu *vcpu);
- void (*free_memslot)(struct kvm_memory_slot *free,
- struct kvm_memory_slot *dont);
- int (*create_memslot)(struct kvm_memory_slot *slot,
- unsigned long npages);
+ bool (*unmap_gfn_range)(struct kvm *kvm, struct kvm_gfn_range *range);
+ bool (*age_gfn)(struct kvm *kvm, struct kvm_gfn_range *range);
+ bool (*test_age_gfn)(struct kvm *kvm, struct kvm_gfn_range *range);
+ bool (*set_spte_gfn)(struct kvm *kvm, struct kvm_gfn_range *range);
+ void (*free_memslot)(struct kvm_memory_slot *slot);
int (*init_vm)(struct kvm *kvm);
void (*destroy_vm)(struct kvm *kvm);
int (*get_smmu_info)(struct kvm *kvm, struct kvm_ppc_smmu_info *info);
- int (*emulate_op)(struct kvm_run *run, struct kvm_vcpu *vcpu,
+ int (*emulate_op)(struct kvm_vcpu *vcpu,
unsigned int inst, int *advance);
int (*emulate_mtspr)(struct kvm_vcpu *vcpu, int sprn, ulong spr_val);
int (*emulate_mfspr)(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val);
@@ -321,7 +304,12 @@ struct kvmppc_ops {
int size);
int (*store_to_eaddr)(struct kvm_vcpu *vcpu, ulong *eaddr, void *ptr,
int size);
+ int (*enable_svm)(struct kvm *kvm);
int (*svm_off)(struct kvm *kvm);
+ int (*enable_dawr1)(struct kvm *kvm);
+ bool (*hash_v3_possible)(void);
+ int (*create_vm_debugfs)(struct kvm *kvm);
+ int (*create_vcpu_debugfs)(struct kvm_vcpu *vcpu, struct dentry *debugfs_dentry);
};
extern struct kvmppc_ops *kvmppc_hv_ops;
@@ -558,8 +546,7 @@ extern void kvm_hv_vm_activated(void);
extern void kvm_hv_vm_deactivated(void);
extern bool kvm_hv_mode_active(void);
-extern void kvmppc_check_need_tlb_flush(struct kvm *kvm, int pcpu,
- struct kvm_nested_guest *nested);
+extern void kvmppc_check_need_tlb_flush(struct kvm *kvm, int pcpu);
#else
static inline void __init kvm_cma_reserve(void)
@@ -593,6 +580,18 @@ static inline bool kvm_hv_mode_active(void) { return false; }
#endif
+#ifdef CONFIG_PPC_PSERIES
+static inline bool kvmhv_on_pseries(void)
+{
+ return !cpu_has_feature(CPU_FTR_HVMODE);
+}
+#else
+static inline bool kvmhv_on_pseries(void)
+{
+ return false;
+}
+#endif
+
#ifdef CONFIG_KVM_XICS
static inline int kvmppc_xics_enabled(struct kvm_vcpu *vcpu)
{
@@ -613,6 +612,7 @@ extern void kvmppc_free_pimap(struct kvm *kvm);
extern int kvmppc_xics_rm_complete(struct kvm_vcpu *vcpu, u32 hcall);
extern void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu);
extern int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd);
+extern int kvmppc_xive_xics_hcall(struct kvm_vcpu *vcpu, u32 req);
extern u64 kvmppc_xics_get_icp(struct kvm_vcpu *vcpu);
extern int kvmppc_xics_set_icp(struct kvm_vcpu *vcpu, u64 icpval);
extern int kvmppc_xics_connect_vcpu(struct kvm_device *dev,
@@ -635,9 +635,9 @@ extern int h_ipi_redirect;
static inline struct kvmppc_passthru_irqmap *kvmppc_get_passthru_irqmap(
struct kvm *kvm)
{ return NULL; }
-static inline void kvmppc_alloc_host_rm_ops(void) {};
-static inline void kvmppc_free_host_rm_ops(void) {};
-static inline void kvmppc_free_pimap(struct kvm *kvm) {};
+static inline void kvmppc_alloc_host_rm_ops(void) {}
+static inline void kvmppc_free_host_rm_ops(void) {}
+static inline void kvmppc_free_pimap(struct kvm *kvm) {}
static inline int kvmppc_xics_rm_complete(struct kvm_vcpu *vcpu, u32 hcall)
{ return 0; }
static inline int kvmppc_xics_enabled(struct kvm_vcpu *vcpu)
@@ -645,6 +645,8 @@ static inline int kvmppc_xics_enabled(struct kvm_vcpu *vcpu)
static inline void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu) { }
static inline int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd)
{ return 0; }
+static inline int kvmppc_xive_xics_hcall(struct kvm_vcpu *vcpu, u32 req)
+ { return 0; }
#endif
#ifdef CONFIG_KVM_XIVE
@@ -662,22 +664,22 @@ extern int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server,
u32 *priority);
extern int kvmppc_xive_int_on(struct kvm *kvm, u32 irq);
extern int kvmppc_xive_int_off(struct kvm *kvm, u32 irq);
-extern void kvmppc_xive_init_module(void);
-extern void kvmppc_xive_exit_module(void);
extern int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
struct kvm_vcpu *vcpu, u32 cpu);
extern void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu);
extern int kvmppc_xive_set_mapped(struct kvm *kvm, unsigned long guest_irq,
- struct irq_desc *host_desc);
+ unsigned long host_irq);
extern int kvmppc_xive_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
- struct irq_desc *host_desc);
+ unsigned long host_irq);
extern u64 kvmppc_xive_get_icp(struct kvm_vcpu *vcpu);
extern int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval);
extern int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq,
int level, bool line_status);
extern void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu);
+extern void kvmppc_xive_pull_vcpu(struct kvm_vcpu *vcpu);
+extern bool kvmppc_xive_rearm_escalation(struct kvm_vcpu *vcpu);
static inline int kvmppc_xive_enabled(struct kvm_vcpu *vcpu)
{
@@ -687,8 +689,6 @@ static inline int kvmppc_xive_enabled(struct kvm_vcpu *vcpu)
extern int kvmppc_xive_native_connect_vcpu(struct kvm_device *dev,
struct kvm_vcpu *vcpu, u32 cpu);
extern void kvmppc_xive_native_cleanup_vcpu(struct kvm_vcpu *vcpu);
-extern void kvmppc_xive_native_init_module(void);
-extern void kvmppc_xive_native_exit_module(void);
extern int kvmppc_xive_native_get_vp(struct kvm_vcpu *vcpu,
union kvmppc_one_reg *val);
extern int kvmppc_xive_native_set_vp(struct kvm_vcpu *vcpu,
@@ -702,8 +702,6 @@ static inline int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server,
u32 *priority) { return -1; }
static inline int kvmppc_xive_int_on(struct kvm *kvm, u32 irq) { return -1; }
static inline int kvmppc_xive_int_off(struct kvm *kvm, u32 irq) { return -1; }
-static inline void kvmppc_xive_init_module(void) { }
-static inline void kvmppc_xive_exit_module(void) { }
static inline int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
struct kvm_vcpu *vcpu, u32 cpu) { return -EBUSY; }
@@ -718,14 +716,14 @@ static inline int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval) { retur
static inline int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq,
int level, bool line_status) { return -ENODEV; }
static inline void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu) { }
+static inline void kvmppc_xive_pull_vcpu(struct kvm_vcpu *vcpu) { }
+static inline bool kvmppc_xive_rearm_escalation(struct kvm_vcpu *vcpu) { return true; }
static inline int kvmppc_xive_enabled(struct kvm_vcpu *vcpu)
{ return 0; }
static inline int kvmppc_xive_native_connect_vcpu(struct kvm_device *dev,
struct kvm_vcpu *vcpu, u32 cpu) { return -EBUSY; }
static inline void kvmppc_xive_native_cleanup_vcpu(struct kvm_vcpu *vcpu) { }
-static inline void kvmppc_xive_native_init_module(void) { }
-static inline void kvmppc_xive_native_exit_module(void) { }
static inline int kvmppc_xive_native_get_vp(struct kvm_vcpu *vcpu,
union kvmppc_one_reg *val)
{ return 0; }
@@ -761,20 +759,20 @@ long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
unsigned long tce_value, unsigned long npages);
long int kvmppc_rm_h_confer(struct kvm_vcpu *vcpu, int target,
unsigned int yield_count);
-long kvmppc_h_random(struct kvm_vcpu *vcpu);
+long kvmppc_rm_h_random(struct kvm_vcpu *vcpu);
void kvmhv_commence_exit(int trap);
void kvmppc_realmode_machine_check(struct kvm_vcpu *vcpu);
void kvmppc_subcore_enter_guest(void);
void kvmppc_subcore_exit_guest(void);
long kvmppc_realmode_hmi_handler(void);
+long kvmppc_p9_realmode_hmi_handler(struct kvm_vcpu *vcpu);
long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
long pte_index, unsigned long pteh, unsigned long ptel);
long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,
unsigned long pte_index, unsigned long avpn);
long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu);
long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
- unsigned long pte_index, unsigned long avpn,
- unsigned long va);
+ unsigned long pte_index, unsigned long avpn);
long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
unsigned long pte_index);
long kvmppc_h_clear_ref(struct kvm_vcpu *vcpu, unsigned long flags,
@@ -785,13 +783,6 @@ long kvmppc_rm_h_page_init(struct kvm_vcpu *vcpu, unsigned long flags,
unsigned long dest, unsigned long src);
long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr,
unsigned long slb_v, unsigned int status, bool data);
-unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu);
-unsigned long kvmppc_rm_h_xirr_x(struct kvm_vcpu *vcpu);
-unsigned long kvmppc_rm_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server);
-int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
- unsigned long mfrr);
-int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr);
-int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr);
void kvmppc_guest_entry_inject_int(struct kvm_vcpu *vcpu);
/*
@@ -873,7 +864,6 @@ int kvm_vcpu_ioctl_dirty_tlb(struct kvm_vcpu *vcpu,
struct kvm_dirty_tlb *cfg);
long kvmppc_alloc_lpid(void);
-void kvmppc_claim_lpid(long lpid);
void kvmppc_free_lpid(long lpid);
void kvmppc_init_lpid(unsigned long nr_lpids);
@@ -889,9 +879,9 @@ static inline void kvmppc_mmu_flush_icache(kvm_pfn_t pfn)
/* Clear i-cache for new pages */
page = pfn_to_page(pfn);
- if (!test_bit(PG_arch_1, &page->flags)) {
+ if (!test_bit(PG_dcache_clean, &page->flags)) {
flush_dcache_icache_page(page);
- set_bit(PG_arch_1, &page->flags);
+ set_bit(PG_dcache_clean, &page->flags);
}
}
diff --git a/arch/powerpc/include/asm/linkage.h b/arch/powerpc/include/asm/linkage.h
index 1f00d2891d69..b71b9582e754 100644
--- a/arch/powerpc/include/asm/linkage.h
+++ b/arch/powerpc/include/asm/linkage.h
@@ -4,7 +4,7 @@
#include <asm/types.h>
-#ifdef PPC64_ELF_ABI_v1
+#ifdef CONFIG_PPC64_ELF_ABI_V1
#define cond_syscall(x) \
asm ("\t.weak " #x "\n\t.set " #x ", sys_ni_syscall\n" \
"\t.weak ." #x "\n\t.set ." #x ", .sys_ni_syscall\n")
diff --git a/arch/powerpc/include/asm/livepatch.h b/arch/powerpc/include/asm/livepatch.h
index 4a3d5d25fed5..d044a1fd4f44 100644
--- a/arch/powerpc/include/asm/livepatch.h
+++ b/arch/powerpc/include/asm/livepatch.h
@@ -7,26 +7,10 @@
#ifndef _ASM_POWERPC_LIVEPATCH_H
#define _ASM_POWERPC_LIVEPATCH_H
-#include <linux/module.h>
-#include <linux/ftrace.h>
+#include <linux/sched.h>
#include <linux/sched/task_stack.h>
-#ifdef CONFIG_LIVEPATCH
-static inline void klp_arch_set_pc(struct pt_regs *regs, unsigned long ip)
-{
- regs->nip = ip;
-}
-
-#define klp_get_ftrace_location klp_get_ftrace_location
-static inline unsigned long klp_get_ftrace_location(unsigned long faddr)
-{
- /*
- * Live patch works only with -mprofile-kernel on PPC. In this case,
- * the ftrace location is always within the first 16 bytes.
- */
- return ftrace_location_range(faddr, faddr + 16);
-}
-
+#ifdef CONFIG_LIVEPATCH_64
static inline void klp_init_thread_info(struct task_struct *p)
{
/* + 1 to account for STACK_END_MAGIC */
@@ -34,6 +18,6 @@ static inline void klp_init_thread_info(struct task_struct *p)
}
#else
static inline void klp_init_thread_info(struct task_struct *p) { }
-#endif /* CONFIG_LIVEPATCH */
+#endif
#endif /* _ASM_POWERPC_LIVEPATCH_H */
diff --git a/arch/powerpc/include/asm/lppaca.h b/arch/powerpc/include/asm/lppaca.h
index 3b4b305796ae..34d44cb17c87 100644
--- a/arch/powerpc/include/asm/lppaca.h
+++ b/arch/powerpc/include/asm/lppaca.h
@@ -42,7 +42,6 @@
*/
#include <linux/cache.h>
#include <linux/threads.h>
-#include <linux/spinlock_types.h>
#include <asm/types.h>
#include <asm/mmu.h>
#include <asm/firmware.h>
@@ -105,14 +104,18 @@ struct lppaca {
volatile __be32 dispersion_count; /* dispatch changed physical cpu */
volatile __be64 cmo_faults; /* CMO page fault count */
volatile __be64 cmo_fault_time; /* CMO page fault time */
- u8 reserved10[104];
+ u8 reserved10[64]; /* [S]PURR expropriated/donated */
+ volatile __be64 enqueue_dispatch_tb; /* Total TB enqueue->dispatch */
+ volatile __be64 ready_enqueue_tb; /* Total TB ready->enqueue */
+ volatile __be64 wait_ready_tb; /* Total TB wait->ready */
+ u8 reserved11[16];
/* cacheline 4-5 */
__be32 page_ins; /* CMO Hint - # page ins by OS */
- u8 reserved11[148];
+ u8 reserved12[148];
volatile __be64 dtl_idx; /* Dispatch Trace Log head index */
- u8 reserved12[96];
+ u8 reserved13[96];
} ____cacheline_aligned;
#define lppaca_of(cpu) (*paca_ptrs[cpu]->lppaca_ptr)
@@ -146,49 +149,6 @@ struct slb_shadow {
} save_area[SLB_NUM_BOLTED];
} ____cacheline_aligned;
-/*
- * Layout of entries in the hypervisor's dispatch trace log buffer.
- */
-struct dtl_entry {
- u8 dispatch_reason;
- u8 preempt_reason;
- __be16 processor_id;
- __be32 enqueue_to_dispatch_time;
- __be32 ready_to_enqueue_time;
- __be32 waiting_to_ready_time;
- __be64 timebase;
- __be64 fault_addr;
- __be64 srr0;
- __be64 srr1;
-};
-
-#define DISPATCH_LOG_BYTES 4096 /* bytes per cpu */
-#define N_DISPATCH_LOG (DISPATCH_LOG_BYTES / sizeof(struct dtl_entry))
-
-/*
- * Dispatch trace log event enable mask:
- * 0x1: voluntary virtual processor waits
- * 0x2: time-slice preempts
- * 0x4: virtual partition memory page faults
- */
-#define DTL_LOG_CEDE 0x1
-#define DTL_LOG_PREEMPT 0x2
-#define DTL_LOG_FAULT 0x4
-#define DTL_LOG_ALL (DTL_LOG_CEDE | DTL_LOG_PREEMPT | DTL_LOG_FAULT)
-
-extern struct kmem_cache *dtl_cache;
-extern rwlock_t dtl_access_lock;
-
-/*
- * When CONFIG_VIRT_CPU_ACCOUNTING_NATIVE = y, the cpu accounting code controls
- * reading from the dispatch trace log. If other code wants to consume
- * DTL entries, it can set this pointer to a function that will get
- * called once for each DTL entry that gets processed.
- */
-extern void (*dtl_consumer)(struct dtl_entry *entry, u64 index);
-
-extern void register_dtl_buffer(int cpu);
-extern void alloc_dtl_buffers(unsigned long *time_limit);
extern long hcall_vphn(unsigned long cpu, u64 flags, __be32 *associativity);
#endif /* CONFIG_PPC_BOOK3S */
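The three fields carved out of reserved10 are big-endian timebase accumulators maintained by the hypervisor; a minimal sketch of reading one of them for a given CPU (the helper name is illustrative, not part of this patch):

#include <asm/lppaca.h>

/* Illustrative helper: timebase ticks this vCPU spent between being enqueued
 * and being dispatched, as accumulated by the hypervisor. */
static u64 example_enqueue_dispatch_tb(int cpu)
{
	return be64_to_cpu(lppaca_of(cpu).enqueue_dispatch_tb);
}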
diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h
index 7bcb64444a39..378b8d5836a7 100644
--- a/arch/powerpc/include/asm/machdep.h
+++ b/arch/powerpc/include/asm/machdep.h
@@ -8,13 +8,6 @@
#include <linux/dma-mapping.h>
#include <linux/export.h>
-#include <asm/setup.h>
-
-/* We export this macro for external modules like Alsa to know if
- * ppc_md.feature_call is implemented or not
- */
-#define CONFIG_PPC_HAS_FEATURE_CALLS
-
struct pt_regs;
struct pci_bus;
struct device_node;
@@ -29,10 +22,9 @@ struct machdep_calls {
char *name;
#ifdef CONFIG_PPC64
#ifdef CONFIG_PM
- void (*iommu_save)(void);
void (*iommu_restore)(void);
#endif
-#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
+#ifdef CONFIG_MEMORY_HOTPLUG
unsigned long (*memory_block_size)(void);
#endif
#endif /* CONFIG_PPC64 */
@@ -43,7 +35,6 @@ struct machdep_calls {
void (*setup_arch)(void); /* Optional, may be NULL */
/* Optional, may be NULL. */
void (*show_cpuinfo)(struct seq_file *m);
- void (*show_percpuinfo)(struct seq_file *m, int i);
/* Returns the current operating frequency of "cpu" in Hz */
unsigned long (*get_proc_freq)(unsigned int cpu);
@@ -59,21 +50,21 @@ struct machdep_calls {
int (*pcibios_root_bridge_prepare)(struct pci_host_bridge
*bridge);
+ /* finds all the pci_controllers present at boot */
+ void (*discover_phbs)(void);
+
/* To setup PHBs when using automatic OF platform driver for PCI */
int (*pci_setup_phb)(struct pci_controller *host);
void __noreturn (*restart)(char *cmd);
void __noreturn (*halt)(void);
void (*panic)(char *str);
- void (*cpu_die)(void);
long (*time_init)(void); /* Optional, may be NULL */
int (*set_rtc_time)(struct rtc_time *);
void (*get_rtc_time)(struct rtc_time *);
time64_t (*get_boot_time)(void);
- unsigned char (*rtc_read_val)(int addr);
- void (*rtc_write_val)(int addr, unsigned char val);
void (*calibrate_decr)(void);
@@ -101,6 +92,8 @@ struct machdep_calls {
/* Called during machine check exception to retrieve fixup address. */
bool (*mce_check_early_recovery)(struct pt_regs *regs);
+ void (*machine_check_log_err)(void);
+
/* Motherboard/chipset features. This is a kind of general purpose
* hook used to control some machine specific features (like reset
* lines, chip power control, etc...).
@@ -131,7 +124,7 @@ struct machdep_calls {
unsigned long dabrx);
/* Set DAWR for this platform, leave empty for default implementation */
- int (*set_dawr)(unsigned long dawr,
+ int (*set_dawr)(int nr, unsigned long dawr,
unsigned long dawrx);
#ifdef CONFIG_PPC32 /* XXX for now */
@@ -139,8 +132,6 @@ struct machdep_calls {
May be NULL. */
void (*init)(void);
- void (*kgdb_map_scc)(void);
-
/*
* optional PCI "hooks"
*/
@@ -185,13 +176,6 @@ struct machdep_calls {
#ifdef CONFIG_KEXEC_CORE
void (*kexec_cpu_down)(int crash_shutdown, int secondary);
- /* Called to do what every setup is needed on image and the
- * reboot code buffer. Returns 0 on success.
- * Provide your own (maybe dummy) implementation if your platform
- * claims to support kexec.
- */
- int (*machine_kexec_prepare)(struct kimage *image);
-
/* Called to perform the _real_ kexec.
* Do NOT allocate memory or fail here. We are past the point of
* no return.
@@ -208,24 +192,18 @@ struct machdep_calls {
void (*suspend_disable_irqs)(void);
void (*suspend_enable_irqs)(void);
#endif
- int (*suspend_disable_cpu)(void);
#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
ssize_t (*cpu_probe)(const char *, size_t);
ssize_t (*cpu_release)(const char *, size_t);
#endif
-#ifdef CONFIG_ARCH_RANDOM
int (*get_random_seed)(unsigned long *v);
-#endif
};
extern void e500_idle(void);
extern void power4_idle(void);
-extern void power7_idle(void);
-extern void power9_idle(void);
extern void ppc6xx_idle(void);
-extern void book3e_idle(void);
/*
* ppc_md contains a copy of the machine description structure for the
@@ -235,7 +213,7 @@ extern void book3e_idle(void);
extern struct machdep_calls ppc_md;
extern struct machdep_calls *machine_id;
-#define __machine_desc __attribute__ ((__section__ (".machine.desc")))
+#define __machine_desc __section(".machine.desc")
#define define_machine(name) \
extern struct machdep_calls mach_##name; \
@@ -249,23 +227,6 @@ extern struct machdep_calls *machine_id;
machine_id == &mach_##name; \
})
-extern void probe_machine(void);
-
-#ifdef CONFIG_PPC_PMAC
-/*
- * Power macintoshes have either a CUDA, PMU or SMU controlling
- * system reset, power, NVRAM, RTC.
- */
-typedef enum sys_ctrler_kind {
- SYS_CTRLER_UNKNOWN = 0,
- SYS_CTRLER_CUDA = 1,
- SYS_CTRLER_PMU = 2,
- SYS_CTRLER_SMU = 3,
-} sys_ctrler_t;
-extern sys_ctrler_t sys_ctrler;
-
-#endif /* CONFIG_PPC_PMAC */
-
static inline void log_error(char *buf, unsigned int err_type, int fatal)
{
if (ppc_md.log_error)
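struct machdep_calls now grows discover_phbs() and a per-watchpoint set_dawr(); a rough sketch of a board file wiring them up through define_machine() (the example_* names are made up for illustration):

static void example_discover_phbs(void)
{
	/* Walk the flattened device tree and register each PCI host bridge. */
}

static int example_set_dawr(int nr, unsigned long dawr, unsigned long dawrx)
{
	/* Program watchpoint number 'nr'; return 0 on success. */
	return 0;
}

define_machine(example) {
	.name          = "Example Platform",
	.discover_phbs = example_discover_phbs,
	.set_dawr      = example_set_dawr,
};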
diff --git a/arch/powerpc/include/asm/mce.h b/arch/powerpc/include/asm/mce.h
index 6a6ddaabdb34..c9f0936bd3c9 100644
--- a/arch/powerpc/include/asm/mce.h
+++ b/arch/powerpc/include/asm/mce.h
@@ -86,6 +86,7 @@ enum MCE_TlbErrorType {
enum MCE_UserErrorType {
MCE_USER_ERROR_INDETERMINATE = 0,
MCE_USER_ERROR_TLBIE = 1,
+ MCE_USER_ERROR_SCV = 2,
};
enum MCE_RaErrorType {
@@ -203,12 +204,26 @@ struct mce_error_info {
bool ignore_event;
};
-#define MAX_MC_EVT 100
+#define MAX_MC_EVT 10
+
+struct mce_info {
+ int mce_nest_count;
+ struct machine_check_event mce_event[MAX_MC_EVT];
+ /* Queue for delayed MCE events. */
+ int mce_queue_count;
+ struct machine_check_event mce_event_queue[MAX_MC_EVT];
+ /* Queue for delayed MCE UE events. */
+ int mce_ue_count;
+ struct machine_check_event mce_ue_event_queue[MAX_MC_EVT];
+};
/* Release flags for get_mce_event() */
#define MCE_EVENT_RELEASE true
#define MCE_EVENT_DONTRELEASE false
+struct pt_regs;
+struct notifier_block;
+
extern void save_mce_event(struct pt_regs *regs, long handled,
struct mce_error_info *mce_err, uint64_t nip,
uint64_t addr, uint64_t phys_addr);
@@ -218,7 +233,36 @@ extern void machine_check_queue_event(void);
extern void machine_check_print_event_info(struct machine_check_event *evt,
bool user_mode, bool in_guest);
unsigned long addr_to_pfn(struct pt_regs *regs, unsigned long addr);
+extern void mce_common_process_ue(struct pt_regs *regs,
+ struct mce_error_info *mce_err);
+void mce_irq_work_queue(void);
+int mce_register_notifier(struct notifier_block *nb);
+int mce_unregister_notifier(struct notifier_block *nb);
+
+#ifdef CONFIG_PPC_BOOK3S_64
+void mce_run_irq_context_handlers(void);
+#else
+static inline void mce_run_irq_context_handlers(void) { };
+#endif /* CONFIG_PPC_BOOK3S_64 */
+
+#ifdef CONFIG_PPC_BOOK3S_64
+void set_mce_pending_irq_work(void);
+void clear_mce_pending_irq_work(void);
+#endif /* CONFIG_PPC_BOOK3S_64 */
+
#ifdef CONFIG_PPC_BOOK3S_64
void flush_and_reload_slb(void);
+void flush_erat(void);
+long __machine_check_early_realmode_p7(struct pt_regs *regs);
+long __machine_check_early_realmode_p8(struct pt_regs *regs);
+long __machine_check_early_realmode_p9(struct pt_regs *regs);
+long __machine_check_early_realmode_p10(struct pt_regs *regs);
#endif /* CONFIG_PPC_BOOK3S_64 */
+
+#ifdef CONFIG_PPC_BOOK3S_64
+void mce_init(void);
+#else
+static inline void mce_init(void) { };
+#endif /* CONFIG_PPC_BOOK3S_64 */
+
#endif /* __ASM_PPC64_MCE_H__ */
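mce_register_notifier()/mce_unregister_notifier() give other subsystems a hook into machine-check handling; a hedged sketch of a consumer (the callback name is illustrative, and the exact payload passed down the chain is an assumption here):

#include <linux/notifier.h>
#include <asm/mce.h>

static int example_mce_notify(struct notifier_block *nb, unsigned long val, void *data)
{
	/* Assumption: 'data' points at the machine_check_event being handled. */
	pr_info("machine check event observed\n");
	return NOTIFY_OK;
}

static struct notifier_block example_mce_nb = {
	.notifier_call = example_mce_notify,
};

static int __init example_mce_setup(void)
{
	return mce_register_notifier(&example_mce_nb);
}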
diff --git a/arch/powerpc/include/asm/mem_encrypt.h b/arch/powerpc/include/asm/mem_encrypt.h
index ba9dab07c1be..2f26b8fc8d29 100644
--- a/arch/powerpc/include/asm/mem_encrypt.h
+++ b/arch/powerpc/include/asm/mem_encrypt.h
@@ -10,11 +10,6 @@
#include <asm/svm.h>
-static inline bool mem_encrypt_active(void)
-{
- return is_secure_guest();
-}
-
static inline bool force_dma_unencrypted(struct device *dev)
{
return is_secure_guest();
diff --git a/arch/powerpc/include/asm/membarrier.h b/arch/powerpc/include/asm/membarrier.h
index 6e20bb5c74ea..de7f79157918 100644
--- a/arch/powerpc/include/asm/membarrier.h
+++ b/arch/powerpc/include/asm/membarrier.h
@@ -12,7 +12,8 @@ static inline void membarrier_arch_switch_mm(struct mm_struct *prev,
* when switching from userspace to kernel is not needed after
* store to rq->curr.
*/
- if (likely(!(atomic_read(&next->membarrier_state) &
+ if (IS_ENABLED(CONFIG_SMP) &&
+ likely(!(atomic_read(&next->membarrier_state) &
(MEMBARRIER_STATE_PRIVATE_EXPEDITED |
MEMBARRIER_STATE_GLOBAL_EXPEDITED)) || !prev))
return;
diff --git a/arch/powerpc/include/asm/mm-arch-hooks.h b/arch/powerpc/include/asm/mm-arch-hooks.h
deleted file mode 100644
index dce274be824a..000000000000
--- a/arch/powerpc/include/asm/mm-arch-hooks.h
+++ /dev/null
@@ -1,25 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Architecture specific mm hooks
- *
- * Copyright (C) 2015, IBM Corporation
- * Author: Laurent Dufour <ldufour@linux.vnet.ibm.com>
- */
-
-#ifndef _ASM_POWERPC_MM_ARCH_HOOKS_H
-#define _ASM_POWERPC_MM_ARCH_HOOKS_H
-
-static inline void arch_remap(struct mm_struct *mm,
- unsigned long old_start, unsigned long old_end,
- unsigned long new_start, unsigned long new_end)
-{
- /*
- * mremap() doesn't allow moving multiple vmas so we can limit the
- * check to old_start == vdso_base.
- */
- if (old_start == mm->context.vdso_base)
- mm->context.vdso_base = new_start;
-}
-#define arch_remap arch_remap
-
-#endif /* _ASM_POWERPC_MM_ARCH_HOOKS_H */
diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
index d610c2e07b28..17a77d47ed6d 100644
--- a/arch/powerpc/include/asm/mman.h
+++ b/arch/powerpc/include/asm/mman.h
@@ -12,11 +12,8 @@
#include <linux/mm.h>
#include <linux/pkeys.h>
#include <asm/cpu_has_feature.h>
+#include <asm/firmware.h>
-/*
- * This file is included by linux/mman.h, so we can't use cacl_vm_prot_bits()
- * here. How important is the optimization?
- */
static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot,
unsigned long pkey)
{
@@ -28,24 +25,17 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot,
}
#define arch_calc_vm_prot_bits(prot, pkey) arch_calc_vm_prot_bits(prot, pkey)
-static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
-{
-#ifdef CONFIG_PPC_MEM_KEYS
- return (vm_flags & VM_SAO) ?
- __pgprot(_PAGE_SAO | vmflag_to_pte_pkey_bits(vm_flags)) :
- __pgprot(0 | vmflag_to_pte_pkey_bits(vm_flags));
-#else
- return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
-#endif
-}
-#define arch_vm_get_page_prot(vm_flags) arch_vm_get_page_prot(vm_flags)
-
static inline bool arch_validate_prot(unsigned long prot, unsigned long addr)
{
if (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM | PROT_SAO))
return false;
- if ((prot & PROT_SAO) && !cpu_has_feature(CPU_FTR_SAO))
- return false;
+ if (prot & PROT_SAO) {
+ if (!cpu_has_feature(CPU_FTR_SAO))
+ return false;
+ if (firmware_has_feature(FW_FEATURE_LPAR) &&
+ !IS_ENABLED(CONFIG_PPC_PROT_SAO_LPAR))
+ return false;
+ }
return true;
}
#define arch_validate_prot arch_validate_prot
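The net effect of the extra check is visible from userspace: PROT_SAO is refused on an LPAR unless the kernel was built with CONFIG_PPC_PROT_SAO_LPAR. A rough illustration, assuming mprotect() as the path that reaches arch_validate_prot():

#include <sys/mman.h>

/* Sketch: request Strong Access Ordering on an existing mapping. On a
 * pseries LPAR without CONFIG_PPC_PROT_SAO_LPAR this now fails with EINVAL. */
int example_make_sao(void *addr, size_t len)
{
	return mprotect(addr, len, PROT_READ | PROT_WRITE | PROT_SAO);
}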
diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
index 0699cfeeb8c9..94b981152667 100644
--- a/arch/powerpc/include/asm/mmu.h
+++ b/arch/powerpc/include/asm/mmu.h
@@ -29,6 +29,28 @@
*/
/*
+ * Supports KUAP feature
+ * key 0 controlling userspace addresses on radix
+ * Key 3 on hash
+ */
+#define MMU_FTR_BOOK3S_KUAP ASM_CONST(0x00000200)
+
+/*
+ * Supports KUEP feature
+ * key 0 controlling userspace addresses on radix
+ * Key 3 on hash
+ */
+#define MMU_FTR_BOOK3S_KUEP ASM_CONST(0x00000400)
+
+/*
+ * Support for memory protection keys.
+ */
+#define MMU_FTR_PKEY ASM_CONST(0x00000800)
+
+/* Guest Translation Shootdown Enable */
+#define MMU_FTR_GTSE ASM_CONST(0x00001000)
+
+/*
* Support for 68 bit VA space. We added that from ISA 2.05
*/
#define MMU_FTR_68_BIT_VA ASM_CONST(0x00002000)
@@ -74,15 +96,6 @@
*/
#define MMU_FTR_NEED_DTLB_SW_LRU ASM_CONST(0x00200000)
-/* Enable use of TLB reservation. Processor should support tlbsrx.
- * instruction and MAS0[WQ].
- */
-#define MMU_FTR_USE_TLBRSRV ASM_CONST(0x00800000)
-
-/* Use paired MAS registers (MAS7||MAS3, etc.)
- */
-#define MMU_FTR_USE_PAIRED_MAS ASM_CONST(0x01000000)
-
/* Doesn't support the B bit (1T segment) in SLBIE
*/
#define MMU_FTR_NO_SLBIE_B ASM_CONST(0x02000000)
@@ -107,14 +120,11 @@
*/
#define MMU_FTR_1T_SEGMENT ASM_CONST(0x40000000)
-/*
- * Supports KUAP (key 0 controlling userspace addresses) on radix
- */
-#define MMU_FTR_RADIX_KUAP ASM_CONST(0x80000000)
+// NX paste RMA reject in DSI
+#define MMU_FTR_NX_DSI ASM_CONST(0x80000000)
/* MMU feature bit sets for various CPUs */
-#define MMU_FTRS_DEFAULT_HPTE_ARCH_V2 \
- MMU_FTR_HPTE_TABLE | MMU_FTR_PPCAS_ARCH_V2
+#define MMU_FTRS_DEFAULT_HPTE_ARCH_V2 (MMU_FTR_HPTE_TABLE | MMU_FTR_TLBIEL | MMU_FTR_16M_PAGE)
#define MMU_FTRS_POWER MMU_FTRS_DEFAULT_HPTE_ARCH_V2
#define MMU_FTRS_PPC970 MMU_FTRS_POWER | MMU_FTR_TLBIE_CROP_VA
#define MMU_FTRS_POWER5 MMU_FTRS_POWER | MMU_FTR_LOCKLESS_TLBIE
@@ -122,6 +132,7 @@
#define MMU_FTRS_POWER7 MMU_FTRS_POWER6
#define MMU_FTRS_POWER8 MMU_FTRS_POWER6
#define MMU_FTRS_POWER9 MMU_FTRS_POWER6
+#define MMU_FTRS_POWER10 MMU_FTRS_POWER6
#define MMU_FTRS_CELL MMU_FTRS_DEFAULT_HPTE_ARCH_V2 | \
MMU_FTR_CI_LARGE_PAGE
#define MMU_FTRS_PA6T MMU_FTRS_DEFAULT_HPTE_ARCH_V2 | \
@@ -133,14 +144,14 @@
typedef pte_t *pgtable_t;
-#ifdef CONFIG_PPC_FSL_BOOK3E
+#ifdef CONFIG_PPC_E500
#include <asm/percpu.h>
DECLARE_PER_CPU(int, next_tlbcam_idx);
#endif
enum {
MMU_FTRS_POSSIBLE =
-#ifdef CONFIG_PPC_BOOK3S
+#if defined(CONFIG_PPC_BOOK3S_604)
MMU_FTR_HPTE_TABLE |
#endif
#ifdef CONFIG_PPC_8xx
@@ -149,38 +160,80 @@ enum {
#ifdef CONFIG_40x
MMU_FTR_TYPE_40x |
#endif
-#ifdef CONFIG_44x
+#ifdef CONFIG_PPC_47x
+ MMU_FTR_TYPE_47x | MMU_FTR_USE_TLBIVAX_BCAST | MMU_FTR_LOCK_BCAST_INVAL |
+#elif defined(CONFIG_44x)
MMU_FTR_TYPE_44x |
#endif
-#if defined(CONFIG_E200) || defined(CONFIG_E500)
+#ifdef CONFIG_PPC_E500
MMU_FTR_TYPE_FSL_E | MMU_FTR_BIG_PHYS | MMU_FTR_USE_TLBILX |
#endif
-#ifdef CONFIG_PPC_47x
- MMU_FTR_TYPE_47x | MMU_FTR_USE_TLBIVAX_BCAST | MMU_FTR_LOCK_BCAST_INVAL |
-#endif
#ifdef CONFIG_PPC_BOOK3S_32
- MMU_FTR_USE_HIGH_BATS | MMU_FTR_NEED_DTLB_SW_LRU |
+ MMU_FTR_USE_HIGH_BATS |
#endif
-#ifdef CONFIG_PPC_BOOK3E_64
- MMU_FTR_USE_TLBRSRV | MMU_FTR_USE_PAIRED_MAS |
+#ifdef CONFIG_PPC_83xx
+ MMU_FTR_NEED_DTLB_SW_LRU |
#endif
#ifdef CONFIG_PPC_BOOK3S_64
+ MMU_FTR_KERNEL_RO |
+#ifdef CONFIG_PPC_64S_HASH_MMU
MMU_FTR_NO_SLBIE_B | MMU_FTR_16M_PAGE | MMU_FTR_TLBIEL |
MMU_FTR_LOCKLESS_TLBIE | MMU_FTR_CI_LARGE_PAGE |
MMU_FTR_1T_SEGMENT | MMU_FTR_TLBIE_CROP_VA |
- MMU_FTR_KERNEL_RO | MMU_FTR_68_BIT_VA |
+ MMU_FTR_68_BIT_VA | MMU_FTR_HPTE_TABLE |
#endif
#ifdef CONFIG_PPC_RADIX_MMU
MMU_FTR_TYPE_RADIX |
+ MMU_FTR_GTSE | MMU_FTR_NX_DSI |
+#endif /* CONFIG_PPC_RADIX_MMU */
+#endif
#ifdef CONFIG_PPC_KUAP
- MMU_FTR_RADIX_KUAP |
+ MMU_FTR_BOOK3S_KUAP |
#endif /* CONFIG_PPC_KUAP */
-#endif /* CONFIG_PPC_RADIX_MMU */
+#ifdef CONFIG_PPC_MEM_KEYS
+ MMU_FTR_PKEY |
+#endif
+#ifdef CONFIG_PPC_KUEP
+ MMU_FTR_BOOK3S_KUEP |
+#endif /* CONFIG_PPC_KUEP */
+
0,
};
-static inline bool early_mmu_has_feature(unsigned long feature)
+#if defined(CONFIG_PPC_BOOK3S_604) && !defined(CONFIG_PPC_BOOK3S_603)
+#define MMU_FTRS_ALWAYS MMU_FTR_HPTE_TABLE
+#endif
+#ifdef CONFIG_PPC_8xx
+#define MMU_FTRS_ALWAYS MMU_FTR_TYPE_8xx
+#endif
+#ifdef CONFIG_40x
+#define MMU_FTRS_ALWAYS MMU_FTR_TYPE_40x
+#endif
+#ifdef CONFIG_PPC_47x
+#define MMU_FTRS_ALWAYS MMU_FTR_TYPE_47x
+#elif defined(CONFIG_44x)
+#define MMU_FTRS_ALWAYS MMU_FTR_TYPE_44x
+#endif
+#ifdef CONFIG_PPC_E500
+#define MMU_FTRS_ALWAYS MMU_FTR_TYPE_FSL_E
+#endif
+
+/* BOOK3S_64 options */
+#if defined(CONFIG_PPC_RADIX_MMU) && !defined(CONFIG_PPC_64S_HASH_MMU)
+#define MMU_FTRS_ALWAYS MMU_FTR_TYPE_RADIX
+#elif !defined(CONFIG_PPC_RADIX_MMU) && defined(CONFIG_PPC_64S_HASH_MMU)
+#define MMU_FTRS_ALWAYS MMU_FTR_HPTE_TABLE
+#endif
+
+#ifndef MMU_FTRS_ALWAYS
+#define MMU_FTRS_ALWAYS 0
+#endif
+
+static __always_inline bool early_mmu_has_feature(unsigned long feature)
{
+ if (MMU_FTRS_ALWAYS & feature)
+ return true;
+
return !!(MMU_FTRS_POSSIBLE & cur_cpu_spec->mmu_features & feature);
}
@@ -209,6 +262,9 @@ static __always_inline bool mmu_has_feature(unsigned long feature)
}
#endif
+ if (MMU_FTRS_ALWAYS & feature)
+ return true;
+
if (!(MMU_FTRS_POSSIBLE & feature))
return false;
@@ -231,7 +287,7 @@ static inline void mmu_feature_keys_init(void)
}
-static inline bool mmu_has_feature(unsigned long feature)
+static __always_inline bool mmu_has_feature(unsigned long feature)
{
return early_mmu_has_feature(feature);
}
@@ -269,36 +325,15 @@ static inline void assert_pte_locked(struct mm_struct *mm, unsigned long addr)
}
#endif /* !CONFIG_DEBUG_VM */
-#ifdef CONFIG_PPC_RADIX_MMU
-static inline bool radix_enabled(void)
+static __always_inline bool radix_enabled(void)
{
return mmu_has_feature(MMU_FTR_TYPE_RADIX);
}
-static inline bool early_radix_enabled(void)
+static __always_inline bool early_radix_enabled(void)
{
return early_mmu_has_feature(MMU_FTR_TYPE_RADIX);
}
-#else
-static inline bool radix_enabled(void)
-{
- return false;
-}
-
-static inline bool early_radix_enabled(void)
-{
- return false;
-}
-#endif
-
-#ifdef CONFIG_PPC_MEM_KEYS
-extern u16 get_mm_addr_key(struct mm_struct *mm, unsigned long address);
-#else
-static inline u16 get_mm_addr_key(struct mm_struct *mm, unsigned long address)
-{
- return 0;
-}
-#endif /* CONFIG_PPC_MEM_KEYS */
#ifdef CONFIG_STRICT_KERNEL_RWX
static inline bool strict_kernel_rwx_enabled(void)
@@ -311,6 +346,11 @@ static inline bool strict_kernel_rwx_enabled(void)
return false;
}
#endif
+
+static inline bool strict_module_rwx_enabled(void)
+{
+ return IS_ENABLED(CONFIG_STRICT_MODULE_RWX) && strict_kernel_rwx_enabled();
+}
#endif /* !__ASSEMBLY__ */
/* The kernel use the constants below to index in the page sizes array.
@@ -364,6 +404,8 @@ extern void setup_initial_memory_limit(phys_addr_t first_memblock_base,
phys_addr_t first_memblock_size);
static inline void mmu_early_init_devtree(void) { }
+static inline void pkey_early_init_devtree(void) {}
+
extern void *abatron_pteptrs[2];
#endif /* __ASSEMBLY__ */
#endif
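The point of MMU_FTRS_ALWAYS is that feature bits implied by the Kconfig selection fold to compile-time constants; a sketch of what that buys (the example_* helpers are illustrative stubs, not kernel functions):

static void __init example_setup_radix(void) { }	/* illustrative stub */
static void __init example_setup_hash(void) { }		/* illustrative stub */

static void __init example_setup_mmu(void)
{
	/* With CONFIG_PPC_RADIX_MMU=y and CONFIG_PPC_64S_HASH_MMU=n,
	 * MMU_FTRS_ALWAYS contains MMU_FTR_TYPE_RADIX, so this test is
	 * resolved at build time and the hash branch is discarded. */
	if (early_radix_enabled())
		example_setup_radix();
	else
		example_setup_hash();
}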
diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
index 360367c579de..c1ea270bb848 100644
--- a/arch/powerpc/include/asm/mmu_context.h
+++ b/arch/powerpc/include/asm/mmu_context.h
@@ -14,12 +14,13 @@
/*
* Most of the context management is out of line
*/
+#define init_new_context init_new_context
extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
+#define destroy_context destroy_context
extern void destroy_context(struct mm_struct *mm);
#ifdef CONFIG_SPAPR_TCE_IOMMU
struct mm_iommu_table_group_mem_t;
-extern int isolate_lru_page(struct page *page); /* from internal.h */
extern bool mm_iommu_preregistered(struct mm_struct *mm);
extern long mm_iommu_new(struct mm_struct *mm,
unsigned long ua, unsigned long entries,
@@ -30,18 +31,12 @@ extern long mm_iommu_newdev(struct mm_struct *mm, unsigned long ua,
extern long mm_iommu_put(struct mm_struct *mm,
struct mm_iommu_table_group_mem_t *mem);
extern void mm_iommu_init(struct mm_struct *mm);
-extern void mm_iommu_cleanup(struct mm_struct *mm);
extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup(struct mm_struct *mm,
unsigned long ua, unsigned long size);
-extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup_rm(
- struct mm_struct *mm, unsigned long ua, unsigned long size);
extern struct mm_iommu_table_group_mem_t *mm_iommu_get(struct mm_struct *mm,
unsigned long ua, unsigned long entries);
extern long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
unsigned long ua, unsigned int pageshift, unsigned long *hpa);
-extern long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem,
- unsigned long ua, unsigned int pageshift, unsigned long *hpa);
-extern void mm_iommu_ua_mark_dirty_rm(struct mm_struct *mm, unsigned long ua);
extern bool mm_iommu_is_devmem(struct mm_struct *mm, unsigned long hpa,
unsigned int pageshift, unsigned long *size);
extern long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem);
@@ -55,7 +50,6 @@ static inline bool mm_iommu_is_devmem(struct mm_struct *mm, unsigned long hpa,
static inline void mm_iommu_init(struct mm_struct *mm) { }
#endif
extern void switch_slb(struct task_struct *tsk, struct mm_struct *mm);
-extern void set_context(unsigned long id, pgd_t *pgd);
#ifdef CONFIG_PPC_BOOK3S_64
extern void radix__switch_mmu_context(struct mm_struct *prev,
@@ -70,10 +64,11 @@ static inline void switch_mmu_context(struct mm_struct *prev,
}
extern int hash__alloc_context_id(void);
-extern void hash__reserve_context_id(int id);
+void __init hash__reserve_context_id(int id);
extern void __destroy_context(int context_id);
static inline void mmu_context_init(void) { }
+#ifdef CONFIG_PPC_64S_HASH_MMU
static inline int alloc_extended_context(struct mm_struct *mm,
unsigned long ea)
{
@@ -99,6 +94,7 @@ static inline bool need_extra_context(struct mm_struct *mm, unsigned long ea)
return true;
return false;
}
+#endif
#else
extern void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next,
@@ -120,13 +116,6 @@ static inline bool need_extra_context(struct mm_struct *mm, unsigned long ea)
}
#endif
-#if defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE) && defined(CONFIG_PPC_RADIX_MMU)
-extern void radix_kvm_prefetch_workaround(struct mm_struct *mm);
-#else
-static inline void radix_kvm_prefetch_workaround(struct mm_struct *mm) { }
-#endif
-
-extern void switch_cop(struct mm_struct *next);
extern int use_cop(unsigned long acop, struct mm_struct *mm);
extern void drop_cop(unsigned long acop, struct mm_struct *mm);
@@ -185,6 +174,34 @@ static inline void mm_context_remove_copro(struct mm_struct *mm)
dec_mm_active_cpus(mm);
}
}
+
+/*
+ * The vas_windows counter shows the number of open windows in the mm
+ * context. During context switch, use this counter to clear the
+ * foreign real address mapping (CP_ABORT) for the thread / process
+ * that intends to use COPY/PASTE. When a process closes all windows,
+ * disable CP_ABORT, which is expensive to run.
+ *
+ * For user context, register a copro so that TLBIs are seen by the
+ * nest MMU. mm_context_add/remove_vas_window() are used only for user
+ * space windows.
+ */
+static inline void mm_context_add_vas_window(struct mm_struct *mm)
+{
+ atomic_inc(&mm->context.vas_windows);
+ mm_context_add_copro(mm);
+}
+
+static inline void mm_context_remove_vas_window(struct mm_struct *mm)
+{
+ int v;
+
+ mm_context_remove_copro(mm);
+ v = atomic_dec_if_positive(&mm->context.vas_windows);
+
+ /* Detect imbalance between add and remove */
+ WARN_ON(v < 0);
+}
#else
static inline void inc_mm_active_cpus(struct mm_struct *mm) { }
static inline void dec_mm_active_cpus(struct mm_struct *mm) { }
@@ -192,6 +209,18 @@ static inline void mm_context_add_copro(struct mm_struct *mm) { }
static inline void mm_context_remove_copro(struct mm_struct *mm) { }
#endif
+#if defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE) && defined(CONFIG_PPC_RADIX_MMU)
+void do_h_rpt_invalidate_prt(unsigned long pid, unsigned long lpid,
+ unsigned long type, unsigned long pg_sizes,
+ unsigned long start, unsigned long end);
+#else
+static inline void do_h_rpt_invalidate_prt(unsigned long pid,
+ unsigned long lpid,
+ unsigned long type,
+ unsigned long pg_sizes,
+ unsigned long start,
+ unsigned long end) { }
+#endif
extern void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *tsk);
@@ -207,35 +236,36 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
}
#define switch_mm_irqs_off switch_mm_irqs_off
-
-#define deactivate_mm(tsk,mm) do { } while (0)
-
/*
* After we have set current->mm to a new value, this activates
* the context for the new mm so we see the new mappings.
*/
+#define activate_mm activate_mm
static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
- switch_mm(prev, next, current);
+ switch_mm_irqs_off(prev, next, current);
}
/* We don't currently use enter_lazy_tlb() for anything */
+#ifdef CONFIG_PPC_BOOK3E_64
+#define enter_lazy_tlb enter_lazy_tlb
static inline void enter_lazy_tlb(struct mm_struct *mm,
struct task_struct *tsk)
{
/* 64-bit Book3E keeps track of current PGD in the PACA */
-#ifdef CONFIG_PPC_BOOK3E_64
get_paca()->pgd = NULL;
-#endif
}
+#endif
extern void arch_exit_mmap(struct mm_struct *mm);
static inline void arch_unmap(struct mm_struct *mm,
unsigned long start, unsigned long end)
{
- if (start <= mm->context.vdso_base && mm->context.vdso_base < end)
- mm->context.vdso_base = 0;
+ unsigned long vdso_base = (unsigned long)mm->context.vdso;
+
+ if (start <= vdso_base && vdso_base < end)
+ mm->context.vdso = NULL;
}
#ifdef CONFIG_PPC_MEM_KEYS
@@ -251,12 +281,9 @@ static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
}
#define pkey_mm_init(mm)
-#define thread_pkey_regs_save(thread)
-#define thread_pkey_regs_restore(new_thread, old_thread)
-#define thread_pkey_regs_init(thread)
#define arch_dup_pkeys(oldmm, mm)
-static inline u64 pte_to_hpte_pkey_bits(u64 pteflags)
+static inline u64 pte_to_hpte_pkey_bits(u64 pteflags, unsigned long flags)
{
return 0x0UL;
}
@@ -270,5 +297,7 @@ static inline int arch_dup_mmap(struct mm_struct *oldmm,
return 0;
}
+#include <asm-generic/mmu_context.h>
+
#endif /* __KERNEL__ */
#endif /* __ASM_POWERPC_MMU_CONTEXT_H */
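mm_context_add/remove_vas_window() are meant to be driven from the user-space window open and close paths; a minimal sketch of the expected pairing (function names are illustrative):

/* Sketch: keep the vas_windows count balanced across open and close. */
static void example_vas_window_open(struct mm_struct *mm)
{
	/* Enables CP_ABORT handling at context switch and registers the mm
	 * with the nest MMU so it sees TLB invalidations. */
	mm_context_add_vas_window(mm);
}

static void example_vas_window_close(struct mm_struct *mm)
{
	/* Drops the copro registration; an imbalance trips the WARN_ON. */
	mm_context_remove_vas_window(mm);
}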
diff --git a/arch/powerpc/include/asm/mmzone.h b/arch/powerpc/include/asm/mmzone.h
index 91c69ff53a8a..4c6c6dbd182f 100644
--- a/arch/powerpc/include/asm/mmzone.h
+++ b/arch/powerpc/include/asm/mmzone.h
@@ -18,7 +18,7 @@
* flags field of the struct page
*/
-#ifdef CONFIG_NEED_MULTIPLE_NODES
+#ifdef CONFIG_NUMA
extern struct pglist_data *node_data[];
/*
@@ -41,10 +41,15 @@ u64 memory_hotplug_max(void);
#else
#define memory_hotplug_max() memblock_end_of_DRAM()
-#endif /* CONFIG_NEED_MULTIPLE_NODES */
+#endif /* CONFIG_NUMA */
#ifdef CONFIG_FA_DUMP
#define __HAVE_ARCH_RESERVED_KERNEL_PAGES
#endif
+#ifdef CONFIG_MEMORY_HOTPLUG
+extern int create_section_mapping(unsigned long start, unsigned long end,
+ int nid, pgprot_t prot);
+#endif
+
#endif /* __KERNEL__ */
#endif /* _ASM_MMZONE_H_ */
diff --git a/arch/powerpc/include/asm/module.h b/arch/powerpc/include/asm/module.h
index 356658711a86..09e2ffd360bb 100644
--- a/arch/powerpc/include/asm/module.h
+++ b/arch/powerpc/include/asm/module.h
@@ -3,28 +3,10 @@
#define _ASM_POWERPC_MODULE_H
#ifdef __KERNEL__
-/*
- */
-
#include <linux/list.h>
#include <asm/bug.h>
#include <asm-generic/module.h>
-
-#ifdef CONFIG_MPROFILE_KERNEL
-#define MODULE_ARCH_VERMAGIC_FTRACE "mprofile-kernel "
-#else
-#define MODULE_ARCH_VERMAGIC_FTRACE ""
-#endif
-
-#ifdef CONFIG_RELOCATABLE
-#define MODULE_ARCH_VERMAGIC_RELOCATABLE "relocatable "
-#else
-#define MODULE_ARCH_VERMAGIC_RELOCATABLE ""
-#endif
-
-#define MODULE_ARCH_VERMAGIC MODULE_ARCH_VERMAGIC_FTRACE MODULE_ARCH_VERMAGIC_RELOCATABLE
-
#ifndef __powerpc64__
/*
* Thanks to Paul M for explaining this.
@@ -59,10 +41,8 @@ struct mod_arch_specific {
#ifdef CONFIG_DYNAMIC_FTRACE
unsigned long tramp;
-#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
unsigned long tramp_regs;
#endif
-#endif
/* List of BUG addresses, source line numbers and filenames */
struct list_head bug_list;
@@ -90,12 +70,9 @@ struct mod_arch_specific {
# ifdef MODULE
asm(".section .ftrace.tramp,\"ax\",@nobits; .align 3; .previous");
# endif /* MODULE */
-#endif
int module_trampoline_target(struct module *mod, unsigned long trampoline,
unsigned long *target);
-
-#ifdef CONFIG_DYNAMIC_FTRACE
int module_finalize_ftrace(struct module *mod, const Elf_Shdr *sechdrs);
#else
static inline int module_finalize_ftrace(struct module *mod, const Elf_Shdr *sechdrs)
diff --git a/arch/powerpc/kernel/module.lds b/arch/powerpc/include/asm/module.lds.h
index cea5dc124be4..cea5dc124be4 100644
--- a/arch/powerpc/kernel/module.lds
+++ b/arch/powerpc/include/asm/module.lds.h
diff --git a/arch/powerpc/include/asm/mpc52xx.h b/arch/powerpc/include/asm/mpc52xx.h
index ce1e0aabaa64..5ea16a71c2f0 100644
--- a/arch/powerpc/include/asm/mpc52xx.h
+++ b/arch/powerpc/include/asm/mpc52xx.h
@@ -15,7 +15,6 @@
#ifndef __ASSEMBLY__
#include <asm/types.h>
-#include <asm/prom.h>
#include <asm/mpc5xxx.h>
#endif /* __ASSEMBLY__ */
@@ -268,13 +267,14 @@ struct mpc52xx_intr {
#ifndef __ASSEMBLY__
+struct device_node;
+
/* mpc52xx_common.c */
extern void mpc5200_setup_xlb_arbiter(void);
extern void mpc52xx_declare_of_platform_devices(void);
extern int mpc5200_psc_ac97_gpio_reset(int psc_number);
extern void mpc52xx_map_common_devices(void);
extern int mpc52xx_set_psc_clkdiv(int psc_id, int clkdiv);
-extern unsigned int mpc52xx_get_xtal_freq(struct device_node *node);
extern void __noreturn mpc52xx_restart(char *cmd);
/* mpc52xx_gpt.c */
diff --git a/arch/powerpc/include/asm/mpc5xxx.h b/arch/powerpc/include/asm/mpc5xxx.h
index 2f60f5c5461b..44db26380435 100644
--- a/arch/powerpc/include/asm/mpc5xxx.h
+++ b/arch/powerpc/include/asm/mpc5xxx.h
@@ -11,7 +11,14 @@
#ifndef __ASM_POWERPC_MPC5xxx_H__
#define __ASM_POWERPC_MPC5xxx_H__
-extern unsigned long mpc5xxx_get_bus_frequency(struct device_node *node);
+#include <linux/property.h>
+
+unsigned long mpc5xxx_fwnode_get_bus_frequency(struct fwnode_handle *fwnode);
+
+static inline unsigned long mpc5xxx_get_bus_frequency(struct device *dev)
+{
+ return mpc5xxx_fwnode_get_bus_frequency(dev_fwnode(dev));
+}
#endif /* __ASM_POWERPC_MPC5xxx_H__ */
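Callers now pass a struct device instead of a device_node; a sketch of a probe routine using the new wrapper (the driver itself is illustrative):

#include <linux/platform_device.h>
#include <asm/mpc5xxx.h>

static int example_probe(struct platform_device *pdev)
{
	unsigned long busfreq = mpc5xxx_get_bus_frequency(&pdev->dev);

	/* The fwnode variant walks up the tree looking for "bus-frequency". */
	if (!busfreq)
		return -ENODEV;

	dev_info(&pdev->dev, "bus frequency %lu Hz\n", busfreq);
	return 0;
}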
diff --git a/arch/powerpc/include/asm/mpic.h b/arch/powerpc/include/asm/mpic.h
index 0abf2e7fd222..58353c5bd3fb 100644
--- a/arch/powerpc/include/asm/mpic.h
+++ b/arch/powerpc/include/asm/mpic.h
@@ -472,7 +472,7 @@ extern int mpic_cpu_get_priority(void);
extern void mpic_cpu_set_priority(int prio);
/* Request IPIs on primary mpic */
-extern void mpic_request_ipis(void);
+void __init mpic_request_ipis(void);
/* Send a message (IPI) to a given target (cpu number or MSG_*) */
void smp_mpic_message_pass(int target, int msg);
diff --git a/arch/powerpc/include/asm/nmi.h b/arch/powerpc/include/asm/nmi.h
index 84b4cfe73edd..c3c7adef74de 100644
--- a/arch/powerpc/include/asm/nmi.h
+++ b/arch/powerpc/include/asm/nmi.h
@@ -4,11 +4,14 @@
#ifdef CONFIG_PPC_WATCHDOG
extern void arch_touch_nmi_watchdog(void);
+long soft_nmi_interrupt(struct pt_regs *regs);
+void watchdog_nmi_set_timeout_pct(u64 pct);
#else
static inline void arch_touch_nmi_watchdog(void) {}
+static inline void watchdog_nmi_set_timeout_pct(u64 pct) {}
#endif
-#if defined(CONFIG_NMI_IPI) && defined(CONFIG_STACKTRACE)
+#ifdef CONFIG_NMI_IPI
extern void arch_trigger_cpumask_backtrace(const cpumask_t *mask,
bool exclude_self);
#define arch_trigger_cpumask_backtrace arch_trigger_cpumask_backtrace
diff --git a/arch/powerpc/include/asm/nohash/32/hugetlb-8xx.h b/arch/powerpc/include/asm/nohash/32/hugetlb-8xx.h
index a46616937d20..de092b04ee1a 100644
--- a/arch/powerpc/include/asm/nohash/32/hugetlb-8xx.h
+++ b/arch/powerpc/include/asm/nohash/32/hugetlb-8xx.h
@@ -13,13 +13,13 @@ static inline pte_t *hugepd_page(hugepd_t hpd)
static inline unsigned int hugepd_shift(hugepd_t hpd)
{
- return ((hpd_val(hpd) & _PMD_PAGE_MASK) >> 1) + 17;
+ return PAGE_SHIFT_8M;
}
static inline pte_t *hugepte_offset(hugepd_t hpd, unsigned long addr,
unsigned int pdshift)
{
- unsigned long idx = (addr & ((1UL << pdshift) - 1)) >> PAGE_SHIFT;
+ unsigned long idx = (addr & (SZ_4M - 1)) >> PAGE_SHIFT;
return hugepd_page(hpd) + idx;
}
@@ -32,8 +32,12 @@ static inline void flush_hugetlb_page(struct vm_area_struct *vma,
static inline void hugepd_populate(hugepd_t *hpdp, pte_t *new, unsigned int pshift)
{
- *hpdp = __hugepd(__pa(new) | _PMD_USER | _PMD_PRESENT |
- (pshift == PAGE_SHIFT_8M ? _PMD_PAGE_8M : _PMD_PAGE_512K));
+ *hpdp = __hugepd(__pa(new) | _PMD_USER | _PMD_PRESENT | _PMD_PAGE_8M);
+}
+
+static inline void hugepd_populate_kernel(hugepd_t *hpdp, pte_t *new, unsigned int pshift)
+{
+ *hpdp = __hugepd(__pa(new) | _PMD_PRESENT | _PMD_PAGE_8M);
}
static inline int check_and_get_huge_psize(int shift)
@@ -41,4 +45,37 @@ static inline int check_and_get_huge_psize(int shift)
return shift_to_mmu_psize(shift);
}
+#define __HAVE_ARCH_HUGE_SET_HUGE_PTE_AT
+void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte);
+
+#define __HAVE_ARCH_HUGE_PTE_CLEAR
+static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, unsigned long sz)
+{
+ pte_update(mm, addr, ptep, ~0UL, 0, 1);
+}
+
+#define __HAVE_ARCH_HUGE_PTEP_SET_WRPROTECT
+static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep)
+{
+ unsigned long clr = ~pte_val(pte_wrprotect(__pte(~0)));
+ unsigned long set = pte_val(pte_wrprotect(__pte(0)));
+
+ pte_update(mm, addr, ptep, clr, set, 1);
+}
+
+#ifdef CONFIG_PPC_4K_PAGES
+static inline pte_t arch_make_huge_pte(pte_t entry, unsigned int shift, vm_flags_t flags)
+{
+ size_t size = 1UL << shift;
+
+ if (size == SZ_16K)
+ return __pte(pte_val(entry) | _PAGE_SPS);
+ else
+ return __pte(pte_val(entry) | _PAGE_SPS | _PAGE_HUGE);
+}
+#define arch_make_huge_pte arch_make_huge_pte
+#endif
+
#endif /* _ASM_POWERPC_NOHASH_32_HUGETLB_8XX_H */
diff --git a/arch/powerpc/include/asm/nohash/32/kup-8xx.h b/arch/powerpc/include/asm/nohash/32/kup-8xx.h
index 85ed2390fb99..c44d97751723 100644
--- a/arch/powerpc/include/asm/nohash/32/kup-8xx.h
+++ b/arch/powerpc/include/asm/nohash/32/kup-8xx.h
@@ -7,64 +7,81 @@
#ifdef CONFIG_PPC_KUAP
-#ifdef __ASSEMBLY__
-
-.macro kuap_save_and_lock sp, thread, gpr1, gpr2, gpr3
- lis \gpr2, MD_APG_KUAP@h /* only APG0 and APG1 are used */
- mfspr \gpr1, SPRN_MD_AP
- mtspr SPRN_MD_AP, \gpr2
- stw \gpr1, STACK_REGS_KUAP(\sp)
-.endm
-
-.macro kuap_restore sp, current, gpr1, gpr2, gpr3
- lwz \gpr1, STACK_REGS_KUAP(\sp)
- mtspr SPRN_MD_AP, \gpr1
-.endm
-
-.macro kuap_check current, gpr
-#ifdef CONFIG_PPC_KUAP_DEBUG
- mfspr \gpr, SPRN_MD_AP
- rlwinm \gpr, \gpr, 16, 0xffff
-999: twnei \gpr, MD_APG_KUAP@h
- EMIT_BUG_ENTRY 999b, __FILE__, __LINE__, (BUGFLAG_WARNING | BUGFLAG_ONCE)
-#endif
-.endm
-
-#else /* !__ASSEMBLY__ */
+#ifndef __ASSEMBLY__
+
+#include <linux/jump_label.h>
#include <asm/reg.h>
-static inline void allow_user_access(void __user *to, const void __user *from,
- unsigned long size, unsigned long dir)
+extern struct static_key_false disable_kuap_key;
+
+static __always_inline bool kuap_is_disabled(void)
{
- mtspr(SPRN_MD_AP, MD_APG_INIT);
+ return static_branch_unlikely(&disable_kuap_key);
+}
+
+static inline void __kuap_lock(void)
+{
+}
+
+static inline void __kuap_save_and_lock(struct pt_regs *regs)
+{
+ regs->kuap = mfspr(SPRN_MD_AP);
+ mtspr(SPRN_MD_AP, MD_APG_KUAP);
}
-static inline void prevent_user_access(void __user *to, const void __user *from,
+static inline void kuap_user_restore(struct pt_regs *regs)
+{
+}
+
+static inline void __kuap_kernel_restore(struct pt_regs *regs, unsigned long kuap)
+{
+ mtspr(SPRN_MD_AP, regs->kuap);
+}
+
+static inline unsigned long __kuap_get_and_assert_locked(void)
+{
+ unsigned long kuap;
+
+ kuap = mfspr(SPRN_MD_AP);
+
+ if (IS_ENABLED(CONFIG_PPC_KUAP_DEBUG))
+ WARN_ON_ONCE(kuap >> 16 != MD_APG_KUAP >> 16);
+
+ return kuap;
+}
+
+static inline void __allow_user_access(void __user *to, const void __user *from,
unsigned long size, unsigned long dir)
{
+ mtspr(SPRN_MD_AP, MD_APG_INIT);
+}
+
+static inline void __prevent_user_access(unsigned long dir)
+{
mtspr(SPRN_MD_AP, MD_APG_KUAP);
}
-static inline unsigned long prevent_user_access_return(void)
+static inline unsigned long __prevent_user_access_return(void)
{
- unsigned long flags = mfspr(SPRN_MD_AP);
+ unsigned long flags;
+
+ flags = mfspr(SPRN_MD_AP);
mtspr(SPRN_MD_AP, MD_APG_KUAP);
return flags;
}
-static inline void restore_user_access(unsigned long flags)
+static inline void __restore_user_access(unsigned long flags)
{
mtspr(SPRN_MD_AP, flags);
}
static inline bool
-bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
+__bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
{
- return WARN(!((regs->kuap ^ MD_APG_KUAP) & 0xf0000000),
- "Bug: fault blocked by AP register !");
+ return !((regs->kuap ^ MD_APG_KUAP) & 0xff000000);
}
#endif /* !__ASSEMBLY__ */
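These __-prefixed helpers are the 8xx backends for the generic KUAP wrappers; the usual pattern around a user access is expected to look roughly like this (sketch using the generic user-access API):

/* Sketch: user_read_access_begin()/end() end up in the helpers above,
 * flipping SPRN_MD_AP between MD_APG_INIT and MD_APG_KUAP. */
static long example_peek_user_word(const unsigned long __user *p, unsigned long *val)
{
	if (!user_read_access_begin(p, sizeof(*p)))
		return -EFAULT;
	unsafe_get_user(*val, p, fault);
	user_read_access_end();
	return 0;

fault:
	user_read_access_end();
	return -EFAULT;
}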
diff --git a/arch/powerpc/include/asm/nohash/32/mmu-40x.h b/arch/powerpc/include/asm/nohash/32/mmu-40x.h
index 74f4edb5916e..8a8f13a22cf4 100644
--- a/arch/powerpc/include/asm/nohash/32/mmu-40x.h
+++ b/arch/powerpc/include/asm/nohash/32/mmu-40x.h
@@ -57,7 +57,7 @@
typedef struct {
unsigned int id;
unsigned int active;
- unsigned long vdso_base;
+ void __user *vdso;
} mm_context_t;
#endif /* !__ASSEMBLY__ */
diff --git a/arch/powerpc/include/asm/nohash/32/mmu-44x.h b/arch/powerpc/include/asm/nohash/32/mmu-44x.h
index 28aa3b339c5e..2d92a39d8f2e 100644
--- a/arch/powerpc/include/asm/nohash/32/mmu-44x.h
+++ b/arch/powerpc/include/asm/nohash/32/mmu-44x.h
@@ -108,7 +108,7 @@ extern unsigned int tlb_44x_index;
typedef struct {
unsigned int id;
unsigned int active;
- unsigned long vdso_base;
+ void __user *vdso;
} mm_context_t;
/* patch sites */
diff --git a/arch/powerpc/include/asm/nohash/32/mmu-8xx.h b/arch/powerpc/include/asm/nohash/32/mmu-8xx.h
index 76af5b0cb16e..0e93a4728c9e 100644
--- a/arch/powerpc/include/asm/nohash/32/mmu-8xx.h
+++ b/arch/powerpc/include/asm/nohash/32/mmu-8xx.h
@@ -19,7 +19,6 @@
#define MI_RSV4I 0x08000000 /* Reserve 4 TLB entries */
#define MI_PPCS 0x02000000 /* Use MI_RPN prob/priv state */
#define MI_IDXMASK 0x00001f00 /* TLB index to be loaded */
-#define MI_RESETVAL 0x00000000 /* Value of register at reset */
/* These are the Ks and Kp from the PowerPC books. For proper operation,
* Ks = 0, Kp = 1.
@@ -34,19 +33,16 @@
* respectively NA for All or X for Supervisor and no access for User.
* Then we use the APG to say whether accesses are according to Page rules or
* "all Supervisor" rules (Access to all)
- * Therefore, we define 2 APG groups. lsb is _PMD_USER
- * 0 => Kernel => 01 (all accesses performed according to page definition)
- * 1 => User => 00 (all accesses performed as supervisor iaw page definition)
- * 2-16 => NA => 11 (all accesses performed as user iaw page definition)
+ * _PAGE_ACCESSED is also managed via APG. When _PAGE_ACCESSED is not set, we
+ * use the "all User" rules, which lead to NA for everyone.
+ * Therefore, we define 4 APG groups. lsb is _PAGE_ACCESSED
+ * 0 => Kernel => 11 (all accesses performed as user iaw page definition)
+ * 1 => Kernel+Accessed => 01 (all accesses performed according to page definition)
+ * 2 => User => 11 (all accesses performed as user iaw page definition)
+ * 3 => User+Accessed => 10 (all accesses performed according to the swapped page definition) for KUEP
+ * 4-15 => Not Used
*/
-#define MI_APG_INIT 0x4fffffff
-
-/*
- * 0 => Kernel => 01 (all accesses performed according to page definition)
- * 1 => User => 10 (all accesses performed according to swaped page definition)
- * 2-16 => NA => 11 (all accesses performed as user iaw page definition)
- */
-#define MI_APG_KUEP 0x6fffffff
+#define MI_APG_INIT 0xde000000
/* The effective page number register. When read, contains the information
* about the last instruction TLB miss. When MI_RPN is written, bits in
@@ -95,7 +91,6 @@
#define MD_TWAM 0x04000000 /* Use 4K page hardware assist */
#define MD_PPCS 0x02000000 /* Use MI_RPN prob/priv state */
#define MD_IDXMASK 0x00001f00 /* TLB index to be loaded */
-#define MD_RESETVAL 0x04000000 /* Value of register at reset */
#define SPRN_M_CASID 793 /* Address space ID (context) to match */
#define MC_ASIDMASK 0x0000000f /* Bits used for ASID value */
@@ -108,25 +103,9 @@
#define MD_Ks 0x80000000 /* Should not be set */
#define MD_Kp 0x40000000 /* Should always be set */
-/*
- * All pages' PP data bits are set to either 000 or 011 or 001, which means
- * respectively RW for Supervisor and no access for User, or RO for
- * Supervisor and no access for user and NA for ALL.
- * Then we use the APG to say whether accesses are according to Page rules or
- * "all Supervisor" rules (Access to all)
- * Therefore, we define 2 APG groups. lsb is _PMD_USER
- * 0 => Kernel => 01 (all accesses performed according to page definition)
- * 1 => User => 00 (all accesses performed as supervisor iaw page definition)
- * 2-16 => NA => 11 (all accesses performed as user iaw page definition)
- */
-#define MD_APG_INIT 0x4fffffff
-
-/*
- * 0 => No user => 01 (all accesses performed according to page definition)
- * 1 => User => 10 (all accesses performed according to swaped page definition)
- * 2-16 => NA => 11 (all accesses performed as user iaw page definition)
- */
-#define MD_APG_KUAP 0x6fffffff
+/* See explanation above at the definition of MI_APG_INIT */
+#define MD_APG_INIT 0xdc000000
+#define MD_APG_KUAP 0xde000000
/* The effective page number register. When read, contains the information
* about the last instruction TLB miss. When MD_RPN is written, bits in
@@ -178,12 +157,6 @@
*/
#define SPRN_M_TW 799
-#ifdef CONFIG_PPC_MM_SLICES
-#include <asm/nohash/32/slice.h>
-#define SLICE_ARRAY_SIZE (1 << (32 - SLICE_LOW_SHIFT - 1))
-#define LOW_SLICE_ARRAY_SZ SLICE_ARRAY_SIZE
-#endif
-
#if defined(CONFIG_PPC_4K_PAGES)
#define mmu_virtual_psize MMU_PAGE_4K
#elif defined(CONFIG_PPC_16K_PAGES)
@@ -197,75 +170,23 @@
#define mmu_linear_psize MMU_PAGE_8M
+#define MODULES_VADDR (PAGE_OFFSET - SZ_256M)
+#define MODULES_END PAGE_OFFSET
+
#ifndef __ASSEMBLY__
#include <linux/mmdebug.h>
+#include <linux/sizes.h>
-struct slice_mask {
- u64 low_slices;
- DECLARE_BITMAP(high_slices, 0);
-};
+void mmu_pin_tlb(unsigned long top, bool readonly);
typedef struct {
unsigned int id;
unsigned int active;
- unsigned long vdso_base;
-#ifdef CONFIG_PPC_MM_SLICES
- u16 user_psize; /* page size index */
- unsigned char low_slices_psize[SLICE_ARRAY_SIZE];
- unsigned char high_slices_psize[0];
- unsigned long slb_addr_limit;
- struct slice_mask mask_base_psize; /* 4k or 16k */
- struct slice_mask mask_512k;
- struct slice_mask mask_8m;
-#endif
+ void __user *vdso;
void *pte_frag;
} mm_context_t;
-#ifdef CONFIG_PPC_MM_SLICES
-static inline u16 mm_ctx_user_psize(mm_context_t *ctx)
-{
- return ctx->user_psize;
-}
-
-static inline void mm_ctx_set_user_psize(mm_context_t *ctx, u16 user_psize)
-{
- ctx->user_psize = user_psize;
-}
-
-static inline unsigned char *mm_ctx_low_slices(mm_context_t *ctx)
-{
- return ctx->low_slices_psize;
-}
-
-static inline unsigned char *mm_ctx_high_slices(mm_context_t *ctx)
-{
- return ctx->high_slices_psize;
-}
-
-static inline unsigned long mm_ctx_slb_addr_limit(mm_context_t *ctx)
-{
- return ctx->slb_addr_limit;
-}
-
-static inline void mm_ctx_set_slb_addr_limit(mm_context_t *ctx, unsigned long limit)
-{
- ctx->slb_addr_limit = limit;
-}
-
-static inline struct slice_mask *slice_mask_for_size(mm_context_t *ctx, int psize)
-{
- if (psize == MMU_PAGE_512K)
- return &ctx->mask_512k;
- if (psize == MMU_PAGE_8M)
- return &ctx->mask_8m;
-
- BUG_ON(psize != mmu_virtual_psize);
-
- return &ctx->mask_base_psize;
-}
-#endif /* CONFIG_PPC_MM_SLICE */
-
#define PHYS_IMMR_BASE (mfspr(SPRN_IMMR) & 0xfff80000)
#define VIRT_IMMR_BASE (__fix_to_virt(FIX_IMMR_BASE))
@@ -303,14 +224,50 @@ static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize)
BUG();
}
-/* patch sites */
-extern s32 patch__itlbmiss_linmem_top, patch__itlbmiss_linmem_top8;
-extern s32 patch__dtlbmiss_linmem_top, patch__dtlbmiss_immr_jmp;
-extern s32 patch__fixupdar_linmem_top;
-extern s32 patch__dtlbmiss_romem_top, patch__dtlbmiss_romem_top8;
+static inline bool arch_vmap_try_size(unsigned long addr, unsigned long end, u64 pfn,
+ unsigned int max_page_shift, unsigned long size)
+{
+ if (end - addr < size)
+ return false;
+
+ if ((1UL << max_page_shift) < size)
+ return false;
+
+ if (!IS_ALIGNED(addr, size))
+ return false;
+
+ if (!IS_ALIGNED(PFN_PHYS(pfn), size))
+ return false;
-extern s32 patch__itlbmiss_exit_1, patch__itlbmiss_exit_2;
-extern s32 patch__dtlbmiss_exit_1, patch__dtlbmiss_exit_2, patch__dtlbmiss_exit_3;
+ return true;
+}
+
+static inline unsigned long arch_vmap_pte_range_map_size(unsigned long addr, unsigned long end,
+ u64 pfn, unsigned int max_page_shift)
+{
+ if (arch_vmap_try_size(addr, end, pfn, max_page_shift, SZ_512K))
+ return SZ_512K;
+ if (PAGE_SIZE == SZ_16K)
+ return SZ_16K;
+ if (arch_vmap_try_size(addr, end, pfn, max_page_shift, SZ_16K))
+ return SZ_16K;
+ return PAGE_SIZE;
+}
+#define arch_vmap_pte_range_map_size arch_vmap_pte_range_map_size
+
+static inline int arch_vmap_pte_supported_shift(unsigned long size)
+{
+ if (size >= SZ_512K)
+ return 19;
+ else if (size >= SZ_16K)
+ return 14;
+ else
+ return PAGE_SHIFT;
+}
+#define arch_vmap_pte_supported_shift arch_vmap_pte_supported_shift
+
+/* patch sites */
+extern s32 patch__itlbmiss_exit_1, patch__dtlbmiss_exit_1;
extern s32 patch__itlbmiss_perf, patch__dtlbmiss_perf;
#endif /* !__ASSEMBLY__ */
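arch_vmap_pte_range_map_size() lets vmap use the 8xx's 512k and 16k page sizes at PTE level when size and alignment allow; a small worked sketch of the step size it picks (the helper name is illustrative):

/* Sketch: number of vmap PTE steps for a 1 MiB, 512 KiB-aligned, physically
 * contiguous range on a 4k-page 8xx kernel with max_page_shift >= 19. */
static unsigned long example_vmap_steps_for_1m(unsigned long addr, u64 pfn)
{
	unsigned long step = arch_vmap_pte_range_map_size(addr, addr + SZ_1M,
							  pfn, 19);

	return SZ_1M / step;	/* 2, 64 or 256 depending on alignment */
}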
diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h
index 60c4d829152e..0d40b33184eb 100644
--- a/arch/powerpc/include/asm/nohash/32/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/32/pgtable.h
@@ -2,14 +2,12 @@
#ifndef _ASM_POWERPC_NOHASH_32_PGTABLE_H
#define _ASM_POWERPC_NOHASH_32_PGTABLE_H
-#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopmd.h>
#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/threads.h>
#include <asm/mmu.h> /* For sub-arch specific PPC_PIN_SIZE */
-#include <asm/asm-405.h>
#ifdef CONFIG_44x
extern int icache_44x_need_flush;
@@ -30,6 +28,8 @@ extern int icache_44x_need_flush;
#define PMD_TABLE_SIZE 0
#define PUD_TABLE_SIZE 0
#define PGD_TABLE_SIZE (sizeof(pgd_t) << PGD_INDEX_SIZE)
+
+#define PMD_MASKED_BITS (PTE_TABLE_SIZE - 1)
#endif /* __ASSEMBLY__ */
#define PTRS_PER_PTE (1 << PTE_INDEX_SIZE)
@@ -54,7 +54,6 @@ extern int icache_44x_need_flush;
#define PGD_MASKED_BITS 0
#define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE)
-#define FIRST_USER_ADDRESS 0UL
#define pte_ERROR(e) \
pr_err("%s:%d: bad pte %llx.\n", __FILE__, __LINE__, \
@@ -65,6 +64,7 @@ extern int icache_44x_need_flush;
#ifndef __ASSEMBLY__
int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot);
+void unmap_kernel_page(unsigned long va);
#endif /* !__ASSEMBLY__ */
@@ -110,13 +110,13 @@ int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot);
*/
#define VMALLOC_OFFSET (0x1000000) /* 16M */
#ifdef PPC_PIN_SIZE
-#define VMALLOC_START (((_ALIGN((long)high_memory, PPC_PIN_SIZE) + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
+#define VMALLOC_START (((ALIGN((long)high_memory, PPC_PIN_SIZE) + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
#else
#define VMALLOC_START ((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
#endif
#ifdef CONFIG_KASAN_VMALLOC
-#define VMALLOC_END _ALIGN_DOWN(ioremap_bot, PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT)
+#define VMALLOC_END ALIGN_DOWN(ioremap_bot, PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT)
#else
#define VMALLOC_END ioremap_bot
#endif
@@ -130,10 +130,10 @@ int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot);
#include <asm/nohash/32/pte-40x.h>
#elif defined(CONFIG_44x)
#include <asm/nohash/32/pte-44x.h>
-#elif defined(CONFIG_FSL_BOOKE) && defined(CONFIG_PTE_64BIT)
-#include <asm/nohash/pte-book3e.h>
-#elif defined(CONFIG_FSL_BOOKE)
-#include <asm/nohash/32/pte-fsl-booke.h>
+#elif defined(CONFIG_PPC_85xx) && defined(CONFIG_PTE_64BIT)
+#include <asm/nohash/pte-e500.h>
+#elif defined(CONFIG_PPC_85xx)
+#include <asm/nohash/32/pte-85xx.h>
#elif defined(CONFIG_PPC_8xx)
#include <asm/nohash/32/pte-8xx.h>
#endif
@@ -153,8 +153,10 @@ int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot);
*/
#if defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT)
#define PTE_RPN_MASK (~((1ULL << PTE_RPN_SHIFT) - 1))
+#define MAX_POSSIBLE_PHYSMEM_BITS 36
#else
#define PTE_RPN_MASK (~((1UL << PTE_RPN_SHIFT) - 1))
+#define MAX_POSSIBLE_PHYSMEM_BITS 32
#endif
/*
@@ -166,7 +168,7 @@ int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot);
#ifndef __ASSEMBLY__
#define pte_clear(mm, addr, ptep) \
- do { pte_update(ptep, ~0, 0); } while (0)
+ do { pte_update(mm, addr, ptep, ~0, 0, 0); } while (0)
#ifndef pte_mkwrite
static inline pte_t pte_mkwrite(pte_t pte)
@@ -192,10 +194,12 @@ static inline pte_t pte_wrprotect(pte_t pte)
}
#endif
+#ifndef pte_mkexec
static inline pte_t pte_mkexec(pte_t pte)
{
return __pte(pte_val(pte) | _PAGE_EXEC);
}
+#endif
#define pmd_none(pmd) (!pmd_val(pmd))
#define pmd_bad(pmd) (pmd_val(pmd) & _PMD_BAD)
@@ -205,8 +209,6 @@ static inline void pmd_clear(pmd_t *pmdp)
*pmdp = __pmd(0);
}
-
-
/*
* PTE updates. This function is called whenever an existing
* valid PTE is updated. This does -not- include set_pte_at()
@@ -221,66 +223,64 @@ static inline void pmd_clear(pmd_t *pmdp)
* that an executable user mapping was modified, which is needed
* to properly flush the virtually tagged instruction cache of
* those implementations.
+ *
+ * On the 8xx, the page tables are a bit special. For 16k pages, we have
+ * 4 identical entries. For 512k pages, we have 128 entries as if they were
+ * 4k pages, but they are flagged as 512k pages for the hardware.
+ * For other page sizes, we have a single entry in the table.
*/
-#ifndef CONFIG_PTE_64BIT
-static inline unsigned long pte_update(pte_t *p,
- unsigned long clr,
- unsigned long set)
+#ifdef CONFIG_PPC_8xx
+static pmd_t *pmd_off(struct mm_struct *mm, unsigned long addr);
+static int hugepd_ok(hugepd_t hpd);
+
+static int number_of_cells_per_pte(pmd_t *pmd, pte_basic_t val, int huge)
{
-#ifdef PTE_ATOMIC_UPDATES
- unsigned long old, tmp;
-
- __asm__ __volatile__("\
-1: lwarx %0,0,%3\n\
- andc %1,%0,%4\n\
- or %1,%1,%5\n"
- PPC405_ERR77(0,%3)
-" stwcx. %1,0,%3\n\
- bne- 1b"
- : "=&r" (old), "=&r" (tmp), "=m" (*p)
- : "r" (p), "r" (clr), "r" (set), "m" (*p)
- : "cc" );
-#else /* PTE_ATOMIC_UPDATES */
- unsigned long old = pte_val(*p);
- unsigned long new = (old & ~clr) | set;
-
-#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PPC_16K_PAGES)
- p->pte = p->pte1 = p->pte2 = p->pte3 = new;
-#else
- *p = __pte(new);
-#endif
-#endif /* !PTE_ATOMIC_UPDATES */
+ if (!huge)
+ return PAGE_SIZE / SZ_4K;
+ else if (hugepd_ok(*((hugepd_t *)pmd)))
+ return 1;
+ else if (IS_ENABLED(CONFIG_PPC_4K_PAGES) && !(val & _PAGE_HUGE))
+ return SZ_16K / SZ_4K;
+ else
+ return SZ_512K / SZ_4K;
+}
+
+static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, pte_t *p,
+ unsigned long clr, unsigned long set, int huge)
+{
+ pte_basic_t *entry = (pte_basic_t *)p;
+ pte_basic_t old = pte_val(*p);
+ pte_basic_t new = (old & ~(pte_basic_t)clr) | set;
+ int num, i;
+ pmd_t *pmd = pmd_off(mm, addr);
+
+ num = number_of_cells_per_pte(pmd, new, huge);
+
+ for (i = 0; i < num; i++, entry++, new += SZ_4K)
+ *entry = new;
-#ifdef CONFIG_44x
- if ((old & _PAGE_USER) && (old & _PAGE_EXEC))
- icache_44x_need_flush = 1;
-#endif
return old;
}
-#else /* CONFIG_PTE_64BIT */
-static inline unsigned long long pte_update(pte_t *p,
- unsigned long clr,
- unsigned long set)
+
+#ifdef CONFIG_PPC_16K_PAGES
+#define __HAVE_ARCH_PTEP_GET
+static inline pte_t ptep_get(pte_t *ptep)
{
-#ifdef PTE_ATOMIC_UPDATES
- unsigned long long old;
- unsigned long tmp;
-
- __asm__ __volatile__("\
-1: lwarx %L0,0,%4\n\
- lwzx %0,0,%3\n\
- andc %1,%L0,%5\n\
- or %1,%1,%6\n"
- PPC405_ERR77(0,%3)
-" stwcx. %1,0,%4\n\
- bne- 1b"
- : "=&r" (old), "=&r" (tmp), "=m" (*p)
- : "r" (p), "r" ((unsigned long)(p) + 4), "r" (clr), "r" (set), "m" (*p)
- : "cc" );
-#else /* PTE_ATOMIC_UPDATES */
- unsigned long long old = pte_val(*p);
- *p = __pte((old & ~(unsigned long long)clr) | set);
-#endif /* !PTE_ATOMIC_UPDATES */
+ pte_basic_t val = READ_ONCE(ptep->pte);
+ pte_t pte = {val, val, val, val};
+
+ return pte;
+}
+#endif /* CONFIG_PPC_16K_PAGES */
+
+#else
+static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, pte_t *p,
+ unsigned long clr, unsigned long set, int huge)
+{
+ pte_basic_t old = pte_val(*p);
+ pte_basic_t new = (old & ~(pte_basic_t)clr) | set;
+
+ *p = __pte(new);
#ifdef CONFIG_44x
if ((old & _PAGE_USER) && (old & _PAGE_EXEC))
@@ -288,58 +288,56 @@ static inline unsigned long long pte_update(pte_t *p,
#endif
return old;
}
-#endif /* CONFIG_PTE_64BIT */
+#endif
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
-static inline int __ptep_test_and_clear_young(unsigned int context, unsigned long addr, pte_t *ptep)
+static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep)
{
unsigned long old;
- old = pte_update(ptep, _PAGE_ACCESSED, 0);
+ old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0, 0);
return (old & _PAGE_ACCESSED) != 0;
}
#define ptep_test_and_clear_young(__vma, __addr, __ptep) \
- __ptep_test_and_clear_young((__vma)->vm_mm->context.id, __addr, __ptep)
+ __ptep_test_and_clear_young((__vma)->vm_mm, __addr, __ptep)
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
pte_t *ptep)
{
- return __pte(pte_update(ptep, ~0, 0));
+ return __pte(pte_update(mm, addr, ptep, ~0, 0, 0));
}
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
+#ifndef ptep_set_wrprotect
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
pte_t *ptep)
{
- unsigned long clr = ~pte_val(pte_wrprotect(__pte(~0)));
- unsigned long set = pte_val(pte_wrprotect(__pte(0)));
-
- pte_update(ptep, clr, set);
+ pte_update(mm, addr, ptep, _PAGE_RW, 0, 0);
}
+#endif
+#ifndef __ptep_set_access_flags
static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
pte_t *ptep, pte_t entry,
unsigned long address,
int psize)
{
- pte_t pte_set = pte_mkyoung(pte_mkdirty(pte_mkwrite(pte_mkexec(__pte(0)))));
- pte_t pte_clr = pte_mkyoung(pte_mkdirty(pte_mkwrite(pte_mkexec(__pte(~0)))));
- unsigned long set = pte_val(entry) & pte_val(pte_set);
- unsigned long clr = ~pte_val(entry) & ~pte_val(pte_clr);
+ unsigned long set = pte_val(entry) &
+ (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
+ int huge = psize > mmu_virtual_psize ? 1 : 0;
- pte_update(ptep, clr, set);
+ pte_update(vma->vm_mm, address, ptep, 0, set, huge);
flush_tlb_page(vma, address);
}
+#endif
static inline int pte_young(pte_t pte)
{
return pte_val(pte) & _PAGE_ACCESSED;
}
-#define __HAVE_ARCH_PTE_SAME
-#define pte_same(A,B) ((pte_val(A) ^ pte_val(B)) == 0)
-
/*
* Note that on Book E processors, the pmd contains the kernel virtual
* (lowmem) address of the pte page. The physical address is less useful
@@ -348,35 +346,14 @@ static inline int pte_young(pte_t pte)
* of the pte page. -- paulus
*/
#ifndef CONFIG_BOOKE
-#define pmd_page_vaddr(pmd) \
- ((unsigned long)__va(pmd_val(pmd) & ~(PTE_TABLE_SIZE - 1)))
-#define pmd_page(pmd) \
- pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)
+#define pmd_pfn(pmd) (pmd_val(pmd) >> PAGE_SHIFT)
#else
#define pmd_page_vaddr(pmd) \
((unsigned long)(pmd_val(pmd) & ~(PTE_TABLE_SIZE - 1)))
-#define pmd_page(pmd) \
- pfn_to_page((__pa(pmd_val(pmd)) >> PAGE_SHIFT))
+#define pmd_pfn(pmd) (__pa(pmd_val(pmd)) >> PAGE_SHIFT)
#endif
-/* to find an entry in a kernel page-table-directory */
-#define pgd_offset_k(address) pgd_offset(&init_mm, address)
-
-/* to find an entry in a page-table-directory */
-#define pgd_index(address) ((address) >> PGDIR_SHIFT)
-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
-
-/* Find an entry in the third-level page table.. */
-#define pte_index(address) \
- (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-#define pte_offset_kernel(dir, addr) \
- (pmd_bad(*(dir)) ? NULL : (pte_t *)pmd_page_vaddr(*(dir)) + \
- pte_index(addr))
-#define pte_offset_map(dir, addr) \
- ((pte_t *)(kmap_atomic(pmd_page(*(dir))) + \
- (pmd_page_vaddr(*(dir)) & ~PAGE_MASK)) + pte_index(addr))
-#define pte_unmap(pte) kunmap_atomic(pte)
-
+#define pmd_page(pmd) pfn_to_page(pmd_pfn(pmd))
/*
* Encode and decode a swap entry.
* Note that the bits we use in a PTE for representing a swap entry
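For reference, a worked summary of what one 8xx pte_update() ends up writing, following number_of_cells_per_pte() above (a restatement of the code, not new behaviour):

/*
 * Sketch of the cells touched by a single pte_update() on the 8xx:
 *
 *   16k-page kernel, normal page:      PAGE_SIZE / SZ_4K -> 4 identical cells
 *   8M huge page (hugepd-backed):      1 cell
 *   4k-page kernel, 16k (SPS) page:    SZ_16K / SZ_4K    -> 4 cells
 *   512k page:                         SZ_512K / SZ_4K   -> 128 cells
 *
 * Each cell carries the same flags with the value advanced by SZ_4K, which
 * is what the "new += SZ_4K" in the update loop implements.
 */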
diff --git a/arch/powerpc/include/asm/nohash/32/pte-40x.h b/arch/powerpc/include/asm/nohash/32/pte-40x.h
index 12c6811e344b..2d3153cfc0d7 100644
--- a/arch/powerpc/include/asm/nohash/32/pte-40x.h
+++ b/arch/powerpc/include/asm/nohash/32/pte-40x.h
@@ -44,9 +44,8 @@
#define _PAGE_WRITETHRU 0x008 /* W: caching is write-through */
#define _PAGE_USER 0x010 /* matches one of the zone permission bits */
#define _PAGE_SPECIAL 0x020 /* software: Special page */
-#define _PAGE_RW 0x040 /* software: Writes permitted */
#define _PAGE_DIRTY 0x080 /* software: dirty page */
-#define _PAGE_HWWRITE 0x100 /* hardware: Dirty & RW, set in exception */
+#define _PAGE_RW 0x100 /* hardware: WR, anded with dirty in exception */
#define _PAGE_EXEC 0x200 /* hardware: EX permission */
#define _PAGE_ACCESSED 0x400 /* software: R: page referenced */
@@ -58,8 +57,8 @@
#define _PAGE_KERNEL_RO 0
#define _PAGE_KERNEL_ROX _PAGE_EXEC
-#define _PAGE_KERNEL_RW (_PAGE_DIRTY | _PAGE_RW | _PAGE_HWWRITE)
-#define _PAGE_KERNEL_RWX (_PAGE_DIRTY | _PAGE_RW | _PAGE_HWWRITE | _PAGE_EXEC)
+#define _PAGE_KERNEL_RW (_PAGE_DIRTY | _PAGE_RW)
+#define _PAGE_KERNEL_RWX (_PAGE_DIRTY | _PAGE_RW | _PAGE_EXEC)
#define _PMD_PRESENT 0x400 /* PMD points to page of PTEs */
#define _PMD_PRESENT_MASK _PMD_PRESENT
@@ -85,21 +84,5 @@
#define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_USER)
#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
-#ifndef __ASSEMBLY__
-static inline pte_t pte_wrprotect(pte_t pte)
-{
- return __pte(pte_val(pte) & ~(_PAGE_RW | _PAGE_HWWRITE));
-}
-
-#define pte_wrprotect pte_wrprotect
-
-static inline pte_t pte_mkclean(pte_t pte)
-{
- return __pte(pte_val(pte) & ~(_PAGE_DIRTY | _PAGE_HWWRITE));
-}
-
-#define pte_mkclean pte_mkclean
-#endif
-
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_NOHASH_32_PTE_40x_H */
diff --git a/arch/powerpc/include/asm/nohash/32/pte-fsl-booke.h b/arch/powerpc/include/asm/nohash/32/pte-85xx.h
index 0fc1bd42bb3e..93fb8e11a3f1 100644
--- a/arch/powerpc/include/asm/nohash/32/pte-fsl-booke.h
+++ b/arch/powerpc/include/asm/nohash/32/pte-85xx.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_POWERPC_NOHASH_32_PTE_FSL_BOOKE_H
-#define _ASM_POWERPC_NOHASH_32_PTE_FSL_BOOKE_H
+#ifndef _ASM_POWERPC_NOHASH_32_PTE_85xx_H
+#define _ASM_POWERPC_NOHASH_32_PTE_85xx_H
#ifdef __KERNEL__
/* PTE bit definitions for Freescale BookE SW loaded TLB MMU based
@@ -71,4 +71,4 @@
#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
#endif /* __KERNEL__ */
-#endif /* _ASM_POWERPC_NOHASH_32_PTE_FSL_BOOKE_H */
+#endif /* _ASM_POWERPC_NOHASH_32_PTE_85xx_H */
diff --git a/arch/powerpc/include/asm/nohash/32/pte-8xx.h b/arch/powerpc/include/asm/nohash/32/pte-8xx.h
index c9e4b2d90f65..1a89ebdc3acc 100644
--- a/arch/powerpc/include/asm/nohash/32/pte-8xx.h
+++ b/arch/powerpc/include/asm/nohash/32/pte-8xx.h
@@ -39,13 +39,15 @@
* into the TLB.
*/
#define _PAGE_GUARDED 0x0010 /* Copied to L1 G entry in DTLB */
-#define _PAGE_SPECIAL 0x0020 /* SW entry */
+#define _PAGE_ACCESSED 0x0020 /* Copied to L1 APG 1 entry in I/DTLB */
#define _PAGE_EXEC 0x0040 /* Copied to PP (bit 21) in ITLB */
-#define _PAGE_ACCESSED 0x0080 /* software: page referenced */
+#define _PAGE_SPECIAL 0x0080 /* SW entry */
#define _PAGE_NA 0x0200 /* Supervisor NA, User no access */
#define _PAGE_RO 0x0600 /* Supervisor RO, User no access */
+#define _PAGE_HUGE 0x0800 /* Copied to L1 PS bit 29 */
+
/* cache related flags non existing on 8xx */
#define _PAGE_COHERENT 0
#define _PAGE_WRITETHRU 0
@@ -57,11 +59,12 @@
#define _PMD_PRESENT 0x0001
#define _PMD_PRESENT_MASK _PMD_PRESENT
-#define _PMD_BAD 0x0fd0
+#define _PMD_BAD 0x0f90
#define _PMD_PAGE_MASK 0x000c
#define _PMD_PAGE_8M 0x000c
#define _PMD_PAGE_512K 0x0004
-#define _PMD_USER 0x0020 /* APG 1 */
+#define _PMD_ACCESSED 0x0020 /* APG 1 */
+#define _PMD_USER 0x0040 /* APG 2 */
#define _PTE_NONE_MASK 0
@@ -128,10 +131,55 @@ static inline pte_t pte_mkuser(pte_t pte)
static inline pte_t pte_mkhuge(pte_t pte)
{
- return __pte(pte_val(pte) | _PAGE_SPS);
+ return __pte(pte_val(pte) | _PAGE_SPS | _PAGE_HUGE);
}
#define pte_mkhuge pte_mkhuge
+
+static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, pte_t *p,
+ unsigned long clr, unsigned long set, int huge);
+
+static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+{
+ pte_update(mm, addr, ptep, 0, _PAGE_RO, 0);
+}
+#define ptep_set_wrprotect ptep_set_wrprotect
+
+static inline void __ptep_set_access_flags(struct vm_area_struct *vma, pte_t *ptep,
+ pte_t entry, unsigned long address, int psize)
+{
+ unsigned long set = pte_val(entry) & (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_EXEC);
+ unsigned long clr = ~pte_val(entry) & _PAGE_RO;
+ int huge = psize > mmu_virtual_psize ? 1 : 0;
+
+ pte_update(vma->vm_mm, address, ptep, clr, set, huge);
+
+ flush_tlb_page(vma, address);
+}
+#define __ptep_set_access_flags __ptep_set_access_flags
+
+static inline unsigned long pgd_leaf_size(pgd_t pgd)
+{
+ if (pgd_val(pgd) & _PMD_PAGE_8M)
+ return SZ_8M;
+ return SZ_4M;
+}
+
+#define pgd_leaf_size pgd_leaf_size
+
+static inline unsigned long pte_leaf_size(pte_t pte)
+{
+ pte_basic_t val = pte_val(pte);
+
+ if (val & _PAGE_HUGE)
+ return SZ_512K;
+ if (val & _PAGE_SPS)
+ return SZ_16K;
+ return SZ_4K;
+}
+
+#define pte_leaf_size pte_leaf_size
+
#endif
#endif /* __KERNEL__ */
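
A minimal sketch of the size decoding the new 8xx pte_leaf_size() helper above performs, using only names that appear in this hunk (_PAGE_HUGE, _PAGE_SPS, the SZ_* constants from linux/sizes.h); the function name is illustrative, not part of the patch:

    /* Decode the mapping size encoded in an 8xx PTE value (sketch). */
    static inline unsigned long example_8xx_leaf_size(unsigned long val)
    {
            if (val & _PAGE_HUGE)   /* L1 PS bit 29: 512k mapping */
                    return SZ_512K;
            if (val & _PAGE_SPS)    /* small-page-size bit: 16k mapping */
                    return SZ_16K;
            return SZ_4K;           /* base page size */
    }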
diff --git a/arch/powerpc/include/asm/nohash/32/slice.h b/arch/powerpc/include/asm/nohash/32/slice.h
deleted file mode 100644
index 39eb0154ae2d..000000000000
--- a/arch/powerpc/include/asm/nohash/32/slice.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_POWERPC_NOHASH_32_SLICE_H
-#define _ASM_POWERPC_NOHASH_32_SLICE_H
-
-#ifdef CONFIG_PPC_MM_SLICES
-
-#define SLICE_LOW_SHIFT 26 /* 64 slices */
-#define SLICE_LOW_TOP (0x100000000ull)
-#define SLICE_NUM_LOW (SLICE_LOW_TOP >> SLICE_LOW_SHIFT)
-#define GET_LOW_SLICE_INDEX(addr) ((addr) >> SLICE_LOW_SHIFT)
-
-#define SLICE_HIGH_SHIFT 0
-#define SLICE_NUM_HIGH 0ul
-#define GET_HIGH_SLICE_INDEX(addr) (addr & 0)
-
-#define SLB_ADDR_LIMIT_DEFAULT DEFAULT_MAP_WINDOW
-
-#endif /* CONFIG_PPC_MM_SLICES */
-
-#endif /* _ASM_POWERPC_NOHASH_32_SLICE_H */
diff --git a/arch/powerpc/include/asm/nohash/64/pgalloc.h b/arch/powerpc/include/asm/nohash/64/pgalloc.h
index b9534a793293..e50b211becb3 100644
--- a/arch/powerpc/include/asm/nohash/64/pgalloc.h
+++ b/arch/powerpc/include/asm/nohash/64/pgalloc.h
@@ -15,7 +15,10 @@ struct vmemmap_backing {
};
extern struct vmemmap_backing *vmemmap_list;
-#define pgd_populate(MM, PGD, PUD) pgd_set(PGD, (unsigned long)PUD)
+static inline void p4d_populate(struct mm_struct *mm, p4d_t *p4d, pud_t *pud)
+{
+ p4d_set(p4d, (unsigned long)pud);
+}
static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
{
diff --git a/arch/powerpc/include/asm/nohash/64/pgtable-4k.h b/arch/powerpc/include/asm/nohash/64/pgtable-4k.h
index c40ec32b8194..10f5cf444d72 100644
--- a/arch/powerpc/include/asm/nohash/64/pgtable-4k.h
+++ b/arch/powerpc/include/asm/nohash/64/pgtable-4k.h
@@ -2,7 +2,7 @@
#ifndef _ASM_POWERPC_NOHASH_64_PGTABLE_4K_H
#define _ASM_POWERPC_NOHASH_64_PGTABLE_4K_H
-#include <asm-generic/5level-fixup.h>
+#include <asm-generic/pgtable-nop4d.h>
/*
* Entries per page directory level. The PTE level must use a 64b record
@@ -45,42 +45,42 @@
#define PMD_MASKED_BITS 0
/* Bits to mask out from a PUD to get to the PMD page */
#define PUD_MASKED_BITS 0
-/* Bits to mask out from a PGD to get to the PUD page */
-#define PGD_MASKED_BITS 0
+/* Bits to mask out from a P4D to get to the PUD page */
+#define P4D_MASKED_BITS 0
/*
* 4-level page tables related bits
*/
-#define pgd_none(pgd) (!pgd_val(pgd))
-#define pgd_bad(pgd) (pgd_val(pgd) == 0)
-#define pgd_present(pgd) (pgd_val(pgd) != 0)
-#define pgd_page_vaddr(pgd) (pgd_val(pgd) & ~PGD_MASKED_BITS)
+#define p4d_none(p4d) (!p4d_val(p4d))
+#define p4d_bad(p4d) (p4d_val(p4d) == 0)
+#define p4d_present(p4d) (p4d_val(p4d) != 0)
#ifndef __ASSEMBLY__
-static inline void pgd_clear(pgd_t *pgdp)
+static inline pud_t *p4d_pgtable(p4d_t p4d)
{
- *pgdp = __pgd(0);
+ return (pud_t *) (p4d_val(p4d) & ~P4D_MASKED_BITS);
}
-static inline pte_t pgd_pte(pgd_t pgd)
+static inline void p4d_clear(p4d_t *p4dp)
{
- return __pte(pgd_val(pgd));
+ *p4dp = __p4d(0);
}
-static inline pgd_t pte_pgd(pte_t pte)
+static inline pte_t p4d_pte(p4d_t p4d)
{
- return __pgd(pte_val(pte));
+ return __pte(p4d_val(p4d));
}
-extern struct page *pgd_page(pgd_t pgd);
-#endif /* !__ASSEMBLY__ */
+static inline p4d_t pte_p4d(pte_t pte)
+{
+ return __p4d(pte_val(pte));
+}
+extern struct page *p4d_page(p4d_t p4d);
-#define pud_offset(pgdp, addr) \
- (((pud_t *) pgd_page_vaddr(*(pgdp))) + \
- (((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1)))
+#endif /* !__ASSEMBLY__ */
#define pud_ERROR(e) \
pr_err("%s:%d: bad pud %08lx.\n", __FILE__, __LINE__, pud_val(e))
diff --git a/arch/powerpc/include/asm/nohash/64/pgtable.h b/arch/powerpc/include/asm/nohash/64/pgtable.h
index 9a33b8bd842d..879e9a6e5a87 100644
--- a/arch/powerpc/include/asm/nohash/64/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/64/pgtable.h
@@ -6,12 +6,12 @@
* the ppc64 non-hashed page table.
*/
+#include <linux/sizes.h>
+
#include <asm/nohash/64/pgtable-4k.h>
#include <asm/barrier.h>
#include <asm/asm-const.h>
-#define FIRST_USER_ADDRESS 0UL
-
/*
* Size of EA range mapped by our pagetables.
*/
@@ -25,7 +25,7 @@
/*
* Define the address range of the kernel non-linear virtual area
*/
-#define KERN_VIRT_START ASM_CONST(0x8000000000000000)
+#define KERN_VIRT_START ASM_CONST(0xc000100000000000)
#define KERN_VIRT_SIZE ASM_CONST(0x0000100000000000)
/*
@@ -38,15 +38,16 @@
#define VMALLOC_END (VMALLOC_START + VMALLOC_SIZE)
/*
- * The second half of the kernel virtual space is used for IO mappings,
+ * The third quarter of the kernel virtual space is used for IO mappings,
* it's itself carved into the PIO region (ISA and PHB IO space) and
* the ioremap space
*
* ISA_IO_BASE = KERN_IO_START, 64K reserved area
* PHB_IO_BASE = ISA_IO_BASE + 64K to ISA_IO_BASE + 2G, PHB IO spaces
- * IOREMAP_BASE = ISA_IO_BASE + 2G to VMALLOC_START + PGTABLE_RANGE
+ * IOREMAP_BASE = ISA_IO_BASE + 2G to KERN_IO_START + KERN_IO_SIZE
*/
#define KERN_IO_START (KERN_VIRT_START + (KERN_VIRT_SIZE >> 1))
+#define KERN_IO_SIZE (KERN_VIRT_SIZE >> 2)
#define FULL_IO_SIZE 0x80000000ul
#define ISA_IO_BASE (KERN_IO_START)
#define ISA_IO_END (KERN_IO_START + 0x10000ul)
@@ -54,19 +55,8 @@
#define PHB_IO_END (KERN_IO_START + FULL_IO_SIZE)
#define IOREMAP_BASE (PHB_IO_END)
#define IOREMAP_START (ioremap_bot)
-#define IOREMAP_END (KERN_VIRT_START + KERN_VIRT_SIZE)
-
-
-/*
- * Region IDs
- */
-#define REGION_SHIFT 60UL
-#define REGION_MASK (0xfUL << REGION_SHIFT)
-#define REGION_ID(ea) (((unsigned long)(ea)) >> REGION_SHIFT)
-
-#define VMALLOC_REGION_ID (REGION_ID(VMALLOC_START))
-#define KERNEL_REGION_ID (REGION_ID(PAGE_OFFSET))
-#define USER_REGION_ID (0UL)
+#define IOREMAP_END (KERN_IO_START + KERN_IO_SIZE - FIXADDR_SIZE)
+#define FIXADDR_SIZE SZ_32M
/*
* Defines the address of the vmemap area, in its own region on
@@ -80,9 +70,7 @@
/*
* Include the PTE bits definitions
*/
-#include <asm/nohash/pte-book3e.h>
-
-#define _PAGE_SAO 0
+#include <asm/nohash/pte-e500.h>
#define PTE_RPN_MASK (~((1UL << PTE_RPN_SHIFT) - 1))
@@ -117,11 +105,6 @@ static inline pte_t pte_wrprotect(pte_t pte)
return __pte(pte_val(pte) & ~_PAGE_RW);
}
-static inline pte_t pte_mkexec(pte_t pte)
-{
- return __pte(pte_val(pte) | _PAGE_EXEC);
-}
-
#define PMD_BAD_BITS (PTE_TABLE_SIZE-1)
#define PUD_BAD_BITS (PMD_TABLE_SIZE-1)
@@ -146,6 +129,7 @@ static inline pte_t pmd_pte(pmd_t pmd)
#define pmd_present(pmd) (!pmd_none(pmd))
#define pmd_page_vaddr(pmd) (pmd_val(pmd) & ~PMD_MASKED_BITS)
extern struct page *pmd_page(pmd_t pmd);
+#define pmd_pfn(pmd) (page_to_pfn(pmd_page(pmd)))
static inline void pud_set(pud_t *pudp, unsigned long val)
{
@@ -161,7 +145,11 @@ static inline void pud_clear(pud_t *pudp)
#define pud_bad(pud) (!is_kernel_addr(pud_val(pud)) \
|| (pud_val(pud) & PUD_BAD_BITS))
#define pud_present(pud) (pud_val(pud) != 0)
-#define pud_page_vaddr(pud) (pud_val(pud) & ~PUD_MASKED_BITS)
+
+static inline pmd_t *pud_pgtable(pud_t pud)
+{
+ return (pmd_t *)(pud_val(pud) & ~PUD_MASKED_BITS);
+}
extern struct page *pud_page(pud_t pud);
@@ -175,35 +163,13 @@ static inline pud_t pte_pud(pte_t pte)
return __pud(pte_val(pte));
}
#define pud_write(pud) pte_write(pud_pte(pud))
-#define pgd_write(pgd) pte_write(pgd_pte(pgd))
+#define p4d_write(p4d)		pte_write(p4d_pte(p4d))
-static inline void pgd_set(pgd_t *pgdp, unsigned long val)
+static inline void p4d_set(p4d_t *p4dp, unsigned long val)
{
- *pgdp = __pgd(val);
+ *p4dp = __p4d(val);
}
-/*
- * Find an entry in a page-table-directory. We combine the address region
- * (the high order N bits) and the pgd portion of the address.
- */
-#define pgd_index(address) (((address) >> (PGDIR_SHIFT)) & (PTRS_PER_PGD - 1))
-
-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
-
-#define pmd_offset(pudp,addr) \
- (((pmd_t *) pud_page_vaddr(*(pudp))) + (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)))
-
-#define pte_offset_kernel(dir,addr) \
- (((pte_t *) pmd_page_vaddr(*(dir))) + (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))
-
-#define pte_offset_map(dir,addr) pte_offset_kernel((dir), (addr))
-
-static inline void pte_unmap(pte_t *pte) { }
-
-/* to find an entry in a kernel page-table-directory */
-/* This now only contains the vmalloc pages */
-#define pgd_offset_k(address) pgd_offset(&init_mm, address)
-
/* Atomic PTE updates */
static inline unsigned long pte_update(struct mm_struct *mm,
unsigned long addr,
@@ -211,22 +177,9 @@ static inline unsigned long pte_update(struct mm_struct *mm,
unsigned long set,
int huge)
{
-#ifdef PTE_ATOMIC_UPDATES
- unsigned long old, tmp;
-
- __asm__ __volatile__(
- "1: ldarx %0,0,%3 # pte_update\n\
- andc %1,%0,%4 \n\
- or %1,%1,%6\n\
- stdcx. %1,0,%3 \n\
- bne- 1b"
- : "=&r" (old), "=&r" (tmp), "=m" (*ptep)
- : "r" (ptep), "r" (clr), "m" (*ptep), "r" (set)
- : "cc" );
-#else
unsigned long old = pte_val(*ptep);
*ptep = __pte((old & ~clr) | set);
-#endif
+
/* huge pages use the old page table lock */
if (!huge)
assert_pte_locked(mm, addr);
@@ -310,28 +263,12 @@ static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
unsigned long bits = pte_val(entry) &
(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
-#ifdef PTE_ATOMIC_UPDATES
- unsigned long old, tmp;
-
- __asm__ __volatile__(
- "1: ldarx %0,0,%4\n\
- or %0,%3,%0\n\
- stdcx. %0,0,%4\n\
- bne- 1b"
- :"=&r" (old), "=&r" (tmp), "=m" (*ptep)
- :"r" (bits), "r" (ptep), "m" (*ptep)
- :"cc");
-#else
unsigned long old = pte_val(*ptep);
*ptep = __pte(old | bits);
-#endif
flush_tlb_page(vma, address);
}
-#define __HAVE_ARCH_PTE_SAME
-#define pte_same(A,B) ((pte_val(A) ^ pte_val(B)) == 0)
-
#define pte_ERROR(e) \
pr_err("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
#define pmd_ERROR(e) \
@@ -356,11 +293,18 @@ static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
#define __swp_entry_to_pte(x) __pte((x).val)
int map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot);
+void unmap_kernel_page(unsigned long va);
extern int __meminit vmemmap_create_mapping(unsigned long start,
unsigned long page_size,
unsigned long phys);
extern void vmemmap_remove_mapping(unsigned long start,
unsigned long page_size);
+void __patch_exception(int exc, unsigned long addr);
+#define patch_exception(exc, name) do { \
+ extern unsigned int name; \
+ __patch_exception((exc), (unsigned long)&name); \
+} while (0)
+
#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_NOHASH_64_PGTABLE_H */
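
With the PTE_ATOMIC_UPDATES ldarx/stdcx. path removed above, the nohash/64 pte_update() reduces to a plain read-modify-write done under the page table lock. A condensed sketch of that core (the caller is assumed to hold the PTE lock, as the hunk's assert_pte_locked() check implies):

    static inline unsigned long example_pte_update(pte_t *ptep, unsigned long clr,
                                                   unsigned long set)
    {
            unsigned long old = pte_val(*ptep);

            *ptep = __pte((old & ~clr) | set);  /* clear then set the requested bits */
            return old;                         /* old value drives A/D bookkeeping in callers */
    }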
diff --git a/arch/powerpc/include/asm/nohash/hugetlb-book3e.h b/arch/powerpc/include/asm/nohash/hugetlb-e500.h
index ecd8694cb229..8f04ad20e040 100644
--- a/arch/powerpc/include/asm/nohash/hugetlb-book3e.h
+++ b/arch/powerpc/include/asm/nohash/hugetlb-e500.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_POWERPC_NOHASH_HUGETLB_BOOK3E_H
-#define _ASM_POWERPC_NOHASH_HUGETLB_BOOK3E_H
+#ifndef _ASM_POWERPC_NOHASH_HUGETLB_E500_H
+#define _ASM_POWERPC_NOHASH_HUGETLB_E500_H
static inline pte_t *hugepd_page(hugepd_t hpd)
{
@@ -30,7 +30,7 @@ void flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
static inline void hugepd_populate(hugepd_t *hpdp, pte_t *new, unsigned int pshift)
{
- /* We use the old format for PPC_FSL_BOOK3E */
+ /* We use the old format for PPC_E500 */
*hpdp = __hugepd(((unsigned long)new & ~PD_HUGE) | pshift);
}
@@ -42,4 +42,4 @@ static inline int check_and_get_huge_psize(int shift)
return shift_to_mmu_psize(shift);
}
-#endif /* _ASM_POWERPC_NOHASH_HUGETLB_BOOK3E_H */
+#endif /* _ASM_POWERPC_NOHASH_HUGETLB_E500_H */
diff --git a/arch/powerpc/include/asm/nohash/kup-booke.h b/arch/powerpc/include/asm/nohash/kup-booke.h
new file mode 100644
index 000000000000..49bb41ed0816
--- /dev/null
+++ b/arch/powerpc/include/asm/nohash/kup-booke.h
@@ -0,0 +1,110 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_KUP_BOOKE_H_
+#define _ASM_POWERPC_KUP_BOOKE_H_
+
+#include <asm/bug.h>
+
+#ifdef CONFIG_PPC_KUAP
+
+#ifdef __ASSEMBLY__
+
+.macro kuap_check_amr gpr1, gpr2
+.endm
+
+#else
+
+#include <linux/jump_label.h>
+#include <linux/sched.h>
+
+#include <asm/reg.h>
+
+extern struct static_key_false disable_kuap_key;
+
+static __always_inline bool kuap_is_disabled(void)
+{
+ return static_branch_unlikely(&disable_kuap_key);
+}
+
+static inline void __kuap_lock(void)
+{
+ mtspr(SPRN_PID, 0);
+ isync();
+}
+
+static inline void __kuap_save_and_lock(struct pt_regs *regs)
+{
+ regs->kuap = mfspr(SPRN_PID);
+ mtspr(SPRN_PID, 0);
+ isync();
+}
+
+static inline void kuap_user_restore(struct pt_regs *regs)
+{
+ if (kuap_is_disabled())
+ return;
+
+ mtspr(SPRN_PID, current->thread.pid);
+
+ /* Context synchronisation is performed by rfi */
+}
+
+static inline void __kuap_kernel_restore(struct pt_regs *regs, unsigned long kuap)
+{
+ if (regs->kuap)
+ mtspr(SPRN_PID, current->thread.pid);
+
+ /* Context synchronisation is performed by rfi */
+}
+
+static inline unsigned long __kuap_get_and_assert_locked(void)
+{
+ unsigned long kuap = mfspr(SPRN_PID);
+
+ if (IS_ENABLED(CONFIG_PPC_KUAP_DEBUG))
+ WARN_ON_ONCE(kuap);
+
+ return kuap;
+}
+
+static inline void __allow_user_access(void __user *to, const void __user *from,
+ unsigned long size, unsigned long dir)
+{
+ mtspr(SPRN_PID, current->thread.pid);
+ isync();
+}
+
+static inline void __prevent_user_access(unsigned long dir)
+{
+ mtspr(SPRN_PID, 0);
+ isync();
+}
+
+static inline unsigned long __prevent_user_access_return(void)
+{
+ unsigned long flags = mfspr(SPRN_PID);
+
+ mtspr(SPRN_PID, 0);
+ isync();
+
+ return flags;
+}
+
+static inline void __restore_user_access(unsigned long flags)
+{
+ if (flags) {
+ mtspr(SPRN_PID, current->thread.pid);
+ isync();
+ }
+}
+
+static inline bool
+__bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
+{
+ return !regs->kuap;
+}
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* CONFIG_PPC_KUAP */
+
+#endif /* _ASM_POWERPC_KUP_BOOKE_H_ */
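
The new kup-booke.h implements KUAP by parking the MMU PID at 0 while user access is closed and restoring the thread's PID to open it. A hedged usage sketch of that open/close pattern (the caller, the zero `dir` argument and the copy placeholder are illustrative only, not from the patch):

    static inline void example_user_access_window(void __user *to, const void __user *from,
                                                  unsigned long size)
    {
            __allow_user_access(to, from, size, 0); /* PID <- current->thread.pid; isync */
            /* ... user memory may be touched here, e.g. by a copy routine ... */
            __prevent_user_access(0);               /* PID <- 0; isync closes the window */
    }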
diff --git a/arch/powerpc/include/asm/nohash/mmu-book3e.h b/arch/powerpc/include/asm/nohash/mmu-e500.h
index b41004664312..e43a418d3ccd 100644
--- a/arch/powerpc/include/asm/nohash/mmu-book3e.h
+++ b/arch/powerpc/include/asm/nohash/mmu-e500.h
@@ -238,7 +238,7 @@ extern unsigned int tlbcam_index;
typedef struct {
unsigned int id;
unsigned int active;
- unsigned long vdso_base;
+ void __user *vdso;
} mm_context_t;
/* Page size definitions, common between 32 and 64-bit
diff --git a/arch/powerpc/include/asm/nohash/mmu.h b/arch/powerpc/include/asm/nohash/mmu.h
index edc793e5f08f..e264be219fdb 100644
--- a/arch/powerpc/include/asm/nohash/mmu.h
+++ b/arch/powerpc/include/asm/nohash/mmu.h
@@ -8,9 +8,9 @@
#elif defined(CONFIG_44x)
/* 44x-style software loaded TLB */
#include <asm/nohash/32/mmu-44x.h>
-#elif defined(CONFIG_PPC_BOOK3E_MMU)
+#elif defined(CONFIG_PPC_E500)
/* Freescale Book-E software loaded TLB or Book-3e (ISA 2.06+) MMU */
-#include <asm/nohash/mmu-book3e.h>
+#include <asm/nohash/mmu-e500.h>
#elif defined (CONFIG_PPC_8xx)
/* Motorola/Freescale 8xx software loaded TLB */
#include <asm/nohash/32/mmu-8xx.h>
diff --git a/arch/powerpc/include/asm/nohash/pgalloc.h b/arch/powerpc/include/asm/nohash/pgalloc.h
index 29c43665a753..4b62376318e1 100644
--- a/arch/powerpc/include/asm/nohash/pgalloc.h
+++ b/arch/powerpc/include/asm/nohash/pgalloc.h
@@ -15,7 +15,7 @@ static inline void tlb_flush_pgtable(struct mmu_gather *tlb,
{
}
-#endif /* !CONFIG_PPC_BOOK3E */
+#endif /* !CONFIG_PPC_BOOK3E_64 */
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
diff --git a/arch/powerpc/include/asm/nohash/pgtable.h b/arch/powerpc/include/asm/nohash/pgtable.h
index 7fed9dc0f147..d9067dfc531c 100644
--- a/arch/powerpc/include/asm/nohash/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/pgtable.h
@@ -11,31 +11,11 @@
/* Permission masks used for kernel mappings */
#define PAGE_KERNEL __pgprot(_PAGE_BASE | _PAGE_KERNEL_RW)
#define PAGE_KERNEL_NC __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | _PAGE_NO_CACHE)
-#define PAGE_KERNEL_NCG __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | \
- _PAGE_NO_CACHE | _PAGE_GUARDED)
+#define PAGE_KERNEL_NCG __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | _PAGE_NO_CACHE | _PAGE_GUARDED)
#define PAGE_KERNEL_X __pgprot(_PAGE_BASE | _PAGE_KERNEL_RWX)
#define PAGE_KERNEL_RO __pgprot(_PAGE_BASE | _PAGE_KERNEL_RO)
#define PAGE_KERNEL_ROX __pgprot(_PAGE_BASE | _PAGE_KERNEL_ROX)
-/*
- * Protection used for kernel text. We want the debuggers to be able to
- * set breakpoints anywhere, so don't write protect the kernel text
- * on platforms where such control is possible.
- */
-#if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH) ||\
- defined(CONFIG_KPROBES) || defined(CONFIG_DYNAMIC_FTRACE)
-#define PAGE_KERNEL_TEXT PAGE_KERNEL_X
-#else
-#define PAGE_KERNEL_TEXT PAGE_KERNEL_ROX
-#endif
-
-/* Make modules code happy. We don't set RO yet */
-#define PAGE_KERNEL_EXEC PAGE_KERNEL_X
-
-/* Advertise special mapping type for AGP */
-#define PAGE_AGP (PAGE_KERNEL_NC)
-#define HAVE_PAGE_AGP
-
#ifndef __ASSEMBLY__
/* Generic accessors to PTE bits */
@@ -56,7 +36,7 @@ static inline bool pte_exec(pte_t pte) { return pte_val(pte) & _PAGE_EXEC; }
#ifdef CONFIG_NUMA_BALANCING
/*
* These work without NUMA balancing but the kernel does not care. See the
- * comment in include/asm-generic/pgtable.h . On powerpc, this will only
+ * comment in include/linux/pgtable.h . On powerpc, this will only
* work for user pages and always return true for kernel pages.
*/
static inline int pte_protnone(pte_t pte)
@@ -130,23 +110,16 @@ static inline pte_t pte_exprotect(pte_t pte)
return __pte(pte_val(pte) & ~_PAGE_EXEC);
}
-#ifndef pte_mkclean
static inline pte_t pte_mkclean(pte_t pte)
{
return __pte(pte_val(pte) & ~_PAGE_DIRTY);
}
-#endif
static inline pte_t pte_mkold(pte_t pte)
{
return __pte(pte_val(pte) & ~_PAGE_ACCESSED);
}
-static inline pte_t pte_mkpte(pte_t pte)
-{
- return pte;
-}
-
static inline pte_t pte_mkspecial(pte_t pte)
{
return __pte(pte_val(pte) | _PAGE_SPECIAL);
@@ -199,9 +172,9 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
*/
if (IS_ENABLED(CONFIG_PPC32) && IS_ENABLED(CONFIG_PTE_64BIT) && !percpu) {
__asm__ __volatile__("\
- stw%U0%X0 %2,%0\n\
- eieio\n\
- stw%U0%X0 %L2,%1"
+ stw%X0 %2,%0\n\
+ mbar\n\
+ stw%X1 %L2,%1"
: "=m" (*ptep), "=m" (*((unsigned char *)ptep+4))
: "r" (pte) : "memory");
return;
@@ -267,7 +240,7 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
static inline int hugepd_ok(hugepd_t hpd)
{
#ifdef CONFIG_PPC_8xx
- return ((hpd_val(hpd) & 0x4) != 0);
+ return ((hpd_val(hpd) & _PMD_PAGE_MASK) == _PMD_PAGE_8M);
#else
/* We clear the top bit to indicate hugepd */
return (hpd_val(hpd) && (hpd_val(hpd) & PD_HUGE) == 0);
@@ -284,12 +257,6 @@ static inline int pud_huge(pud_t pud)
return 0;
}
-static inline int pgd_huge(pgd_t pgd)
-{
- return 0;
-}
-#define pgd_huge pgd_huge
-
#define is_hugepd(hpd) (hugepd_ok(hpd))
#endif
@@ -299,7 +266,7 @@ static inline int pgd_huge(pgd_t pgd)
* We use it to ensure coherency between the i-cache and d-cache
* for the page which has just been mapped in.
*/
-#if defined(CONFIG_PPC_FSL_BOOK3E) && defined(CONFIG_HUGETLB_PAGE)
+#if defined(CONFIG_PPC_E500) && defined(CONFIG_HUGETLB_PAGE)
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep);
#else
static inline
diff --git a/arch/powerpc/include/asm/nohash/pte-book3e.h b/arch/powerpc/include/asm/nohash/pte-e500.h
index 813918f40765..0934e8965e4e 100644
--- a/arch/powerpc/include/asm/nohash/pte-book3e.h
+++ b/arch/powerpc/include/asm/nohash/pte-e500.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_POWERPC_NOHASH_PTE_BOOK3E_H
-#define _ASM_POWERPC_NOHASH_PTE_BOOK3E_H
+#ifndef _ASM_POWERPC_NOHASH_PTE_E500_H
+#define _ASM_POWERPC_NOHASH_PTE_E500_H
#ifdef __KERNEL__
/* PTE bit definitions for processors compliant to the Book3E
@@ -48,7 +48,7 @@
#define _PAGE_WRITETHRU 0x800000 /* W: cache write-through */
/* "Higher level" linux bit combinations */
-#define _PAGE_EXEC _PAGE_BAP_UX /* .. and was cache cleaned */
+#define _PAGE_EXEC (_PAGE_BAP_SX | _PAGE_BAP_UX) /* .. and was cache cleaned */
#define _PAGE_RW (_PAGE_BAP_SW | _PAGE_BAP_UW) /* User write permission */
#define _PAGE_KERNEL_RW (_PAGE_BAP_SW | _PAGE_BAP_SR | _PAGE_DIRTY)
#define _PAGE_KERNEL_RO (_PAGE_BAP_SR)
@@ -93,11 +93,11 @@
/* Permission masks used to generate the __P and __S table */
#define PAGE_NONE __pgprot(_PAGE_BASE)
#define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
-#define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC)
+#define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_BAP_UX)
#define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_USER)
-#define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
+#define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_BAP_UX)
#define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_USER)
-#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
+#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_BAP_UX)
#ifndef __ASSEMBLY__
static inline pte_t pte_mkprivileged(pte_t pte)
@@ -113,7 +113,17 @@ static inline pte_t pte_mkuser(pte_t pte)
}
#define pte_mkuser pte_mkuser
+
+static inline pte_t pte_mkexec(pte_t pte)
+{
+ if (pte_val(pte) & _PAGE_BAP_UR)
+ return __pte((pte_val(pte) & ~_PAGE_BAP_SX) | _PAGE_BAP_UX);
+ else
+ return __pte((pte_val(pte) & ~_PAGE_BAP_UX) | _PAGE_BAP_SX);
+}
+#define pte_mkexec pte_mkexec
+
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
-#endif /* _ASM_POWERPC_NOHASH_PTE_BOOK3E_H */
+#endif /* _ASM_POWERPC_NOHASH_PTE_E500_H */
diff --git a/arch/powerpc/include/asm/nohash/tlbflush.h b/arch/powerpc/include/asm/nohash/tlbflush.h
index b1d8fec29169..bdaf34ad41ea 100644
--- a/arch/powerpc/include/asm/nohash/tlbflush.h
+++ b/arch/powerpc/include/asm/nohash/tlbflush.h
@@ -10,7 +10,6 @@
* - local_flush_tlb_mm(mm, full) flushes the specified mm context on
* the local processor
* - local_flush_tlb_page(vma, vmaddr) flushes one page on the local processor
- * - flush_tlb_page_nohash(vma, vmaddr) flushes one page if SW loaded TLB
* - flush_tlb_range(vma, start, end) flushes a range of pages
* - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
*
@@ -19,7 +18,7 @@
/*
* TLB flushing for software loaded TLB chips
*
- * TODO: (CONFIG_FSL_BOOKE) determine if flush_tlb_range &
+ * TODO: (CONFIG_PPC_85xx) determine if flush_tlb_range &
* flush_tlb_kernel_range are best implemented as tlbia vs
* specific tlbie's
*/
@@ -31,13 +30,38 @@ struct mm_struct;
extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
unsigned long end);
-extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
+#ifdef CONFIG_PPC_8xx
+static inline void local_flush_tlb_mm(struct mm_struct *mm)
+{
+ unsigned int pid = READ_ONCE(mm->context.id);
+
+ if (pid != MMU_NO_CONTEXT)
+ asm volatile ("sync; tlbia; isync" : : : "memory");
+}
+
+static inline void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
+{
+ asm volatile ("tlbie %0; sync" : : "r" (vmaddr) : "memory");
+}
+
+static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+ start &= PAGE_MASK;
+
+ if (end - start <= PAGE_SIZE)
+ asm volatile ("tlbie %0; sync" : : "r" (start) : "memory");
+ else
+ asm volatile ("sync; tlbia; isync" : : : "memory");
+}
+#else
+extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
extern void local_flush_tlb_mm(struct mm_struct *mm);
extern void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
extern void __local_flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
int tsize, int ind);
+#endif
#ifdef CONFIG_SMP
extern void flush_tlb_mm(struct mm_struct *mm);
diff --git a/arch/powerpc/include/asm/opal-api.h b/arch/powerpc/include/asm/opal-api.h
index c1f25a760eb1..a2bc4b95e703 100644
--- a/arch/powerpc/include/asm/opal-api.h
+++ b/arch/powerpc/include/asm/opal-api.h
@@ -1067,6 +1067,7 @@ enum {
OPAL_REBOOT_PLATFORM_ERROR = 1,
OPAL_REBOOT_FULL_IPL = 2,
OPAL_REBOOT_MPIPL = 3,
+ OPAL_REBOOT_FAST = 4,
};
/* Argument to OPAL_PCI_TCE_KILL */
@@ -1090,9 +1091,10 @@ enum {
OPAL_XIVE_IRQ_TRIGGER_PAGE = 0x00000001,
OPAL_XIVE_IRQ_STORE_EOI = 0x00000002,
OPAL_XIVE_IRQ_LSI = 0x00000004,
- OPAL_XIVE_IRQ_SHIFT_BUG = 0x00000008,
- OPAL_XIVE_IRQ_MASK_VIA_FW = 0x00000010,
- OPAL_XIVE_IRQ_EOI_VIA_FW = 0x00000020,
+ OPAL_XIVE_IRQ_SHIFT_BUG = 0x00000008, /* P9 DD1.0 workaround */
+ OPAL_XIVE_IRQ_MASK_VIA_FW = 0x00000010, /* P9 DD1.0 workaround */
+ OPAL_XIVE_IRQ_EOI_VIA_FW = 0x00000020, /* P9 DD1.0 workaround */
+ OPAL_XIVE_IRQ_STORE_EOI2 = 0x00000040,
};
/* Flags for OPAL_XIVE_GET/SET_QUEUE_INFO */
diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h
index 9986ac34b8e2..726125a534de 100644
--- a/arch/powerpc/include/asm/opal.h
+++ b/arch/powerpc/include/asm/opal.h
@@ -28,9 +28,6 @@ extern struct device_node *opal_node;
/* API functions */
int64_t opal_invalid_call(void);
-int64_t opal_npu_destroy_context(uint64_t phb_id, uint64_t pid, uint64_t bdf);
-int64_t opal_npu_init_context(uint64_t phb_id, int pasid, uint64_t msr,
- uint64_t bdf);
int64_t opal_npu_map_lpar(uint64_t phb_id, uint64_t bdf, uint64_t lparid,
uint64_t lpcr);
int64_t opal_npu_spa_setup(uint64_t phb_id, uint32_t bdfn,
@@ -307,7 +304,7 @@ int opal_secvar_enqueue_update(const char *key, uint64_t key_len, u8 *data,
s64 opal_mpipl_update(enum opal_mpipl_ops op, u64 src, u64 dest, u64 size);
s64 opal_mpipl_register_tag(enum opal_mpipl_tags tag, u64 addr);
-s64 opal_mpipl_query_tag(enum opal_mpipl_tags tag, u64 *addr);
+s64 opal_mpipl_query_tag(enum opal_mpipl_tags tag, __be64 *addr);
s64 opal_signal_system_reset(s32 cpu);
s64 opal_quiesce(u64 shutdown_type, s32 cpu);
@@ -317,7 +314,7 @@ extern int early_init_dt_scan_opal(unsigned long node, const char *uname,
int depth, void *data);
extern int early_init_dt_scan_recoverable_ranges(unsigned long node,
const char *uname, int depth, void *data);
-extern void opal_configure_cores(void);
+void __init opal_configure_cores(void);
extern int opal_get_chars(uint32_t vtermno, char *buf, int count);
extern int opal_put_chars(uint32_t vtermno, const char *buf, int total_len);
@@ -327,16 +324,10 @@ extern int opal_flush_console(uint32_t vtermno);
extern void hvc_opal_init_early(void);
-extern int opal_notifier_register(struct notifier_block *nb);
-extern int opal_notifier_unregister(struct notifier_block *nb);
-
extern int opal_message_notifier_register(enum opal_msg_type msg_type,
struct notifier_block *nb);
extern int opal_message_notifier_unregister(enum opal_msg_type msg_type,
struct notifier_block *nb);
-extern void opal_notifier_enable(void);
-extern void opal_notifier_disable(void);
-extern void opal_notifier_update_evt(uint64_t evt_mask, uint64_t evt_val);
extern int opal_async_get_token_interruptible(void);
extern int opal_async_release_token(int token);
diff --git a/arch/powerpc/include/asm/oprofile_impl.h b/arch/powerpc/include/asm/oprofile_impl.h
deleted file mode 100644
index 2a166c297f97..000000000000
--- a/arch/powerpc/include/asm/oprofile_impl.h
+++ /dev/null
@@ -1,135 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (C) 2004 Anton Blanchard <anton@au.ibm.com>, IBM
- *
- * Based on alpha version.
- */
-
-#ifndef _ASM_POWERPC_OPROFILE_IMPL_H
-#define _ASM_POWERPC_OPROFILE_IMPL_H
-#ifdef __KERNEL__
-
-#define OP_MAX_COUNTER 8
-
-/* Per-counter configuration as set via oprofilefs. */
-struct op_counter_config {
- unsigned long enabled;
- unsigned long event;
- unsigned long count;
- /* Classic doesn't support per-counter user/kernel selection */
- unsigned long kernel;
- unsigned long user;
- unsigned long unit_mask;
-};
-
-/* System-wide configuration as set via oprofilefs. */
-struct op_system_config {
-#ifdef CONFIG_PPC64
- unsigned long mmcr0;
- unsigned long mmcr1;
- unsigned long mmcra;
-#ifdef CONFIG_OPROFILE_CELL
- /* Register for oprofile user tool to check cell kernel profiling
- * support.
- */
- unsigned long cell_support;
-#endif
-#endif
- unsigned long enable_kernel;
- unsigned long enable_user;
-};
-
-/* Per-arch configuration */
-struct op_powerpc_model {
- int (*reg_setup) (struct op_counter_config *,
- struct op_system_config *,
- int num_counters);
- int (*cpu_setup) (struct op_counter_config *);
- int (*start) (struct op_counter_config *);
- int (*global_start) (struct op_counter_config *);
- void (*stop) (void);
- void (*global_stop) (void);
- int (*sync_start)(void);
- int (*sync_stop)(void);
- void (*handle_interrupt) (struct pt_regs *,
- struct op_counter_config *);
- int num_counters;
-};
-
-extern struct op_powerpc_model op_model_fsl_emb;
-extern struct op_powerpc_model op_model_power4;
-extern struct op_powerpc_model op_model_7450;
-extern struct op_powerpc_model op_model_cell;
-extern struct op_powerpc_model op_model_pa6t;
-
-
-/* All the classic PPC parts use these */
-static inline unsigned int classic_ctr_read(unsigned int i)
-{
- switch(i) {
- case 0:
- return mfspr(SPRN_PMC1);
- case 1:
- return mfspr(SPRN_PMC2);
- case 2:
- return mfspr(SPRN_PMC3);
- case 3:
- return mfspr(SPRN_PMC4);
- case 4:
- return mfspr(SPRN_PMC5);
- case 5:
- return mfspr(SPRN_PMC6);
-
-/* No PPC32 chip has more than 6 so far */
-#ifdef CONFIG_PPC64
- case 6:
- return mfspr(SPRN_PMC7);
- case 7:
- return mfspr(SPRN_PMC8);
-#endif
- default:
- return 0;
- }
-}
-
-static inline void classic_ctr_write(unsigned int i, unsigned int val)
-{
- switch(i) {
- case 0:
- mtspr(SPRN_PMC1, val);
- break;
- case 1:
- mtspr(SPRN_PMC2, val);
- break;
- case 2:
- mtspr(SPRN_PMC3, val);
- break;
- case 3:
- mtspr(SPRN_PMC4, val);
- break;
- case 4:
- mtspr(SPRN_PMC5, val);
- break;
- case 5:
- mtspr(SPRN_PMC6, val);
- break;
-
-/* No PPC32 chip has more than 6, yet */
-#ifdef CONFIG_PPC64
- case 6:
- mtspr(SPRN_PMC7, val);
- break;
- case 7:
- mtspr(SPRN_PMC8, val);
- break;
-#endif
- default:
- break;
- }
-}
-
-
-extern void op_powerpc_backtrace(struct pt_regs * const regs, unsigned int depth);
-
-#endif /* __KERNEL__ */
-#endif /* _ASM_POWERPC_OPROFILE_IMPL_H */
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index e3cc9eb9204d..09f1790d0ae1 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -12,12 +12,13 @@
#ifdef CONFIG_PPC64
+#include <linux/cache.h>
#include <linux/string.h>
#include <asm/types.h>
#include <asm/lppaca.h>
#include <asm/mmu.h>
#include <asm/page.h>
-#ifdef CONFIG_PPC_BOOK3E
+#ifdef CONFIG_PPC_BOOK3E_64
#include <asm/exception-64e.h>
#else
#include <asm/exception-64s.h>
@@ -29,6 +30,7 @@
#include <asm/hmi.h>
#include <asm/cpuidle.h>
#include <asm/atomic.h>
+#include <asm/mce.h>
#include <asm-generic/mmiowb_types.h>
@@ -52,6 +54,7 @@ extern unsigned int debug_smp_processor_id(void); /* from linux/smp.h */
#define get_slb_shadow() (get_paca()->slb_shadow_ptr)
struct task_struct;
+struct rtas_args;
/*
* Defines the layout of the paca.
@@ -95,7 +98,9 @@ struct paca_struct {
/* this becomes non-zero. */
u8 kexec_state; /* set when kexec down has irqs off */
#ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_PPC_64S_HASH_MMU
struct slb_shadow *slb_shadow_ptr;
+#endif
struct dtl_entry *dispatch_log;
struct dtl_entry *dispatch_log_end;
#endif
@@ -107,8 +112,8 @@ struct paca_struct {
*/
/* used for most interrupts/exceptions */
u64 exgen[EX_SIZE] __attribute__((aligned(0x80)));
- u64 exslb[EX_SIZE]; /* used for SLB/segment table misses
- * on the linear mapping */
+
+#ifdef CONFIG_PPC_64S_HASH_MMU
/* SLB related definitions */
u16 vmalloc_sllp;
u8 slb_cache_ptr;
@@ -119,9 +124,10 @@ struct paca_struct {
u32 slb_used_bitmap; /* Bitmaps for first 32 SLB entries. */
u32 slb_kern_bitmap;
u32 slb_cache[SLB_CACHE_ENTRIES];
+#endif
#endif /* CONFIG_PPC_BOOK3S_64 */
-#ifdef CONFIG_PPC_BOOK3E
+#ifdef CONFIG_PPC_BOOK3E_64
u64 exgen[8] __aligned(0x40);
/* Keep pgd in the same cacheline as the start of extlb */
pgd_t *pgd __aligned(0x40); /* Current PGD */
@@ -145,18 +151,11 @@ struct paca_struct {
void *dbg_kstack;
struct tlb_core_data tcd;
-#endif /* CONFIG_PPC_BOOK3E */
+#endif /* CONFIG_PPC_BOOK3E_64 */
-#ifdef CONFIG_PPC_BOOK3S
- mm_context_id_t mm_ctx_id;
-#ifdef CONFIG_PPC_MM_SLICES
+#ifdef CONFIG_PPC_64S_HASH_MMU
unsigned char mm_ctx_low_slices_psize[BITS_PER_LONG / BITS_PER_BYTE];
unsigned char mm_ctx_high_slices_psize[SLICE_ARRAY_SIZE];
- unsigned long mm_ctx_slb_addr_limit;
-#else
- u16 mm_ctx_user_psize;
- u16 mm_ctx_sllp;
-#endif
#endif
/*
@@ -166,9 +165,16 @@ struct paca_struct {
u64 kstack; /* Saved Kernel stack addr */
u64 saved_r1; /* r1 save for RTAS calls or PM or EE=0 */
u64 saved_msr; /* MSR saved here by enter_rtas */
-#ifdef CONFIG_PPC_BOOK3E
+#ifdef CONFIG_PPC64
+ u64 exit_save_r1; /* Syscall/interrupt R1 save */
+#endif
+#ifdef CONFIG_PPC_BOOK3E_64
u16 trap_save; /* Used when bad stack is encountered */
#endif
+#ifdef CONFIG_PPC_BOOK3S_64
+ u8 hsrr_valid; /* HSRRs set for HRFID */
+ u8 srr_valid; /* SRRs set for RFID */
+#endif
u8 irq_soft_mask; /* mask for irq soft masking */
u8 irq_happened; /* irq happened while soft-disabled */
u8 irq_work_pending; /* IRQ_WORK interrupt while soft-disable */
@@ -224,6 +230,7 @@ struct paca_struct {
u16 in_mce;
u8 hmi_event_available; /* HMI event is available */
u8 hmi_p9_special_emu; /* HMI P9 special emulation */
+ u32 hmi_irqs; /* HMI irq stat */
#endif
u8 ftrace_enabled; /* Hard disable ftrace */
@@ -260,9 +267,11 @@ struct paca_struct {
#endif /* CONFIG_PPC_PSERIES */
#ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_PPC_64S_HASH_MMU
/* Capture SLB related old contents in MCE handler. */
struct slb_entry *mce_faulty_slbs;
u16 slb_save_cache_ptr;
+#endif
#endif /* CONFIG_PPC_BOOK3S_64 */
#ifdef CONFIG_STACKPROTECTOR
unsigned long canary;
@@ -270,6 +279,10 @@ struct paca_struct {
#ifdef CONFIG_MMIOWB
struct mmiowb_state mmiowb_state;
#endif
+#ifdef CONFIG_PPC_BOOK3S_64
+ struct mce_info *mce_info;
+ u8 mce_pending_irq_work;
+#endif /* CONFIG_PPC_BOOK3S_64 */
} ____cacheline_aligned;
extern void copy_mm_to_paca(struct mm_struct *mm);
@@ -282,9 +295,9 @@ extern void free_unused_pacas(void);
#else /* CONFIG_PPC64 */
-static inline void allocate_paca_ptrs(void) { };
-static inline void allocate_paca(int cpu) { };
-static inline void free_unused_pacas(void) { };
+static inline void allocate_paca_ptrs(void) { }
+static inline void allocate_paca(int cpu) { }
+static inline void free_unused_pacas(void) { }
#endif /* CONFIG_PPC64 */
diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
index 080a0bf8e54b..edf1dd1b0ca9 100644
--- a/arch/powerpc/include/asm/page.h
+++ b/arch/powerpc/include/asm/page.h
@@ -31,7 +31,7 @@ extern unsigned int hpage_shift;
#define HPAGE_SHIFT hpage_shift
#elif defined(CONFIG_PPC_8xx)
#define HPAGE_SHIFT 19 /* 512k pages */
-#elif defined(CONFIG_PPC_FSL_BOOK3E)
+#elif defined(CONFIG_PPC_E500)
#define HPAGE_SHIFT 22 /* 4M pages */
#endif
#define HPAGE_SIZE ((1UL) << HPAGE_SHIFT)
@@ -132,7 +132,11 @@ static inline bool pfn_valid(unsigned long pfn)
#define virt_to_page(kaddr) pfn_to_page(virt_to_pfn(kaddr))
#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
-#define virt_addr_valid(kaddr) pfn_valid(virt_to_pfn(kaddr))
+#define virt_addr_valid(vaddr) ({ \
+ unsigned long _addr = (unsigned long)vaddr; \
+ _addr >= PAGE_OFFSET && _addr < (unsigned long)high_memory && \
+ pfn_valid(virt_to_pfn(_addr)); \
+})
/*
* On Book-E parts we need __va to parse the device tree and we can't
@@ -212,6 +216,9 @@ static inline bool pfn_valid(unsigned long pfn)
#define __pa(x) ((phys_addr_t)(unsigned long)(x) - VIRT_PHYS_OFFSET)
#else
#ifdef CONFIG_PPC64
+
+#define VIRTUAL_WARN_ON(x) WARN_ON(IS_ENABLED(CONFIG_DEBUG_VIRTUAL) && (x))
+
/*
* gcc miscompiles (unsigned long)(&static_var) - PAGE_OFFSET
* with -mcmodel=medium, so we use & and | instead of - and + on 64-bit.
@@ -219,13 +226,13 @@ static inline bool pfn_valid(unsigned long pfn)
*/
#define __va(x) \
({ \
- VIRTUAL_BUG_ON((unsigned long)(x) >= PAGE_OFFSET); \
+ VIRTUAL_WARN_ON((unsigned long)(x) >= PAGE_OFFSET); \
(void *)(unsigned long)((phys_addr_t)(x) | PAGE_OFFSET); \
})
#define __pa(x) \
({ \
- VIRTUAL_BUG_ON((unsigned long)(x) < PAGE_OFFSET); \
+ VIRTUAL_WARN_ON((unsigned long)(x) < PAGE_OFFSET); \
(unsigned long)(x) & 0x0fffffffffffffffUL; \
})
@@ -240,13 +247,8 @@ static inline bool pfn_valid(unsigned long pfn)
* and needs to be executable. This means the whole heap ends
* up being executable.
*/
-#define VM_DATA_DEFAULT_FLAGS32 \
- (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
- VM_READ | VM_WRITE | \
- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
-
-#define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+#define VM_DATA_DEFAULT_FLAGS32 VM_DATA_FLAGS_TSK_EXEC
+#define VM_DATA_DEFAULT_FLAGS64 VM_DATA_FLAGS_NON_EXEC
#ifdef __powerpc64__
#include <asm/page_64.h>
@@ -254,21 +256,16 @@ static inline bool pfn_valid(unsigned long pfn)
#include <asm/page_32.h>
#endif
-/* align addr on a size boundary - adjust address up/down if needed */
-#define _ALIGN_UP(addr, size) __ALIGN_KERNEL(addr, size)
-#define _ALIGN_DOWN(addr, size) ((addr)&(~((typeof(addr))(size)-1)))
-
-/* align addr on a size boundary - adjust address up if needed */
-#define _ALIGN(addr,size) _ALIGN_UP(addr,size)
-
/*
* Don't compare things with KERNELBASE or PAGE_OFFSET to test for
* "kernelness", use is_kernel_addr() - it should do what you want.
*/
#ifdef CONFIG_PPC_BOOK3E_64
#define is_kernel_addr(x) ((x) >= 0x8000000000000000ul)
-#else
+#elif defined(CONFIG_PPC_BOOK3S_64)
#define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
+#else
+#define is_kernel_addr(x) ((x) >= TASK_SIZE)
#endif
#ifndef CONFIG_PPC_BOOK3S_64
@@ -311,12 +308,6 @@ static inline bool pfn_valid(unsigned long pfn)
#include <asm/pgtable-types.h>
#endif
-
-#ifndef CONFIG_HUGETLB_PAGE
-#define is_hugepd(pdep) (0)
-#define pgd_huge(pgd) (0)
-#endif /* CONFIG_HUGETLB_PAGE */
-
struct page;
extern void clear_user_page(void *page, unsigned long vaddr, struct page *pg);
extern void copy_user_page(void *to, void *from, unsigned long vaddr,
@@ -339,6 +330,5 @@ static inline unsigned long kaslr_offset(void)
#include <asm-generic/memory_model.h>
#endif /* __ASSEMBLY__ */
-#include <asm/slice.h>
#endif /* _ASM_POWERPC_PAGE_H */
diff --git a/arch/powerpc/include/asm/page_32.h b/arch/powerpc/include/asm/page_32.h
index d64dfe3ac712..56f217606327 100644
--- a/arch/powerpc/include/asm/page_32.h
+++ b/arch/powerpc/include/asm/page_32.h
@@ -16,12 +16,6 @@
#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
#endif
-#ifdef CONFIG_PTE_64BIT
-#define PTE_FLAGS_OFFSET 4 /* offset of PTE flags, in bytes */
-#else
-#define PTE_FLAGS_OFFSET 0
-#endif
-
#if defined(CONFIG_PPC_256K_PAGES) || \
(defined(CONFIG_PPC_8xx) && defined(CONFIG_PPC_16K_PAGES))
#define PTE_SHIFT (PAGE_SHIFT - PTE_T_LOG2 - 2) /* 1/4 of a page */
diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
index 5962797f784a..79a9b7c6a132 100644
--- a/arch/powerpc/include/asm/page_64.h
+++ b/arch/powerpc/include/asm/page_64.h
@@ -94,11 +94,8 @@ extern u64 ppc64_pft_size;
* stack by default, so in the absence of a PT_GNU_STACK program header
* we turn execute permission off.
*/
-#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
-
-#define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+#define VM_STACK_DEFAULT_FLAGS32 VM_DATA_FLAGS_EXEC
+#define VM_STACK_DEFAULT_FLAGS64 VM_DATA_FLAGS_NON_EXEC
#define VM_STACK_DEFAULT_FLAGS \
(is_32bit_task() ? \
diff --git a/arch/powerpc/include/asm/paravirt.h b/arch/powerpc/include/asm/paravirt.h
new file mode 100644
index 000000000000..f5ba1a3c41f8
--- /dev/null
+++ b/arch/powerpc/include/asm/paravirt.h
@@ -0,0 +1,164 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _ASM_POWERPC_PARAVIRT_H
+#define _ASM_POWERPC_PARAVIRT_H
+
+#include <linux/jump_label.h>
+#include <asm/smp.h>
+#ifdef CONFIG_PPC64
+#include <asm/paca.h>
+#include <asm/hvcall.h>
+#endif
+
+#ifdef CONFIG_PPC_SPLPAR
+#include <linux/smp.h>
+#include <asm/kvm_guest.h>
+#include <asm/cputhreads.h>
+
+DECLARE_STATIC_KEY_FALSE(shared_processor);
+
+static inline bool is_shared_processor(void)
+{
+ return static_branch_unlikely(&shared_processor);
+}
+
+#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
+extern struct static_key paravirt_steal_enabled;
+extern struct static_key paravirt_steal_rq_enabled;
+
+u64 pseries_paravirt_steal_clock(int cpu);
+
+static inline u64 paravirt_steal_clock(int cpu)
+{
+ return pseries_paravirt_steal_clock(cpu);
+}
+#endif
+
+/* If bit 0 is set, the cpu has been ceded, conferred, or preempted */
+static inline u32 yield_count_of(int cpu)
+{
+ __be32 yield_count = READ_ONCE(lppaca_of(cpu).yield_count);
+ return be32_to_cpu(yield_count);
+}
+
+/*
+ * Spinlock code confers and prods, so don't trace the hcalls because the
+ * tracing code takes spinlocks which can cause recursion deadlocks.
+ *
+ * These calls are made while the lock is not held: the lock slowpath yields if
+ * it cannot acquire the lock, and the unlock slow path might prod if a waiter
+ * has yielded. So this may not be a problem for simple spin locks because the
+ * tracing does not technically recurse on the lock, but we avoid it anyway.
+ *
+ * However the queued spin lock contended path is more strictly ordered: the
+ * H_CONFER hcall is made after the task has queued itself on the lock, so then
+ * recursing on that lock will cause the task to then queue up again behind the
+ * first instance (or worse: queued spinlocks use tricks that assume a context
+ * never waits on more than one spinlock, so such recursion may cause random
+ * corruption in the lock code).
+ */
+static inline void yield_to_preempted(int cpu, u32 yield_count)
+{
+ plpar_hcall_norets_notrace(H_CONFER, get_hard_smp_processor_id(cpu), yield_count);
+}
+
+static inline void prod_cpu(int cpu)
+{
+ plpar_hcall_norets_notrace(H_PROD, get_hard_smp_processor_id(cpu));
+}
+
+static inline void yield_to_any(void)
+{
+ plpar_hcall_norets_notrace(H_CONFER, -1, 0);
+}
+#else
+static inline bool is_shared_processor(void)
+{
+ return false;
+}
+
+static inline u32 yield_count_of(int cpu)
+{
+ return 0;
+}
+
+extern void ___bad_yield_to_preempted(void);
+static inline void yield_to_preempted(int cpu, u32 yield_count)
+{
+ ___bad_yield_to_preempted(); /* This would be a bug */
+}
+
+extern void ___bad_yield_to_any(void);
+static inline void yield_to_any(void)
+{
+ ___bad_yield_to_any(); /* This would be a bug */
+}
+
+extern void ___bad_prod_cpu(void);
+static inline void prod_cpu(int cpu)
+{
+ ___bad_prod_cpu(); /* This would be a bug */
+}
+
+#endif
+
+#define vcpu_is_preempted vcpu_is_preempted
+static inline bool vcpu_is_preempted(int cpu)
+{
+ /*
+ * The dispatch/yield bit alone is an imperfect indicator of
+ * whether the hypervisor has dispatched @cpu to run on a physical
+ * processor. When it is clear, @cpu is definitely not preempted.
+ * But when it is set, it means only that it *might* be, subject to
+ * other conditions. So we check other properties of the VM and
+ * @cpu first, resorting to the yield count last.
+ */
+
+ /*
+ * Hypervisor preemption isn't possible in dedicated processor
+ * mode by definition.
+ */
+ if (!is_shared_processor())
+ return false;
+
+#ifdef CONFIG_PPC_SPLPAR
+ if (!is_kvm_guest()) {
+ int first_cpu;
+
+ /*
+ * The result of vcpu_is_preempted() is used in a
+ * speculative way, and is always subject to invalidation
+ * by events internal and external to Linux. While we can
+ * be called in preemptable context (in the Linux sense),
+ * we're not accessing per-cpu resources in a way that can
+ * race destructively with Linux scheduler preemption and
+ * migration, and callers can tolerate the potential for
+ * error introduced by sampling the CPU index without
+ * pinning the task to it. So it is permissible to use
+ * raw_smp_processor_id() here to defeat the preempt debug
+ * warnings that can arise from using smp_processor_id()
+ * in arbitrary contexts.
+ */
+ first_cpu = cpu_first_thread_sibling(raw_smp_processor_id());
+
+ /*
+ * The PowerVM hypervisor dispatches VMs on a whole core
+ * basis. So we know that a thread sibling of the local CPU
+ * cannot have been preempted by the hypervisor, even if it
+ * has called H_CONFER, which will set the yield bit.
+ */
+ if (cpu_first_thread_sibling(cpu) == first_cpu)
+ return false;
+ }
+#endif
+
+ if (yield_count_of(cpu) & 1)
+ return true;
+ return false;
+}
+
+static inline bool pv_is_native_spin_unlock(void)
+{
+ return !is_shared_processor();
+}
+
+#endif /* _ASM_POWERPC_PARAVIRT_H */
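
The paravirt helpers above are meant to be combined by lock code roughly as follows: sample the owner's yield count and confer to the hypervisor only when the count is odd (vCPU ceded or preempted). A hypothetical busy-wait sketch, not taken from the patch:

    static inline void example_spin_on_owner(int owner_cpu, volatile int *locked)
    {
            while (READ_ONCE(*locked)) {
                    u32 yield_count = yield_count_of(owner_cpu);

                    if (yield_count & 1)            /* owner vCPU not currently dispatched */
                            yield_to_preempted(owner_cpu, yield_count);
                    else
                            cpu_relax();            /* plain spin otherwise */
            }
    }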
diff --git a/arch/powerpc/include/asm/paravirt_api_clock.h b/arch/powerpc/include/asm/paravirt_api_clock.h
new file mode 100644
index 000000000000..d25ca7ac57c7
--- /dev/null
+++ b/arch/powerpc/include/asm/paravirt_api_clock.h
@@ -0,0 +1,2 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <asm/paravirt.h>
diff --git a/arch/powerpc/include/asm/parport.h b/arch/powerpc/include/asm/parport.h
index 8abfb8f7c33d..42cc321ed754 100644
--- a/arch/powerpc/include/asm/parport.h
+++ b/arch/powerpc/include/asm/parport.h
@@ -11,7 +11,7 @@
#define _ASM_POWERPC_PARPORT_H
#ifdef __KERNEL__
-#include <asm/prom.h>
+#include <linux/of_irq.h>
static int parport_pc_find_nonpci_ports (int autoirq, int autodma)
{
diff --git a/arch/powerpc/include/asm/pci-bridge.h b/arch/powerpc/include/asm/pci-bridge.h
index 69f4cb3b7c56..e18c95f4e1d4 100644
--- a/arch/powerpc/include/asm/pci-bridge.h
+++ b/arch/powerpc/include/asm/pci-bridge.h
@@ -66,7 +66,7 @@ struct pci_controller {
void __iomem *io_base_virt;
#ifdef CONFIG_PPC64
- void *io_base_alloc;
+ void __iomem *io_base_alloc;
#endif
resource_size_t io_base_phys;
resource_size_t pci_io_size;
@@ -126,7 +126,11 @@ struct pci_controller {
#endif /* CONFIG_PPC64 */
void *private_data;
- struct npu *npu;
+
+ /* IRQ domain hierarchy */
+ struct irq_domain *dev_domain;
+ struct irq_domain *msi_domain;
+ struct fwnode_handle *fwnode;
};
/* These are used for config access before all the PCI probing
@@ -166,11 +170,15 @@ static inline struct pci_controller *pci_bus_to_host(const struct pci_bus *bus)
return bus->sysdata;
}
-#ifndef CONFIG_PPC64
-
+#ifdef CONFIG_PPC_PMAC
extern int pci_device_from_OF_node(struct device_node *node,
u8 *bus, u8 *devfn);
+#endif
+#ifndef CONFIG_PPC64
+
+#ifdef CONFIG_PPC_CHRP
extern void pci_create_OF_bus_map(void);
+#endif
#else /* CONFIG_PPC64 */
@@ -202,7 +210,6 @@ struct pci_dn {
#define IODA_INVALID_PE 0xFFFFFFFF
unsigned int pe_number;
#ifdef CONFIG_PCI_IOV
- int vf_index; /* VF index in the PF */
u16 vfs_expanded; /* number of VFs IOV BAR expanded */
u16 num_vfs; /* number of VFs enabled*/
unsigned int *pe_num_map; /* PE# for the first VF PE or array */
@@ -232,16 +239,6 @@ struct pci_dn *add_sriov_vf_pdns(struct pci_dev *pdev);
void remove_sriov_vf_pdns(struct pci_dev *pdev);
#endif
-static inline int pci_device_from_OF_node(struct device_node *np,
- u8 *bus, u8 *devfn)
-{
- if (!PCI_DN(np))
- return -ENODEV;
- *bus = PCI_DN(np)->busno;
- *devfn = PCI_DN(np)->devfn;
- return 0;
-}
-
#if defined(CONFIG_EEH)
static inline struct eeh_dev *pdn_to_eeh_dev(struct pci_dn *pdn)
{
diff --git a/arch/powerpc/include/asm/pci.h b/arch/powerpc/include/asm/pci.h
index 63ed7e3b0ba3..289f1ec85bc5 100644
--- a/arch/powerpc/include/asm/pci.h
+++ b/arch/powerpc/include/asm/pci.h
@@ -9,12 +9,11 @@
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/string.h>
-#include <linux/dma-mapping.h>
+#include <linux/dma-map-ops.h>
#include <linux/scatterlist.h>
#include <asm/machdep.h>
#include <asm/io.h>
-#include <asm/prom.h>
#include <asm/pci-bridge.h>
/* Return values for pci_controller_ops.probe_mode function */
@@ -39,7 +38,6 @@
#define pcibios_assign_all_busses() \
(pci_has_flag(PCI_REASSIGN_ALL_BUS))
-#define HAVE_ARCH_PCI_GET_LEGACY_IDE_IRQ
static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
{
if (ppc_md.pci_get_legacy_ide_irq)
@@ -48,7 +46,7 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
}
#ifdef CONFIG_PCI
-extern void set_pci_dma_ops(const struct dma_map_ops *dma_ops);
+void __init set_pci_dma_ops(const struct dma_map_ops *dma_ops);
#else /* CONFIG_PCI */
#define set_pci_dma_ops(d)
#endif
@@ -119,11 +117,4 @@ extern void pcibios_scan_phb(struct pci_controller *hose);
#endif /* __KERNEL__ */
-extern struct pci_dev *pnv_pci_get_gpu_dev(struct pci_dev *npdev);
-extern struct pci_dev *pnv_pci_get_npu_dev(struct pci_dev *gpdev, int index);
-extern int pnv_npu2_init(struct pci_controller *hose);
-extern int pnv_npu2_map_lpar_dev(struct pci_dev *gpdev, unsigned int lparid,
- unsigned long msr);
-extern int pnv_npu2_unmap_lpar_dev(struct pci_dev *gpdev);
-
#endif /* __ASM_POWERPC_PCI_H */
diff --git a/arch/powerpc/include/asm/percpu.h b/arch/powerpc/include/asm/percpu.h
index dce863a7635c..8e5b7d0b851c 100644
--- a/arch/powerpc/include/asm/percpu.h
+++ b/arch/powerpc/include/asm/percpu.h
@@ -10,8 +10,6 @@
#ifdef CONFIG_SMP
-#include <asm/paca.h>
-
#define __my_cpu_offset local_paca->data_offset
#endif /* CONFIG_SMP */
@@ -19,4 +17,6 @@
#include <asm-generic/percpu.h>
+#include <asm/paca.h>
+
#endif /* _ASM_POWERPC_PERCPU_H_ */
diff --git a/arch/powerpc/include/asm/perf_event.h b/arch/powerpc/include/asm/perf_event.h
index 7426d7a90e1e..164e910bf654 100644
--- a/arch/powerpc/include/asm/perf_event.h
+++ b/arch/powerpc/include/asm/perf_event.h
@@ -12,6 +12,9 @@
#ifdef CONFIG_PPC_PERF_CTRS
#include <asm/perf_event_server.h>
+#else
+static inline bool is_sier_available(void) { return false; }
+static inline unsigned long get_pmcs_ext_regs(int idx) { return 0; }
#endif
#ifdef CONFIG_FSL_EMB_PERF_EVENT
@@ -32,10 +35,14 @@
do { \
(regs)->result = 0; \
(regs)->nip = __ip; \
- (regs)->gpr[1] = current_stack_pointer(); \
+ (regs)->gpr[1] = current_stack_frame(); \
asm volatile("mfmsr %0" : "=r" ((regs)->msr)); \
} while (0)
/* To support perf_regs sier update */
extern bool is_sier_available(void);
+extern unsigned long get_pmcs_ext_regs(int idx);
+/* To define perf extended regs mask value */
+extern u64 PERF_REG_EXTENDED_MASK;
+#define PERF_REG_EXTENDED_MASK PERF_REG_EXTENDED_MASK
#endif
diff --git a/arch/powerpc/include/asm/perf_event_server.h b/arch/powerpc/include/asm/perf_event_server.h
index 3e9703f44c7c..e2221d29fdf9 100644
--- a/arch/powerpc/include/asm/perf_event_server.h
+++ b/arch/powerpc/include/asm/perf_event_server.h
@@ -17,6 +17,13 @@
struct perf_event;
+struct mmcr_regs {
+ unsigned long mmcr0;
+ unsigned long mmcr1;
+ unsigned long mmcr2;
+ unsigned long mmcra;
+ unsigned long mmcr3;
+};
/*
* This struct provides the constants and functions needed to
* describe the PMU on a particular POWER-family CPU.
@@ -28,26 +35,26 @@ struct power_pmu {
unsigned long add_fields;
unsigned long test_adder;
int (*compute_mmcr)(u64 events[], int n_ev,
- unsigned int hwc[], unsigned long mmcr[],
- struct perf_event *pevents[]);
+ unsigned int hwc[], struct mmcr_regs *mmcr,
+ struct perf_event *pevents[], u32 flags);
int (*get_constraint)(u64 event_id, unsigned long *mskp,
- unsigned long *valp);
+ unsigned long *valp, u64 event_config1);
int (*get_alternatives)(u64 event_id, unsigned int flags,
u64 alt[]);
void (*get_mem_data_src)(union perf_mem_data_src *dsrc,
u32 flags, struct pt_regs *regs);
- void (*get_mem_weight)(u64 *weight);
+ void (*get_mem_weight)(u64 *weight, u64 type);
unsigned long group_constraint_mask;
unsigned long group_constraint_val;
u64 (*bhrb_filter_map)(u64 branch_sample_type);
void (*config_bhrb)(u64 pmu_bhrb_filter);
- void (*disable_pmc)(unsigned int pmc, unsigned long mmcr[]);
+ void (*disable_pmc)(unsigned int pmc, struct mmcr_regs *mmcr);
int (*limited_pmc_event)(u64 event_id);
u32 flags;
const struct attribute_group **attr_groups;
int n_generic;
int *generic_events;
- int (*cache_events)[PERF_COUNT_HW_CACHE_MAX]
+ u64 (*cache_events)[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX];
@@ -55,6 +62,17 @@ struct power_pmu {
int *blacklist_ev;
/* BHRB entries in the PMU */
int bhrb_nr;
+ /*
+ * set this flag with `PERF_PMU_CAP_EXTENDED_REGS` if
+ * the pmu supports extended perf regs capability
+ */
+ int capabilities;
+ /*
+ * Function to check event code for values which are
+ * reserved. Function takes struct perf_event as input,
+ * since event code could be spread in attr.config*
+ */
+ int (*check_attr_config)(struct perf_event *ev);
};
/*
@@ -69,6 +87,9 @@ struct power_pmu {
#define PPMU_HAS_SIER 0x00000040 /* Has SIER */
#define PPMU_ARCH_207S 0x00000080 /* PMC is architecture v2.07S */
#define PPMU_NO_SIAR 0x00000100 /* Do not use SIAR */
+#define PPMU_ARCH_31 0x00000200 /* Has MMCR3, SIER2 and SIER3 */
+#define PPMU_P10_DD1 0x00000400 /* Is power10 DD1 processor version */
+#define PPMU_HAS_ATTR_CONFIG1 0x00000800 /* Using config1 attribute */
/*
* Values for flags to get_alternatives()
@@ -77,7 +98,7 @@ struct power_pmu {
#define PPMU_LIMITED_PMC_REQD 2 /* have to put this on a limited PMC */
#define PPMU_ONLY_COUNT_RUN 4 /* only counting in run state */
-extern int register_power_pmu(struct power_pmu *);
+int __init register_power_pmu(struct power_pmu *pmu);
struct pt_regs;
extern unsigned long perf_misc_flags(struct pt_regs *regs);
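
The main interface change in perf_event_server.h is that compute_mmcr() and disable_pmc() now take a struct mmcr_regs with named fields, including the new ISA v3.1 MMCR3, instead of an opaque unsigned long mmcr[] array. Below is a rough, self-contained sketch of what filling that struct looks like from a back-end's point of view; the event-to-MMCR1 encoding is invented purely for illustration and is not the logic of any real PMU driver such as isa207_compute_mmcr():

#include <stdio.h>

struct mmcr_regs {			/* mirrors the struct added by this patch */
	unsigned long mmcr0;
	unsigned long mmcr1;
	unsigned long mmcr2;
	unsigned long mmcra;
	unsigned long mmcr3;		/* only programmed on ISA v3.1 (PPMU_ARCH_31) PMUs */
};

/* Toy stand-in for a back-end's compute_mmcr(): assign PMCs in order and
 * pack an invented "unit" field from each event code into MMCR1. */
static int toy_compute_mmcr(unsigned long long events[], int n_ev,
			    unsigned int hwc[], struct mmcr_regs *mmcr)
{
	int i;

	mmcr->mmcr0 = mmcr->mmcr1 = mmcr->mmcr2 = 0;
	mmcr->mmcra = mmcr->mmcr3 = 0;

	for (i = 0; i < n_ev; i++) {
		hwc[i] = i;		/* event i counts on PMC i+1 */
		mmcr->mmcr1 |= (unsigned long)(events[i] & 0xff) << (8 * i);
	}
	return 0;
}

int main(void)
{
	unsigned long long ev[2] = { 0x1e, 0x2e };
	unsigned int hwc[2];
	struct mmcr_regs m;

	toy_compute_mmcr(ev, 2, hwc, &m);
	printf("mmcr1 = 0x%lx, event 0 on PMC%u\n", m.mmcr1, hwc[0] + 1);
	return 0;
}
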
diff --git a/arch/powerpc/include/asm/pgalloc.h b/arch/powerpc/include/asm/pgalloc.h
index 6dd78a2dc03a..3360cad78ace 100644
--- a/arch/powerpc/include/asm/pgalloc.h
+++ b/arch/powerpc/include/asm/pgalloc.h
@@ -70,9 +70,4 @@ extern struct kmem_cache *pgtable_cache[];
#include <asm/nohash/pgalloc.h>
#endif
-static inline pgtable_t pmd_pgtable(pmd_t pmd)
-{
- return (pgtable_t)pmd_page_vaddr(pmd);
-}
-
#endif /* _ASM_POWERPC_PGALLOC_H */
diff --git a/arch/powerpc/include/asm/pgtable-be-types.h b/arch/powerpc/include/asm/pgtable-be-types.h
index b169bbf95fcb..82633200b500 100644
--- a/arch/powerpc/include/asm/pgtable-be-types.h
+++ b/arch/powerpc/include/asm/pgtable-be-types.h
@@ -101,6 +101,7 @@ static inline bool pmd_xchg(pmd_t *pmdp, pmd_t old, pmd_t new)
return pmd_raw(old) == prev;
}
+#ifdef CONFIG_ARCH_HAS_HUGEPD
typedef struct { __be64 pdbe; } hugepd_t;
#define __hugepd(x) ((hugepd_t) { cpu_to_be64(x) })
@@ -108,5 +109,6 @@ static inline unsigned long hpd_val(hugepd_t x)
{
return be64_to_cpu(x.pdbe);
}
+#endif
#endif /* _ASM_POWERPC_PGTABLE_BE_TYPES_H */
diff --git a/arch/powerpc/include/asm/pgtable-types.h b/arch/powerpc/include/asm/pgtable-types.h
index d11b4c61d686..082c85cc09b1 100644
--- a/arch/powerpc/include/asm/pgtable-types.h
+++ b/arch/powerpc/include/asm/pgtable-types.h
@@ -2,17 +2,33 @@
#ifndef _ASM_POWERPC_PGTABLE_TYPES_H
#define _ASM_POWERPC_PGTABLE_TYPES_H
+#if defined(__CHECKER__) || !defined(CONFIG_PPC32)
+#define STRICT_MM_TYPECHECKS
+#endif
+
/* PTE level */
#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PPC_16K_PAGES)
typedef struct { pte_basic_t pte, pte1, pte2, pte3; } pte_t;
-#else
+#elif defined(STRICT_MM_TYPECHECKS)
typedef struct { pte_basic_t pte; } pte_t;
+#else
+typedef pte_basic_t pte_t;
#endif
+
+#if defined(STRICT_MM_TYPECHECKS) || \
+ (defined(CONFIG_PPC_8xx) && defined(CONFIG_PPC_16K_PAGES))
#define __pte(x) ((pte_t) { (x) })
static inline pte_basic_t pte_val(pte_t x)
{
return x.pte;
}
+#else
+#define __pte(x) ((pte_t)(x))
+static inline pte_basic_t pte_val(pte_t x)
+{
+ return x;
+}
+#endif
/* PMD level */
#ifdef CONFIG_PPC64
@@ -67,11 +83,13 @@ static inline bool pte_xchg(pte_t *ptep, pte_t old, pte_t new)
}
#endif
+#ifdef CONFIG_ARCH_HAS_HUGEPD
typedef struct { unsigned long pd; } hugepd_t;
#define __hugepd(x) ((hugepd_t) { (x) })
static inline unsigned long hpd_val(hugepd_t x)
{
return x.pd;
}
+#endif
#endif /* _ASM_POWERPC_PGTABLE_TYPES_H */
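
The pgtable-types.h change makes the struct-wrapped pte_t (STRICT_MM_TYPECHECKS) the default except on CONFIG_PPC32 builds not being run through sparse, which now get a bare typedef. The value of the struct wrapper is type safety: a raw pte_basic_t cannot be mixed with a pte_t by accident, while __pte()/pte_val() keep callers identical in both variants. A standalone toy version, assuming pte_basic_t is unsigned long:

#include <stdio.h>

typedef unsigned long pte_basic_t;	/* stands in for the kernel's pte_basic_t */

#define STRICT_MM_TYPECHECKS		/* comment out to get the bare-typedef variant */

#ifdef STRICT_MM_TYPECHECKS
typedef struct { pte_basic_t pte; } pte_t;
#define __pte(x)	((pte_t) { (x) })
static inline pte_basic_t pte_val(pte_t x) { return x.pte; }
#else
typedef pte_basic_t pte_t;
#define __pte(x)	((pte_t)(x))
static inline pte_basic_t pte_val(pte_t x) { return x; }
#endif

int main(void)
{
	pte_t pte = __pte(0x105UL);

	/* pte = 42;  <- rejected by the compiler under STRICT_MM_TYPECHECKS,
	 *              silently accepted with the bare typedef */
	printf("raw pte value: 0x%lx\n", pte_val(pte));
	return 0;
}
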
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index 8cc543ed114c..283f40d05a4d 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -20,28 +20,30 @@ struct mm_struct;
#include <asm/nohash/pgtable.h>
#endif /* !CONFIG_PPC_BOOK3S */
-/* Note due to the way vm flags are laid out, the bits are XWR */
-#define __P000 PAGE_NONE
-#define __P001 PAGE_READONLY
-#define __P010 PAGE_COPY
-#define __P011 PAGE_COPY
-#define __P100 PAGE_READONLY_X
-#define __P101 PAGE_READONLY_X
-#define __P110 PAGE_COPY_X
-#define __P111 PAGE_COPY_X
-
-#define __S000 PAGE_NONE
-#define __S001 PAGE_READONLY
-#define __S010 PAGE_SHARED
-#define __S011 PAGE_SHARED
-#define __S100 PAGE_READONLY_X
-#define __S101 PAGE_READONLY_X
-#define __S110 PAGE_SHARED_X
-#define __S111 PAGE_SHARED_X
+/*
+ * Protection used for kernel text. We want the debuggers to be able to
+ * set breakpoints anywhere, so don't write protect the kernel text
+ * on platforms where such control is possible.
+ */
+#if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH) || \
+ defined(CONFIG_KPROBES) || defined(CONFIG_DYNAMIC_FTRACE)
+#define PAGE_KERNEL_TEXT PAGE_KERNEL_X
+#else
+#define PAGE_KERNEL_TEXT PAGE_KERNEL_ROX
+#endif
+
+/* Make modules code happy. We don't set RO yet */
+#define PAGE_KERNEL_EXEC PAGE_KERNEL_X
+
+/* Advertise special mapping type for AGP */
+#define PAGE_AGP (PAGE_KERNEL_NC)
+#define HAVE_PAGE_AGP
#ifndef __ASSEMBLY__
-#include <asm/tlbflush.h>
+#ifndef MAX_PTRS_PER_PGD
+#define MAX_PTRS_PER_PGD PTRS_PER_PGD
+#endif
/* Keep these as a macros to avoid include dependency mess */
#define pte_page(x) pfn_to_page(pte_pfn(x))
@@ -57,6 +59,13 @@ static inline pgprot_t pte_pgprot(pte_t pte)
return __pgprot(pte_flags);
}
+#ifndef pmd_page_vaddr
+static inline unsigned long pmd_page_vaddr(pmd_t pmd)
+{
+ return ((unsigned long)__va(pmd_val(pmd) & ~PMD_MASKED_BITS));
+}
+#define pmd_page_vaddr pmd_page_vaddr
+#endif
/*
* ZERO_PAGE is a global shared page that is always zero: used
* for zero-mapped memory areas etc..
@@ -67,8 +76,10 @@ extern unsigned long empty_zero_page[];
extern pgd_t swapper_pg_dir[];
extern void paging_init(void);
+void poking_init(void);
extern unsigned long ioremap_bot;
+extern const pgprot_t protection_map[16];
/*
* kern_addr_valid is intended to indicate whether an address is a valid
@@ -77,8 +88,6 @@ extern unsigned long ioremap_bot;
*/
#define kern_addr_valid(addr) (1)
-#include <asm-generic/pgtable.h>
-
#ifndef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_large(pmd) 0
#endif
@@ -88,6 +97,8 @@ unsigned long vmalloc_to_phys(void *vmalloc_addr);
void pgtable_cache_add(unsigned int shift);
+pte_t *early_pte_alloc_kernel(pmd_t *pmdp, unsigned long va);
+
#if defined(CONFIG_STRICT_KERNEL_RWX) || defined(CONFIG_PPC32)
void mark_initmem_nx(void);
#else
@@ -139,14 +150,20 @@ static inline bool pud_is_leaf(pud_t pud)
}
#endif
-#ifndef pgd_is_leaf
-#define pgd_is_leaf pgd_is_leaf
-static inline bool pgd_is_leaf(pgd_t pgd)
+#ifndef p4d_is_leaf
+#define p4d_is_leaf p4d_is_leaf
+static inline bool p4d_is_leaf(p4d_t p4d)
{
return false;
}
#endif
+#define pmd_pgtable pmd_pgtable
+static inline pgtable_t pmd_pgtable(pmd_t pmd)
+{
+ return (pgtable_t)pmd_page_vaddr(pmd);
+}
+
#ifdef CONFIG_PPC64
#define is_ioremap_addr is_ioremap_addr
static inline bool is_ioremap_addr(const void *x)
@@ -155,6 +172,9 @@ static inline bool is_ioremap_addr(const void *x)
return addr >= IOREMAP_BASE && addr < IOREMAP_END;
}
+
+struct seq_file;
+void arch_report_meminfo(struct seq_file *m);
#endif /* CONFIG_PPC64 */
#endif /* __ASSEMBLY__ */
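
The PAGE_KERNEL_TEXT block moved into pgtable.h encodes a simple policy: if any facility that patches or breakpoints kernel text is configured (KGDB, xmon, kprobes, dynamic ftrace, BDI switch), text is mapped PAGE_KERNEL_X so it stays writable; otherwise it can be PAGE_KERNEL_ROX. As a throwaway way to see which way a given configuration would go, the same #if can be compiled outside the kernel, passing the CONFIG_ macros on the command line (e.g. cc -DCONFIG_DYNAMIC_FTRACE demo.c):

#include <stdio.h>

int main(void)
{
#if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH) || \
    defined(CONFIG_KPROBES) || defined(CONFIG_DYNAMIC_FTRACE)
	puts("PAGE_KERNEL_TEXT = PAGE_KERNEL_X   (text stays writable so it can be patched)");
#else
	puts("PAGE_KERNEL_TEXT = PAGE_KERNEL_ROX (text mapped read-only, execute)");
#endif
	return 0;
}
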
diff --git a/arch/powerpc/include/asm/pkeys.h b/arch/powerpc/include/asm/pkeys.h
index 20ebf153c871..59a2c7dbc78f 100644
--- a/arch/powerpc/include/asm/pkeys.h
+++ b/arch/powerpc/include/asm/pkeys.h
@@ -11,9 +11,7 @@
#include <linux/jump_label.h>
#include <asm/firmware.h>
-DECLARE_STATIC_KEY_TRUE(pkey_disabled);
-extern int pkeys_total; /* total pkeys as per device tree */
-extern u32 initial_allocation_mask; /* bits set for the initially allocated keys */
+extern int num_pkey;
extern u32 reserved_allocation_mask; /* bits set for reserved keys */
#define ARCH_VM_PKEY_FLAGS (VM_PKEY_BIT0 | VM_PKEY_BIT1 | VM_PKEY_BIT2 | \
@@ -25,48 +23,28 @@ extern u32 reserved_allocation_mask; /* bits set for reserved keys */
PKEY_DISABLE_WRITE | \
PKEY_DISABLE_EXECUTE)
+#ifdef CONFIG_PPC_BOOK3S_64
+#include <asm/book3s/64/pkeys.h>
+#else
+#error "Not supported"
+#endif
+
+
static inline u64 pkey_to_vmflag_bits(u16 pkey)
{
return (((u64)pkey << VM_PKEY_SHIFT) & ARCH_VM_PKEY_FLAGS);
}
-static inline u64 vmflag_to_pte_pkey_bits(u64 vm_flags)
-{
- if (static_branch_likely(&pkey_disabled))
- return 0x0UL;
-
- return (((vm_flags & VM_PKEY_BIT0) ? H_PTE_PKEY_BIT4 : 0x0UL) |
- ((vm_flags & VM_PKEY_BIT1) ? H_PTE_PKEY_BIT3 : 0x0UL) |
- ((vm_flags & VM_PKEY_BIT2) ? H_PTE_PKEY_BIT2 : 0x0UL) |
- ((vm_flags & VM_PKEY_BIT3) ? H_PTE_PKEY_BIT1 : 0x0UL) |
- ((vm_flags & VM_PKEY_BIT4) ? H_PTE_PKEY_BIT0 : 0x0UL));
-}
-
static inline int vma_pkey(struct vm_area_struct *vma)
{
- if (static_branch_likely(&pkey_disabled))
+ if (!mmu_has_feature(MMU_FTR_PKEY))
return 0;
return (vma->vm_flags & ARCH_VM_PKEY_FLAGS) >> VM_PKEY_SHIFT;
}
-#define arch_max_pkey() pkeys_total
-
-static inline u64 pte_to_hpte_pkey_bits(u64 pteflags)
+static inline int arch_max_pkey(void)
{
- return (((pteflags & H_PTE_PKEY_BIT0) ? HPTE_R_KEY_BIT0 : 0x0UL) |
- ((pteflags & H_PTE_PKEY_BIT1) ? HPTE_R_KEY_BIT1 : 0x0UL) |
- ((pteflags & H_PTE_PKEY_BIT2) ? HPTE_R_KEY_BIT2 : 0x0UL) |
- ((pteflags & H_PTE_PKEY_BIT3) ? HPTE_R_KEY_BIT3 : 0x0UL) |
- ((pteflags & H_PTE_PKEY_BIT4) ? HPTE_R_KEY_BIT4 : 0x0UL));
-}
-
-static inline u16 pte_to_pkey_bits(u64 pteflags)
-{
- return (((pteflags & H_PTE_PKEY_BIT0) ? 0x10 : 0x0UL) |
- ((pteflags & H_PTE_PKEY_BIT1) ? 0x8 : 0x0UL) |
- ((pteflags & H_PTE_PKEY_BIT2) ? 0x4 : 0x0UL) |
- ((pteflags & H_PTE_PKEY_BIT3) ? 0x2 : 0x0UL) |
- ((pteflags & H_PTE_PKEY_BIT4) ? 0x1 : 0x0UL));
+ return num_pkey;
}
#define pkey_alloc_mask(pkey) (0x1 << pkey)
@@ -101,7 +79,7 @@ static inline bool mm_pkey_is_allocated(struct mm_struct *mm, int pkey)
/*
* Returns a positive, 5-bit key on success, or -1 on failure.
- * Relies on the mmap_sem to protect against concurrency in mm_pkey_alloc() and
+ * Relies on the mmap_lock to protect against concurrency in mm_pkey_alloc() and
* mm_pkey_free().
*/
static inline int mm_pkey_alloc(struct mm_struct *mm)
@@ -114,9 +92,8 @@ static inline int mm_pkey_alloc(struct mm_struct *mm)
u32 all_pkeys_mask = (u32)(~(0x0));
int ret;
- if (static_branch_likely(&pkey_disabled))
+ if (!mmu_has_feature(MMU_FTR_PKEY))
return -1;
-
/*
* Are we out of pkeys? We must handle this specially because ffz()
* behavior is undefined if there are no zeros.
@@ -132,7 +109,7 @@ static inline int mm_pkey_alloc(struct mm_struct *mm)
static inline int mm_pkey_free(struct mm_struct *mm, int pkey)
{
- if (static_branch_likely(&pkey_disabled))
+ if (!mmu_has_feature(MMU_FTR_PKEY))
return -1;
if (!mm_pkey_is_allocated(mm, pkey))
@@ -147,21 +124,13 @@ static inline int mm_pkey_free(struct mm_struct *mm, int pkey)
* Try to dedicate one of the protection keys to be used as an
* execute-only protection key.
*/
-extern int __execute_only_pkey(struct mm_struct *mm);
-static inline int execute_only_pkey(struct mm_struct *mm)
-{
- if (static_branch_likely(&pkey_disabled))
- return -1;
-
- return __execute_only_pkey(mm);
-}
-
+extern int execute_only_pkey(struct mm_struct *mm);
extern int __arch_override_mprotect_pkey(struct vm_area_struct *vma,
int prot, int pkey);
static inline int arch_override_mprotect_pkey(struct vm_area_struct *vma,
int prot, int pkey)
{
- if (static_branch_likely(&pkey_disabled))
+ if (!mmu_has_feature(MMU_FTR_PKEY))
return 0;
/*
@@ -179,7 +148,7 @@ extern int __arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
static inline int arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
unsigned long init_val)
{
- if (static_branch_likely(&pkey_disabled))
+ if (!mmu_has_feature(MMU_FTR_PKEY))
return -EINVAL;
/*
@@ -196,14 +165,8 @@ static inline int arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
static inline bool arch_pkeys_enabled(void)
{
- return !static_branch_likely(&pkey_disabled);
+ return mmu_has_feature(MMU_FTR_PKEY);
}
extern void pkey_mm_init(struct mm_struct *mm);
-extern bool arch_supports_pkeys(int cap);
-extern unsigned int arch_usable_pkeys(void);
-extern void thread_pkey_regs_save(struct thread_struct *thread);
-extern void thread_pkey_regs_restore(struct thread_struct *new_thread,
- struct thread_struct *old_thread);
-extern void thread_pkey_regs_init(struct thread_struct *thread);
#endif /*_ASM_POWERPC_KEYS_H */
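
Beyond swapping the pkey_disabled static key for mmu_has_feature(MMU_FTR_PKEY), the pkeys.h code keeps the same allocation scheme: pkeys are bits in a 32-bit mask, a new key is the first zero bit, and the all-bits-set case has to be checked up front because ffz() is undefined then. A standalone model of that bookkeeping; num_pkey and the initially reserved keys are invented for the demo (the real values come from the device tree and firmware), and a GCC builtin stands in for ffz():

#include <stdio.h>
#include <stdint.h>

#define pkey_alloc_mask(pkey)	(0x1u << (pkey))

static int num_pkey = 16;		/* in the kernel this comes from the device tree */
static uint32_t allocation_mask = 0x7;	/* pretend keys 0-2 are reserved/in use */

static int ffz32(uint32_t x)		/* first zero bit, like the kernel's ffz() */
{
	return __builtin_ctz(~x);
}

static int demo_pkey_alloc(void)
{
	uint32_t all_pkeys_mask = ~0u;
	int ret;

	/* ffz() is undefined when every bit is set, so test that first */
	if (allocation_mask == all_pkeys_mask)
		return -1;

	ret = ffz32(allocation_mask);
	if (ret >= num_pkey)
		return -1;

	allocation_mask |= pkey_alloc_mask(ret);
	return ret;
}

int main(void)
{
	printf("first free pkey: %d\n", demo_pkey_alloc());
	printf("next free pkey:  %d\n", demo_pkey_alloc());
	return 0;
}
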
diff --git a/arch/powerpc/include/asm/plpar_wrappers.h b/arch/powerpc/include/asm/plpar_wrappers.h
index 4497c8afb573..8239c0af5eb2 100644
--- a/arch/powerpc/include/asm/plpar_wrappers.h
+++ b/arch/powerpc/include/asm/plpar_wrappers.h
@@ -28,7 +28,11 @@ static inline void set_cede_latency_hint(u8 latency_hint)
static inline long cede_processor(void)
{
- return plpar_hcall_norets(H_CEDE);
+ /*
+ * We cannot call tracepoints inside RCU idle regions which
+ * means we must not trace H_CEDE.
+ */
+ return plpar_hcall_norets_notrace(H_CEDE);
}
static inline long extended_cede_processor(unsigned long latency_hint)
@@ -39,11 +43,10 @@ static inline long extended_cede_processor(unsigned long latency_hint)
set_cede_latency_hint(latency_hint);
rc = cede_processor();
-#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
+
/* Ensure that H_CEDE returns with IRQs on */
- if (WARN_ON(!(mfmsr() & MSR_EE)))
+ if (WARN_ON(IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG) && !(mfmsr() & MSR_EE)))
__hard_irq_enable();
-#endif
set_cede_latency_hint(old_latency_hint);
@@ -312,7 +315,12 @@ static inline long plpar_set_ciabr(unsigned long ciabr)
static inline long plpar_set_watchpoint0(unsigned long dawr0, unsigned long dawrx0)
{
- return plpar_set_mode(0, H_SET_MODE_RESOURCE_SET_DAWR, dawr0, dawrx0);
+ return plpar_set_mode(0, H_SET_MODE_RESOURCE_SET_DAWR0, dawr0, dawrx0);
+}
+
+static inline long plpar_set_watchpoint1(unsigned long dawr1, unsigned long dawrx1)
+{
+ return plpar_set_mode(0, H_SET_MODE_RESOURCE_SET_DAWR1, dawr1, dawrx1);
}
static inline long plpar_signal_sys_reset(long cpu)
@@ -334,6 +342,51 @@ static inline long plpar_get_cpu_characteristics(struct h_cpu_char_result *p)
return rc;
}
+/*
+ * Wrapper to H_RPT_INVALIDATE hcall that handles return values appropriately
+ *
+ * - Returns H_SUCCESS on success
+ * - For H_BUSY return value, we retry the hcall.
+ * - For any other hcall failures, attempt a full flush once before
+ * resorting to BUG().
+ *
+ * Note: This hcall is expected to fail only very rarely. The correct
+ * error recovery of killing the process/guest will be eventually
+ * needed.
+ */
+static inline long pseries_rpt_invalidate(u32 pid, u64 target, u64 type,
+ u64 page_sizes, u64 start, u64 end)
+{
+ long rc;
+ unsigned long all;
+
+ while (true) {
+ rc = plpar_hcall_norets(H_RPT_INVALIDATE, pid, target, type,
+ page_sizes, start, end);
+ if (rc == H_BUSY) {
+ cpu_relax();
+ continue;
+ } else if (rc == H_SUCCESS)
+ return rc;
+
+ /* Flush request failed, try with a full flush once */
+ if (type & H_RPTI_TYPE_NESTED)
+ all = H_RPTI_TYPE_NESTED | H_RPTI_TYPE_NESTED_ALL;
+ else
+ all = H_RPTI_TYPE_ALL;
+retry:
+ rc = plpar_hcall_norets(H_RPT_INVALIDATE, pid, target,
+ all, page_sizes, 0, -1UL);
+ if (rc == H_BUSY) {
+ cpu_relax();
+ goto retry;
+ } else if (rc == H_SUCCESS)
+ return rc;
+
+ BUG();
+ }
+}
+
#else /* !CONFIG_PPC_PSERIES */
static inline long plpar_set_ciabr(unsigned long ciabr)
@@ -346,6 +399,13 @@ static inline long plpar_pte_read_4(unsigned long flags, unsigned long ptex,
{
return 0;
}
+
+static inline long pseries_rpt_invalidate(u32 pid, u64 target, u64 type,
+ u64 page_sizes, u64 start, u64 end)
+{
+ return 0;
+}
+
#endif /* CONFIG_PPC_PSERIES */
#endif /* _ASM_POWERPC_PLPAR_WRAPPERS_H */
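
The retry policy inside pseries_rpt_invalidate() is worth spelling out: H_BUSY is retried indefinitely, any other failure of the targeted flush is escalated exactly once to a "flush everything" request, and only a failure of that full flush is fatal. A standalone model of the same control flow, with a mocked hcall standing in for plpar_hcall_norets() and made-up call counts driving the outcomes:

#include <stdio.h>
#include <stdlib.h>

#define H_SUCCESS	0
#define H_BUSY		1
#define H_P2		-55	/* stands in for "some real failure" */

static int calls;

/* Mock hcall: busy twice, then fail the targeted flush once, then let the
 * full flush succeed. */
static long mock_hcall(int full_flush)
{
	calls++;
	if (calls < 3)
		return H_BUSY;
	if (!full_flush)
		return H_P2;
	return H_SUCCESS;
}

static long demo_rpt_invalidate(void)
{
	long rc;

	while (1) {
		rc = mock_hcall(0);		/* targeted flush */
		if (rc == H_BUSY)
			continue;
		else if (rc == H_SUCCESS)
			return rc;

		/* anything else: escalate once to a full flush */
		do {
			rc = mock_hcall(1);
		} while (rc == H_BUSY);
		if (rc == H_SUCCESS)
			return rc;

		abort();			/* the kernel would BUG() here */
	}
}

int main(void)
{
	long rc = demo_rpt_invalidate();

	printf("rc = %ld after %d hcall attempts\n", rc, calls);
	return 0;
}
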
diff --git a/arch/powerpc/include/asm/pmac_feature.h b/arch/powerpc/include/asm/pmac_feature.h
index e08e829261b6..2495866f2e97 100644
--- a/arch/powerpc/include/asm/pmac_feature.h
+++ b/arch/powerpc/include/asm/pmac_feature.h
@@ -401,5 +401,17 @@ extern u32 __iomem *uninorth_base;
*/
extern int pmac_get_uninorth_variant(void);
+/*
+ * Power macintoshes have either a CUDA, PMU or SMU controlling
+ * system reset, power, NVRAM, RTC.
+ */
+typedef enum sys_ctrler_kind {
+ SYS_CTRLER_UNKNOWN = 0,
+ SYS_CTRLER_CUDA = 1,
+ SYS_CTRLER_PMU = 2,
+ SYS_CTRLER_SMU = 3,
+} sys_ctrler_t;
+extern sys_ctrler_t sys_ctrler;
+
#endif /* __ASM_POWERPC_PMAC_FEATURE_H */
#endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/pmc.h b/arch/powerpc/include/asm/pmc.h
index c6bbe9778d3c..3c09109e708e 100644
--- a/arch/powerpc/include/asm/pmc.h
+++ b/arch/powerpc/include/asm/pmc.h
@@ -34,6 +34,13 @@ static inline void ppc_set_pmu_inuse(int inuse)
#endif
}
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+static inline int ppc_get_pmu_inuse(void)
+{
+ return get_paca()->pmcregs_in_use;
+}
+#endif
+
extern void power4_enable_pmcs(void);
#else /* CONFIG_PPC64 */
diff --git a/arch/powerpc/include/asm/pnv-ocxl.h b/arch/powerpc/include/asm/pnv-ocxl.h
index 7de82647e761..9acd1fbf1197 100644
--- a/arch/powerpc/include/asm/pnv-ocxl.h
+++ b/arch/powerpc/include/asm/pnv-ocxl.h
@@ -3,34 +3,83 @@
#ifndef _ASM_PNV_OCXL_H
#define _ASM_PNV_OCXL_H
+#include <linux/bitfield.h>
#include <linux/pci.h>
#define PNV_OCXL_TL_MAX_TEMPLATE 63
#define PNV_OCXL_TL_BITS_PER_RATE 4
#define PNV_OCXL_TL_RATE_BUF_SIZE ((PNV_OCXL_TL_MAX_TEMPLATE+1) * PNV_OCXL_TL_BITS_PER_RATE / 8)
-extern int pnv_ocxl_get_actag(struct pci_dev *dev, u16 *base, u16 *enabled,
- u16 *supported);
-extern int pnv_ocxl_get_pasid_count(struct pci_dev *dev, int *count);
+#define PNV_OCXL_ATSD_TIMEOUT 1
-extern int pnv_ocxl_get_tl_cap(struct pci_dev *dev, long *cap,
- char *rate_buf, int rate_buf_size);
-extern int pnv_ocxl_set_tl_conf(struct pci_dev *dev, long cap,
- uint64_t rate_buf_phys, int rate_buf_size);
+/* TLB Management Instructions */
+#define PNV_OCXL_ATSD_LNCH 0x00
+/* Radix Invalidate */
+#define PNV_OCXL_ATSD_LNCH_R PPC_BIT(0)
+/* Radix Invalidation Control
+ * 0b00 Just invalidate TLB.
+ * 0b01 Invalidate just Page Walk Cache.
+ * 0b10 Invalidate TLB, Page Walk Cache, and any
+ * caching of Partition and Process Table Entries.
+ */
+#define PNV_OCXL_ATSD_LNCH_RIC PPC_BITMASK(1, 2)
+/* Number and Page Size of translations to be invalidated */
+#define PNV_OCXL_ATSD_LNCH_LP PPC_BITMASK(3, 10)
+/* Invalidation Criteria
+ * 0b00 Invalidate just the target VA.
+ * 0b01 Invalidate matching PID.
+ */
+#define PNV_OCXL_ATSD_LNCH_IS PPC_BITMASK(11, 12)
+/* 0b1: Process Scope, 0b0: Partition Scope */
+#define PNV_OCXL_ATSD_LNCH_PRS PPC_BIT(13)
+/* Invalidation Flag */
+#define PNV_OCXL_ATSD_LNCH_B PPC_BIT(14)
+/* Actual Page Size to be invalidated
+ * 000 4KB
+ * 101 64KB
+ * 001 2MB
+ * 010 1GB
+ */
+#define PNV_OCXL_ATSD_LNCH_AP PPC_BITMASK(15, 17)
+/* Defines the large page select
+ * L=0b0 for 4KB pages
+ * L=0b1 for large pages)
+ */
+#define PNV_OCXL_ATSD_LNCH_L PPC_BIT(18)
+/* Process ID */
+#define PNV_OCXL_ATSD_LNCH_PID PPC_BITMASK(19, 38)
+/* NoFlush – Assumed to be 0b0 */
+#define PNV_OCXL_ATSD_LNCH_F PPC_BIT(39)
+#define PNV_OCXL_ATSD_LNCH_OCAPI_SLBI PPC_BIT(40)
+#define PNV_OCXL_ATSD_LNCH_OCAPI_SINGLETON PPC_BIT(41)
+#define PNV_OCXL_ATSD_AVA 0x08
+#define PNV_OCXL_ATSD_AVA_AVA PPC_BITMASK(0, 51)
+#define PNV_OCXL_ATSD_STAT 0x10
+
+int pnv_ocxl_get_actag(struct pci_dev *dev, u16 *base, u16 *enabled, u16 *supported);
+int pnv_ocxl_get_pasid_count(struct pci_dev *dev, int *count);
-extern int pnv_ocxl_get_xsl_irq(struct pci_dev *dev, int *hwirq);
-extern void pnv_ocxl_unmap_xsl_regs(void __iomem *dsisr, void __iomem *dar,
- void __iomem *tfc, void __iomem *pe_handle);
-extern int pnv_ocxl_map_xsl_regs(struct pci_dev *dev, void __iomem **dsisr,
- void __iomem **dar, void __iomem **tfc,
- void __iomem **pe_handle);
+int pnv_ocxl_get_tl_cap(struct pci_dev *dev, long *cap,
+ char *rate_buf, int rate_buf_size);
+int pnv_ocxl_set_tl_conf(struct pci_dev *dev, long cap,
+ uint64_t rate_buf_phys, int rate_buf_size);
-extern int pnv_ocxl_spa_setup(struct pci_dev *dev, void *spa_mem, int PE_mask,
- void **platform_data);
-extern void pnv_ocxl_spa_release(void *platform_data);
-extern int pnv_ocxl_spa_remove_pe_from_cache(void *platform_data, int pe_handle);
+int pnv_ocxl_get_xsl_irq(struct pci_dev *dev, int *hwirq);
+void pnv_ocxl_unmap_xsl_regs(void __iomem *dsisr, void __iomem *dar,
+ void __iomem *tfc, void __iomem *pe_handle);
+int pnv_ocxl_map_xsl_regs(struct pci_dev *dev, void __iomem **dsisr,
+ void __iomem **dar, void __iomem **tfc,
+ void __iomem **pe_handle);
-extern int pnv_ocxl_alloc_xive_irq(u32 *irq, u64 *trigger_addr);
-extern void pnv_ocxl_free_xive_irq(u32 irq);
+int pnv_ocxl_spa_setup(struct pci_dev *dev, void *spa_mem, int PE_mask, void **platform_data);
+void pnv_ocxl_spa_release(void *platform_data);
+int pnv_ocxl_spa_remove_pe_from_cache(void *platform_data, int pe_handle);
+int pnv_ocxl_map_lpar(struct pci_dev *dev, uint64_t lparid,
+ uint64_t lpcr, void __iomem **arva);
+void pnv_ocxl_unmap_lpar(void __iomem *arva);
+void pnv_ocxl_tlb_invalidate(void __iomem *arva,
+ unsigned long pid,
+ unsigned long addr,
+ unsigned long page_size);
#endif /* _ASM_PNV_OCXL_H */
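
The new PNV_OCXL_ATSD_LNCH_* definitions describe the fields of the ATSD launch register using IBM bit numbering (bit 0 is the most significant). A rough standalone sketch of composing a launch word for a PID-scoped radix invalidation follows; PPC_BIT()/PPC_BITMASK() are restated locally, field_prep() stands in for FIELD_PREP() from <linux/bitfield.h>, and the PID and field values are examples only, not necessarily what pnv_ocxl_tlb_invalidate() programs:

#include <stdio.h>
#include <stdint.h>

/* Local stand-ins: PPC bits are numbered from the MSB (bit 0 = 1 << 63) */
#define PPC_BIT(bit)		(1ULL << (63 - (bit)))
#define PPC_BITMASK(bs, be)	((PPC_BIT(bs) - PPC_BIT(be)) | PPC_BIT(bs))

#define ATSD_LNCH_R		PPC_BIT(0)		/* radix invalidate */
#define ATSD_LNCH_RIC		PPC_BITMASK(1, 2)	/* invalidation control */
#define ATSD_LNCH_IS		PPC_BITMASK(11, 12)	/* invalidation criteria */
#define ATSD_LNCH_PRS		PPC_BIT(13)		/* process scope */
#define ATSD_LNCH_PID		PPC_BITMASK(19, 38)	/* process ID */

/* Stand-in for FIELD_PREP() from <linux/bitfield.h> */
static uint64_t field_prep(uint64_t mask, uint64_t val)
{
	return (val << __builtin_ctzll(mask)) & mask;
}

int main(void)
{
	uint64_t launch = 0;

	launch |= ATSD_LNCH_R;				/* radix invalidation */
	launch |= field_prep(ATSD_LNCH_RIC, 2);		/* TLB + PWC + table caches */
	launch |= field_prep(ATSD_LNCH_IS, 1);		/* match on PID */
	launch |= ATSD_LNCH_PRS;			/* process scoped */
	launch |= field_prep(ATSD_LNCH_PID, 0x1234);	/* example PID */

	printf("ATSD launch word: 0x%016llx\n", (unsigned long long)launch);
	return 0;
}
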
diff --git a/arch/powerpc/include/asm/pnv-pci.h b/arch/powerpc/include/asm/pnv-pci.h
index d0ee0ede5767..8afc92860dbb 100644
--- a/arch/powerpc/include/asm/pnv-pci.h
+++ b/arch/powerpc/include/asm/pnv-pci.h
@@ -9,6 +9,7 @@
#include <linux/pci.h>
#include <linux/pci_hotplug.h>
#include <linux/irq.h>
+#include <linux/of.h>
#include <misc/cxl-base.h>
#include <asm/opal-api.h>
@@ -33,7 +34,7 @@ int pnv_cxl_alloc_hwirqs(struct pci_dev *dev, int num);
void pnv_cxl_release_hwirqs(struct pci_dev *dev, int hwirq, int num);
int pnv_cxl_get_irq_count(struct pci_dev *dev);
struct device_node *pnv_pci_get_phb_node(struct pci_dev *dev);
-int64_t pnv_opal_pci_msi_eoi(struct irq_chip *chip, unsigned int hw_irq);
+int64_t pnv_opal_pci_msi_eoi(struct irq_data *d);
bool is_pnv_opal_msi(struct irq_chip *chip);
#ifdef CONFIG_CXL_BASE
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index c1df75edde44..21e33e46f4b8 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -76,9 +76,104 @@
#define __REGA0_R30 30
#define __REGA0_R31 31
+/* For use with PPC_RAW_() macros */
+#define _R0 0
+#define _R1 1
+#define _R2 2
+#define _R3 3
+#define _R4 4
+#define _R5 5
+#define _R6 6
+#define _R7 7
+#define _R8 8
+#define _R9 9
+#define _R10 10
+#define _R11 11
+#define _R12 12
+#define _R13 13
+#define _R14 14
+#define _R15 15
+#define _R16 16
+#define _R17 17
+#define _R18 18
+#define _R19 19
+#define _R20 20
+#define _R21 21
+#define _R22 22
+#define _R23 23
+#define _R24 24
+#define _R25 25
+#define _R26 26
+#define _R27 27
+#define _R28 28
+#define _R29 29
+#define _R30 30
+#define _R31 31
+
+#define IMM_L(i) ((uintptr_t)(i) & 0xffff)
+#define IMM_DS(i) ((uintptr_t)(i) & 0xfffc)
+#define IMM_DQ(i) ((uintptr_t)(i) & 0xfff0)
+#define IMM_D0(i) (((uintptr_t)(i) >> 16) & 0x3ffff)
+#define IMM_D1(i) IMM_L(i)
+
+/*
+ * 16-bit immediate helper macros: HA() is for use with sign-extending instrs
+ * (e.g. LD, ADDI). If the bottom 16 bits is "-ve", add another bit into the
+ * top half to negate the effect (i.e. 0xffff + 1 = 0x(1)0000).
+ */
+#define IMM_H(i) ((uintptr_t)(i)>>16)
+#define IMM_HA(i) (((uintptr_t)(i)>>16) + \
+ (((uintptr_t)(i) & 0x8000) >> 15))
+
+
/* opcode and xopcode for instructions */
-#define OP_TRAP 3
-#define OP_TRAP_64 2
+#define OP_PREFIX 1
+#define OP_TRAP_64 2
+#define OP_TRAP 3
+#define OP_SC 17
+#define OP_19 19
+#define OP_31 31
+#define OP_LWZ 32
+#define OP_LWZU 33
+#define OP_LBZ 34
+#define OP_LBZU 35
+#define OP_STW 36
+#define OP_STWU 37
+#define OP_STB 38
+#define OP_STBU 39
+#define OP_LHZ 40
+#define OP_LHZU 41
+#define OP_LHA 42
+#define OP_LHAU 43
+#define OP_STH 44
+#define OP_STHU 45
+#define OP_LMW 46
+#define OP_STMW 47
+#define OP_LFS 48
+#define OP_LFSU 49
+#define OP_LFD 50
+#define OP_LFDU 51
+#define OP_STFS 52
+#define OP_STFSU 53
+#define OP_STFD 54
+#define OP_STFDU 55
+#define OP_LQ 56
+#define OP_LD 58
+#define OP_STD 62
+
+#define OP_19_XOP_RFID 18
+#define OP_19_XOP_RFMCI 38
+#define OP_19_XOP_RFDI 39
+#define OP_19_XOP_RFI 50
+#define OP_19_XOP_RFCI 51
+#define OP_19_XOP_RFSCV 82
+#define OP_19_XOP_HRFID 274
+#define OP_19_XOP_URFID 306
+#define OP_19_XOP_STOP 370
+#define OP_19_XOP_DOZE 402
+#define OP_19_XOP_NAP 434
+#define OP_19_XOP_SLEEP 466
+#define OP_19_XOP_RVWINKLE 498
#define OP_31_XOP_TRAP 4
#define OP_31_XOP_LDX 21
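
The IMM_L()/IMM_HA() helpers introduced in the hunk above exist because instructions such as addi sign-extend their 16-bit immediate: when the low half of a constant has bit 15 set, the high half loaded with lis has to be bumped by one so the two halves still add up to the original value. A small standalone arithmetic check of that rule; the target address is arbitrary:

#include <stdio.h>
#include <stdint.h>

#define IMM_L(i)	((uintptr_t)(i) & 0xffff)
#define IMM_H(i)	((uintptr_t)(i) >> 16)
#define IMM_HA(i)	(((uintptr_t)(i) >> 16) + (((uintptr_t)(i) & 0x8000) >> 15))

int main(void)
{
	uint32_t target = 0xc0009f00;	/* low half 0x9f00 has bit 15 set */

	/* what "lis r,IMM_HA(target); addi r,r,IMM_L(target)" ends up computing */
	int32_t  lo = (int16_t)IMM_L(target);		/* addi sign-extends */
	uint32_t ha = (uint32_t)IMM_HA(target) << 16;
	uint32_t h  = (uint32_t)IMM_H(target) << 16;

	printf("target         = 0x%08x\n", target);
	printf("IMM_HA + IMM_L = 0x%08x  (matches)\n", ha + (uint32_t)lo);
	printf("IMM_H  + IMM_L = 0x%08x  (off by 0x10000)\n", h + (uint32_t)lo);
	return 0;
}
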
@@ -100,6 +195,8 @@
#define OP_31_XOP_LHZUX 311
#define OP_31_XOP_MSGSNDP 142
#define OP_31_XOP_MSGCLRP 174
+#define OP_31_XOP_MTMSR 146
+#define OP_31_XOP_MTMSRD 178
#define OP_31_XOP_TLBIE 306
#define OP_31_XOP_MFSPR 339
#define OP_31_XOP_LWAX 341
@@ -158,91 +255,30 @@
/* VMX Vector Store Instructions */
#define OP_31_XOP_STVX 231
-#define OP_31 31
-#define OP_LWZ 32
-#define OP_STFS 52
-#define OP_STFSU 53
-#define OP_STFD 54
-#define OP_STFDU 55
-#define OP_LD 58
-#define OP_LWZU 33
-#define OP_LBZ 34
-#define OP_LBZU 35
-#define OP_STW 36
-#define OP_STWU 37
-#define OP_STD 62
-#define OP_STB 38
-#define OP_STBU 39
-#define OP_LHZ 40
-#define OP_LHZU 41
-#define OP_LHA 42
-#define OP_LHAU 43
-#define OP_STH 44
-#define OP_STHU 45
-#define OP_LMW 46
-#define OP_STMW 47
-#define OP_LFS 48
-#define OP_LFSU 49
-#define OP_LFD 50
-#define OP_LFDU 51
-#define OP_STFS 52
-#define OP_STFSU 53
-#define OP_STFD 54
-#define OP_STFDU 55
-#define OP_LQ 56
-
/* sorted alphabetically */
-#define PPC_INST_BHRBE 0x7c00025c
-#define PPC_INST_CLRBHRB 0x7c00035c
+#define PPC_INST_BCCTR_FLUSH 0x4c400420
#define PPC_INST_COPY 0x7c20060c
-#define PPC_INST_CP_ABORT 0x7c00068c
-#define PPC_INST_DARN 0x7c0005e6
#define PPC_INST_DCBA 0x7c0005ec
#define PPC_INST_DCBA_MASK 0xfc0007fe
-#define PPC_INST_DCBAL 0x7c2005ec
-#define PPC_INST_DCBZL 0x7c2007ec
-#define PPC_INST_ICBT 0x7c00002c
-#define PPC_INST_ICSWX 0x7c00032d
-#define PPC_INST_ICSWEPX 0x7c00076d
+#define PPC_INST_DSSALL 0x7e00066c
#define PPC_INST_ISEL 0x7c00001e
#define PPC_INST_ISEL_MASK 0xfc00003e
-#define PPC_INST_LDARX 0x7c0000a8
-#define PPC_INST_STDCX 0x7c0001ad
-#define PPC_INST_LQARX 0x7c000228
-#define PPC_INST_STQCX 0x7c00016d
#define PPC_INST_LSWI 0x7c0004aa
#define PPC_INST_LSWX 0x7c00042a
-#define PPC_INST_LWARX 0x7c000028
-#define PPC_INST_STWCX 0x7c00012d
#define PPC_INST_LWSYNC 0x7c2004ac
#define PPC_INST_SYNC 0x7c0004ac
#define PPC_INST_SYNC_MASK 0xfc0007fe
-#define PPC_INST_ISYNC 0x4c00012c
-#define PPC_INST_LXVD2X 0x7c000698
#define PPC_INST_MCRXR 0x7c000400
#define PPC_INST_MCRXR_MASK 0xfc0007fe
#define PPC_INST_MFSPR_PVR 0x7c1f42a6
#define PPC_INST_MFSPR_PVR_MASK 0xfc1ffffe
-#define PPC_INST_MFTMR 0x7c0002dc
-#define PPC_INST_MSGSND 0x7c00019c
-#define PPC_INST_MSGCLR 0x7c0001dc
-#define PPC_INST_MSGSYNC 0x7c0006ec
-#define PPC_INST_MSGSNDP 0x7c00011c
-#define PPC_INST_MSGCLRP 0x7c00015c
#define PPC_INST_MTMSRD 0x7c000164
-#define PPC_INST_MTTMR 0x7c0003dc
-#define PPC_INST_NOP 0x60000000
#define PPC_INST_PASTE 0x7c20070d
+#define PPC_INST_PASTE_MASK 0xfc2007ff
#define PPC_INST_POPCNTB 0x7c0000f4
#define PPC_INST_POPCNTB_MASK 0xfc0007fe
-#define PPC_INST_POPCNTD 0x7c0003f4
-#define PPC_INST_POPCNTW 0x7c0002f4
#define PPC_INST_RFEBB 0x4c000124
-#define PPC_INST_RFCI 0x4c000066
-#define PPC_INST_RFDI 0x4c00004e
#define PPC_INST_RFID 0x4c000024
-#define PPC_INST_RFMCI 0x4c00004c
-#define PPC_INST_MFSPR 0x7c0002a6
#define PPC_INST_MFSPR_DSCR 0x7c1102a6
#define PPC_INST_MFSPR_DSCR_MASK 0xfc1ffffe
#define PPC_INST_MTSPR_DSCR 0x7c1103a6
@@ -251,131 +287,27 @@
#define PPC_INST_MFSPR_DSCR_USER_MASK 0xfc1ffffe
#define PPC_INST_MTSPR_DSCR_USER 0x7c0303a6
#define PPC_INST_MTSPR_DSCR_USER_MASK 0xfc1ffffe
-#define PPC_INST_MFVSRD 0x7c000066
-#define PPC_INST_MTVSRD 0x7c000166
-#define PPC_INST_SC 0x44000002
-#define PPC_INST_SLBFEE 0x7c0007a7
-#define PPC_INST_SLBIA 0x7c0003e4
-
#define PPC_INST_STRING 0x7c00042a
#define PPC_INST_STRING_MASK 0xfc0007fe
#define PPC_INST_STRING_GEN_MASK 0xfc00067e
-
#define PPC_INST_STSWI 0x7c0005aa
#define PPC_INST_STSWX 0x7c00052a
-#define PPC_INST_STXVD2X 0x7c000798
-#define PPC_INST_TLBIE 0x7c000264
-#define PPC_INST_TLBIEL 0x7c000224
-#define PPC_INST_TLBILX 0x7c000024
-#define PPC_INST_WAIT 0x7c00007c
-#define PPC_INST_TLBIVAX 0x7c000624
-#define PPC_INST_TLBSRX_DOT 0x7c0006a5
-#define PPC_INST_VPMSUMW 0x10000488
-#define PPC_INST_VPMSUMD 0x100004c8
-#define PPC_INST_VPERMXOR 0x1000002d
-#define PPC_INST_XXLOR 0xf0000490
-#define PPC_INST_XXSWAPD 0xf0000250
-#define PPC_INST_XVCPSGNDP 0xf0000780
#define PPC_INST_TRECHKPT 0x7c0007dd
#define PPC_INST_TRECLAIM 0x7c00075d
-#define PPC_INST_TABORT 0x7c00071d
#define PPC_INST_TSR 0x7c0005dd
-
-#define PPC_INST_NAP 0x4c000364
-#define PPC_INST_SLEEP 0x4c0003a4
-#define PPC_INST_WINKLE 0x4c0003e4
-
-#define PPC_INST_STOP 0x4c0002e4
-
-/* A2 specific instructions */
-#define PPC_INST_ERATWE 0x7c0001a6
-#define PPC_INST_ERATRE 0x7c000166
-#define PPC_INST_ERATILX 0x7c000066
-#define PPC_INST_ERATIVAX 0x7c000666
-#define PPC_INST_ERATSX 0x7c000126
-#define PPC_INST_ERATSX_DOT 0x7c000127
-
-/* Misc instructions for BPF compiler */
-#define PPC_INST_LBZ 0x88000000
-#define PPC_INST_LD 0xe8000000
-#define PPC_INST_LDX 0x7c00002a
-#define PPC_INST_LHZ 0xa0000000
-#define PPC_INST_LWZ 0x80000000
-#define PPC_INST_LHBRX 0x7c00062c
-#define PPC_INST_LDBRX 0x7c000428
-#define PPC_INST_STB 0x98000000
-#define PPC_INST_STH 0xb0000000
-#define PPC_INST_STD 0xf8000000
-#define PPC_INST_STDX 0x7c00012a
-#define PPC_INST_STDU 0xf8000001
-#define PPC_INST_STW 0x90000000
-#define PPC_INST_STWU 0x94000000
-#define PPC_INST_MFLR 0x7c0802a6
-#define PPC_INST_MTLR 0x7c0803a6
-#define PPC_INST_MTCTR 0x7c0903a6
-#define PPC_INST_CMPWI 0x2c000000
-#define PPC_INST_CMPDI 0x2c200000
-#define PPC_INST_CMPW 0x7c000000
-#define PPC_INST_CMPD 0x7c200000
-#define PPC_INST_CMPLW 0x7c000040
-#define PPC_INST_CMPLD 0x7c200040
-#define PPC_INST_CMPLWI 0x28000000
-#define PPC_INST_CMPLDI 0x28200000
-#define PPC_INST_ADDI 0x38000000
-#define PPC_INST_ADDIS 0x3c000000
-#define PPC_INST_ADD 0x7c000214
-#define PPC_INST_ADDC 0x7c000014
-#define PPC_INST_SUB 0x7c000050
-#define PPC_INST_BLR 0x4e800020
-#define PPC_INST_BLRL 0x4e800021
-#define PPC_INST_BCTR 0x4e800420
-#define PPC_INST_MULLD 0x7c0001d2
-#define PPC_INST_MULLW 0x7c0001d6
-#define PPC_INST_MULHWU 0x7c000016
-#define PPC_INST_MULLI 0x1c000000
-#define PPC_INST_MADDHD 0x10000030
-#define PPC_INST_MADDHDU 0x10000031
-#define PPC_INST_MADDLD 0x10000033
-#define PPC_INST_DIVWU 0x7c000396
-#define PPC_INST_DIVD 0x7c0003d2
-#define PPC_INST_DIVDU 0x7c000392
-#define PPC_INST_RLWINM 0x54000000
-#define PPC_INST_RLWINM_DOT 0x54000001
-#define PPC_INST_RLWIMI 0x50000000
-#define PPC_INST_RLDICL 0x78000000
-#define PPC_INST_RLDICR 0x78000004
-#define PPC_INST_SLW 0x7c000030
-#define PPC_INST_SLD 0x7c000036
-#define PPC_INST_SRW 0x7c000430
-#define PPC_INST_SRAW 0x7c000630
-#define PPC_INST_SRAWI 0x7c000670
-#define PPC_INST_SRD 0x7c000436
-#define PPC_INST_SRAD 0x7c000634
-#define PPC_INST_SRADI 0x7c000674
-#define PPC_INST_AND 0x7c000038
-#define PPC_INST_ANDDOT 0x7c000039
-#define PPC_INST_OR 0x7c000378
-#define PPC_INST_XOR 0x7c000278
-#define PPC_INST_ANDI 0x70000000
-#define PPC_INST_ORI 0x60000000
-#define PPC_INST_ORIS 0x64000000
-#define PPC_INST_XORI 0x68000000
-#define PPC_INST_XORIS 0x6c000000
-#define PPC_INST_NEG 0x7c0000d0
-#define PPC_INST_EXTSW 0x7c0007b4
-#define PPC_INST_BRANCH 0x48000000
#define PPC_INST_BRANCH_COND 0x40800000
-#define PPC_INST_LBZCIX 0x7c0006aa
-#define PPC_INST_STBCIX 0x7c0007aa
-#define PPC_INST_LWZX 0x7c00002e
-#define PPC_INST_LFSX 0x7c00042e
-#define PPC_INST_STFSX 0x7c00052e
-#define PPC_INST_LFDX 0x7c0004ae
-#define PPC_INST_STFDX 0x7c0005ae
-#define PPC_INST_LVX 0x7c0000ce
-#define PPC_INST_STVX 0x7c0001ce
-#define PPC_INST_VCMPEQUD 0x100000c7
-#define PPC_INST_VCMPEQUB 0x10000006
+
+/* Prefixes */
+#define PPC_INST_LFS 0xc0000000
+#define PPC_INST_STFS 0xd0000000
+#define PPC_INST_LFD 0xc8000000
+#define PPC_INST_STFD 0xd8000000
+#define PPC_PREFIX_MLS 0x06000000
+#define PPC_PREFIX_8LS 0x04000000
+
+/* Prefixed instructions */
+#define PPC_INST_PLD 0xe4000000
+#define PPC_INST_PSTD 0xf4000000
/* macros to insert fields into opcodes */
#define ___PPC_RA(a) (((a) & 0x1f) << 16)
@@ -395,7 +327,10 @@
#define __PPC_XB(b) ((((b) & 0x1f) << 11) | (((b) & 0x20) >> 4))
#define __PPC_XS(s) ((((s) & 0x1f) << 21) | (((s) & 0x20) >> 5))
#define __PPC_XT(s) __PPC_XS(s)
+#define __PPC_XSP(s) ((((s) & 0x1e) | (((s) >> 5) & 0x1)) << 21)
+#define __PPC_XTP(s) __PPC_XSP(s)
#define __PPC_T_TLB(t) (((t) & 0x3) << 21)
+#define __PPC_PL(p) (((p) & 0x3) << 16)
#define __PPC_WC(w) (((w) & 0x3) << 21)
#define __PPC_WS(w) (((w) & 0x1f) << 11)
#define __PPC_SH(s) __PPC_WS(s)
@@ -408,6 +343,8 @@
#define __PPC_CT(t) (((t) & 0x0f) << 21)
#define __PPC_SPR(r) ((((r) & 0x1f) << 16) | ((((r) >> 5) & 0x1f) << 11))
#define __PPC_RC21 (0x1 << 10)
+#define __PPC_PRFX_R(r) (((r) & 0x1) << 20)
+#define __PPC_EH(eh) (((eh) & 0x1) << 0)
/*
* Both low and high 16 bits are added as SIGNED additions, so if low 16 bits
@@ -417,186 +354,324 @@
#define PPC_LO(v) ((v) & 0xffff)
#define PPC_HI(v) (((v) >> 16) & 0xffff)
#define PPC_HA(v) PPC_HI((v) + 0x8000)
+#define PPC_HIGHER(v) (((v) >> 32) & 0xffff)
+#define PPC_HIGHEST(v) (((v) >> 48) & 0xffff)
+
+/* LI Field */
+#define PPC_LI_MASK 0x03fffffc
+#define PPC_LI(v) ((v) & PPC_LI_MASK)
+
+/* Base instruction encoding */
+#define PPC_RAW_CP_ABORT (0x7c00068c)
+#define PPC_RAW_COPY(a, b) (PPC_INST_COPY | ___PPC_RA(a) | ___PPC_RB(b))
+#define PPC_RAW_DARN(t, l) (0x7c0005e6 | ___PPC_RT(t) | (((l) & 0x3) << 16))
+#define PPC_RAW_DCBAL(a, b) (0x7c2005ec | __PPC_RA(a) | __PPC_RB(b))
+#define PPC_RAW_DCBZL(a, b) (0x7c2007ec | __PPC_RA(a) | __PPC_RB(b))
+#define PPC_RAW_LQARX(t, a, b, eh) (0x7c000228 | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b) | __PPC_EH(eh))
+#define PPC_RAW_LDARX(t, a, b, eh) (0x7c0000a8 | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b) | __PPC_EH(eh))
+#define PPC_RAW_LWARX(t, a, b, eh) (0x7c000028 | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b) | __PPC_EH(eh))
+#define PPC_RAW_PHWSYNC (0x7c8004ac)
+#define PPC_RAW_PLWSYNC (0x7ca004ac)
+#define PPC_RAW_STQCX(t, a, b) (0x7c00016d | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b))
+#define PPC_RAW_MADDHD(t, a, b, c) (0x10000030 | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b) | ___PPC_RC(c))
+#define PPC_RAW_MADDHDU(t, a, b, c) (0x10000031 | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b) | ___PPC_RC(c))
+#define PPC_RAW_MADDLD(t, a, b, c) (0x10000033 | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b) | ___PPC_RC(c))
+#define PPC_RAW_MSGSND(b) (0x7c00019c | ___PPC_RB(b))
+#define PPC_RAW_MSGSYNC (0x7c0006ec)
+#define PPC_RAW_MSGCLR(b) (0x7c0001dc | ___PPC_RB(b))
+#define PPC_RAW_MSGSNDP(b) (0x7c00011c | ___PPC_RB(b))
+#define PPC_RAW_MSGCLRP(b) (0x7c00015c | ___PPC_RB(b))
+#define PPC_RAW_PASTE(a, b) (0x7c20070d | ___PPC_RA(a) | ___PPC_RB(b))
+#define PPC_RAW_POPCNTB(a, s) (PPC_INST_POPCNTB | __PPC_RA(a) | __PPC_RS(s))
+#define PPC_RAW_POPCNTD(a, s) (0x7c0003f4 | __PPC_RA(a) | __PPC_RS(s))
+#define PPC_RAW_POPCNTW(a, s) (0x7c0002f4 | __PPC_RA(a) | __PPC_RS(s))
+#define PPC_RAW_RFCI (0x4c000066)
+#define PPC_RAW_RFDI (0x4c00004e)
+#define PPC_RAW_RFMCI (0x4c00004c)
+#define PPC_RAW_TLBILX(t, a, b) (0x7c000024 | __PPC_T_TLB(t) | __PPC_RA0(a) | __PPC_RB(b))
+#define PPC_RAW_WAIT_v203 (0x7c00007c)
+#define PPC_RAW_WAIT(w, p) (0x7c00003c | __PPC_WC(w) | __PPC_PL(p))
+#define PPC_RAW_TLBIE(lp, a) (0x7c000264 | ___PPC_RB(a) | ___PPC_RS(lp))
+#define PPC_RAW_TLBIE_5(rb, rs, ric, prs, r) \
+ (0x7c000264 | ___PPC_RB(rb) | ___PPC_RS(rs) | ___PPC_RIC(ric) | ___PPC_PRS(prs) | ___PPC_R(r))
+#define PPC_RAW_TLBIEL(rb, rs, ric, prs, r) \
+ (0x7c000224 | ___PPC_RB(rb) | ___PPC_RS(rs) | ___PPC_RIC(ric) | ___PPC_PRS(prs) | ___PPC_R(r))
+#define PPC_RAW_TLBIEL_v205(rb, l) (0x7c000224 | ___PPC_RB(rb) | (l << 21))
+#define PPC_RAW_TLBSRX_DOT(a, b) (0x7c0006a5 | __PPC_RA0(a) | __PPC_RB(b))
+#define PPC_RAW_TLBIVAX(a, b) (0x7c000624 | __PPC_RA0(a) | __PPC_RB(b))
+#define PPC_RAW_ERATWE(s, a, w) (0x7c0001a6 | __PPC_RS(s) | __PPC_RA(a) | __PPC_WS(w))
+#define PPC_RAW_ERATRE(s, a, w) (0x7c000166 | __PPC_RS(s) | __PPC_RA(a) | __PPC_WS(w))
+#define PPC_RAW_ERATILX(t, a, b) (0x7c000066 | __PPC_T_TLB(t) | __PPC_RA0(a) | __PPC_RB(b))
+#define PPC_RAW_ERATIVAX(s, a, b) (0x7c000666 | __PPC_RS(s) | __PPC_RA0(a) | __PPC_RB(b))
+#define PPC_RAW_ERATSX(t, a, w) (0x7c000126 | __PPC_RS(t) | __PPC_RA0(a) | __PPC_RB(b))
+#define PPC_RAW_ERATSX_DOT(t, a, w) (0x7c000127 | __PPC_RS(t) | __PPC_RA0(a) | __PPC_RB(b))
+#define PPC_RAW_SLBFEE_DOT(t, b) (0x7c0007a7 | __PPC_RT(t) | __PPC_RB(b))
+#define __PPC_RAW_SLBFEE_DOT(t, b) (0x7c0007a7 | ___PPC_RT(t) | ___PPC_RB(b))
+#define PPC_RAW_ICBT(c, a, b) (0x7c00002c | __PPC_CT(c) | __PPC_RA0(a) | __PPC_RB(b))
+#define PPC_RAW_LBZCIX(t, a, b) (0x7c0006aa | __PPC_RT(t) | __PPC_RA(a) | __PPC_RB(b))
+#define PPC_RAW_STBCIX(s, a, b) (0x7c0007aa | __PPC_RS(s) | __PPC_RA(a) | __PPC_RB(b))
+#define PPC_RAW_DCBFPS(a, b) (0x7c0000ac | ___PPC_RA(a) | ___PPC_RB(b) | (4 << 21))
+#define PPC_RAW_DCBSTPS(a, b) (0x7c0000ac | ___PPC_RA(a) | ___PPC_RB(b) | (6 << 21))
+#define PPC_RAW_SC() (0x44000002)
+#define PPC_RAW_SYNC() (0x7c0004ac)
+#define PPC_RAW_ISYNC() (0x4c00012c)
/*
- * Only use the larx hint bit on 64bit CPUs. e500v1/v2 based CPUs will treat a
- * larx with EH set as an illegal instruction.
+ * Define what the VSX XX1 form instructions will look like, then add
+ * the 128 bit load store instructions based on that.
*/
-#ifdef CONFIG_PPC64
-#define __PPC_EH(eh) (((eh) & 0x1) << 0)
-#else
-#define __PPC_EH(eh) 0
-#endif
+#define VSX_XX1(s, a, b) (__PPC_XS(s) | __PPC_RA(a) | __PPC_RB(b))
+#define VSX_XX3(t, a, b) (__PPC_XT(t) | __PPC_XA(a) | __PPC_XB(b))
+#define PPC_RAW_STXVD2X(s, a, b) (0x7c000798 | VSX_XX1((s), a, b))
+#define PPC_RAW_LXVD2X(s, a, b) (0x7c000698 | VSX_XX1((s), a, b))
+#define PPC_RAW_MFVRD(a, t) (0x7c000066 | VSX_XX1((t) + 32, a, R0))
+#define PPC_RAW_MTVRD(t, a) (0x7c000166 | VSX_XX1((t) + 32, a, R0))
+#define PPC_RAW_VPMSUMW(t, a, b) (0x10000488 | VSX_XX3((t), a, b))
+#define PPC_RAW_VPMSUMD(t, a, b) (0x100004c8 | VSX_XX3((t), a, b))
+#define PPC_RAW_XXLOR(t, a, b) (0xf0000490 | VSX_XX3((t), a, b))
+#define PPC_RAW_XXSWAPD(t, a) (0xf0000250 | VSX_XX3((t), a, a))
+#define PPC_RAW_XVCPSGNDP(t, a, b) ((0xf0000780 | VSX_XX3((t), (a), (b))))
+#define PPC_RAW_VPERMXOR(vrt, vra, vrb, vrc) \
+ ((0x1000002d | ___PPC_RT(vrt) | ___PPC_RA(vra) | ___PPC_RB(vrb) | (((vrc) & 0x1f) << 6)))
+#define PPC_RAW_LXVP(xtp, a, i) (0x18000000 | __PPC_XTP(xtp) | ___PPC_RA(a) | IMM_DQ(i))
+#define PPC_RAW_STXVP(xsp, a, i) (0x18000001 | __PPC_XSP(xsp) | ___PPC_RA(a) | IMM_DQ(i))
+#define PPC_RAW_LXVPX(xtp, a, b) (0x7c00029a | __PPC_XTP(xtp) | ___PPC_RA(a) | ___PPC_RB(b))
+#define PPC_RAW_STXVPX(xsp, a, b) (0x7c00039a | __PPC_XSP(xsp) | ___PPC_RA(a) | ___PPC_RB(b))
+#define PPC_RAW_PLXVP_P(xtp, i, a, pr) (PPC_PREFIX_8LS | __PPC_PRFX_R(pr) | IMM_D0(i))
+#define PPC_RAW_PLXVP_S(xtp, i, a, pr) (0xe8000000 | __PPC_XTP(xtp) | ___PPC_RA(a) | IMM_D1(i))
+#define PPC_RAW_PSTXVP_P(xsp, i, a, pr) (PPC_PREFIX_8LS | __PPC_PRFX_R(pr) | IMM_D0(i))
+#define PPC_RAW_PSTXVP_S(xsp, i, a, pr) (0xf8000000 | __PPC_XSP(xsp) | ___PPC_RA(a) | IMM_D1(i))
+#define PPC_RAW_NAP (0x4c000364)
+#define PPC_RAW_SLEEP (0x4c0003a4)
+#define PPC_RAW_WINKLE (0x4c0003e4)
+#define PPC_RAW_STOP (0x4c0002e4)
+#define PPC_RAW_CLRBHRB (0x7c00035c)
+#define PPC_RAW_MFBHRBE(r, n) (0x7c00025c | __PPC_RT(r) | (((n) & 0x3ff) << 11))
+#define PPC_RAW_TRECHKPT (PPC_INST_TRECHKPT)
+#define PPC_RAW_TRECLAIM(r) (PPC_INST_TRECLAIM | __PPC_RA(r))
+#define PPC_RAW_TABORT(r) (0x7c00071d | __PPC_RA(r))
+#define TMRN(x) ((((x) & 0x1f) << 16) | (((x) & 0x3e0) << 6))
+#define PPC_RAW_MTTMR(tmr, r) (0x7c0003dc | TMRN(tmr) | ___PPC_RS(r))
+#define PPC_RAW_MFTMR(tmr, r) (0x7c0002dc | TMRN(tmr) | ___PPC_RT(r))
+#define PPC_RAW_ICSWX(s, a, b) (0x7c00032d | ___PPC_RS(s) | ___PPC_RA(a) | ___PPC_RB(b))
+#define PPC_RAW_ICSWEPX(s, a, b) (0x7c00076d | ___PPC_RS(s) | ___PPC_RA(a) | ___PPC_RB(b))
+#define PPC_RAW_SLBIA(IH) (0x7c0003e4 | (((IH) & 0x7) << 21))
+#define PPC_RAW_VCMPEQUD_RC(vrt, vra, vrb) \
+ (0x100000c7 | ___PPC_RT(vrt) | ___PPC_RA(vra) | ___PPC_RB(vrb) | __PPC_RC21)
+#define PPC_RAW_VCMPEQUB_RC(vrt, vra, vrb) \
+ (0x10000006 | ___PPC_RT(vrt) | ___PPC_RA(vra) | ___PPC_RB(vrb) | __PPC_RC21)
+#define PPC_RAW_LD(r, base, i) (0xe8000000 | ___PPC_RT(r) | ___PPC_RA(base) | IMM_DS(i))
+#define PPC_RAW_LWZ(r, base, i) (0x80000000 | ___PPC_RT(r) | ___PPC_RA(base) | IMM_L(i))
+#define PPC_RAW_LWZX(t, a, b) (0x7c00002e | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b))
+#define PPC_RAW_STD(r, base, i) (0xf8000000 | ___PPC_RS(r) | ___PPC_RA(base) | IMM_DS(i))
+#define PPC_RAW_STDCX(s, a, b) (0x7c0001ad | ___PPC_RS(s) | ___PPC_RA(a) | ___PPC_RB(b))
+#define PPC_RAW_LFSX(t, a, b) (0x7c00042e | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b))
+#define PPC_RAW_STFSX(s, a, b) (0x7c00052e | ___PPC_RS(s) | ___PPC_RA(a) | ___PPC_RB(b))
+#define PPC_RAW_LFDX(t, a, b) (0x7c0004ae | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b))
+#define PPC_RAW_STFDX(s, a, b) (0x7c0005ae | ___PPC_RS(s) | ___PPC_RA(a) | ___PPC_RB(b))
+#define PPC_RAW_LVX(t, a, b) (0x7c0000ce | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b))
+#define PPC_RAW_STVX(s, a, b) (0x7c0001ce | ___PPC_RS(s) | ___PPC_RA(a) | ___PPC_RB(b))
+#define PPC_RAW_ADDE(t, a, b) (0x7c000114 | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b))
+#define PPC_RAW_ADDZE(t, a) (0x7c000194 | ___PPC_RT(t) | ___PPC_RA(a))
+#define PPC_RAW_ADDME(t, a) (0x7c0001d4 | ___PPC_RT(t) | ___PPC_RA(a))
+#define PPC_RAW_ADD(t, a, b) (0x7c000214 | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b))
+#define PPC_RAW_ADD_DOT(t, a, b) (0x7c000214 | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b) | 0x1)
+#define PPC_RAW_ADDC(t, a, b) (0x7c000014 | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b))
+#define PPC_RAW_ADDC_DOT(t, a, b) (0x7c000014 | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b) | 0x1)
+#define PPC_RAW_NOP() PPC_RAW_ORI(0, 0, 0)
+#define PPC_RAW_BLR() (0x4e800020)
+#define PPC_RAW_BLRL() (0x4e800021)
+#define PPC_RAW_MTLR(r) (0x7c0803a6 | ___PPC_RT(r))
+#define PPC_RAW_MFLR(t) (0x7c0802a6 | ___PPC_RT(t))
+#define PPC_RAW_BCTR() (0x4e800420)
+#define PPC_RAW_BCTRL() (0x4e800421)
+#define PPC_RAW_MTCTR(r) (0x7c0903a6 | ___PPC_RT(r))
+#define PPC_RAW_ADDI(d, a, i) (0x38000000 | ___PPC_RT(d) | ___PPC_RA(a) | IMM_L(i))
+#define PPC_RAW_LI(r, i) PPC_RAW_ADDI(r, 0, i)
+#define PPC_RAW_ADDIS(d, a, i) (0x3c000000 | ___PPC_RT(d) | ___PPC_RA(a) | IMM_L(i))
+#define PPC_RAW_ADDIC(d, a, i) (0x30000000 | ___PPC_RT(d) | ___PPC_RA(a) | IMM_L(i))
+#define PPC_RAW_ADDIC_DOT(d, a, i) (0x34000000 | ___PPC_RT(d) | ___PPC_RA(a) | IMM_L(i))
+#define PPC_RAW_LIS(r, i) PPC_RAW_ADDIS(r, 0, i)
+#define PPC_RAW_STDX(r, base, b) (0x7c00012a | ___PPC_RS(r) | ___PPC_RA(base) | ___PPC_RB(b))
+#define PPC_RAW_STDU(r, base, i) (0xf8000001 | ___PPC_RS(r) | ___PPC_RA(base) | ((i) & 0xfffc))
+#define PPC_RAW_STW(r, base, i) (0x90000000 | ___PPC_RS(r) | ___PPC_RA(base) | IMM_L(i))
+#define PPC_RAW_STWU(r, base, i) (0x94000000 | ___PPC_RS(r) | ___PPC_RA(base) | IMM_L(i))
+#define PPC_RAW_STH(r, base, i) (0xb0000000 | ___PPC_RS(r) | ___PPC_RA(base) | IMM_L(i))
+#define PPC_RAW_STB(r, base, i) (0x98000000 | ___PPC_RS(r) | ___PPC_RA(base) | IMM_L(i))
+#define PPC_RAW_LBZ(r, base, i) (0x88000000 | ___PPC_RT(r) | ___PPC_RA(base) | IMM_L(i))
+#define PPC_RAW_LDX(r, base, b) (0x7c00002a | ___PPC_RT(r) | ___PPC_RA(base) | ___PPC_RB(b))
+#define PPC_RAW_LHZ(r, base, i) (0xa0000000 | ___PPC_RT(r) | ___PPC_RA(base) | IMM_L(i))
+#define PPC_RAW_LHBRX(r, base, b) (0x7c00062c | ___PPC_RT(r) | ___PPC_RA(base) | ___PPC_RB(b))
+#define PPC_RAW_LWBRX(r, base, b) (0x7c00042c | ___PPC_RT(r) | ___PPC_RA(base) | ___PPC_RB(b))
+#define PPC_RAW_LDBRX(r, base, b) (0x7c000428 | ___PPC_RT(r) | ___PPC_RA(base) | ___PPC_RB(b))
+#define PPC_RAW_STWCX(s, a, b) (0x7c00012d | ___PPC_RS(s) | ___PPC_RA(a) | ___PPC_RB(b))
+#define PPC_RAW_CMPWI(a, i) (0x2c000000 | ___PPC_RA(a) | IMM_L(i))
+#define PPC_RAW_CMPDI(a, i) (0x2c200000 | ___PPC_RA(a) | IMM_L(i))
+#define PPC_RAW_CMPW(a, b) (0x7c000000 | ___PPC_RA(a) | ___PPC_RB(b))
+#define PPC_RAW_CMPD(a, b) (0x7c200000 | ___PPC_RA(a) | ___PPC_RB(b))
+#define PPC_RAW_CMPLWI(a, i) (0x28000000 | ___PPC_RA(a) | IMM_L(i))
+#define PPC_RAW_CMPLDI(a, i) (0x28200000 | ___PPC_RA(a) | IMM_L(i))
+#define PPC_RAW_CMPLW(a, b) (0x7c000040 | ___PPC_RA(a) | ___PPC_RB(b))
+#define PPC_RAW_CMPLD(a, b) (0x7c200040 | ___PPC_RA(a) | ___PPC_RB(b))
+#define PPC_RAW_SUB(d, a, b) (0x7c000050 | ___PPC_RT(d) | ___PPC_RB(a) | ___PPC_RA(b))
+#define PPC_RAW_SUBFC(d, a, b) (0x7c000010 | ___PPC_RT(d) | ___PPC_RA(a) | ___PPC_RB(b))
+#define PPC_RAW_SUBFE(d, a, b) (0x7c000110 | ___PPC_RT(d) | ___PPC_RA(a) | ___PPC_RB(b))
+#define PPC_RAW_SUBFIC(d, a, i) (0x20000000 | ___PPC_RT(d) | ___PPC_RA(a) | IMM_L(i))
+#define PPC_RAW_SUBFZE(d, a) (0x7c000190 | ___PPC_RT(d) | ___PPC_RA(a))
+#define PPC_RAW_MULD(d, a, b) (0x7c0001d2 | ___PPC_RT(d) | ___PPC_RA(a) | ___PPC_RB(b))
+#define PPC_RAW_MULW(d, a, b) (0x7c0001d6 | ___PPC_RT(d) | ___PPC_RA(a) | ___PPC_RB(b))
+#define PPC_RAW_MULHWU(d, a, b) (0x7c000016 | ___PPC_RT(d) | ___PPC_RA(a) | ___PPC_RB(b))
+#define PPC_RAW_MULI(d, a, i) (0x1c000000 | ___PPC_RT(d) | ___PPC_RA(a) | IMM_L(i))
+#define PPC_RAW_DIVWU(d, a, b) (0x7c000396 | ___PPC_RT(d) | ___PPC_RA(a) | ___PPC_RB(b))
+#define PPC_RAW_DIVDU(d, a, b) (0x7c000392 | ___PPC_RT(d) | ___PPC_RA(a) | ___PPC_RB(b))
+#define PPC_RAW_DIVDE(t, a, b) (0x7c000352 | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b))
+#define PPC_RAW_DIVDE_DOT(t, a, b) (0x7c000352 | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b) | 0x1)
+#define PPC_RAW_DIVDEU(t, a, b) (0x7c000312 | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b))
+#define PPC_RAW_DIVDEU_DOT(t, a, b) (0x7c000312 | ___PPC_RT(t) | ___PPC_RA(a) | ___PPC_RB(b) | 0x1)
+#define PPC_RAW_AND(d, a, b) (0x7c000038 | ___PPC_RA(d) | ___PPC_RS(a) | ___PPC_RB(b))
+#define PPC_RAW_ANDI(d, a, i) (0x70000000 | ___PPC_RA(d) | ___PPC_RS(a) | IMM_L(i))
+#define PPC_RAW_ANDIS(d, a, i) (0x74000000 | ___PPC_RA(d) | ___PPC_RS(a) | IMM_L(i))
+#define PPC_RAW_AND_DOT(d, a, b) (0x7c000039 | ___PPC_RA(d) | ___PPC_RS(a) | ___PPC_RB(b))
+#define PPC_RAW_OR(d, a, b) (0x7c000378 | ___PPC_RA(d) | ___PPC_RS(a) | ___PPC_RB(b))
+#define PPC_RAW_MR(d, a) PPC_RAW_OR(d, a, a)
+#define PPC_RAW_ORI(d, a, i) (0x60000000 | ___PPC_RA(d) | ___PPC_RS(a) | IMM_L(i))
+#define PPC_RAW_ORIS(d, a, i) (0x64000000 | ___PPC_RA(d) | ___PPC_RS(a) | IMM_L(i))
+#define PPC_RAW_NOR(d, a, b) (0x7c0000f8 | ___PPC_RA(d) | ___PPC_RS(a) | ___PPC_RB(b))
+#define PPC_RAW_XOR(d, a, b) (0x7c000278 | ___PPC_RA(d) | ___PPC_RS(a) | ___PPC_RB(b))
+#define PPC_RAW_XORI(d, a, i) (0x68000000 | ___PPC_RA(d) | ___PPC_RS(a) | IMM_L(i))
+#define PPC_RAW_XORIS(d, a, i) (0x6c000000 | ___PPC_RA(d) | ___PPC_RS(a) | IMM_L(i))
+#define PPC_RAW_EXTSW(d, a) (0x7c0007b4 | ___PPC_RA(d) | ___PPC_RS(a))
+#define PPC_RAW_SLW(d, a, s) (0x7c000030 | ___PPC_RA(d) | ___PPC_RS(a) | ___PPC_RB(s))
+#define PPC_RAW_SLD(d, a, s) (0x7c000036 | ___PPC_RA(d) | ___PPC_RS(a) | ___PPC_RB(s))
+#define PPC_RAW_SRW(d, a, s) (0x7c000430 | ___PPC_RA(d) | ___PPC_RS(a) | ___PPC_RB(s))
+#define PPC_RAW_SRAW(d, a, s) (0x7c000630 | ___PPC_RA(d) | ___PPC_RS(a) | ___PPC_RB(s))
+#define PPC_RAW_SRAWI(d, a, i) (0x7c000670 | ___PPC_RA(d) | ___PPC_RS(a) | __PPC_SH(i))
+#define PPC_RAW_SRD(d, a, s) (0x7c000436 | ___PPC_RA(d) | ___PPC_RS(a) | ___PPC_RB(s))
+#define PPC_RAW_SRAD(d, a, s) (0x7c000634 | ___PPC_RA(d) | ___PPC_RS(a) | ___PPC_RB(s))
+#define PPC_RAW_SRADI(d, a, i) (0x7c000674 | ___PPC_RA(d) | ___PPC_RS(a) | __PPC_SH64(i))
+#define PPC_RAW_RLWINM(d, a, i, mb, me) (0x54000000 | ___PPC_RA(d) | ___PPC_RS(a) | __PPC_SH(i) | __PPC_MB(mb) | __PPC_ME(me))
+#define PPC_RAW_RLWINM_DOT(d, a, i, mb, me) \
+ (0x54000001 | ___PPC_RA(d) | ___PPC_RS(a) | __PPC_SH(i) | __PPC_MB(mb) | __PPC_ME(me))
+#define PPC_RAW_RLWIMI(d, a, i, mb, me) (0x50000000 | ___PPC_RA(d) | ___PPC_RS(a) | __PPC_SH(i) | __PPC_MB(mb) | __PPC_ME(me))
+#define PPC_RAW_RLDICL(d, a, i, mb) (0x78000000 | ___PPC_RA(d) | ___PPC_RS(a) | __PPC_SH64(i) | __PPC_MB64(mb))
+#define PPC_RAW_RLDICR(d, a, i, me) (0x78000004 | ___PPC_RA(d) | ___PPC_RS(a) | __PPC_SH64(i) | __PPC_ME64(me))
+
+/* slwi = rlwinm Rx, Ry, n, 0, 31-n */
+#define PPC_RAW_SLWI(d, a, i) PPC_RAW_RLWINM(d, a, i, 0, 31-(i))
+/* srwi = rlwinm Rx, Ry, 32-n, n, 31 */
+#define PPC_RAW_SRWI(d, a, i) PPC_RAW_RLWINM(d, a, 32-(i), i, 31)
+/* sldi = rldicr Rx, Ry, n, 63-n */
+#define PPC_RAW_SLDI(d, a, i) PPC_RAW_RLDICR(d, a, i, 63-(i))
+/* sldi = rldicl Rx, Ry, 64-n, n */
+#define PPC_RAW_SRDI(d, a, i) PPC_RAW_RLDICL(d, a, 64-(i), i)
+
+#define PPC_RAW_NEG(d, a) (0x7c0000d0 | ___PPC_RT(d) | ___PPC_RA(a))
+
+#define PPC_RAW_MFSPR(d, spr) (0x7c0002a6 | ___PPC_RT(d) | __PPC_SPR(spr))
+#define PPC_RAW_MTSPR(spr, d) (0x7c0003a6 | ___PPC_RS(d) | __PPC_SPR(spr))
+#define PPC_RAW_EIEIO() (0x7c0006ac)
+
+#define PPC_RAW_BRANCH(offset) (0x48000000 | PPC_LI(offset))
+#define PPC_RAW_BL(offset) (0x48000001 | PPC_LI(offset))
+#define PPC_RAW_TW(t0, a, b) (0x7c000008 | ___PPC_RS(t0) | ___PPC_RA(a) | ___PPC_RB(b))
+#define PPC_RAW_TRAP() PPC_RAW_TW(31, 0, 0)
+#define PPC_RAW_SETB(t, bfa) (0x7c000100 | ___PPC_RT(t) | ___PPC_RA((bfa) << 2))
/* Deal with instructions that older assemblers aren't aware of */
-#define PPC_CP_ABORT stringify_in_c(.long PPC_INST_CP_ABORT)
-#define PPC_COPY(a, b) stringify_in_c(.long PPC_INST_COPY | \
- ___PPC_RA(a) | ___PPC_RB(b))
-#define PPC_DARN(t, l) stringify_in_c(.long PPC_INST_DARN | \
- ___PPC_RT(t) | \
- (((l) & 0x3) << 16))
-#define PPC_DCBAL(a, b) stringify_in_c(.long PPC_INST_DCBAL | \
- __PPC_RA(a) | __PPC_RB(b))
-#define PPC_DCBZL(a, b) stringify_in_c(.long PPC_INST_DCBZL | \
- __PPC_RA(a) | __PPC_RB(b))
-#define PPC_LQARX(t, a, b, eh) stringify_in_c(.long PPC_INST_LQARX | \
- ___PPC_RT(t) | ___PPC_RA(a) | \
- ___PPC_RB(b) | __PPC_EH(eh))
-#define PPC_LDARX(t, a, b, eh) stringify_in_c(.long PPC_INST_LDARX | \
- ___PPC_RT(t) | ___PPC_RA(a) | \
- ___PPC_RB(b) | __PPC_EH(eh))
-#define PPC_LWARX(t, a, b, eh) stringify_in_c(.long PPC_INST_LWARX | \
- ___PPC_RT(t) | ___PPC_RA(a) | \
- ___PPC_RB(b) | __PPC_EH(eh))
-#define PPC_STQCX(t, a, b) stringify_in_c(.long PPC_INST_STQCX | \
- ___PPC_RT(t) | ___PPC_RA(a) | \
- ___PPC_RB(b))
-#define PPC_MADDHD(t, a, b, c) stringify_in_c(.long PPC_INST_MADDHD | \
- ___PPC_RT(t) | ___PPC_RA(a) | \
- ___PPC_RB(b) | ___PPC_RC(c))
-#define PPC_MADDHDU(t, a, b, c) stringify_in_c(.long PPC_INST_MADDHDU | \
- ___PPC_RT(t) | ___PPC_RA(a) | \
- ___PPC_RB(b) | ___PPC_RC(c))
-#define PPC_MADDLD(t, a, b, c) stringify_in_c(.long PPC_INST_MADDLD | \
- ___PPC_RT(t) | ___PPC_RA(a) | \
- ___PPC_RB(b) | ___PPC_RC(c))
-#define PPC_MSGSND(b) stringify_in_c(.long PPC_INST_MSGSND | \
- ___PPC_RB(b))
-#define PPC_MSGSYNC stringify_in_c(.long PPC_INST_MSGSYNC)
-#define PPC_MSGCLR(b) stringify_in_c(.long PPC_INST_MSGCLR | \
- ___PPC_RB(b))
-#define PPC_MSGSNDP(b) stringify_in_c(.long PPC_INST_MSGSNDP | \
- ___PPC_RB(b))
-#define PPC_MSGCLRP(b) stringify_in_c(.long PPC_INST_MSGCLRP | \
- ___PPC_RB(b))
-#define PPC_PASTE(a, b) stringify_in_c(.long PPC_INST_PASTE | \
- ___PPC_RA(a) | ___PPC_RB(b))
-#define PPC_POPCNTB(a, s) stringify_in_c(.long PPC_INST_POPCNTB | \
- __PPC_RA(a) | __PPC_RS(s))
-#define PPC_POPCNTD(a, s) stringify_in_c(.long PPC_INST_POPCNTD | \
- __PPC_RA(a) | __PPC_RS(s))
-#define PPC_POPCNTW(a, s) stringify_in_c(.long PPC_INST_POPCNTW | \
- __PPC_RA(a) | __PPC_RS(s))
-#define PPC_RFCI stringify_in_c(.long PPC_INST_RFCI)
-#define PPC_RFDI stringify_in_c(.long PPC_INST_RFDI)
-#define PPC_RFMCI stringify_in_c(.long PPC_INST_RFMCI)
-#define PPC_TLBILX(t, a, b) stringify_in_c(.long PPC_INST_TLBILX | \
- __PPC_T_TLB(t) | __PPC_RA0(a) | __PPC_RB(b))
+#define PPC_BCCTR_FLUSH stringify_in_c(.long PPC_INST_BCCTR_FLUSH)
+#define PPC_CP_ABORT stringify_in_c(.long PPC_RAW_CP_ABORT)
+#define PPC_COPY(a, b) stringify_in_c(.long PPC_RAW_COPY(a, b))
+#define PPC_DARN(t, l) stringify_in_c(.long PPC_RAW_DARN(t, l))
+#define PPC_DCBAL(a, b) stringify_in_c(.long PPC_RAW_DCBAL(a, b))
+#define PPC_DCBZL(a, b) stringify_in_c(.long PPC_RAW_DCBZL(a, b))
+#define PPC_DIVDE(t, a, b) stringify_in_c(.long PPC_RAW_DIVDE(t, a, b))
+#define PPC_DIVDEU(t, a, b) stringify_in_c(.long PPC_RAW_DIVDEU(t, a, b))
+#define PPC_DSSALL stringify_in_c(.long PPC_INST_DSSALL)
+#define PPC_LQARX(t, a, b, eh) stringify_in_c(.long PPC_RAW_LQARX(t, a, b, eh))
+#define PPC_STQCX(t, a, b) stringify_in_c(.long PPC_RAW_STQCX(t, a, b))
+#define PPC_MADDHD(t, a, b, c) stringify_in_c(.long PPC_RAW_MADDHD(t, a, b, c))
+#define PPC_MADDHDU(t, a, b, c) stringify_in_c(.long PPC_RAW_MADDHDU(t, a, b, c))
+#define PPC_MADDLD(t, a, b, c) stringify_in_c(.long PPC_RAW_MADDLD(t, a, b, c))
+#define PPC_MSGSND(b) stringify_in_c(.long PPC_RAW_MSGSND(b))
+#define PPC_MSGSYNC stringify_in_c(.long PPC_RAW_MSGSYNC)
+#define PPC_MSGCLR(b) stringify_in_c(.long PPC_RAW_MSGCLR(b))
+#define PPC_MSGSNDP(b) stringify_in_c(.long PPC_RAW_MSGSNDP(b))
+#define PPC_MSGCLRP(b) stringify_in_c(.long PPC_RAW_MSGCLRP(b))
+#define PPC_PASTE(a, b) stringify_in_c(.long PPC_RAW_PASTE(a, b))
+#define PPC_POPCNTB(a, s) stringify_in_c(.long PPC_RAW_POPCNTB(a, s))
+#define PPC_POPCNTD(a, s) stringify_in_c(.long PPC_RAW_POPCNTD(a, s))
+#define PPC_POPCNTW(a, s) stringify_in_c(.long PPC_RAW_POPCNTW(a, s))
+#define PPC_RFCI stringify_in_c(.long PPC_RAW_RFCI)
+#define PPC_RFDI stringify_in_c(.long PPC_RAW_RFDI)
+#define PPC_RFMCI stringify_in_c(.long PPC_RAW_RFMCI)
+#define PPC_TLBILX(t, a, b) stringify_in_c(.long PPC_RAW_TLBILX(t, a, b))
#define PPC_TLBILX_ALL(a, b) PPC_TLBILX(0, a, b)
#define PPC_TLBILX_PID(a, b) PPC_TLBILX(1, a, b)
#define PPC_TLBILX_VA(a, b) PPC_TLBILX(3, a, b)
-#define PPC_WAIT(w) stringify_in_c(.long PPC_INST_WAIT | \
- __PPC_WC(w))
-#define PPC_TLBIE(lp,a) stringify_in_c(.long PPC_INST_TLBIE | \
- ___PPC_RB(a) | ___PPC_RS(lp))
-#define PPC_TLBIE_5(rb,rs,ric,prs,r) \
- stringify_in_c(.long PPC_INST_TLBIE | \
- ___PPC_RB(rb) | ___PPC_RS(rs) | \
- ___PPC_RIC(ric) | ___PPC_PRS(prs) | \
- ___PPC_R(r))
+#define PPC_WAIT_v203 stringify_in_c(.long PPC_RAW_WAIT_v203)
+#define PPC_WAIT(w, p) stringify_in_c(.long PPC_RAW_WAIT(w, p))
+#define PPC_TLBIE(lp, a) stringify_in_c(.long PPC_RAW_TLBIE(lp, a))
+#define PPC_TLBIE_5(rb, rs, ric, prs, r) \
+ stringify_in_c(.long PPC_RAW_TLBIE_5(rb, rs, ric, prs, r))
#define PPC_TLBIEL(rb,rs,ric,prs,r) \
- stringify_in_c(.long PPC_INST_TLBIEL | \
- ___PPC_RB(rb) | ___PPC_RS(rs) | \
- ___PPC_RIC(ric) | ___PPC_PRS(prs) | \
- ___PPC_R(r))
-#define PPC_TLBSRX_DOT(a,b) stringify_in_c(.long PPC_INST_TLBSRX_DOT | \
- __PPC_RA0(a) | __PPC_RB(b))
-#define PPC_TLBIVAX(a,b) stringify_in_c(.long PPC_INST_TLBIVAX | \
- __PPC_RA0(a) | __PPC_RB(b))
-
-#define PPC_ERATWE(s, a, w) stringify_in_c(.long PPC_INST_ERATWE | \
- __PPC_RS(s) | __PPC_RA(a) | __PPC_WS(w))
-#define PPC_ERATRE(s, a, w) stringify_in_c(.long PPC_INST_ERATRE | \
- __PPC_RS(s) | __PPC_RA(a) | __PPC_WS(w))
-#define PPC_ERATILX(t, a, b) stringify_in_c(.long PPC_INST_ERATILX | \
- __PPC_T_TLB(t) | __PPC_RA0(a) | \
- __PPC_RB(b))
-#define PPC_ERATIVAX(s, a, b) stringify_in_c(.long PPC_INST_ERATIVAX | \
- __PPC_RS(s) | __PPC_RA0(a) | __PPC_RB(b))
-#define PPC_ERATSX(t, a, w) stringify_in_c(.long PPC_INST_ERATSX | \
- __PPC_RS(t) | __PPC_RA0(a) | __PPC_RB(b))
-#define PPC_ERATSX_DOT(t, a, w) stringify_in_c(.long PPC_INST_ERATSX_DOT | \
- __PPC_RS(t) | __PPC_RA0(a) | __PPC_RB(b))
-#define PPC_SLBFEE_DOT(t, b) stringify_in_c(.long PPC_INST_SLBFEE | \
- __PPC_RT(t) | __PPC_RB(b))
-#define __PPC_SLBFEE_DOT(t, b) stringify_in_c(.long PPC_INST_SLBFEE | \
- ___PPC_RT(t) | ___PPC_RB(b))
-#define PPC_ICBT(c,a,b) stringify_in_c(.long PPC_INST_ICBT | \
- __PPC_CT(c) | __PPC_RA0(a) | __PPC_RB(b))
+ stringify_in_c(.long PPC_RAW_TLBIEL(rb, rs, ric, prs, r))
+#define PPC_TLBIEL_v205(rb, l) stringify_in_c(.long PPC_RAW_TLBIEL_v205(rb, l))
+#define PPC_TLBSRX_DOT(a, b) stringify_in_c(.long PPC_RAW_TLBSRX_DOT(a, b))
+#define PPC_TLBIVAX(a, b) stringify_in_c(.long PPC_RAW_TLBIVAX(a, b))
+
+#define PPC_ERATWE(s, a, w) stringify_in_c(.long PPC_RAW_ERATWE(s, a, w))
+#define PPC_ERATRE(s, a, w) stringify_in_c(.long PPC_RAW_ERATRE(a, a, w))
+#define PPC_ERATILX(t, a, b) stringify_in_c(.long PPC_RAW_ERATILX(t, a, b))
+#define PPC_ERATIVAX(s, a, b) stringify_in_c(.long PPC_RAW_ERATIVAX(s, a, b))
+#define PPC_ERATSX(t, a, w) stringify_in_c(.long PPC_RAW_ERATSX(t, a, w))
+#define PPC_ERATSX_DOT(t, a, w) stringify_in_c(.long PPC_RAW_ERATSX_DOT(t, a, w))
+#define PPC_SLBFEE_DOT(t, b) stringify_in_c(.long PPC_RAW_SLBFEE_DOT(t, b))
+#define __PPC_SLBFEE_DOT(t, b) stringify_in_c(.long __PPC_RAW_SLBFEE_DOT(t, b))
+#define PPC_ICBT(c, a, b) stringify_in_c(.long PPC_RAW_ICBT(c, a, b))
/* PASemi instructions */
-#define LBZCIX(t,a,b) stringify_in_c(.long PPC_INST_LBZCIX | \
- __PPC_RT(t) | __PPC_RA(a) | __PPC_RB(b))
-#define STBCIX(s,a,b) stringify_in_c(.long PPC_INST_STBCIX | \
- __PPC_RS(s) | __PPC_RA(a) | __PPC_RB(b))
-
-/*
- * Define what the VSX XX1 form instructions will look like, then add
- * the 128 bit load store instructions based on that.
- */
-#define VSX_XX1(s, a, b) (__PPC_XS(s) | __PPC_RA(a) | __PPC_RB(b))
-#define VSX_XX3(t, a, b) (__PPC_XT(t) | __PPC_XA(a) | __PPC_XB(b))
-#define STXVD2X(s, a, b) stringify_in_c(.long PPC_INST_STXVD2X | \
- VSX_XX1((s), a, b))
-#define LXVD2X(s, a, b) stringify_in_c(.long PPC_INST_LXVD2X | \
- VSX_XX1((s), a, b))
-#define MFVRD(a, t) stringify_in_c(.long PPC_INST_MFVSRD | \
- VSX_XX1((t)+32, a, R0))
-#define MTVRD(t, a) stringify_in_c(.long PPC_INST_MTVSRD | \
- VSX_XX1((t)+32, a, R0))
-#define VPMSUMW(t, a, b) stringify_in_c(.long PPC_INST_VPMSUMW | \
- VSX_XX3((t), a, b))
-#define VPMSUMD(t, a, b) stringify_in_c(.long PPC_INST_VPMSUMD | \
- VSX_XX3((t), a, b))
-#define XXLOR(t, a, b) stringify_in_c(.long PPC_INST_XXLOR | \
- VSX_XX3((t), a, b))
-#define XXSWAPD(t, a) stringify_in_c(.long PPC_INST_XXSWAPD | \
- VSX_XX3((t), a, a))
-#define XVCPSGNDP(t, a, b) stringify_in_c(.long (PPC_INST_XVCPSGNDP | \
- VSX_XX3((t), (a), (b))))
+#define LBZCIX(t, a, b) stringify_in_c(.long PPC_RAW_LBZCIX(t, a, b))
+#define STBCIX(s, a, b) stringify_in_c(.long PPC_RAW_STBCIX(s, a, b))
+#define PPC_DCBFPS(a, b) stringify_in_c(.long PPC_RAW_DCBFPS(a, b))
+#define PPC_DCBSTPS(a, b) stringify_in_c(.long PPC_RAW_DCBSTPS(a, b))
+#define PPC_PHWSYNC stringify_in_c(.long PPC_RAW_PHWSYNC)
+#define PPC_PLWSYNC stringify_in_c(.long PPC_RAW_PLWSYNC)
+#define STXVD2X(s, a, b) stringify_in_c(.long PPC_RAW_STXVD2X(s, a, b))
+#define LXVD2X(s, a, b) stringify_in_c(.long PPC_RAW_LXVD2X(s, a, b))
+#define MFVRD(a, t) stringify_in_c(.long PPC_RAW_MFVRD(a, t))
+#define MTVRD(t, a) stringify_in_c(.long PPC_RAW_MTVRD(t, a))
+#define VPMSUMW(t, a, b) stringify_in_c(.long PPC_RAW_VPMSUMW(t, a, b))
+#define VPMSUMD(t, a, b) stringify_in_c(.long PPC_RAW_VPMSUMD(t, a, b))
+#define XXLOR(t, a, b) stringify_in_c(.long PPC_RAW_XXLOR(t, a, b))
+#define XXSWAPD(t, a) stringify_in_c(.long PPC_RAW_XXSWAPD(t, a))
+#define XVCPSGNDP(t, a, b) stringify_in_c(.long (PPC_RAW_XVCPSGNDP(t, a, b)))
#define VPERMXOR(vrt, vra, vrb, vrc) \
- stringify_in_c(.long (PPC_INST_VPERMXOR | \
- ___PPC_RT(vrt) | ___PPC_RA(vra) | \
- ___PPC_RB(vrb) | (((vrc) & 0x1f) << 6)))
+ stringify_in_c(.long (PPC_RAW_VPERMXOR(vrt, vra, vrb, vrc)))
-#define PPC_NAP stringify_in_c(.long PPC_INST_NAP)
-#define PPC_SLEEP stringify_in_c(.long PPC_INST_SLEEP)
-#define PPC_WINKLE stringify_in_c(.long PPC_INST_WINKLE)
+#define PPC_NAP stringify_in_c(.long PPC_RAW_NAP)
+#define PPC_SLEEP stringify_in_c(.long PPC_RAW_SLEEP)
+#define PPC_WINKLE stringify_in_c(.long PPC_RAW_WINKLE)
-#define PPC_STOP stringify_in_c(.long PPC_INST_STOP)
+#define PPC_STOP stringify_in_c(.long PPC_RAW_STOP)
/* BHRB instructions */
-#define PPC_CLRBHRB stringify_in_c(.long PPC_INST_CLRBHRB)
-#define PPC_MFBHRBE(r, n) stringify_in_c(.long PPC_INST_BHRBE | \
- __PPC_RT(r) | \
- (((n) & 0x3ff) << 11))
+#define PPC_CLRBHRB stringify_in_c(.long PPC_RAW_CLRBHRB)
+#define PPC_MFBHRBE(r, n) stringify_in_c(.long PPC_RAW_MFBHRBE(r, n))
/* Transactional memory instructions */
-#define TRECHKPT stringify_in_c(.long PPC_INST_TRECHKPT)
-#define TRECLAIM(r) stringify_in_c(.long PPC_INST_TRECLAIM \
- | __PPC_RA(r))
-#define TABORT(r) stringify_in_c(.long PPC_INST_TABORT \
- | __PPC_RA(r))
+#define TRECHKPT stringify_in_c(.long PPC_RAW_TRECHKPT)
+#define TRECLAIM(r) stringify_in_c(.long PPC_RAW_TRECLAIM(r))
+#define TABORT(r) stringify_in_c(.long PPC_RAW_TABORT(r))
/* book3e thread control instructions */
-#define TMRN(x) ((((x) & 0x1f) << 16) | (((x) & 0x3e0) << 6))
-#define MTTMR(tmr, r) stringify_in_c(.long PPC_INST_MTTMR | \
- TMRN(tmr) | ___PPC_RS(r))
-#define MFTMR(tmr, r) stringify_in_c(.long PPC_INST_MFTMR | \
- TMRN(tmr) | ___PPC_RT(r))
+#define MTTMR(tmr, r) stringify_in_c(.long PPC_RAW_MTTMR(tmr, r))
+#define MFTMR(tmr, r) stringify_in_c(.long PPC_RAW_MFTMR(tmr, r))
/* Coprocessor instructions */
-#define PPC_ICSWX(s, a, b) stringify_in_c(.long PPC_INST_ICSWX | \
- ___PPC_RS(s) | \
- ___PPC_RA(a) | \
- ___PPC_RB(b))
-#define PPC_ICSWEPX(s, a, b) stringify_in_c(.long PPC_INST_ICSWEPX | \
- ___PPC_RS(s) | \
- ___PPC_RA(a) | \
- ___PPC_RB(b))
-
-#define PPC_SLBIA(IH) stringify_in_c(.long PPC_INST_SLBIA | \
- ((IH & 0x7) << 21))
+#define PPC_ICSWX(s, a, b) stringify_in_c(.long PPC_RAW_ICSWX(s, a, b))
+#define PPC_ICSWEPX(s, a, b) stringify_in_c(.long PPC_RAW_ICSWEPX(s, a, b))
+
+#define PPC_SLBIA(IH) stringify_in_c(.long PPC_RAW_SLBIA(IH))
/*
* These may only be used on ISA v3.0 or later (aka. CPU_FTR_ARCH_300, radix
@@ -608,12 +683,8 @@
#define PPC_RADIX_INVALIDATE_ERAT_USER PPC_SLBIA(3)
#define PPC_RADIX_INVALIDATE_ERAT_GUEST PPC_SLBIA(6)
-#define VCMPEQUD_RC(vrt, vra, vrb) stringify_in_c(.long PPC_INST_VCMPEQUD | \
- ___PPC_RT(vrt) | ___PPC_RA(vra) | \
- ___PPC_RB(vrb) | __PPC_RC21)
+#define VCMPEQUD_RC(vrt, vra, vrb) stringify_in_c(.long PPC_RAW_VCMPEQUD_RC(vrt, vra, vrb))
-#define VCMPEQUB_RC(vrt, vra, vrb) stringify_in_c(.long PPC_INST_VCMPEQUB | \
- ___PPC_RT(vrt) | ___PPC_RA(vra) | \
- ___PPC_RB(vrb) | __PPC_RC21)
+#define VCMPEQUB_RC(vrt, vra, vrb) stringify_in_c(.long PPC_RAW_VCMPEQUB_RC(vrt, vra, vrb))
#endif /* _ASM_POWERPC_PPC_OPCODE_H */
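The stringified forms above are meant to be pasted into inline asm strings, while the underlying PPC_RAW_* macros yield the bare 32-bit encodings for code that assembles instructions at runtime. A minimal usage sketch (rb and rs are ordinary register operands; the RIC/PRS/R values are illustrative):

	asm volatile(PPC_TLBIE_5(%0, %1, 0, 0, 1)
		     : : "r" (rb), "r" (rs) : "memory");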
diff --git a/arch/powerpc/include/asm/ppc-pci.h b/arch/powerpc/include/asm/ppc-pci.h
index 7f4be5a05eb3..f6cf0159024e 100644
--- a/arch/powerpc/include/asm/ppc-pci.h
+++ b/arch/powerpc/include/asm/ppc-pci.h
@@ -13,10 +13,6 @@
extern unsigned long isa_io_base;
-extern void pci_setup_phb_io(struct pci_controller *hose, int primary);
-extern void pci_setup_phb_io_dynamic(struct pci_controller *hose, int primary);
-
-
extern struct list_head hose_list;
extern struct pci_dev *isa_bridge_pcidev; /* may be NULL if no ISA bus */
@@ -32,9 +28,6 @@ struct pci_dn;
void *pci_traverse_device_nodes(struct device_node *start,
void *(*fn)(struct device_node *, void *),
void *data);
-void *traverse_pci_dn(struct pci_dn *root,
- void *(*fn)(struct pci_dn *, void *),
- void *data);
extern void pci_devs_phb_init_dynamic(struct pci_controller *phb);
/* From rtas_pci.h */
@@ -62,11 +55,6 @@ void eeh_pe_dev_mode_mark(struct eeh_pe *pe, int mode);
void eeh_sysfs_add_device(struct pci_dev *pdev);
void eeh_sysfs_remove_device(struct pci_dev *pdev);
-static inline const char *eeh_driver_name(struct pci_dev *pdev)
-{
- return (pdev && pdev->driver) ? pdev->driver->name : "<null>";
-}
-
#endif /* CONFIG_EEH */
#define PCI_BUSNO(bdfn) ((bdfn >> 8) & 0xff)
diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
index 6b03dff61a05..753a2757bcd4 100644
--- a/arch/powerpc/include/asm/ppc_asm.h
+++ b/arch/powerpc/include/asm/ppc_asm.h
@@ -10,89 +10,69 @@
#include <asm/ppc-opcode.h>
#include <asm/firmware.h>
#include <asm/feature-fixups.h>
+#include <asm/extable.h>
#ifdef __ASSEMBLY__
#define SZL (BITS_PER_LONG/8)
/*
- * Stuff for accurate CPU time accounting.
- * These macros handle transitions between user and system state
- * in exception entry and exit and accumulate time to the
- * user_time and system_time fields in the paca.
+ * This expands to a sequence of operations with reg incrementing from
+ * start to end inclusive, of this form:
+ *
+ * op reg, (offset + (width * reg))(base)
+ *
+ * Note that offset is not the offset of the first operation unless start
+ * is zero (or width is zero).
*/
+.macro OP_REGS op, width, start, end, base, offset
+ .Lreg=\start
+ .rept (\end - \start + 1)
+ \op .Lreg, \offset + \width * .Lreg(\base)
+ .Lreg=.Lreg+1
+ .endr
+.endm
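As a minimal sketch of the unrolling (assuming GPR0 is the stack-frame offset of gpr[0], as used by the callers below), SAVE_GPRS(14, 15, r1) on 64-bit is effectively:

	std	14, GPR0 + 8 * 14(r1)
	std	15, GPR0 + 8 * 15(r1)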
-#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
-#define ACCOUNT_CPU_USER_ENTRY(ptr, ra, rb)
-#define ACCOUNT_CPU_USER_EXIT(ptr, ra, rb)
-#define ACCOUNT_STOLEN_TIME
-#else
-#define ACCOUNT_CPU_USER_ENTRY(ptr, ra, rb) \
- MFTB(ra); /* get timebase */ \
- PPC_LL rb, ACCOUNT_STARTTIME_USER(ptr); \
- PPC_STL ra, ACCOUNT_STARTTIME(ptr); \
- subf rb,rb,ra; /* subtract start value */ \
- PPC_LL ra, ACCOUNT_USER_TIME(ptr); \
- add ra,ra,rb; /* add on to user time */ \
- PPC_STL ra, ACCOUNT_USER_TIME(ptr); \
-
-#define ACCOUNT_CPU_USER_EXIT(ptr, ra, rb) \
- MFTB(ra); /* get timebase */ \
- PPC_LL rb, ACCOUNT_STARTTIME(ptr); \
- PPC_STL ra, ACCOUNT_STARTTIME_USER(ptr); \
- subf rb,rb,ra; /* subtract start value */ \
- PPC_LL ra, ACCOUNT_SYSTEM_TIME(ptr); \
- add ra,ra,rb; /* add on to system time */ \
- PPC_STL ra, ACCOUNT_SYSTEM_TIME(ptr)
-
-#ifdef CONFIG_PPC_SPLPAR
-#define ACCOUNT_STOLEN_TIME \
-BEGIN_FW_FTR_SECTION; \
- beq 33f; \
- /* from user - see if there are any DTL entries to process */ \
- ld r10,PACALPPACAPTR(r13); /* get ptr to VPA */ \
- ld r11,PACA_DTL_RIDX(r13); /* get log read index */ \
- addi r10,r10,LPPACA_DTLIDX; \
- LDX_BE r10,0,r10; /* get log write index */ \
- cmpd cr1,r11,r10; \
- beq+ cr1,33f; \
- bl accumulate_stolen_time; \
- ld r12,_MSR(r1); \
- andi. r10,r12,MSR_PR; /* Restore cr0 (coming from user) */ \
-33: \
-END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
-
-#else /* CONFIG_PPC_SPLPAR */
-#define ACCOUNT_STOLEN_TIME
-
-#endif /* CONFIG_PPC_SPLPAR */
-
-#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
+/*
+ * This expands to a sequence of register clears for regs start to end
+ * inclusive, of the form:
+ *
+ * li rN, 0
+ */
+.macro ZEROIZE_REGS start, end
+ .Lreg=\start
+ .rept (\end - \start + 1)
+ li .Lreg, 0
+ .Lreg=.Lreg+1
+ .endr
+.endm
/*
* Macros for storing registers into and loading registers from
* exception frames.
*/
#ifdef __powerpc64__
-#define SAVE_GPR(n, base) std n,GPR0+8*(n)(base)
-#define REST_GPR(n, base) ld n,GPR0+8*(n)(base)
-#define SAVE_NVGPRS(base) SAVE_8GPRS(14, base); SAVE_10GPRS(22, base)
-#define REST_NVGPRS(base) REST_8GPRS(14, base); REST_10GPRS(22, base)
+#define SAVE_GPRS(start, end, base) OP_REGS std, 8, start, end, base, GPR0
+#define REST_GPRS(start, end, base) OP_REGS ld, 8, start, end, base, GPR0
+#define SAVE_NVGPRS(base) SAVE_GPRS(14, 31, base)
+#define REST_NVGPRS(base) REST_GPRS(14, 31, base)
#else
-#define SAVE_GPR(n, base) stw n,GPR0+4*(n)(base)
-#define REST_GPR(n, base) lwz n,GPR0+4*(n)(base)
-#define SAVE_NVGPRS(base) stmw 13, GPR0+4*13(base)
-#define REST_NVGPRS(base) lmw 13, GPR0+4*13(base)
+#define SAVE_GPRS(start, end, base) OP_REGS stw, 4, start, end, base, GPR0
+#define REST_GPRS(start, end, base) OP_REGS lwz, 4, start, end, base, GPR0
+#define SAVE_NVGPRS(base) SAVE_GPRS(13, 31, base)
+#define REST_NVGPRS(base) REST_GPRS(13, 31, base)
#endif
-#define SAVE_2GPRS(n, base) SAVE_GPR(n, base); SAVE_GPR(n+1, base)
-#define SAVE_4GPRS(n, base) SAVE_2GPRS(n, base); SAVE_2GPRS(n+2, base)
-#define SAVE_8GPRS(n, base) SAVE_4GPRS(n, base); SAVE_4GPRS(n+4, base)
-#define SAVE_10GPRS(n, base) SAVE_8GPRS(n, base); SAVE_2GPRS(n+8, base)
-#define REST_2GPRS(n, base) REST_GPR(n, base); REST_GPR(n+1, base)
-#define REST_4GPRS(n, base) REST_2GPRS(n, base); REST_2GPRS(n+2, base)
-#define REST_8GPRS(n, base) REST_4GPRS(n, base); REST_4GPRS(n+4, base)
-#define REST_10GPRS(n, base) REST_8GPRS(n, base); REST_2GPRS(n+8, base)
+#define ZEROIZE_GPRS(start, end) ZEROIZE_REGS start, end
+#ifdef __powerpc64__
+#define ZEROIZE_NVGPRS() ZEROIZE_GPRS(14, 31)
+#else
+#define ZEROIZE_NVGPRS() ZEROIZE_GPRS(13, 31)
+#endif
+#define ZEROIZE_GPR(n) ZEROIZE_GPRS(n, n)
+
+#define SAVE_GPR(n, base) SAVE_GPRS(n, n, base)
+#define REST_GPR(n, base) REST_GPRS(n, n, base)
#define SAVE_FPR(n, base) stfd n,8*TS_FPRWIDTH*(n)(base)
#define SAVE_2FPRS(n, base) SAVE_FPR(n, base); SAVE_FPR(n+1, base)
@@ -180,13 +160,18 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
#define VCPU_GPR(n) __VCPU_GPR(__REG_##n)
#ifdef __KERNEL__
-#ifdef CONFIG_PPC64
+
+/*
+ * We use __powerpc64__ here because we want the compat VDSO to use the 32-bit
+ * version below in the else case of the ifdef.
+ */
+#ifdef __powerpc64__
#define STACKFRAMESIZE 256
#define __STK_REG(i) (112 + ((i)-14)*8)
#define STK_REG(i) __STK_REG(__REG_##i)
-#ifdef PPC64_ELF_ABI_v2
+#ifdef CONFIG_PPC64_ELF_ABI_V2
#define STK_GOT 24
#define __STK_PARAM(i) (32 + ((i)-3)*8)
#else
@@ -195,7 +180,7 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
#endif
#define STK_PARAM(i) __STK_PARAM(__REG_##i)
-#ifdef PPC64_ELF_ABI_v2
+#ifdef CONFIG_PPC64_ELF_ABI_V2
#define _GLOBAL(name) \
.align 2 ; \
@@ -240,17 +225,14 @@ GLUE(.,name):
#else /* 32-bit */
-#define _ENTRY(n) \
- .globl n; \
-n:
-
#define _GLOBAL(n) \
- .stabs __stringify(n:F-1),N_FUN,0,0,n;\
.globl n; \
n:
#define _GLOBAL_TOC(name) _GLOBAL(name)
+#define DOTSYM(a) a
+
#endif
/*
@@ -306,7 +288,7 @@ n:
/* Be careful, this will clobber the lr register. */
#define LOAD_REG_ADDR_PIC(reg, name) \
- bl 0f; \
+ bcl 20,31,$+4; \
0: mflr reg; \
addis reg,reg,(name - 0b)@ha; \
addi reg,reg,(name - 0b)@l;
@@ -345,6 +327,12 @@ n:
#ifdef __powerpc64__
+#define __LOAD_PACA_TOC(reg) \
+ ld reg,PACATOC(r13)
+
+#define LOAD_PACA_TOC() \
+ __LOAD_PACA_TOC(r2)
+
#define LOAD_REG_IMMEDIATE(reg, expr) __LOAD_REG_IMMEDIATE reg, expr
#define LOAD_REG_IMMEDIATE_SYM(reg, tmp, expr) \
@@ -355,7 +343,19 @@ n:
rldimi reg, tmp, 32, 0
#define LOAD_REG_ADDR(reg,name) \
- ld reg,name@got(r2)
+ addis reg,r2,name@toc@ha; \
+ addi reg,reg,name@toc@l
+
+#ifdef CONFIG_PPC_BOOK3E_64
+/*
+ * This is used in register-constrained interrupt handlers. Not to be used
+ * by BOOK3S. ld complains with "got/toc optimization is not supported" if r2
+ * is not used for the TOC offset, so use @got(tocreg). If the interrupt
+ * handlers saved r2 instead, LOAD_REG_ADDR could be used.
+ */
+#define LOAD_REG_ADDR_ALTTOC(reg,tocreg,name) \
+ ld reg,name@got(tocreg)
+#endif
#define LOAD_REG_ADDRBASE(reg,name) LOAD_REG_ADDR(reg,name)
#define ADDROFF(name) 0
@@ -382,17 +382,7 @@ n:
#endif
/* various errata or part fixups */
-#ifdef CONFIG_PPC601_SYNC_FIX
-#define SYNC sync; isync
-#define SYNC_601 sync
-#define ISYNC_601 isync
-#else
-#define SYNC
-#define SYNC_601
-#define ISYNC_601
-#endif
-
-#if defined(CONFIG_PPC_CELL) || defined(CONFIG_PPC_FSL_BOOK3E)
+#if defined(CONFIG_PPC_CELL) || defined(CONFIG_PPC_E500)
#define MFTB(dest) \
90: mfspr dest, SPRN_TBRL; \
BEGIN_FTR_SECTION_NESTED(96); \
@@ -411,8 +401,7 @@ END_FTR_SECTION_NESTED(CPU_FTR_CELL_TB_BUG, CPU_FTR_CELL_TB_BUG, 96)
#define MFTBU(dest) mfspr dest, SPRN_TBRU
#endif
-/* tlbsync is not implemented on 601 */
-#if !defined(CONFIG_SMP) || defined(CONFIG_PPC_BOOK3S_601)
+#ifndef CONFIG_SMP
#define TLBSYNC
#else
#define TLBSYNC tlbsync; sync
@@ -506,15 +495,9 @@ END_FTR_SECTION_NESTED(CPU_FTR_CELL_TB_BUG, CPU_FTR_CELL_TB_BUG, 96)
#endif
#ifdef CONFIG_PPC_BOOK3S_64
-#define RFI rfid
#define MTMSRD(r) mtmsrd r
#define MTMSR_EERI(reg) mtmsrd reg,1
#else
-#ifndef CONFIG_40x
-#define RFI rfi
-#else
-#define RFI rfi; b . /* Prevent prefetch past rfi */
-#endif
#define MTMSRD(r) mtmsr r
#define MTMSR_EERI(reg) mtmsr reg
#endif
@@ -749,11 +732,7 @@ END_FTR_SECTION_NESTED(CPU_FTR_CELL_TB_BUG, CPU_FTR_CELL_TB_BUG, 96)
#define evr30 30
#define evr31 31
-/* some stab codes */
-#define N_FUN 36
-#define N_RSYM 64
-#define N_SLINE 68
-#define N_SO 100
+#define RFSCV .long 0x4c0000a4
/*
* Create an endian fixup trampoline
@@ -770,11 +749,11 @@ END_FTR_SECTION_NESTED(CPU_FTR_CELL_TB_BUG, CPU_FTR_CELL_TB_BUG, 96)
* kernel is built for.
*/
-#ifdef CONFIG_PPC_BOOK3E
+#ifdef CONFIG_PPC_BOOK3E_64
#define FIXUP_ENDIAN
#else
/*
- * This version may be used in in HV or non-HV context.
+ * This version may be used in HV or non-HV context.
* MSR[EE] must be disabled.
*/
#define FIXUP_ENDIAN \
@@ -810,21 +789,26 @@ END_FTR_SECTION_NESTED(CPU_FTR_CELL_TB_BUG, CPU_FTR_CELL_TB_BUG, 96)
.long 0x2402004c; /* hrfid */ \
191:
-#endif /* !CONFIG_PPC_BOOK3E */
+#endif /* !CONFIG_PPC_BOOK3E_64 */
#endif /* __ASSEMBLY__ */
-/*
- * Helper macro for exception table entries
- */
-#define EX_TABLE(_fault, _target) \
- stringify_in_c(.section __ex_table,"a";)\
- stringify_in_c(.balign 4;) \
- stringify_in_c(.long (_fault) - . ;) \
- stringify_in_c(.long (_target) - . ;) \
+#define SOFT_MASK_TABLE(_start, _end) \
+ stringify_in_c(.section __soft_mask_table,"a";)\
+ stringify_in_c(.balign 8;) \
+ stringify_in_c(.llong (_start);) \
+ stringify_in_c(.llong (_end);) \
+ stringify_in_c(.previous)
+
+#define RESTART_TABLE(_start, _end, _target) \
+ stringify_in_c(.section __restart_table,"a";)\
+ stringify_in_c(.balign 8;) \
+ stringify_in_c(.llong (_start);) \
+ stringify_in_c(.llong (_end);) \
+ stringify_in_c(.llong (_target);) \
stringify_in_c(.previous)
-#ifdef CONFIG_PPC_FSL_BOOK3E
+#ifdef CONFIG_PPC_E500
#define BTB_FLUSH(reg) \
lis reg,BUCSR_INIT@h; \
ori reg,reg,BUCSR_INIT@l; \
@@ -832,6 +816,6 @@ END_FTR_SECTION_NESTED(CPU_FTR_CELL_TB_BUG, CPU_FTR_CELL_TB_BUG, 96)
isync;
#else
#define BTB_FLUSH(reg)
-#endif /* CONFIG_PPC_FSL_BOOK3E */
+#endif /* CONFIG_PPC_E500 */
#endif /* _ASM_POWERPC_PPC_ASM_H */
diff --git a/arch/powerpc/include/asm/probes.h b/arch/powerpc/include/asm/probes.h
index 84dd1addd434..e77a2ed7d938 100644
--- a/arch/powerpc/include/asm/probes.h
+++ b/arch/powerpc/include/asm/probes.h
@@ -8,9 +8,10 @@
* Copyright IBM Corporation, 2012
*/
#include <linux/types.h>
+#include <asm/disassemble.h>
+#include <asm/ppc-opcode.h>
-typedef u32 ppc_opcode_t;
-#define BREAKPOINT_INSTRUCTION 0x7fe00008 /* trap */
+#define BREAKPOINT_INSTRUCTION PPC_RAW_TRAP() /* trap */
/* Trap definitions per ISA */
#define IS_TW(instr) (((instr) & 0xfc0007fe) == 0x7c000008)
@@ -31,17 +32,52 @@ typedef u32 ppc_opcode_t;
#define MSR_SINGLESTEP (MSR_SE)
#endif
+static inline bool can_single_step(u32 inst)
+{
+ switch (get_op(inst)) {
+ case OP_TRAP_64: return false;
+ case OP_TRAP: return false;
+ case OP_SC: return false;
+ case OP_19:
+ switch (get_xop(inst)) {
+ case OP_19_XOP_RFID: return false;
+ case OP_19_XOP_RFMCI: return false;
+ case OP_19_XOP_RFDI: return false;
+ case OP_19_XOP_RFI: return false;
+ case OP_19_XOP_RFCI: return false;
+ case OP_19_XOP_RFSCV: return false;
+ case OP_19_XOP_HRFID: return false;
+ case OP_19_XOP_URFID: return false;
+ case OP_19_XOP_STOP: return false;
+ case OP_19_XOP_DOZE: return false;
+ case OP_19_XOP_NAP: return false;
+ case OP_19_XOP_SLEEP: return false;
+ case OP_19_XOP_RVWINKLE: return false;
+ }
+ break;
+ case OP_31:
+ switch (get_xop(inst)) {
+ case OP_31_XOP_TRAP: return false;
+ case OP_31_XOP_TRAP_64: return false;
+ case OP_31_XOP_MTMSR: return false;
+ case OP_31_XOP_MTMSRD: return false;
+ }
+ break;
+ }
+ return true;
+}
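A hedged sketch of the intended caller-side check; insn is a previously fetched ppc_inst_t, ppc_inst_val() (from asm/inst.h) yields its primary 32-bit word, and the error value is illustrative:

	if (!can_single_step(ppc_inst_val(insn)))
		return -EINVAL;	/* refuse to place a probe on this instruction */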
+
/* Enable single stepping for the current task */
static inline void enable_single_step(struct pt_regs *regs)
{
- regs->msr |= MSR_SINGLESTEP;
+ regs_set_return_msr(regs, regs->msr | MSR_SINGLESTEP);
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
/*
* We turn off Critical Input Exception(CE) to ensure that the single
* step will be for the instruction we have the probe on; if we don't,
* it is possible we'd get the single step reported for CE.
*/
- regs->msr &= ~MSR_CE;
+ regs_set_return_msr(regs, regs->msr & ~MSR_CE);
mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) | DBCR0_IC | DBCR0_IDM);
#ifdef CONFIG_PPC_47x
isync();
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index eedcbfb9a6ff..631802999d59 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -6,6 +6,8 @@
* Copyright (C) 2001 PPC 64 Team, IBM Corp
*/
+#include <vdso/processor.h>
+
#include <asm/reg.h>
#ifdef CONFIG_VSX
@@ -63,14 +65,6 @@ extern int _chrp_type;
#endif /* defined(__KERNEL__) && defined(CONFIG_PPC32) */
-/* Macros for adjusting thread priority (hardware multi-threading) */
-#define HMT_very_low() asm volatile("or 31,31,31 # very low priority")
-#define HMT_low() asm volatile("or 1,1,1 # low priority")
-#define HMT_medium_low() asm volatile("or 6,6,6 # medium low priority")
-#define HMT_medium() asm volatile("or 2,2,2 # medium priority")
-#define HMT_medium_high() asm volatile("or 5,5,5 # medium high priority")
-#define HMT_high() asm volatile("or 3,3,3 # high priority")
-
#ifdef __KERNEL__
#ifdef CONFIG_PPC64
@@ -81,11 +75,6 @@ extern int _chrp_type;
struct task_struct;
void start_thread(struct pt_regs *regs, unsigned long fdptr, unsigned long sp);
-void release_thread(struct task_struct *);
-
-typedef struct {
- unsigned long seg;
-} mm_segment_t;
#define TS_FPR(i) fp_state.fpr[i][TS_FPROFFSET]
#define TS_CKFPR(i) ckfp_state.fpr[i][TS_FPROFFSET]
@@ -148,22 +137,18 @@ struct thread_struct {
unsigned long ksp_vsid;
#endif
struct pt_regs *regs; /* Pointer to saved register state */
- mm_segment_t addr_limit; /* for get_fs() validation */
#ifdef CONFIG_BOOKE
/* BookE base exception scratch space; align on cacheline */
unsigned long normsave[8] ____cacheline_aligned;
#endif
#ifdef CONFIG_PPC32
void *pgdir; /* root of page-table tree */
- unsigned long ksp_limit; /* if ksp <= ksp_limit stack overflow */
#ifdef CONFIG_PPC_RTAS
unsigned long rtas_sp; /* stack pointer for when in RTAS */
#endif
-#endif
#if defined(CONFIG_PPC_BOOK3S_32) && defined(CONFIG_PPC_KUAP)
unsigned long kuap; /* opened segments for user access */
#endif
-#ifdef CONFIG_VMAP_STACK
unsigned long srr0;
unsigned long srr1;
unsigned long dar;
@@ -171,23 +156,29 @@ struct thread_struct {
#ifdef CONFIG_PPC_BOOK3S_32
unsigned long r0, r3, r4, r5, r6, r8, r9, r11;
unsigned long lr, ctr;
+ unsigned long sr0;
#endif
+#endif /* CONFIG_PPC32 */
+#if defined(CONFIG_BOOKE_OR_40x) && defined(CONFIG_PPC_KUAP)
+ unsigned long pid; /* value written in PID reg. at interrupt exit */
#endif
/* Debug Registers */
struct debug_reg debug;
+#ifdef CONFIG_PPC_FPU_REGS
struct thread_fp_state fp_state;
struct thread_fp_state *fp_save_area;
+#endif
int fpexc_mode; /* floating-point exception mode */
unsigned int align_ctl; /* alignment handling control */
#ifdef CONFIG_HAVE_HW_BREAKPOINT
- struct perf_event *ptrace_bps[HBP_NUM];
+ struct perf_event *ptrace_bps[HBP_NUM_MAX];
/*
* Helps identify source of single-step exception and subsequent
* hw-breakpoint enablement
*/
- struct perf_event *last_hit_ubp;
+ struct perf_event *last_hit_ubp[HBP_NUM_MAX];
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
- struct arch_hw_breakpoint hw_brk; /* info on the hardware breakpoint */
+ struct arch_hw_breakpoint hw_brk[HBP_NUM_MAX]; /* hardware breakpoint info */
unsigned long trap_nr; /* last trap # on this thread */
u8 load_slb; /* Ages out SLB preload cache entries */
u8 load_fp;
@@ -203,8 +194,10 @@ struct thread_struct {
int used_vsr; /* set if process has used VSX */
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
- unsigned long evr[32]; /* upper 32-bits of SPE regs */
- u64 acc; /* Accumulator */
+ struct_group(spe,
+ unsigned long evr[32]; /* upper 32-bits of SPE regs */
+ u64 acc; /* Accumulator */
+ );
unsigned long spefscr; /* SPE & eFP status */
unsigned long spefscr_last; /* SPEFSCR value on last prctl
call or trap return */
@@ -220,6 +213,7 @@ struct thread_struct {
unsigned long tm_tar;
unsigned long tm_ppr;
unsigned long tm_dscr;
+ unsigned long tm_amr;
/*
* Checkpointed FP and VSX 0-31 register set.
@@ -234,11 +228,6 @@ struct thread_struct {
struct thread_vr_state ckvr_state; /* Checkpointed VR state */
unsigned long ckvrsave; /* Checkpointed VRSAVE */
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
-#ifdef CONFIG_PPC_MEM_KEYS
- unsigned long amr;
- unsigned long iamr;
- unsigned long uamor;
-#endif
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
void* kvm_shadow_vcpu; /* KVM internal data */
#endif /* CONFIG_KVM_BOOK3S_32_HANDLER */
@@ -272,7 +261,10 @@ struct thread_struct {
unsigned mmcr0;
unsigned used_ebb;
- unsigned int used_vas;
+ unsigned long mmcr3;
+ unsigned long sier2;
+ unsigned long sier3;
+
#endif
};
@@ -289,28 +281,39 @@ struct thread_struct {
#define SPEFSCR_INIT
#endif
-#ifdef CONFIG_PPC32
+#ifdef CONFIG_PPC_BOOK3S_32
+#define SR0_INIT .sr0 = IS_ENABLED(CONFIG_PPC_KUEP) ? SR_NX : 0,
+#else
+#define SR0_INIT
+#endif
+
+#if defined(CONFIG_PPC_BOOK3S_32) && defined(CONFIG_PPC_KUAP)
+#define INIT_THREAD { \
+ .ksp = INIT_SP, \
+ .pgdir = swapper_pg_dir, \
+ .kuap = ~0UL, /* KUAP_NONE */ \
+ .fpexc_mode = MSR_FE0 | MSR_FE1, \
+ SPEFSCR_INIT \
+ SR0_INIT \
+}
+#elif defined(CONFIG_PPC32)
#define INIT_THREAD { \
.ksp = INIT_SP, \
- .ksp_limit = INIT_SP_LIMIT, \
- .addr_limit = KERNEL_DS, \
.pgdir = swapper_pg_dir, \
.fpexc_mode = MSR_FE0 | MSR_FE1, \
SPEFSCR_INIT \
+ SR0_INIT \
}
#else
#define INIT_THREAD { \
.ksp = INIT_SP, \
- .regs = (struct pt_regs *)INIT_SP - 1, /* XXX bogus, I think */ \
- .addr_limit = KERNEL_DS, \
.fpexc_mode = 0, \
- .fscr = FSCR_TAR | FSCR_EBB \
}
#endif
-#define task_pt_regs(tsk) ((struct pt_regs *)(tsk)->thread.regs)
+#define task_pt_regs(tsk) ((tsk)->thread.regs)
-unsigned long get_wchan(struct task_struct *p);
+unsigned long __get_wchan(struct task_struct *p);
#define KSTK_EIP(tsk) ((tsk)->thread.regs? (tsk)->thread.regs->nip: 0)
#define KSTK_ESP(tsk) ((tsk)->thread.regs? (tsk)->thread.regs->gpr[1]: 0)
@@ -350,27 +353,25 @@ static inline unsigned long __pack_fe01(unsigned int fpmode)
}
#ifdef CONFIG_PPC64
-#define cpu_relax() do { HMT_low(); HMT_medium(); barrier(); } while (0)
-
-#define spin_begin() HMT_low()
-#define spin_cpu_relax() barrier()
+#define spin_begin() \
+ asm volatile(ASM_FTR_IFCLR( \
+ "or 1,1,1", /* HMT_LOW */ \
+ "nop", /* v3.1 uses pause_short in cpu_relax instead */ \
+ %0) :: "i" (CPU_FTR_ARCH_31) : "memory")
-#define spin_end() HMT_medium()
+#define spin_cpu_relax() \
+ asm volatile(ASM_FTR_IFCLR( \
+ "nop", /* Before v3.1 use priority nops in spin_begin/end */ \
+ PPC_WAIT(2, 0), /* aka pause_short */ \
+ %0) :: "i" (CPU_FTR_ARCH_31) : "memory")
-#define spin_until_cond(cond) \
-do { \
- if (unlikely(!(cond))) { \
- spin_begin(); \
- do { \
- spin_cpu_relax(); \
- } while (!(cond)); \
- spin_end(); \
- } \
-} while (0)
+#define spin_end() \
+ asm volatile(ASM_FTR_IFCLR( \
+ "or 2,2,2", /* HMT_MEDIUM */ \
+ "nop", \
+ %0) :: "i" (CPU_FTR_ARCH_31) : "memory")
-#else
-#define cpu_relax() barrier()
#endif
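Callers keep the shape the removed spin_until_cond() helper had; a minimal busy-wait sketch using the new definitions (done is a hypothetical condition variable):

	if (!READ_ONCE(done)) {
		spin_begin();
		do
			spin_cpu_relax();
		while (!READ_ONCE(done));
		spin_end();
	}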
/* Check that a certain kernel stack pointer is valid in task_struct p */
@@ -402,28 +403,13 @@ static inline void prefetchw(const void *x)
#define spin_lock_prefetch(x) prefetchw(x)
-#define HAVE_ARCH_PICK_MMAP_LAYOUT
-
-#ifdef CONFIG_PPC64
-static inline unsigned long get_clean_sp(unsigned long sp, int is_32)
-{
- if (is_32)
- return sp & 0x0ffffffffUL;
- return sp;
-}
-#else
-static inline unsigned long get_clean_sp(unsigned long sp, int is_32)
-{
- return sp;
-}
-#endif
-
/* asm stubs */
extern unsigned long isa300_idle_stop_noloss(unsigned long psscr_val);
extern unsigned long isa300_idle_stop_mayloss(unsigned long psscr_val);
extern unsigned long isa206_idle_insn_mayloss(unsigned long type);
#ifdef CONFIG_PPC_970_NAP
extern void power4_idle_nap(void);
+void power4_idle_nap_return(void);
#endif
extern unsigned long cpuidle_disable;
@@ -432,16 +418,12 @@ enum idle_boot_override {IDLE_NO_OVERRIDE = 0, IDLE_POWERSAVE_OFF};
extern int powersave_nap; /* set if nap mode can be used in idle loop */
extern void power7_idle_type(unsigned long type);
-extern void power9_idle_type(unsigned long stop_psscr_val,
+extern void arch300_idle_type(unsigned long stop_psscr_val,
unsigned long stop_psscr_mask);
+void pnv_power9_force_smt4_catch(void);
+void pnv_power9_force_smt4_release(void);
-extern void flush_instruction_cache(void);
-extern void hard_reset_now(void);
-extern void poweroff_now(void);
extern int fix_alignment(struct pt_regs *);
-extern void cvt_fd(float *from, double *to);
-extern void cvt_df(double *from, float *to);
-extern void _nmask_and_or_msr(unsigned long nmask, unsigned long or_val);
#ifdef CONFIG_PPC64
/*
@@ -454,6 +436,16 @@ extern void _nmask_and_or_msr(unsigned long nmask, unsigned long or_val);
#define NET_IP_ALIGN 0
#endif
+int do_mathemu(struct pt_regs *regs);
+int do_spe_mathemu(struct pt_regs *regs);
+int speround_handler(struct pt_regs *regs);
+
+/* VMX copying */
+int enter_vmx_usercopy(void);
+int exit_vmx_usercopy(void);
+int enter_vmx_ops(void);
+void *exit_vmx_ops(void *dest);
+
#endif /* __KERNEL__ */
#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_PROCESSOR_H */
diff --git a/arch/powerpc/include/asm/prom.h b/arch/powerpc/include/asm/prom.h
index 94e3fd54f2c8..2e82820fbd64 100644
--- a/arch/powerpc/include/asm/prom.h
+++ b/arch/powerpc/include/asm/prom.h
@@ -12,15 +12,10 @@
* Updates for PPC64 by Peter Bergner & David Engebretsen, IBM Corp.
*/
#include <linux/types.h>
-#include <asm/irq.h>
-#include <linux/atomic.h>
+#include <asm/firmware.h>
-/* These includes should be removed once implicit includes are cleaned up. */
-#include <linux/of.h>
-#include <linux/of_fdt.h>
-#include <linux/of_address.h>
-#include <linux/of_irq.h>
-#include <linux/platform_device.h>
+struct device_node;
+struct property;
#define OF_DT_BEGIN_NODE 0x1 /* Start of node, full name */
#define OF_DT_END_NODE 0x2 /* End node */
@@ -117,6 +112,7 @@ extern int of_read_drc_info_cell(struct property **prop,
#define OV1_PPC_2_07 0x01 /* set if we support PowerPC 2.07 */
#define OV1_PPC_3_00 0x80 /* set if we support PowerPC 3.00 */
+#define OV1_PPC_3_1 0x40 /* set if we support PowerPC 3.1 */
/* Option vector 2: Open Firmware options supported */
#define OV2_REAL_MODE 0x20 /* set if we want OF in real mode */
@@ -146,8 +142,9 @@ extern int of_read_drc_info_cell(struct property **prop,
#define OV5_MSI 0x0201 /* PCIe/MSI support */
#define OV5_CMO 0x0480 /* Cooperative Memory Overcommitment */
#define OV5_XCMO 0x0440 /* Page Coalescing */
-#define OV5_TYPE1_AFFINITY 0x0580 /* Type 1 NUMA affinity */
+#define OV5_FORM1_AFFINITY 0x0580 /* FORM1 NUMA affinity */
#define OV5_PRRN 0x0540 /* Platform Resource Reassignment */
+#define OV5_FORM2_AFFINITY 0x0520 /* Form2 NUMA affinity */
#define OV5_HP_EVT 0x0604 /* Hot Plug Event support */
#define OV5_RESIZE_HPT 0x0601 /* Hash Page Table resizing */
#define OV5_PFO_HW_RNG 0x1180 /* PFO Random Number Generator */
diff --git a/arch/powerpc/include/asm/ps3.h b/arch/powerpc/include/asm/ps3.h
index cb89e4bf55ce..8a0d8fb35328 100644
--- a/arch/powerpc/include/asm/ps3.h
+++ b/arch/powerpc/include/asm/ps3.h
@@ -71,6 +71,7 @@ struct ps3_dma_region_ops;
* @bus_addr: The 'translated' bus address of the region.
* @len: The length in bytes of the region.
* @offset: The offset from the start of memory of the region.
+ * @dma_mask: Device dma_mask.
* @ioid: The IOID of the device who owns this region
* @chunk_list: Opaque variable used by the ioc page manager.
* @region_ops: struct ps3_dma_region_ops - dma region operations
@@ -85,6 +86,7 @@ struct ps3_dma_region {
enum ps3_dma_region_type region_type;
unsigned long len;
unsigned long offset;
+ u64 dma_mask;
/* driver variables (set by ps3_dma_region_create) */
unsigned long bus_addr;
@@ -232,7 +234,7 @@ enum lv1_result {
static inline const char* ps3_result(int result)
{
-#if defined(DEBUG) || defined(PS3_VERBOSE_RESULT)
+#if defined(DEBUG) || defined(PS3_VERBOSE_RESULT) || defined(CONFIG_PS3_VERBOSE_RESULT)
switch (result) {
case LV1_SUCCESS:
return "LV1_SUCCESS (0)";
@@ -378,8 +380,8 @@ struct ps3_system_bus_driver {
enum ps3_match_sub_id match_sub_id;
struct device_driver core;
int (*probe)(struct ps3_system_bus_device *);
- int (*remove)(struct ps3_system_bus_device *);
- int (*shutdown)(struct ps3_system_bus_device *);
+ void (*remove)(struct ps3_system_bus_device *);
+ void (*shutdown)(struct ps3_system_bus_device *);
/* int (*suspend)(struct ps3_system_bus_device *, pm_message_t); */
/* int (*resume)(struct ps3_system_bus_device *); */
};
diff --git a/arch/powerpc/include/asm/ps3av.h b/arch/powerpc/include/asm/ps3av.h
index 82db78fc169d..c8b0f2ffcd35 100644
--- a/arch/powerpc/include/asm/ps3av.h
+++ b/arch/powerpc/include/asm/ps3av.h
@@ -726,6 +726,4 @@ extern int ps3av_video_mode2res(u32, u32 *, u32 *);
extern int ps3av_video_mute(int);
extern int ps3av_audio_mute(int);
extern int ps3av_audio_mute_analog(int);
-extern int ps3av_dev_open(void);
-extern int ps3av_dev_close(void);
#endif /* _ASM_POWERPC_PS3AV_H_ */
diff --git a/arch/powerpc/include/asm/pte-walk.h b/arch/powerpc/include/asm/pte-walk.h
index 33fa5dd8ee6a..714a35f0d425 100644
--- a/arch/powerpc/include/asm/pte-walk.h
+++ b/arch/powerpc/include/asm/pte-walk.h
@@ -31,6 +31,35 @@ static inline pte_t *find_init_mm_pte(unsigned long ea, unsigned *hshift)
pgd_t *pgdir = init_mm.pgd;
return __find_linux_pte(pgdir, ea, NULL, hshift);
}
+
+/*
+ * Convert a kernel vmap virtual address (vmalloc or ioremap space) to a
+ * physical address, without taking locks. This can be used in real-mode.
+ */
+static inline phys_addr_t ppc_find_vmap_phys(unsigned long addr)
+{
+ pte_t *ptep;
+ phys_addr_t pa;
+ int hugepage_shift;
+
+ /*
+ * init_mm does not free page tables, and does not do THP. It may
+ * have huge pages from huge vmalloc / ioremap etc.
+ */
+ ptep = find_init_mm_pte(addr, &hugepage_shift);
+ if (WARN_ON(!ptep))
+ return 0;
+
+ pa = PFN_PHYS(pte_pfn(*ptep));
+
+ if (!hugepage_shift)
+ hugepage_shift = PAGE_SHIFT;
+
+ pa |= addr & ((1ul << hugepage_shift) - 1);
+
+ return pa;
+}
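A short caller-side sketch (buf is a hypothetical vmalloc() or ioremap() mapping; the lookup is lockless, so it is also usable from real mode):

	void *buf = vmalloc(PAGE_SIZE);
	phys_addr_t pa = ppc_find_vmap_phys((unsigned long)buf);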
+
/*
* This is what we should always use. Any other lockless page table lookup needs
* careful audit against THP split.
diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h
index ee3ada66deb5..2efec6d87049 100644
--- a/arch/powerpc/include/asm/ptrace.h
+++ b/arch/powerpc/include/asm/ptrace.h
@@ -19,8 +19,10 @@
#ifndef _ASM_POWERPC_PTRACE_H
#define _ASM_POWERPC_PTRACE_H
+#include <linux/err.h>
#include <uapi/asm/ptrace.h>
#include <asm/asm-const.h>
+#include <asm/reg.h>
#ifndef __ASSEMBLY__
struct pt_regs
@@ -42,26 +44,68 @@ struct pt_regs
unsigned long mq;
#endif
unsigned long trap;
- unsigned long dar;
- unsigned long dsisr;
+ union {
+ unsigned long dar;
+ unsigned long dear;
+ };
+ union {
+ unsigned long dsisr;
+ unsigned long esr;
+ };
unsigned long result;
};
};
-
+#if defined(CONFIG_PPC64) || defined(CONFIG_PPC_KUAP)
union {
struct {
#ifdef CONFIG_PPC64
unsigned long ppr;
+ unsigned long exit_result;
#endif
+ union {
#ifdef CONFIG_PPC_KUAP
- unsigned long kuap;
+ unsigned long kuap;
+#endif
+#ifdef CONFIG_PPC_PKEY
+ unsigned long amr;
+#endif
+ };
+#ifdef CONFIG_PPC_PKEY
+ unsigned long iamr;
#endif
};
- unsigned long __pad[2]; /* Maintain 16 byte interrupt stack alignment */
+ unsigned long __pad[4]; /* Maintain 16 byte interrupt stack alignment */
};
+#endif
+#if defined(CONFIG_PPC32) && defined(CONFIG_BOOKE)
+ struct { /* Must be a multiple of 16 bytes */
+ unsigned long mas0;
+ unsigned long mas1;
+ unsigned long mas2;
+ unsigned long mas3;
+ unsigned long mas6;
+ unsigned long mas7;
+ unsigned long srr0;
+ unsigned long srr1;
+ unsigned long csrr0;
+ unsigned long csrr1;
+ unsigned long dsrr0;
+ unsigned long dsrr1;
+ };
+#endif
};
#endif
+
+#define STACK_FRAME_WITH_PT_REGS (STACK_FRAME_OVERHEAD + sizeof(struct pt_regs))
+
+// Always displays as "REGS" in memory dumps
+#ifdef CONFIG_CPU_BIG_ENDIAN
+#define STACK_FRAME_REGS_MARKER ASM_CONST(0x52454753)
+#else
+#define STACK_FRAME_REGS_MARKER ASM_CONST(0x53474552)
+#endif
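For reference: 0x52 0x45 0x47 0x53 are the ASCII codes of 'R' 'E' 'G' 'S', so the little-endian constant stores the same bytes in reversed order and the marker still reads "REGS" in a raw memory dump.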
+
#ifdef __powerpc64__
/*
@@ -78,12 +122,11 @@ struct pt_regs
#define STACK_FRAME_OVERHEAD 112 /* size of minimum stack frame */
#define STACK_FRAME_LR_SAVE 2 /* Location of LR in stack frame */
-#define STACK_FRAME_REGS_MARKER ASM_CONST(0x7265677368657265)
#define STACK_INT_FRAME_SIZE (sizeof(struct pt_regs) + \
STACK_FRAME_OVERHEAD + KERNEL_REDZONE_SIZE)
#define STACK_FRAME_MARKER 12
-#ifdef PPC64_ELF_ABI_v2
+#ifdef CONFIG_PPC64_ELF_ABI_V2
#define STACK_FRAME_MIN_SIZE 32
#else
#define STACK_FRAME_MIN_SIZE STACK_FRAME_OVERHEAD
@@ -99,7 +142,6 @@ struct pt_regs
#define KERNEL_REDZONE_SIZE 0
#define STACK_FRAME_OVERHEAD 16 /* size of minimum stack frame */
#define STACK_FRAME_LR_SAVE 1 /* Location of LR in stack frame */
-#define STACK_FRAME_REGS_MARKER ASM_CONST(0x72656773)
#define STACK_INT_FRAME_SIZE (sizeof(struct pt_regs) + STACK_FRAME_OVERHEAD)
#define STACK_FRAME_MARKER 2
#define STACK_FRAME_MIN_SIZE STACK_FRAME_OVERHEAD
@@ -110,6 +152,41 @@ struct pt_regs
#endif /* __powerpc64__ */
#ifndef __ASSEMBLY__
+#include <asm/paca.h>
+
+#ifdef CONFIG_SMP
+extern unsigned long profile_pc(struct pt_regs *regs);
+#else
+#define profile_pc(regs) instruction_pointer(regs)
+#endif
+
+long do_syscall_trace_enter(struct pt_regs *regs);
+void do_syscall_trace_leave(struct pt_regs *regs);
+
+static inline void set_return_regs_changed(void)
+{
+#ifdef CONFIG_PPC_BOOK3S_64
+ local_paca->hsrr_valid = 0;
+ local_paca->srr_valid = 0;
+#endif
+}
+
+static inline void regs_set_return_ip(struct pt_regs *regs, unsigned long ip)
+{
+ regs->nip = ip;
+ set_return_regs_changed();
+}
+
+static inline void regs_set_return_msr(struct pt_regs *regs, unsigned long msr)
+{
+ regs->msr = msr;
+ set_return_regs_changed();
+}
+
+static inline void regs_add_return_ip(struct pt_regs *regs, long offset)
+{
+ regs_set_return_ip(regs, regs->nip + offset);
+}
static inline unsigned long instruction_pointer(struct pt_regs *regs)
{
@@ -119,7 +196,7 @@ static inline unsigned long instruction_pointer(struct pt_regs *regs)
static inline void instruction_pointer_set(struct pt_regs *regs,
unsigned long val)
{
- regs->nip = val;
+ regs_set_return_ip(regs, val);
}
static inline unsigned long user_stack_pointer(struct pt_regs *regs)
@@ -132,20 +209,80 @@ static inline unsigned long frame_pointer(struct pt_regs *regs)
return 0;
}
-#ifdef CONFIG_SMP
-extern unsigned long profile_pc(struct pt_regs *regs);
+#define user_mode(regs) (((regs)->msr & MSR_PR) != 0)
+
+#define force_successful_syscall_return() \
+ do { \
+ set_thread_flag(TIF_NOERROR); \
+ } while(0)
+
+#define current_pt_regs() \
+ ((struct pt_regs *)((unsigned long)task_stack_page(current) + THREAD_SIZE) - 1)
+
+/*
+ * The 4 low bits (0xf) are available as flags to overload the trap word,
+ * because interrupt vectors have minimum alignment of 0x10. TRAP_FLAGS_MASK
+ * must cover the bits used as flags, including bit 0 which is used as the
+ * "norestart" bit.
+ */
+#ifdef __powerpc64__
+#define TRAP_FLAGS_MASK 0x1
#else
-#define profile_pc(regs) instruction_pointer(regs)
-#endif
+/*
+ * On 4xx we use bit 1 in the trap word to indicate whether the exception
+ * is a critical exception (1 means it is).
+ */
+#define TRAP_FLAGS_MASK 0xf
+#define IS_CRITICAL_EXC(regs) (((regs)->trap & 2) != 0)
+#define IS_MCHECK_EXC(regs) (((regs)->trap & 4) != 0)
+#define IS_DEBUG_EXC(regs) (((regs)->trap & 8) != 0)
+#endif /* __powerpc64__ */
+#define TRAP(regs) ((regs)->trap & ~TRAP_FLAGS_MASK)
+
+static __always_inline void set_trap(struct pt_regs *regs, unsigned long val)
+{
+ regs->trap = (regs->trap & TRAP_FLAGS_MASK) | (val & ~TRAP_FLAGS_MASK);
+}
+
+static inline bool trap_is_scv(struct pt_regs *regs)
+{
+ return (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && TRAP(regs) == 0x3000);
+}
+
+static inline bool trap_is_unsupported_scv(struct pt_regs *regs)
+{
+ return IS_ENABLED(CONFIG_PPC_BOOK3S_64) && TRAP(regs) == 0x7ff0;
+}
+
+static inline bool trap_is_syscall(struct pt_regs *regs)
+{
+ return (trap_is_scv(regs) || TRAP(regs) == 0xc00);
+}
+
+static inline bool trap_norestart(struct pt_regs *regs)
+{
+ return regs->trap & 0x1;
+}
+
+static __always_inline void set_trap_norestart(struct pt_regs *regs)
+{
+ regs->trap |= 0x1;
+}
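A worked example of the flag overloading on 64-bit (TRAP_FLAGS_MASK == 0x1): a system call enters with regs->trap == 0xc00; after marking it non-restartable the word becomes 0xc01, yet TRAP() still reads 0xc00, so trap_is_syscall() keeps matching:

	set_trap_norestart(regs);	/* regs->trap: 0xc00 -> 0xc01 */
	TRAP(regs);			/* still 0xc00 */
	trap_norestart(regs);		/* now true */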
#define kernel_stack_pointer(regs) ((regs)->gpr[1])
static inline int is_syscall_success(struct pt_regs *regs)
{
- return !(regs->ccr & 0x10000000);
+ if (trap_is_scv(regs))
+ return !IS_ERR_VALUE((unsigned long)regs->gpr[3]);
+ else
+ return !(regs->ccr & 0x10000000);
}
static inline long regs_return_value(struct pt_regs *regs)
{
+ if (trap_is_scv(regs))
+ return regs->gpr[3];
+
if (is_syscall_success(regs))
return regs->gpr[3];
else
@@ -157,57 +294,30 @@ static inline void regs_set_return_value(struct pt_regs *regs, unsigned long rc)
regs->gpr[3] = rc;
}
-#ifdef __powerpc64__
-#define user_mode(regs) ((((regs)->msr) >> MSR_PR_LG) & 0x1)
-#else
-#define user_mode(regs) (((regs)->msr & MSR_PR) != 0)
-#endif
+static inline bool cpu_has_msr_ri(void)
+{
+ return !IS_ENABLED(CONFIG_BOOKE_OR_40x);
+}
-#define force_successful_syscall_return() \
- do { \
- set_thread_flag(TIF_NOERROR); \
- } while(0)
+static inline bool regs_is_unrecoverable(struct pt_regs *regs)
+{
+ return unlikely(cpu_has_msr_ri() && !(regs->msr & MSR_RI));
+}
-struct task_struct;
-extern int ptrace_get_reg(struct task_struct *task, int regno,
- unsigned long *data);
-extern int ptrace_put_reg(struct task_struct *task, int regno,
- unsigned long data);
+static inline void regs_set_recoverable(struct pt_regs *regs)
+{
+ if (cpu_has_msr_ri())
+ regs_set_return_msr(regs, regs->msr | MSR_RI);
+}
-#define current_pt_regs() \
- ((struct pt_regs *)((unsigned long)task_stack_page(current) + THREAD_SIZE) - 1)
-/*
- * We use the least-significant bit of the trap field to indicate
- * whether we have saved the full set of registers, or only a
- * partial set. A 1 there means the partial set.
- * On 4xx we use the next bit to indicate whether the exception
- * is a critical exception (1 means it is).
- */
-#define FULL_REGS(regs) (((regs)->trap & 1) == 0)
-#ifndef __powerpc64__
-#define IS_CRITICAL_EXC(regs) (((regs)->trap & 2) != 0)
-#define IS_MCHECK_EXC(regs) (((regs)->trap & 4) != 0)
-#define IS_DEBUG_EXC(regs) (((regs)->trap & 8) != 0)
-#endif /* ! __powerpc64__ */
-#define TRAP(regs) ((regs)->trap & ~0xF)
-#ifdef __powerpc64__
-#define NV_REG_POISON 0xdeadbeefdeadbeefUL
-#define CHECK_FULL_REGS(regs) BUG_ON(regs->trap & 1)
-#else
-#define NV_REG_POISON 0xdeadbeef
-#define CHECK_FULL_REGS(regs) \
-do { \
- if ((regs)->trap & 1) \
- printk(KERN_CRIT "%s: partial register set\n", __func__); \
-} while (0)
-#endif /* __powerpc64__ */
+static inline void regs_set_unrecoverable(struct pt_regs *regs)
+{
+ if (cpu_has_msr_ri())
+ regs_set_return_msr(regs, regs->msr & ~MSR_RI);
+}
#define arch_has_single_step() (1)
-#ifndef CONFIG_BOOK3S_601
#define arch_has_block_step() (true)
-#else
-#define arch_has_block_step() (false)
-#endif
#define ARCH_HAS_USER_SINGLE_STEP_REPORT
/*
@@ -276,6 +386,8 @@ static inline unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs,
#endif /* __ASSEMBLY__ */
#ifndef __powerpc64__
+/* We need PT_SOFTE defined at all times to avoid #ifdefs */

+#define PT_SOFTE PT_MQ
#else /* __powerpc64__ */
#define PT_FPSCR32 (PT_FPR0 + 2*32 + 1) /* each FP reg occupies 2 32-bit userspace slots */
#define PT_VR0_32 164 /* each Vector reg occupies 4 slots in 32-bit */
diff --git a/arch/powerpc/include/asm/qspinlock.h b/arch/powerpc/include/asm/qspinlock.h
new file mode 100644
index 000000000000..b676c4fb90fd
--- /dev/null
+++ b/arch/powerpc/include/asm/qspinlock.h
@@ -0,0 +1,84 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_QSPINLOCK_H
+#define _ASM_POWERPC_QSPINLOCK_H
+
+#include <asm-generic/qspinlock_types.h>
+#include <asm/paravirt.h>
+
+#define _Q_PENDING_LOOPS (1 << 9) /* not tuned */
+
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+extern void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
+extern void __pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
+extern void __pv_queued_spin_unlock(struct qspinlock *lock);
+
+static __always_inline void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
+{
+ if (!is_shared_processor())
+ native_queued_spin_lock_slowpath(lock, val);
+ else
+ __pv_queued_spin_lock_slowpath(lock, val);
+}
+
+#define queued_spin_unlock queued_spin_unlock
+static inline void queued_spin_unlock(struct qspinlock *lock)
+{
+ if (!is_shared_processor())
+ smp_store_release(&lock->locked, 0);
+ else
+ __pv_queued_spin_unlock(lock);
+}
+
+#else
+extern void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
+#endif
+
+static __always_inline void queued_spin_lock(struct qspinlock *lock)
+{
+ u32 val = 0;
+
+ if (likely(arch_atomic_try_cmpxchg_lock(&lock->val, &val, _Q_LOCKED_VAL)))
+ return;
+
+ queued_spin_lock_slowpath(lock, val);
+}
+#define queued_spin_lock queued_spin_lock
+
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+#define SPIN_THRESHOLD (1<<15) /* not tuned */
+
+static __always_inline void pv_wait(u8 *ptr, u8 val)
+{
+ if (*ptr != val)
+ return;
+ yield_to_any();
+ /*
+ * We could pass in a CPU here if waiting in the queue and yield to
+ * the previous CPU in the queue.
+ */
+}
+
+static __always_inline void pv_kick(int cpu)
+{
+ prod_cpu(cpu);
+}
+
+extern void __pv_init_lock_hash(void);
+
+static inline void pv_spinlocks_init(void)
+{
+ __pv_init_lock_hash();
+}
+
+#endif
+
+/*
+ * Queued spinlocks rely heavily on smp_cond_load_relaxed() to busy-wait,
+ * which was found to have performance problems if implemented with
+ * the preferred spin_begin()/spin_end() SMT priority pattern. Use the
+ * generic version instead.
+ */
+
+#include <asm-generic/qspinlock.h>
+
+#endif /* _ASM_POWERPC_QSPINLOCK_H */
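For context, a sketch of how these helpers get wired up: the asm-generic header included above maps the arch_spinlock API onto them, roughly:

	#define arch_spin_lock(l)	queued_spin_lock(l)
	#define arch_spin_trylock(l)	queued_spin_trylock(l)
	#define arch_spin_unlock(l)	queued_spin_unlock(l)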
diff --git a/arch/powerpc/include/asm/qspinlock_paravirt.h b/arch/powerpc/include/asm/qspinlock_paravirt.h
new file mode 100644
index 000000000000..6b60e7736a47
--- /dev/null
+++ b/arch/powerpc/include/asm/qspinlock_paravirt.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _ASM_POWERPC_QSPINLOCK_PARAVIRT_H
+#define _ASM_POWERPC_QSPINLOCK_PARAVIRT_H
+
+EXPORT_SYMBOL(__pv_queued_spin_unlock);
+
+#endif /* _ASM_POWERPC_QSPINLOCK_PARAVIRT_H */
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index 1aa46dff0957..1e8b2e04e626 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -12,14 +12,15 @@
#ifdef __KERNEL__
#include <linux/stringify.h>
+#include <linux/const.h>
#include <asm/cputable.h>
#include <asm/asm-const.h>
#include <asm/feature-fixups.h>
/* Pickup Book E specific registers. */
-#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
+#ifdef CONFIG_BOOKE_OR_40x
#include <asm/reg_booke.h>
-#endif /* CONFIG_BOOKE || CONFIG_40x */
+#endif
#ifdef CONFIG_FSL_EMB_PERFMON
#include <asm/reg_fsl_emb.h>
@@ -28,7 +29,6 @@
#include <asm/reg_8xx.h>
#define MSR_SF_LG 63 /* Enable 64 bit mode */
-#define MSR_ISF_LG 61 /* Interrupt 64b mode valid on 630 */
#define MSR_HV_LG 60 /* Hypervisor state */
#define MSR_TS_T_LG 34 /* Trans Mem state: Transactional */
#define MSR_TS_S_LG 33 /* Trans Mem state: Suspended */
@@ -68,13 +68,11 @@
#ifdef CONFIG_PPC64
#define MSR_SF __MASK(MSR_SF_LG) /* Enable 64 bit mode */
-#define MSR_ISF __MASK(MSR_ISF_LG) /* Interrupt 64b mode valid on 630 */
#define MSR_HV __MASK(MSR_HV_LG) /* Hypervisor state */
#define MSR_S __MASK(MSR_S_LG) /* Secure state */
#else
/* so tests for these bits fail on 32-bit */
#define MSR_SF 0
-#define MSR_ISF 0
#define MSR_HV 0
#define MSR_S 0
#endif
@@ -126,14 +124,14 @@
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
#define MSR_TM_ACTIVE(x) (((x) & MSR_TS_MASK) != 0) /* Transaction active? */
#else
-#define MSR_TM_ACTIVE(x) 0
+#define MSR_TM_ACTIVE(x) ((void)(x), 0)
#endif
#if defined(CONFIG_PPC_BOOK3S_64)
#define MSR_64BIT MSR_SF
/* Server variant */
-#define __MSR (MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_ISF |MSR_HV)
+#define __MSR (MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_HV)
#ifdef __BIG_ENDIAN__
#define MSR_ __MSR
#define MSR_IDLE (MSR_ME | MSR_SF | MSR_HV)
@@ -283,14 +281,16 @@
#define CTRL_CT1 0x40000000 /* thread 1 */
#define CTRL_TE 0x00c00000 /* thread enable */
#define CTRL_RUNLATCH 0x1
-#define SPRN_DAWR 0xB4
+#define SPRN_DAWR0 0xB4
+#define SPRN_DAWR1 0xB5
#define SPRN_RPR 0xBA /* Relative Priority Register */
#define SPRN_CIABR 0xBB
#define CIABR_PRIV 0x3
#define CIABR_PRIV_USER 1
#define CIABR_PRIV_SUPER 2
#define CIABR_PRIV_HYPER 3
-#define SPRN_DAWRX 0xBC
+#define SPRN_DAWRX0 0xBC
+#define SPRN_DAWRX1 0xBD
#define DAWRX_USER __MASK(0)
#define DAWRX_KERNEL __MASK(1)
#define DAWRX_HYP __MASK(2)
@@ -393,10 +393,12 @@
#define SPRN_PMMAR 0x356 /* Power Management Memory Activity Register */
#define SPRN_PSSCR 0x357 /* Processor Stop Status and Control Register (ISA 3.0) */
#define SPRN_PSSCR_PR 0x337 /* PSSCR ISA 3.0, privileged mode access */
+#define SPRN_TRIG2 0x372
#define SPRN_PMCR 0x374 /* Power Management Control Register */
#define SPRN_RWMR 0x375 /* Region-Weighting Mode Register */
/* HFSCR and FSCR bit numbers are the same */
+#define FSCR_PREFIX_LG 13 /* Enable Prefix Instructions */
#define FSCR_SCV_LG 12 /* Enable System Call Vectored */
#define FSCR_MSGP_LG 10 /* Enable MSGP */
#define FSCR_TAR_LG 8 /* Enable Target Address Register */
@@ -408,10 +410,12 @@
#define FSCR_VECVSX_LG 1 /* Enable VMX/VSX */
#define FSCR_FP_LG 0 /* Enable Floating Point */
#define SPRN_FSCR 0x099 /* Facility Status & Control Register */
+#define FSCR_PREFIX __MASK(FSCR_PREFIX_LG)
#define FSCR_SCV __MASK(FSCR_SCV_LG)
#define FSCR_TAR __MASK(FSCR_TAR_LG)
#define FSCR_EBB __MASK(FSCR_EBB_LG)
#define FSCR_DSCR __MASK(FSCR_DSCR_LG)
+#define FSCR_INTR_CAUSE (ASM_CONST(0xFF) << 56) /* interrupt cause */
#define SPRN_HFSCR 0xbe /* HV=1 Facility Status & Control Register */
#define HFSCR_MSGP __MASK(FSCR_MSGP_LG)
#define HFSCR_TAR __MASK(FSCR_TAR_LG)
@@ -422,7 +426,7 @@
#define HFSCR_DSCR __MASK(FSCR_DSCR_LG)
#define HFSCR_VECVSX __MASK(FSCR_VECVSX_LG)
#define HFSCR_FP __MASK(FSCR_FP_LG)
-#define HFSCR_INTR_CAUSE (ASM_CONST(0xFF) << 56) /* interrupt cause */
+#define HFSCR_INTR_CAUSE FSCR_INTR_CAUSE
#define SPRN_TAR 0x32f /* Target Address Register */
#define SPRN_LPCR 0x13E /* LPAR Control Register */
#define LPCR_VPM0 ASM_CONST(0x8000000000000000)
@@ -438,6 +442,7 @@
#define LPCR_VRMA_LP1 ASM_CONST(0x0000800000000000)
#define LPCR_RMLS 0x1C000000 /* Implementation dependent RMO limit sel */
#define LPCR_RMLS_SH 26
+#define LPCR_HAIL ASM_CONST(0x0000000004000000) /* HV AIL (ISAv3.1) */
#define LPCR_ILE ASM_CONST(0x0000000002000000) /* !HV irqs set MSR:LE */
#define LPCR_AIL ASM_CONST(0x0000000001800000) /* Alternate interrupt location */
#define LPCR_AIL_0 ASM_CONST(0x0000000000000000) /* MMU off exception offset 0x0 */
@@ -468,7 +473,6 @@
#ifndef SPRN_LPID
#define SPRN_LPID 0x13F /* Logical Partition Identifier */
#endif
-#define LPID_RSVD 0x3ff /* Reserved LPID for partn switching */
#define SPRN_HMER 0x150 /* Hypervisor maintenance exception reg */
#define HMER_DEBUG_TRIG (1ul << (63 - 17)) /* Debug trigger */
#define SPRN_HMEER 0x151 /* Hyp maintenance exception enable reg */
@@ -476,16 +480,18 @@
#define PCR_VEC_DIS (__MASK(63-0)) /* Vec. disable (bit NA since POWER8) */
#define PCR_VSX_DIS (__MASK(63-1)) /* VSX disable (bit NA since POWER8) */
#define PCR_TM_DIS (__MASK(63-2)) /* Trans. memory disable (POWER8) */
-#define PCR_HIGH_BITS (PCR_VEC_DIS | PCR_VSX_DIS | PCR_TM_DIS)
+#define PCR_MMA_DIS (__MASK(63-3)) /* Matrix-Multiply Accelerator */
+#define PCR_HIGH_BITS (PCR_MMA_DIS | PCR_VEC_DIS | PCR_VSX_DIS | PCR_TM_DIS)
/*
* These bits are used in the function kvmppc_set_arch_compat() to specify and
* determine both the compatibility level which we want to emulate and the
* compatibility level which the host is capable of emulating.
*/
+#define PCR_ARCH_300 0x10 /* Architecture 3.00 */
#define PCR_ARCH_207 0x8 /* Architecture 2.07 */
#define PCR_ARCH_206 0x4 /* Architecture 2.06 */
#define PCR_ARCH_205 0x2 /* Architecture 2.05 */
-#define PCR_LOW_BITS (PCR_ARCH_207 | PCR_ARCH_206 | PCR_ARCH_205)
+#define PCR_LOW_BITS (PCR_ARCH_207 | PCR_ARCH_206 | PCR_ARCH_205 | PCR_ARCH_300)
#define PCR_MASK ~(PCR_HIGH_BITS | PCR_LOW_BITS) /* PCR Reserved Bits */
#define SPRN_HEIR 0x153 /* Hypervisor Emulated Instruction Register */
#define SPRN_TLBINDEXR 0x154 /* P7 TLB control register */
@@ -512,6 +518,8 @@
#define SPRN_TSCR 0x399 /* Thread Switch Control Register */
#define SPRN_DEC 0x016 /* Decrement Register */
+#define SPRN_PIT 0x3DB /* Programmable Interval Timer (40x/BOOKE) */
+
#define SPRN_DER 0x095 /* Debug Enable Register */
#define DER_RSTE 0x40000000 /* Reset Interrupt */
#define DER_CHSTPE 0x20000000 /* Check Stop */
@@ -759,7 +767,7 @@
#endif
#define SRR1_ISI_NOPT 0x40000000 /* ISI: Not found in hash */
-#define SRR1_ISI_N_OR_G 0x10000000 /* ISI: Access is no-exec or G */
+#define SRR1_ISI_N_G_OR_CIP 0x10000000 /* ISI: Access is no-exec or G or CI for a prefixed instruction */
#define SRR1_ISI_PROT 0x08000000 /* ISI: Other protection fault */
#define SRR1_WAKEMASK 0x00380000 /* reason for wakeup */
#define SRR1_WAKEMASK_P8 0x003c0000 /* reason for wakeup on POWER8 and 9 */
@@ -786,6 +794,8 @@
#define SRR1_PROGADDR 0x00010000 /* SRR0 contains subsequent addr */
#define SRR1_MCE_MCP 0x00080000 /* Machine check signal caused interrupt */
+#define SRR1_BOUNDARY 0x10000000 /* Prefixed instruction crosses 64-byte boundary */
+#define SRR1_PREFIXED 0x20000000 /* Exception caused by prefixed instruction */
#define SPRN_HSRR0 0x13A /* Save/Restore Register 0 */
#define SPRN_HSRR1 0x13B /* Save/Restore Register 1 */
@@ -806,7 +816,7 @@
#define THRM1_TIN (1 << 31)
#define THRM1_TIV (1 << 30)
#define THRM1_THRES(x) ((x&0x7f)<<23)
-#define THRM3_SITV(x) ((x&0x3fff)<<1)
+#define THRM3_SITV(x) ((x & 0x1fff) << 1)
#define THRM1_TID (1<<2)
#define THRM1_TIE (1<<1)
#define THRM1_V (1<<0)
@@ -851,6 +861,7 @@
#define MMCR0_BHRBA 0x00200000UL /* BHRB Access allowed in userspace */
#define MMCR0_EBE 0x00100000UL /* Event based branch enable */
#define MMCR0_PMCC 0x000c0000UL /* PMC control */
+#define MMCR0_PMCCEXT ASM_CONST(0x00000200) /* PMCCEXT control */
#define MMCR0_PMCC_U6 0x00080000UL /* PMC1-6 are R/W by user (PR) */
#define MMCR0_PMC1CE 0x00008000UL /* PMC1 count enable*/
#define MMCR0_PMCjCE ASM_CONST(0x00004000) /* PMCj count enable*/
@@ -867,7 +878,9 @@
#define MMCR0_FCHV 0x00000001UL /* freeze conditions in hypervisor mode */
#define SPRN_MMCR1 798
#define SPRN_MMCR2 785
+#define SPRN_MMCR3 754
#define SPRN_UMMCR2 769
+#define SPRN_UMMCR3 738
#define SPRN_MMCRA 0x312
#define MMCRA_SDSYNC 0x80000000UL /* SDAR synced with SIAR */
#define MMCRA_SDAR_DCACHE_MISS 0x40000000UL
@@ -877,6 +890,7 @@
#define MMCRA_SLOT 0x07000000UL /* SLOT bits (37-39) */
#define MMCRA_SLOT_SHIFT 24
#define MMCRA_SAMPLE_ENABLE 0x00000001UL /* enable sampling */
+#define MMCRA_BHRB_DISABLE _UL(0x2000000000) // BHRB disable bit for ISA v3.1
#define POWER6_MMCRA_SDSYNC 0x0000080000000000ULL /* SDAR/SIAR synced */
#define POWER6_MMCRA_SIHV 0x0000040000000000ULL
#define POWER6_MMCRA_SIPR 0x0000020000000000ULL
@@ -909,6 +923,10 @@
#define SIER_SIHV 0x1000000 /* Sampled MSR_HV */
#define SIER_SIAR_VALID 0x0400000 /* SIAR contents valid */
#define SIER_SDAR_VALID 0x0200000 /* SDAR contents valid */
+#define SPRN_SIER2 752
+#define SPRN_SIER3 753
+#define SPRN_USIER2 736
+#define SPRN_USIER3 737
#define SPRN_SIAR 796
#define SPRN_SDAR 797
#define SPRN_TACR 888
@@ -1183,7 +1201,7 @@
#ifdef CONFIG_PPC_BOOK3S_32
#define SPRN_SPRG_SCRATCH0 SPRN_SPRG0
#define SPRN_SPRG_SCRATCH1 SPRN_SPRG1
-#define SPRN_SPRG_PGDIR SPRN_SPRG2
+#define SPRN_SPRG_SCRATCH2 SPRN_SPRG2
#define SPRN_SPRG_603_LRU SPRN_SPRG4
#endif
@@ -1212,14 +1230,9 @@
#define SPRN_SPRG_WSCRATCH_MC SPRN_SPRG1
#define SPRN_SPRG_RSCRATCH4 SPRN_SPRG7R
#define SPRN_SPRG_WSCRATCH4 SPRN_SPRG7W
-#ifdef CONFIG_E200
-#define SPRN_SPRG_RSCRATCH_DBG SPRN_SPRG6R
-#define SPRN_SPRG_WSCRATCH_DBG SPRN_SPRG6W
-#else
#define SPRN_SPRG_RSCRATCH_DBG SPRN_SPRG9
#define SPRN_SPRG_WSCRATCH_DBG SPRN_SPRG9
#endif
-#endif
#ifdef CONFIG_PPC_8xx
#define SPRN_SPRG_SCRATCH0 SPRN_SPRG0
@@ -1335,6 +1348,7 @@
#define PVR_POWER8NVL 0x004C
#define PVR_POWER8 0x004D
#define PVR_POWER9 0x004E
+#define PVR_POWER10 0x0080
#define PVR_BE 0x0070
#define PVR_PA6T 0x0090
@@ -1345,9 +1359,22 @@
#define PVR_ARCH_206p 0x0f100003
#define PVR_ARCH_207 0x0f000004
#define PVR_ARCH_300 0x0f000005
+#define PVR_ARCH_31 0x0f000006
/* Macros for setting and retrieving special purpose registers */
#ifndef __ASSEMBLY__
+
+#if defined(CONFIG_PPC64) || defined(__CHECKER__)
+typedef struct {
+ u32 val;
+#ifdef CONFIG_PPC64
+ u32 suffix;
+#endif
+} __packed ppc_inst_t;
+#else
+typedef u32 ppc_inst_t;
+#endif
+
#define mfmsr() ({unsigned long rval; \
asm volatile("mfmsr %0" : "=r" (rval) : \
: "memory"); rval;})
@@ -1360,6 +1387,7 @@
#define mtmsr(v) asm volatile("mtmsr %0" : \
: "r" ((unsigned long)(v)) \
: "memory")
+#define __mtmsrd(v, l) BUILD_BUG()
#define __MTMSR "mtmsr"
#endif
@@ -1377,8 +1405,7 @@ static inline void mtmsr_isync(unsigned long val)
: "r" ((unsigned long)(v)) \
: "memory")
#endif
-#define wrtspr(rn) asm volatile("mtspr " __stringify(rn) ",0" : \
- : : "memory")
+#define wrtspr(rn) asm volatile("mtspr " __stringify(rn) ",2" : : : "memory")
static inline void wrtee(unsigned long val)
{
@@ -1397,58 +1424,31 @@ static inline void msr_check_and_clear(unsigned long bits)
__msr_check_and_clear(bits);
}
-#ifdef __powerpc64__
-#if defined(CONFIG_PPC_CELL) || defined(CONFIG_PPC_FSL_BOOK3E)
-#define mftb() ({unsigned long rval; \
- asm volatile( \
- "90: mfspr %0, %2;\n" \
- ASM_FTR_IFSET( \
- "97: cmpwi %0,0;\n" \
- " beq- 90b;\n", "", %1) \
- : "=r" (rval) \
- : "i" (CPU_FTR_CELL_TB_BUG), "i" (SPRN_TBRL) : "cr0"); \
- rval;})
-#else
-#define mftb() ({unsigned long rval; \
- asm volatile("mfspr %0, %1" : \
- "=r" (rval) : "i" (SPRN_TBRL)); rval;})
-#endif /* !CONFIG_PPC_CELL */
-
-#else /* __powerpc64__ */
-
-#if defined(CONFIG_PPC_8xx)
-#define mftbl() ({unsigned long rval; \
- asm volatile("mftbl %0" : "=r" (rval)); rval;})
-#define mftbu() ({unsigned long rval; \
- asm volatile("mftbu %0" : "=r" (rval)); rval;})
-#else
-#define mftbl() ({unsigned long rval; \
- asm volatile("mfspr %0, %1" : "=r" (rval) : \
- "i" (SPRN_TBRL)); rval;})
-#define mftbu() ({unsigned long rval; \
- asm volatile("mfspr %0, %1" : "=r" (rval) : \
- "i" (SPRN_TBRU)); rval;})
-#endif
-#define mftb() mftbl()
-#endif /* !__powerpc64__ */
+#ifdef CONFIG_PPC32
+static inline u32 mfsr(u32 idx)
+{
+ u32 val;
-#define mttbl(v) asm volatile("mttbl %0":: "r"(v))
-#define mttbu(v) asm volatile("mttbu %0":: "r"(v))
+ if (__builtin_constant_p(idx))
+ asm volatile("mfsr %0, %1" : "=r" (val): "i" (idx >> 28));
+ else
+ asm volatile("mfsrin %0, %1" : "=r" (val): "r" (idx));
-#ifdef CONFIG_PPC32
-#define mfsrin(v) ({unsigned int rval; \
- asm volatile("mfsrin %0,%1" : "=r" (rval) : "r" (v)); \
- rval;})
+ return val;
+}
-static inline void mtsrin(u32 val, u32 idx)
+static inline void mtsr(u32 val, u32 idx)
{
- asm volatile("mtsrin %0, %1" : : "r" (val), "r" (idx));
+ if (__builtin_constant_p(idx))
+ asm volatile("mtsr %1, %0" : : "r" (val), "i" (idx >> 28));
+ else
+ asm volatile("mtsrin %0, %1" : : "r" (val), "r" (idx));
}
#endif
-#define proc_trap() asm volatile("trap")
+extern unsigned long current_stack_frame(void);
-extern unsigned long current_stack_pointer(void);
+register unsigned long current_stack_pointer asm("r1");
extern unsigned long scom970_read(unsigned int address);
extern void scom970_write(unsigned int address, unsigned long value);
@@ -1456,16 +1456,6 @@ extern void scom970_write(unsigned int address, unsigned long value);
struct pt_regs;
extern void ppc_save_regs(struct pt_regs *regs);
-
-static inline void update_power8_hid0(unsigned long hid0)
-{
- /*
- * The HID0 update on Power8 should at the very least be
- * preceded by a a SYNC instruction followed by an ISYNC
- * instruction
- */
- asm volatile("sync; mtspr %0,%1; isync":: "i"(SPRN_HID0), "r"(hid0));
-}
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_REG_H */
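The mfsr()/mtsr() helpers added above pick the immediate form (mfsr/mtsr) when the index is a compile-time constant and fall back to mfsrin/mtsrin otherwise; in both cases the segment register number comes from the top four bits of the effective address. A minimal sketch of a caller, assuming a 32-bit Book3S kernel build with asm/reg.h available; example_set_segment_nx() is an illustrative name, and 0x10000000 is assumed to be the no-execute (N) bit of the classic segment register layout:

/*
 * Illustrative sketch only: read the segment register covering 'ea',
 * set its no-execute bit and write it back.  With a constant 'ea' the
 * compiler emits a single mfsr/mtsr pair; otherwise mfsrin/mtsrin is used.
 */
static inline void example_set_segment_nx(u32 ea)
{
	u32 sr = mfsr(ea);	/* segment register number is ea >> 28 */

	sr |= 0x10000000;	/* N (no-execute) bit of the segment register */
	mtsr(sr, ea);
}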
diff --git a/arch/powerpc/include/asm/reg_booke.h b/arch/powerpc/include/asm/reg_booke.h
index f26fe482fbca..af56980b6cdb 100644
--- a/arch/powerpc/include/asm/reg_booke.h
+++ b/arch/powerpc/include/asm/reg_booke.h
@@ -174,7 +174,6 @@
#define SPRN_L1CSR1 0x3F3 /* L1 Cache Control and Status Register 1 */
#define SPRN_MMUCSR0 0x3F4 /* MMU Control and Status Register 0 */
#define SPRN_MMUCFG 0x3F7 /* MMU Configuration Register */
-#define SPRN_PIT 0x3DB /* Programmable Interval Timer */
#define SPRN_BUCSR 0x3F5 /* Branch Unit Control and Status */
#define SPRN_L2CSR0 0x3F9 /* L2 Data Cache Control and Status Register 0 */
#define SPRN_L2CSR1 0x3FA /* L2 Data Cache Control and Status Register 1 */
@@ -247,7 +246,7 @@
#define PPC47x_MCSR_FPR 0x00800000 /* FPR parity error */
#define PPC47x_MCSR_IPR 0x00400000 /* Imprecise Machine Check Exception */
-#ifdef CONFIG_E500
+#ifdef CONFIG_PPC_E500
/* All e500 */
#define MCSR_MCP 0x80000000UL /* Machine Check Input Pin */
#define MCSR_ICPERR 0x40000000UL /* I-Cache Parity Error */
@@ -282,20 +281,8 @@
#define MSRP_PMMP 0x00000004 /* Protect MSR[PMM] */
#endif
-#ifdef CONFIG_E200
-#define MCSR_MCP 0x80000000UL /* Machine Check Input Pin */
-#define MCSR_CP_PERR 0x20000000UL /* Cache Push Parity Error */
-#define MCSR_CPERR 0x10000000UL /* Cache Parity Error */
-#define MCSR_EXCP_ERR 0x08000000UL /* ISI, ITLB, or Bus Error on 1st insn
- fetch for an exception handler */
-#define MCSR_BUS_IRERR 0x00000010UL /* Read Bus Error on instruction fetch*/
-#define MCSR_BUS_DRERR 0x00000008UL /* Read Bus Error on data load */
-#define MCSR_BUS_WRERR 0x00000004UL /* Write Bus Error on buffered
- store or cache line push */
-#endif
-
/* Bit definitions for the HID1 */
-#ifdef CONFIG_E500
+#ifdef CONFIG_PPC_E500
/* e500v1/v2 */
#define HID1_PLL_CFG_MASK 0xfc000000 /* PLL_CFG input pins */
#define HID1_RFXE 0x00020000 /* Read fault exception enable */
@@ -558,7 +545,7 @@
#define TCR_FIE 0x00800000 /* FIT Interrupt Enable */
#define TCR_ARE 0x00400000 /* Auto Reload Enable */
-#ifdef CONFIG_E500
+#ifdef CONFIG_PPC_E500
#define TCR_GET_WP(tcr) ((((tcr) & 0xC0000000) >> 30) | \
(((tcr) & 0x1E0000) >> 15))
#else
@@ -663,60 +650,6 @@
#define EPC_EPID 0x00003fff
#define EPC_EPID_SHIFT 0
-/*
- * The IBM-403 is an even more odd special case, as it is much
- * older than the IBM-405 series. We put these down here incase someone
- * wishes to support these machines again.
- */
-#ifdef CONFIG_403GCX
-/* Special Purpose Registers (SPRNs)*/
-#define SPRN_TBHU 0x3CC /* Time Base High User-mode */
-#define SPRN_TBLU 0x3CD /* Time Base Low User-mode */
-#define SPRN_CDBCR 0x3D7 /* Cache Debug Control Register */
-#define SPRN_TBHI 0x3DC /* Time Base High */
-#define SPRN_TBLO 0x3DD /* Time Base Low */
-#define SPRN_DBCR 0x3F2 /* Debug Control Register */
-#define SPRN_PBL1 0x3FC /* Protection Bound Lower 1 */
-#define SPRN_PBL2 0x3FE /* Protection Bound Lower 2 */
-#define SPRN_PBU1 0x3FD /* Protection Bound Upper 1 */
-#define SPRN_PBU2 0x3FF /* Protection Bound Upper 2 */
-
-
-/* Bit definitions for the DBCR. */
-#define DBCR_EDM DBCR0_EDM
-#define DBCR_IDM DBCR0_IDM
-#define DBCR_RST(x) (((x) & 0x3) << 28)
-#define DBCR_RST_NONE 0
-#define DBCR_RST_CORE 1
-#define DBCR_RST_CHIP 2
-#define DBCR_RST_SYSTEM 3
-#define DBCR_IC DBCR0_IC /* Instruction Completion Debug Evnt */
-#define DBCR_BT DBCR0_BT /* Branch Taken Debug Event */
-#define DBCR_EDE DBCR0_EDE /* Exception Debug Event */
-#define DBCR_TDE DBCR0_TDE /* TRAP Debug Event */
-#define DBCR_FER 0x00F80000 /* First Events Remaining Mask */
-#define DBCR_FT 0x00040000 /* Freeze Timers on Debug Event */
-#define DBCR_IA1 0x00020000 /* Instr. Addr. Compare 1 Enable */
-#define DBCR_IA2 0x00010000 /* Instr. Addr. Compare 2 Enable */
-#define DBCR_D1R 0x00008000 /* Data Addr. Compare 1 Read Enable */
-#define DBCR_D1W 0x00004000 /* Data Addr. Compare 1 Write Enable */
-#define DBCR_D1S(x) (((x) & 0x3) << 12) /* Data Adrr. Compare 1 Size */
-#define DAC_BYTE 0
-#define DAC_HALF 1
-#define DAC_WORD 2
-#define DAC_QUAD 3
-#define DBCR_D2R 0x00000800 /* Data Addr. Compare 2 Read Enable */
-#define DBCR_D2W 0x00000400 /* Data Addr. Compare 2 Write Enable */
-#define DBCR_D2S(x) (((x) & 0x3) << 8) /* Data Addr. Compare 2 Size */
-#define DBCR_SBT 0x00000040 /* Second Branch Taken Debug Event */
-#define DBCR_SED 0x00000020 /* Second Exception Debug Event */
-#define DBCR_STD 0x00000010 /* Second Trap Debug Event */
-#define DBCR_SIA 0x00000008 /* Second IAC Enable */
-#define DBCR_SDA 0x00000004 /* Second DAC Enable */
-#define DBCR_JOI 0x00000002 /* JTAG Serial Outbound Int. Enable */
-#define DBCR_JII 0x00000001 /* JTAG Serial Inbound Int. Enable */
-#endif /* 403GCX */
-
/* Some 476 specific registers */
#define SPRN_SSPCR 830
#define SPRN_USPCR 831
@@ -758,6 +691,9 @@
#define mttmr(rn, v) asm volatile(MTTMR(rn, %0) : \
: "r" ((unsigned long)(v)) \
: "memory")
+
+extern unsigned long global_dbcr0[];
+
#endif /* !__ASSEMBLY__ */
#endif /* __ASM_POWERPC_REG_BOOKE_H__ */
diff --git a/arch/powerpc/include/asm/rtas-types.h b/arch/powerpc/include/asm/rtas-types.h
new file mode 100644
index 000000000000..8df6235d64d1
--- /dev/null
+++ b/arch/powerpc/include/asm/rtas-types.h
@@ -0,0 +1,116 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _ASM_POWERPC_RTAS_TYPES_H
+#define _ASM_POWERPC_RTAS_TYPES_H
+
+#include <linux/spinlock_types.h>
+
+typedef __be32 rtas_arg_t;
+
+struct rtas_args {
+ __be32 token;
+ __be32 nargs;
+ __be32 nret;
+ rtas_arg_t args[16];
+ rtas_arg_t *rets; /* Pointer to return values in args[]. */
+};
+
+struct rtas_t {
+ unsigned long entry; /* physical address pointer */
+ unsigned long base; /* physical address pointer */
+ unsigned long size;
+ arch_spinlock_t lock;
+ struct rtas_args args;
+ struct device_node *dev; /* virtual address pointer */
+};
+
+struct rtas_error_log {
+ /* Byte 0 */
+ u8 byte0; /* Architectural version */
+
+ /* Byte 1 */
+ u8 byte1;
+ /* XXXXXXXX
+ * XXX 3: Severity level of error
+ * XX 2: Degree of recovery
+ * X 1: Extended log present?
+ * XX 2: Reserved
+ */
+
+ /* Byte 2 */
+ u8 byte2;
+ /* XXXXXXXX
+ * XXXX 4: Initiator of event
+ * XXXX 4: Target of failed operation
+ */
+ u8 byte3; /* General event or error*/
+ __be32 extended_log_length; /* length in bytes */
+ unsigned char buffer[1]; /* Start of extended log */
+ /* Variable length. */
+};
+
+/* RTAS general extended event log, Version 6. The extended log starts
+ * from "buffer" field of struct rtas_error_log defined above.
+ */
+struct rtas_ext_event_log_v6 {
+ /* Byte 0 */
+ u8 byte0;
+ /* XXXXXXXX
+ * X 1: Log valid
+ * X 1: Unrecoverable error
+ * X 1: Recoverable (correctable or successfully retried)
+ * X 1: Bypassed unrecoverable error (degraded operation)
+ * X 1: Predictive error
+ * X 1: "New" log (always 1 for data returned from RTAS)
+ * X 1: Big Endian
+ * X 1: Reserved
+ */
+
+ /* Byte 1 */
+ u8 byte1; /* reserved */
+
+ /* Byte 2 */
+ u8 byte2;
+ /* XXXXXXXX
+ * X 1: Set to 1 (indicating log is in PowerPC format)
+ * XXX 3: Reserved
+ * XXXX 4: Log format used for bytes 12-2047
+ */
+
+ /* Byte 3 */
+ u8 byte3; /* reserved */
+ /* Byte 4-11 */
+ u8 reserved[8]; /* reserved */
+ /* Byte 12-15 */
+ __be32 company_id; /* Company ID of the company */
+ /* that defines the format for */
+ /* the vendor specific log type */
+ /* Byte 16-end of log */
+ u8 vendor_log[1]; /* Start of vendor specific log */
+ /* Variable length. */
+};
+
+/* Vendor specific Platform Event Log Format, Version 6, section header */
+struct pseries_errorlog {
+ __be16 id; /* 0x00 2-byte ASCII section ID */
+ __be16 length; /* 0x02 Section length in bytes */
+ u8 version; /* 0x04 Section version */
+ u8 subtype; /* 0x05 Section subtype */
+ __be16 creator_component; /* 0x06 Creator component ID */
+ u8 data[]; /* 0x08 Start of section data */
+};
+
+/* RTAS pseries hotplug errorlog section */
+struct pseries_hp_errorlog {
+ u8 resource;
+ u8 action;
+ u8 id_type;
+ u8 reserved;
+ union {
+ __be32 drc_index;
+ __be32 drc_count;
+ struct { __be32 count, index; } ic;
+ char drc_name[1];
+ } _drc_u;
+};
+
+#endif /* _ASM_POWERPC_RTAS_TYPES_H */
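The bit layouts spelled out in the byte1/byte2 comments above are consumed with plain shift-and-mask accessors; asm/rtas.h keeps helpers of exactly this shape. A short sketch, with the example_* names being illustrative only:

/* Sketch: decode struct rtas_error_log byte1 per the layout comment above. */
static inline u8 example_rtas_severity(const struct rtas_error_log *elog)
{
	return (elog->byte1 & 0xE0) >> 5;	/* top 3 bits: severity level */
}

static inline u8 example_rtas_disposition(const struct rtas_error_log *elog)
{
	return (elog->byte1 & 0x18) >> 3;	/* next 2 bits: degree of recovery */
}

static inline bool example_rtas_extended_log_present(const struct rtas_error_log *elog)
{
	return elog->byte1 & 0x04;		/* 1 bit: extended log present */
}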
diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h
index 3c1887351c71..56319aea646e 100644
--- a/arch/powerpc/include/asm/rtas.h
+++ b/arch/powerpc/include/asm/rtas.h
@@ -5,6 +5,7 @@
#include <linux/spinlock.h>
#include <asm/page.h>
+#include <asm/rtas-types.h>
#include <linux/time.h>
#include <linux/cpumask.h>
@@ -18,15 +19,20 @@
#define RTAS_UNKNOWN_SERVICE (-1)
#define RTAS_INSTANTIATE_MAX (1ULL<<30) /* Don't instantiate rtas at/above this value */
-/* Buffer size for ppc_rtas system call. */
-#define RTAS_RMOBUF_MAX (64 * 1024)
+/* Memory set aside for sys_rtas to use with calls that need a work area. */
+#define RTAS_USER_REGION_SIZE (64 * 1024)
/* RTAS return status codes */
-#define RTAS_NOT_SUSPENDABLE -9004
#define RTAS_BUSY -2 /* RTAS Busy */
#define RTAS_EXTENDED_DELAY_MIN 9900
#define RTAS_EXTENDED_DELAY_MAX 9905
+/* statuses specific to ibm,suspend-me */
+#define RTAS_SUSPEND_ABORTED 9000 /* Suspension aborted */
+#define RTAS_NOT_SUSPENDABLE -9004 /* Partition not suspendable */
+#define RTAS_THREADS_ACTIVE -9005 /* Multiple processor threads active */
+#define RTAS_OUTSTANDING_COPROC -9006 /* Outstanding coprocessor operations */
+
/*
* In general to call RTAS use rtas_token("string") to lookup
* an RTAS token for the given string (e.g. "event-scan").
@@ -42,33 +48,6 @@
*
*/
-typedef __be32 rtas_arg_t;
-
-struct rtas_args {
- __be32 token;
- __be32 nargs;
- __be32 nret;
- rtas_arg_t args[16];
- rtas_arg_t *rets; /* Pointer to return values in args[]. */
-};
-
-struct rtas_t {
- unsigned long entry; /* physical address pointer */
- unsigned long base; /* physical address pointer */
- unsigned long size;
- arch_spinlock_t lock;
- struct rtas_args args;
- struct device_node *dev; /* virtual address pointer */
-};
-
-struct rtas_suspend_me_data {
- atomic_t working; /* number of cpus accessing this struct */
- atomic_t done;
- int token; /* ibm,suspend-me */
- atomic_t error;
- struct completion *complete; /* wait on this until working == 0 */
-};
-
/* RTAS event classes */
#define RTAS_INTERNAL_ERROR 0x80000000 /* set bit 0 */
#define RTAS_EPOW_WARNING 0x40000000 /* set bit 1 */
@@ -148,31 +127,6 @@ struct rtas_suspend_me_data {
/* RTAS check-exception vector offset */
#define RTAS_VECTOR_EXTERNAL_INTERRUPT 0x500
-struct rtas_error_log {
- /* Byte 0 */
- uint8_t byte0; /* Architectural version */
-
- /* Byte 1 */
- uint8_t byte1;
- /* XXXXXXXX
- * XXX 3: Severity level of error
- * XX 2: Degree of recovery
- * X 1: Extended log present?
- * XX 2: Reserved
- */
-
- /* Byte 2 */
- uint8_t byte2;
- /* XXXXXXXX
- * XXXX 4: Initiator of event
- * XXXX 4: Target of failed operation
- */
- uint8_t byte3; /* General event or error*/
- __be32 extended_log_length; /* length in bytes */
- unsigned char buffer[1]; /* Start of extended log */
- /* Variable length. */
-};
-
static inline uint8_t rtas_error_severity(const struct rtas_error_log *elog)
{
return (elog->byte1 & 0xE0) >> 5;
@@ -212,47 +166,6 @@ uint32_t rtas_error_extended_log_length(const struct rtas_error_log *elog)
#define RTAS_V6EXT_COMPANY_ID_IBM (('I' << 24) | ('B' << 16) | ('M' << 8))
-/* RTAS general extended event log, Version 6. The extended log starts
- * from "buffer" field of struct rtas_error_log defined above.
- */
-struct rtas_ext_event_log_v6 {
- /* Byte 0 */
- uint8_t byte0;
- /* XXXXXXXX
- * X 1: Log valid
- * X 1: Unrecoverable error
- * X 1: Recoverable (correctable or successfully retried)
- * X 1: Bypassed unrecoverable error (degraded operation)
- * X 1: Predictive error
- * X 1: "New" log (always 1 for data returned from RTAS)
- * X 1: Big Endian
- * X 1: Reserved
- */
-
- /* Byte 1 */
- uint8_t byte1; /* reserved */
-
- /* Byte 2 */
- uint8_t byte2;
- /* XXXXXXXX
- * X 1: Set to 1 (indicating log is in PowerPC format)
- * XXX 3: Reserved
- * XXXX 4: Log format used for bytes 12-2047
- */
-
- /* Byte 3 */
- uint8_t byte3; /* reserved */
- /* Byte 4-11 */
- uint8_t reserved[8]; /* reserved */
- /* Byte 12-15 */
- __be32 company_id; /* Company ID of the company */
- /* that defines the format for */
- /* the vendor specific log type */
- /* Byte 16-end of log */
- uint8_t vendor_log[1]; /* Start of vendor specific log */
- /* Variable length. */
-};
-
static
inline uint8_t rtas_ext_event_log_format(struct rtas_ext_event_log_v6 *ext_log)
{
@@ -287,16 +200,6 @@ inline uint32_t rtas_ext_event_company_id(struct rtas_ext_event_log_v6 *ext_log)
#define PSERIES_ELOG_SECT_ID_HOTPLUG (('H' << 8) | 'P')
#define PSERIES_ELOG_SECT_ID_MCE (('M' << 8) | 'C')
-/* Vendor specific Platform Event Log Format, Version 6, section header */
-struct pseries_errorlog {
- __be16 id; /* 0x00 2-byte ASCII section ID */
- __be16 length; /* 0x02 Section length in bytes */
- uint8_t version; /* 0x04 Section version */
- uint8_t subtype; /* 0x05 Section subtype */
- __be16 creator_component; /* 0x06 Creator component ID */
- uint8_t data[]; /* 0x08 Start of section data */
-};
-
static
inline uint16_t pseries_errorlog_id(struct pseries_errorlog *sect)
{
@@ -309,20 +212,6 @@ inline uint16_t pseries_errorlog_length(struct pseries_errorlog *sect)
return be16_to_cpu(sect->length);
}
-/* RTAS pseries hotplug errorlog section */
-struct pseries_hp_errorlog {
- u8 resource;
- u8 action;
- u8 id_type;
- u8 reserved;
- union {
- __be32 drc_index;
- __be32 drc_count;
- struct { __be32 count, index; } ic;
- char drc_name[1];
- } _drc_u;
-};
-
#define PSERIES_HP_ELOG_RESOURCE_CPU 1
#define PSERIES_HP_ELOG_RESOURCE_MEM 2
#define PSERIES_HP_ELOG_RESOURCE_SLOT 3
@@ -331,7 +220,6 @@ struct pseries_hp_errorlog {
#define PSERIES_HP_ELOG_ACTION_ADD 1
#define PSERIES_HP_ELOG_ACTION_REMOVE 2
-#define PSERIES_HP_ELOG_ACTION_READD 3
#define PSERIES_HP_ELOG_ID_DRC_NAME 1
#define PSERIES_HP_ELOG_ID_DRC_INDEX 2
@@ -358,6 +246,7 @@ extern void __noreturn rtas_restart(char *cmd);
extern void rtas_power_off(void);
extern void __noreturn rtas_halt(void);
extern void rtas_os_term(char *str);
+void rtas_activate_firmware(void);
extern int rtas_get_sensor(int sensor, int index, int *state);
extern int rtas_get_sensor_fast(int sensor, int index, int *state);
extern int rtas_get_power_level(int powerdomain, int *level);
@@ -366,11 +255,7 @@ extern bool rtas_indicator_present(int token, int *maxindex);
extern int rtas_set_indicator(int indicator, int index, int new_value);
extern int rtas_set_indicator_fast(int indicator, int index, int new_value);
extern void rtas_progress(char *s, unsigned short hex);
-extern int rtas_suspend_cpu(struct rtas_suspend_me_data *data);
-extern int rtas_suspend_last_cpu(struct rtas_suspend_me_data *data);
-extern int rtas_online_cpus_mask(cpumask_var_t cpus);
-extern int rtas_offline_cpus_mask(cpumask_var_t cpus);
-extern int rtas_ibm_suspend_me(u64 handle);
+int rtas_ibm_suspend_me(int *fw_status);
struct rtc_time;
extern time64_t rtas_get_boot_time(void);
@@ -378,7 +263,7 @@ extern void rtas_get_rtc_time(struct rtc_time *rtc_time);
extern int rtas_set_rtc_time(struct rtc_time *rtc_time);
extern unsigned int rtas_busy_delay_time(int status);
-extern unsigned int rtas_busy_delay(int status);
+bool rtas_busy_delay(int status);
extern int early_init_dt_scan_rtas(unsigned long node,
const char *uname, int depth, void *data);
@@ -388,10 +273,14 @@ extern void pSeries_log_error(char *buf, unsigned int err_type, int fatal);
#ifdef CONFIG_PPC_PSERIES
extern time64_t last_rtas_event;
extern int clobbering_unread_rtas_event(void);
-extern int pseries_devicetree_update(s32 scope);
extern void post_mobility_fixup(void);
+int rtas_syscall_dispatch_ibm_suspend_me(u64 handle);
#else
static inline int clobbering_unread_rtas_event(void) { return 0; }
+static inline int rtas_syscall_dispatch_ibm_suspend_me(u64 handle)
+{
+ return -EINVAL;
+}
#endif
#ifdef CONFIG_PPC_RTAS_DAEMON
@@ -466,7 +355,7 @@ extern void rtas_take_timebase(void);
static inline int page_is_rtas_user_buf(unsigned long pfn)
{
unsigned long paddr = (pfn << PAGE_SHIFT);
- if (paddr >= rtas_rmo_buf && paddr < (rtas_rmo_buf + RTAS_RMOBUF_MAX))
+ if (paddr >= rtas_rmo_buf && paddr < (rtas_rmo_buf + RTAS_USER_REGION_SIZE))
return 1;
return 0;
}
@@ -478,10 +367,16 @@ void rtas_initialize(void);
#else
static inline int page_is_rtas_user_buf(unsigned long pfn) { return 0;}
static inline void pSeries_coalesce_init(void) { }
-static inline void rtas_initialize(void) { };
+static inline void rtas_initialize(void) { }
#endif
extern int call_rtas(const char *, int, int, unsigned long *, ...);
+#ifdef CONFIG_HV_PERF_CTRS
+void read_24x7_sys_info(void);
+#else
+static inline void read_24x7_sys_info(void) { }
+#endif
+
#endif /* __KERNEL__ */
#endif /* _POWERPC_RTAS_H */
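RTAS_BUSY and the extended-delay statuses listed above follow the usual PAPR retry convention, where 9900 + N asks the caller to wait on the order of 10^N milliseconds before repeating the call; rtas_busy_delay_time() performs this conversion in the kernel. A hedged sketch of that conversion, with example_busy_delay_ms() being an illustrative name:

/* Sketch: map an RTAS status to a suggested retry delay in milliseconds,
 * assuming the usual PAPR encoding (RTAS_BUSY => ~1 ms, 9900+N => 10^N ms). */
static unsigned int example_busy_delay_ms(int status)
{
	unsigned int ms;
	int order;

	if (status == RTAS_BUSY)
		return 1;

	if (status < RTAS_EXTENDED_DELAY_MIN || status > RTAS_EXTENDED_DELAY_MAX)
		return 0;	/* not a busy or extended-delay status */

	for (ms = 1, order = status - RTAS_EXTENDED_DELAY_MIN; order > 0; order--)
		ms *= 10;

	return ms;
}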
diff --git a/arch/powerpc/include/asm/runlatch.h b/arch/powerpc/include/asm/runlatch.h
index cfb390edf7d0..ceb66d761fe1 100644
--- a/arch/powerpc/include/asm/runlatch.h
+++ b/arch/powerpc/include/asm/runlatch.h
@@ -19,10 +19,9 @@ extern void __ppc64_runlatch_off(void);
do { \
if (cpu_has_feature(CPU_FTR_CTRL) && \
test_thread_local_flags(_TLF_RUNLATCH)) { \
- unsigned long msr = mfmsr(); \
__hard_irq_disable(); \
__ppc64_runlatch_off(); \
- if (msr & MSR_EE) \
+ if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS)) \
__hard_irq_enable(); \
} \
} while (0)
@@ -31,10 +30,9 @@ extern void __ppc64_runlatch_off(void);
do { \
if (cpu_has_feature(CPU_FTR_CTRL) && \
!test_thread_local_flags(_TLF_RUNLATCH)) { \
- unsigned long msr = mfmsr(); \
__hard_irq_disable(); \
__ppc64_runlatch_on(); \
- if (msr & MSR_EE) \
+ if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS)) \
__hard_irq_enable(); \
} \
} while (0)
diff --git a/arch/powerpc/include/asm/seccomp.h b/arch/powerpc/include/asm/seccomp.h
index 51209f6071c5..ac2033f134f0 100644
--- a/arch/powerpc/include/asm/seccomp.h
+++ b/arch/powerpc/include/asm/seccomp.h
@@ -8,4 +8,27 @@
#include <asm-generic/seccomp.h>
+#ifdef __LITTLE_ENDIAN__
+#define __SECCOMP_ARCH_LE __AUDIT_ARCH_LE
+#define __SECCOMP_ARCH_LE_NAME "le"
+#else
+#define __SECCOMP_ARCH_LE 0
+#define __SECCOMP_ARCH_LE_NAME
+#endif
+
+#ifdef CONFIG_PPC64
+# define SECCOMP_ARCH_NATIVE (AUDIT_ARCH_PPC64 | __SECCOMP_ARCH_LE)
+# define SECCOMP_ARCH_NATIVE_NR NR_syscalls
+# define SECCOMP_ARCH_NATIVE_NAME "ppc64" __SECCOMP_ARCH_LE_NAME
+# ifdef CONFIG_COMPAT
+# define SECCOMP_ARCH_COMPAT (AUDIT_ARCH_PPC | __SECCOMP_ARCH_LE)
+# define SECCOMP_ARCH_COMPAT_NR NR_syscalls
+# define SECCOMP_ARCH_COMPAT_NAME "ppc" __SECCOMP_ARCH_LE_NAME
+# endif
+#else /* !CONFIG_PPC64 */
+# define SECCOMP_ARCH_NATIVE (AUDIT_ARCH_PPC | __SECCOMP_ARCH_LE)
+# define SECCOMP_ARCH_NATIVE_NR NR_syscalls
+# define SECCOMP_ARCH_NATIVE_NAME "ppc" __SECCOMP_ARCH_LE_NAME
+#endif
+
#endif /* _ASM_POWERPC_SECCOMP_H */
diff --git a/arch/powerpc/include/asm/sections.h b/arch/powerpc/include/asm/sections.h
index d19871763ed4..9c00c9c0ca8f 100644
--- a/arch/powerpc/include/asm/sections.h
+++ b/arch/powerpc/include/asm/sections.h
@@ -6,31 +6,33 @@
#include <linux/elf.h>
#include <linux/uaccess.h>
-#define arch_is_kernel_initmem_freed arch_is_kernel_initmem_freed
+#ifdef CONFIG_HAVE_FUNCTION_DESCRIPTORS
+typedef struct func_desc func_desc_t;
+#endif
#include <asm-generic/sections.h>
-extern bool init_mem_is_free;
-
-static inline int arch_is_kernel_initmem_freed(unsigned long addr)
-{
- if (!init_mem_is_free)
- return 0;
+extern char __head_end[];
+extern char __srwx_boundary[];
- return addr >= (unsigned long)__init_begin &&
- addr < (unsigned long)__init_end;
-}
+/* Patch sites */
+extern s32 patch__call_flush_branch_caches1;
+extern s32 patch__call_flush_branch_caches2;
+extern s32 patch__call_flush_branch_caches3;
+extern s32 patch__flush_count_cache_return;
+extern s32 patch__flush_link_stack_return;
+extern s32 patch__call_kvm_flush_link_stack;
+extern s32 patch__call_kvm_flush_link_stack_p9;
+extern s32 patch__memset_nocache, patch__memcpy_nocache;
-extern char __head_end[];
+extern long flush_branch_caches;
+extern long kvm_flush_link_stack;
#ifdef __powerpc64__
extern char __start_interrupts[];
extern char __end_interrupts[];
-extern char __prom_init_toc_start[];
-extern char __prom_init_toc_end[];
-
#ifdef CONFIG_PPC_POWERNV
extern char start_real_trampolines[];
extern char end_real_trampolines[];
@@ -38,24 +40,16 @@ extern char start_virt_trampolines[];
extern char end_virt_trampolines[];
#endif
-static inline int in_kernel_text(unsigned long addr)
-{
- if (addr >= (unsigned long)_stext && addr < (unsigned long)__init_end)
- return 1;
-
- return 0;
-}
-
+/*
+ * This assumes the kernel is never compiled -mcmodel=small or
+ * the total .toc is always less than 64k.
+ */
static inline unsigned long kernel_toc_addr(void)
{
- /* Defined by the linker, see vmlinux.lds.S */
- extern unsigned long __toc_start;
-
- /*
- * The TOC register (r2) points 32kB into the TOC, so that 64kB of
- * the TOC can be addressed using a single machine instruction.
- */
- return (unsigned long)(&__toc_start) + 0x8000UL;
+ unsigned long toc_ptr;
+
+ asm volatile("mr %0, 2" : "=r" (toc_ptr));
+ return toc_ptr;
}
static inline int overlaps_interrupt_vector_text(unsigned long start,
@@ -75,31 +69,6 @@ static inline int overlaps_kernel_text(unsigned long start, unsigned long end)
(unsigned long)_stext < end;
}
-#ifdef PPC64_ELF_ABI_v1
-
-#define HAVE_DEREFERENCE_FUNCTION_DESCRIPTOR 1
-
-#undef dereference_function_descriptor
-static inline void *dereference_function_descriptor(void *ptr)
-{
- struct ppc64_opd_entry *desc = ptr;
- void *p;
-
- if (!probe_kernel_address(&desc->funcaddr, p))
- ptr = p;
- return ptr;
-}
-
-#undef dereference_kernel_function_descriptor
-static inline void *dereference_kernel_function_descriptor(void *ptr)
-{
- if (ptr < (void *)__start_opd || ptr >= (void *)__end_opd)
- return ptr;
-
- return dereference_function_descriptor(ptr);
-}
-#endif /* PPC64_ELF_ABI_v1 */
-
#endif
#endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/security_features.h b/arch/powerpc/include/asm/security_features.h
index 7c05e95a5c44..27574f218b37 100644
--- a/arch/powerpc/include/asm/security_features.h
+++ b/arch/powerpc/include/asm/security_features.h
@@ -39,6 +39,11 @@ static inline bool security_ftr_enabled(u64 feature)
return !!(powerpc_security_features & feature);
}
+#ifdef CONFIG_PPC_BOOK3S_64
+enum stf_barrier_type stf_barrier_type_get(void);
+#else
+static inline enum stf_barrier_type stf_barrier_type_get(void) { return STF_BARRIER_NONE; }
+#endif
// Features indicating support for Spectre/Meltdown mitigations
@@ -63,6 +68,8 @@ static inline bool security_ftr_enabled(u64 feature)
// bcctr 2,0,0 triggers a hardware assisted count cache flush
#define SEC_FTR_BCCTR_FLUSH_ASSIST 0x0000000000000800ull
+// bcctr 2,0,0 triggers a hardware assisted link stack flush
+#define SEC_FTR_BCCTR_LINK_FLUSH_ASSIST 0x0000000000002000ull
// Features indicating need for Spectre/Meltdown mitigations
@@ -84,12 +91,23 @@ static inline bool security_ftr_enabled(u64 feature)
// Software required to flush link stack on context switch
#define SEC_FTR_FLUSH_LINK_STACK 0x0000000000001000ull
+// The L1-D cache should be flushed when entering the kernel
+#define SEC_FTR_L1D_FLUSH_ENTRY 0x0000000000004000ull
+
+// The L1-D cache should be flushed after user accesses from the kernel
+#define SEC_FTR_L1D_FLUSH_UACCESS 0x0000000000008000ull
+
+// The STF flush should be executed on privilege state switch
+#define SEC_FTR_STF_BARRIER 0x0000000000010000ull
// Features enabled by default
#define SEC_FTR_DEFAULT \
(SEC_FTR_L1D_FLUSH_HV | \
SEC_FTR_L1D_FLUSH_PR | \
SEC_FTR_BNDS_CHK_SPEC_BAR | \
+ SEC_FTR_L1D_FLUSH_ENTRY | \
+ SEC_FTR_L1D_FLUSH_UACCESS | \
+ SEC_FTR_STF_BARRIER | \
SEC_FTR_FAVOUR_SECURITY)
#endif /* _ASM_POWERPC_SECURITY_FEATURES_H */
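powerpc_security_features is a plain 64-bit mask, so deciding whether to enable the new entry/uaccess flushes is ordinary bit testing against the SEC_FTR_* values above. A sketch, where example_apply_entry_flush() is illustrative and setup_entry_flush() is the hook declared in asm/setup.h later in this series:

/* Sketch: enable the L1-D entry flush only when the firmware-reported
 * features ask for it and the policy bit favours security. */
static void example_apply_entry_flush(void)
{
	bool want = security_ftr_enabled(SEC_FTR_L1D_FLUSH_ENTRY) &&
		    security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY);

	setup_entry_flush(want);
}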
diff --git a/arch/powerpc/include/asm/set_memory.h b/arch/powerpc/include/asm/set_memory.h
new file mode 100644
index 000000000000..7ebc807aa8cc
--- /dev/null
+++ b/arch/powerpc/include/asm/set_memory.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_SET_MEMORY_H
+#define _ASM_POWERPC_SET_MEMORY_H
+
+#define SET_MEMORY_RO 0
+#define SET_MEMORY_RW 1
+#define SET_MEMORY_NX 2
+#define SET_MEMORY_X 3
+#define SET_MEMORY_NP 4 /* Set memory non present */
+#define SET_MEMORY_P 5 /* Set memory present */
+
+int change_memory_attr(unsigned long addr, int numpages, long action);
+
+static inline int set_memory_ro(unsigned long addr, int numpages)
+{
+ return change_memory_attr(addr, numpages, SET_MEMORY_RO);
+}
+
+static inline int set_memory_rw(unsigned long addr, int numpages)
+{
+ return change_memory_attr(addr, numpages, SET_MEMORY_RW);
+}
+
+static inline int set_memory_nx(unsigned long addr, int numpages)
+{
+ return change_memory_attr(addr, numpages, SET_MEMORY_NX);
+}
+
+static inline int set_memory_x(unsigned long addr, int numpages)
+{
+ return change_memory_attr(addr, numpages, SET_MEMORY_X);
+}
+
+static inline int set_memory_np(unsigned long addr, int numpages)
+{
+ return change_memory_attr(addr, numpages, SET_MEMORY_NP);
+}
+
+static inline int set_memory_p(unsigned long addr, int numpages)
+{
+ return change_memory_attr(addr, numpages, SET_MEMORY_P);
+}
+
+#endif
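Each helper above is a one-line wrapper around change_memory_attr(), which takes a page-aligned virtual address and a page count. A usage sketch, with 'buf' and example_write_protect_page() being illustrative:

/* Sketch: temporarily write-protect the page containing 'buf'. */
static int example_write_protect_page(void *buf)
{
	unsigned long addr = (unsigned long)buf & PAGE_MASK;
	int err;

	err = set_memory_ro(addr, 1);
	if (err)
		return err;

	/* ... read-only window ... */

	return set_memory_rw(addr, 1);
}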
diff --git a/arch/powerpc/include/asm/setjmp.h b/arch/powerpc/include/asm/setjmp.h
index e9f81bb3f83b..f798e80e4106 100644
--- a/arch/powerpc/include/asm/setjmp.h
+++ b/arch/powerpc/include/asm/setjmp.h
@@ -7,7 +7,9 @@
#define JMP_BUF_LEN 23
-extern long setjmp(long *) __attribute__((returns_twice));
-extern void longjmp(long *, long) __attribute__((noreturn));
+typedef long jmp_buf[JMP_BUF_LEN];
+
+extern int setjmp(jmp_buf env) __attribute__((returns_twice));
+extern void longjmp(jmp_buf env, int val) __attribute__((noreturn));
#endif /* _ASM_POWERPC_SETJMP_H */
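The jmp_buf typedef gives setjmp()/longjmp() the conventional prototypes for their in-kernel users such as xmon; setjmp() returns 0 on the initial call and the value handed to longjmp() when the jump is taken. A minimal sketch, where example_guarded_call() and example_might_fault() are hypothetical:

/* Sketch of the control flow around a fault-prone helper. */
static jmp_buf example_env;

static int example_guarded_call(void)
{
	if (setjmp(example_env))
		return -EFAULT;		/* reached via longjmp(example_env, 1) */

	example_might_fault();		/* hypothetical; may longjmp() on error */
	return 0;
}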
diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h
index 65676e2325b8..e29e83f8a89c 100644
--- a/arch/powerpc/include/asm/setup.h
+++ b/arch/powerpc/include/asm/setup.h
@@ -7,14 +7,10 @@
#ifndef __ASSEMBLY__
extern void ppc_printk_progress(char *s, unsigned short hex);
-extern unsigned int rtas_data;
extern unsigned long long memory_limit;
-extern bool init_mem_is_free;
-extern unsigned long klimit;
extern void *zalloc_maybe_bootmem(size_t size, gfp_t mask);
struct device_node;
-extern void note_scsi_host(struct device_node *, void *);
/* Used in very early kernel initialization. */
extern unsigned long reloc_offset(void);
@@ -30,12 +26,14 @@ void setup_panic(void);
#define ARCH_PANIC_TIMEOUT 180
#ifdef CONFIG_PPC_PSERIES
-extern void pseries_enable_reloc_on_exc(void);
+extern bool pseries_reloc_on_exception(void);
+extern bool pseries_enable_reloc_on_exc(void);
extern void pseries_disable_reloc_on_exc(void);
extern void pseries_big_endian_exceptions(void);
-extern void pseries_little_endian_exceptions(void);
+void __init pseries_little_endian_exceptions(void);
#else
-static inline void pseries_enable_reloc_on_exc(void) {}
+static inline bool pseries_reloc_on_exception(void) { return false; }
+static inline bool pseries_enable_reloc_on_exc(void) { return false; }
static inline void pseries_disable_reloc_on_exc(void) {}
static inline void pseries_big_endian_exceptions(void) {}
static inline void pseries_little_endian_exceptions(void) {}
@@ -52,27 +50,45 @@ enum l1d_flush_type {
};
void setup_rfi_flush(enum l1d_flush_type, bool enable);
+void setup_entry_flush(bool enable);
+void setup_uaccess_flush(bool enable);
void do_rfi_flush_fixups(enum l1d_flush_type types);
#ifdef CONFIG_PPC_BARRIER_NOSPEC
-void setup_barrier_nospec(void);
+void __init setup_barrier_nospec(void);
#else
-static inline void setup_barrier_nospec(void) { };
+static inline void setup_barrier_nospec(void) { }
#endif
+void do_uaccess_flush_fixups(enum l1d_flush_type types);
+void do_entry_flush_fixups(enum l1d_flush_type types);
void do_barrier_nospec_fixups(bool enable);
extern bool barrier_nospec_enabled;
#ifdef CONFIG_PPC_BARRIER_NOSPEC
void do_barrier_nospec_fixups_range(bool enable, void *start, void *end);
#else
-static inline void do_barrier_nospec_fixups_range(bool enable, void *start, void *end) { };
+static inline void do_barrier_nospec_fixups_range(bool enable, void *start, void *end) { }
#endif
-#ifdef CONFIG_PPC_FSL_BOOK3E
-void setup_spectre_v2(void);
+#ifdef CONFIG_PPC_E500
+void __init setup_spectre_v2(void);
#else
-static inline void setup_spectre_v2(void) {};
+static inline void setup_spectre_v2(void) {}
#endif
-void do_btb_flush_fixups(void);
+void __init do_btb_flush_fixups(void);
+
+#ifdef CONFIG_PPC32
+unsigned long __init early_init(unsigned long dt_ptr);
+void __init machine_init(u64 dt_ptr);
+#endif
+void __init early_setup(unsigned long dt_ptr);
+void early_setup_secondary(void);
+
+/* prom_init (OpenFirmware) */
+unsigned long __init prom_init(unsigned long r3, unsigned long r4,
+ unsigned long pp, unsigned long r6,
+ unsigned long r7, unsigned long kbase);
+
+extern struct seq_buf ppc_hw_desc;
#endif /* !__ASSEMBLY__ */
diff --git a/arch/powerpc/include/asm/signal.h b/arch/powerpc/include/asm/signal.h
index 0803ca8b9149..922d43700fb4 100644
--- a/arch/powerpc/include/asm/signal.h
+++ b/arch/powerpc/include/asm/signal.h
@@ -6,4 +6,12 @@
#include <uapi/asm/signal.h>
#include <uapi/asm/ptrace.h>
+struct pt_regs;
+void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags);
+
+unsigned long get_min_sigframe_size_32(void);
+unsigned long get_min_sigframe_size_64(void);
+unsigned long get_min_sigframe_size(void);
+unsigned long get_min_sigframe_size_compat(void);
+
#endif /* _ASM_POWERPC_SIGNAL_H */
diff --git a/arch/powerpc/include/asm/simple_spinlock.h b/arch/powerpc/include/asm/simple_spinlock.h
new file mode 100644
index 000000000000..9dcc7e9993b9
--- /dev/null
+++ b/arch/powerpc/include/asm/simple_spinlock.h
@@ -0,0 +1,266 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _ASM_POWERPC_SIMPLE_SPINLOCK_H
+#define _ASM_POWERPC_SIMPLE_SPINLOCK_H
+
+/*
+ * Simple spin lock operations.
+ *
+ * Copyright (C) 2001-2004 Paul Mackerras <paulus@au.ibm.com>, IBM
+ * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
+ * Copyright (C) 2002 Dave Engebretsen <engebret@us.ibm.com>, IBM
+ * Rework to support virtual processors
+ *
+ * Type of int is used as a full 64b word is not necessary.
+ *
+ * (the type definitions are in asm/simple_spinlock_types.h)
+ */
+#include <linux/irqflags.h>
+#include <asm/paravirt.h>
+#include <asm/paca.h>
+#include <asm/synch.h>
+#include <asm/ppc-opcode.h>
+
+#ifdef CONFIG_PPC64
+/* use 0x800000yy when locked, where yy == CPU number */
+#ifdef __BIG_ENDIAN__
+#define LOCK_TOKEN (*(u32 *)(&get_paca()->lock_token))
+#else
+#define LOCK_TOKEN (*(u32 *)(&get_paca()->paca_index))
+#endif
+#else
+#define LOCK_TOKEN 1
+#endif
+
+static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
+{
+ return lock.slock == 0;
+}
+
+static inline int arch_spin_is_locked(arch_spinlock_t *lock)
+{
+ return !arch_spin_value_unlocked(READ_ONCE(*lock));
+}
+
+/*
+ * This returns the old value in the lock, so we succeeded
+ * in getting the lock if the return value is 0.
+ */
+static inline unsigned long __arch_spin_trylock(arch_spinlock_t *lock)
+{
+ unsigned long tmp, token;
+ unsigned int eh = IS_ENABLED(CONFIG_PPC64);
+
+ token = LOCK_TOKEN;
+ __asm__ __volatile__(
+"1: lwarx %0,0,%2,%[eh]\n\
+ cmpwi 0,%0,0\n\
+ bne- 2f\n\
+ stwcx. %1,0,%2\n\
+ bne- 1b\n"
+ PPC_ACQUIRE_BARRIER
+"2:"
+ : "=&r" (tmp)
+ : "r" (token), "r" (&lock->slock), [eh] "n" (eh)
+ : "cr0", "memory");
+
+ return tmp;
+}
+
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
+{
+ return __arch_spin_trylock(lock) == 0;
+}
+
+/*
+ * On a system with shared processors (that is, where a physical
+ * processor is multiplexed between several virtual processors),
+ * there is no point spinning on a lock if the holder of the lock
+ * isn't currently scheduled on a physical processor. Instead
+ * we detect this situation and ask the hypervisor to give the
+ * rest of our timeslice to the lock holder.
+ *
+ * So that we can tell which virtual processor is holding a lock,
+ * we put 0x80000000 | smp_processor_id() in the lock when it is
+ * held. Conveniently, we have a word in the paca that holds this
+ * value.
+ */
+
+#if defined(CONFIG_PPC_SPLPAR)
+/* We only yield to the hypervisor if we are in shared processor mode */
+void splpar_spin_yield(arch_spinlock_t *lock);
+void splpar_rw_yield(arch_rwlock_t *lock);
+#else /* SPLPAR */
+static inline void splpar_spin_yield(arch_spinlock_t *lock) {}
+static inline void splpar_rw_yield(arch_rwlock_t *lock) {}
+#endif
+
+static inline void spin_yield(arch_spinlock_t *lock)
+{
+ if (is_shared_processor())
+ splpar_spin_yield(lock);
+ else
+ barrier();
+}
+
+static inline void rw_yield(arch_rwlock_t *lock)
+{
+ if (is_shared_processor())
+ splpar_rw_yield(lock);
+ else
+ barrier();
+}
+
+static inline void arch_spin_lock(arch_spinlock_t *lock)
+{
+ while (1) {
+ if (likely(__arch_spin_trylock(lock) == 0))
+ break;
+ do {
+ HMT_low();
+ if (is_shared_processor())
+ splpar_spin_yield(lock);
+ } while (unlikely(lock->slock != 0));
+ HMT_medium();
+ }
+}
+
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
+{
+ __asm__ __volatile__("# arch_spin_unlock\n\t"
+ PPC_RELEASE_BARRIER: : :"memory");
+ lock->slock = 0;
+}
+
+/*
+ * Read-write spinlocks, allowing multiple readers
+ * but only one writer.
+ *
+ * NOTE! it is quite common to have readers in interrupts
+ * but no interrupt writers. For those circumstances we
+ * can "mix" irq-safe locks - any writer needs to get a
+ * irq-safe write-lock, but readers can get non-irqsafe
+ * read-locks.
+ */
+
+#ifdef CONFIG_PPC64
+#define __DO_SIGN_EXTEND "extsw %0,%0\n"
+#define WRLOCK_TOKEN LOCK_TOKEN /* it's negative */
+#else
+#define __DO_SIGN_EXTEND
+#define WRLOCK_TOKEN (-1)
+#endif
+
+/*
+ * This returns the old value in the lock + 1,
+ * so we got a read lock if the return value is > 0.
+ */
+static inline long __arch_read_trylock(arch_rwlock_t *rw)
+{
+ long tmp;
+ unsigned int eh = IS_ENABLED(CONFIG_PPC64);
+
+ __asm__ __volatile__(
+"1: lwarx %0,0,%1,%[eh]\n"
+ __DO_SIGN_EXTEND
+" addic. %0,%0,1\n\
+ ble- 2f\n"
+" stwcx. %0,0,%1\n\
+ bne- 1b\n"
+ PPC_ACQUIRE_BARRIER
+"2:" : "=&r" (tmp)
+ : "r" (&rw->lock), [eh] "n" (eh)
+ : "cr0", "xer", "memory");
+
+ return tmp;
+}
+
+/*
+ * This returns the old value in the lock,
+ * so we got the write lock if the return value is 0.
+ */
+static inline long __arch_write_trylock(arch_rwlock_t *rw)
+{
+ long tmp, token;
+ unsigned int eh = IS_ENABLED(CONFIG_PPC64);
+
+ token = WRLOCK_TOKEN;
+ __asm__ __volatile__(
+"1: lwarx %0,0,%2,%[eh]\n\
+ cmpwi 0,%0,0\n\
+ bne- 2f\n"
+" stwcx. %1,0,%2\n\
+ bne- 1b\n"
+ PPC_ACQUIRE_BARRIER
+"2:" : "=&r" (tmp)
+ : "r" (token), "r" (&rw->lock), [eh] "n" (eh)
+ : "cr0", "memory");
+
+ return tmp;
+}
+
+static inline void arch_read_lock(arch_rwlock_t *rw)
+{
+ while (1) {
+ if (likely(__arch_read_trylock(rw) > 0))
+ break;
+ do {
+ HMT_low();
+ if (is_shared_processor())
+ splpar_rw_yield(rw);
+ } while (unlikely(rw->lock < 0));
+ HMT_medium();
+ }
+}
+
+static inline void arch_write_lock(arch_rwlock_t *rw)
+{
+ while (1) {
+ if (likely(__arch_write_trylock(rw) == 0))
+ break;
+ do {
+ HMT_low();
+ if (is_shared_processor())
+ splpar_rw_yield(rw);
+ } while (unlikely(rw->lock != 0));
+ HMT_medium();
+ }
+}
+
+static inline int arch_read_trylock(arch_rwlock_t *rw)
+{
+ return __arch_read_trylock(rw) > 0;
+}
+
+static inline int arch_write_trylock(arch_rwlock_t *rw)
+{
+ return __arch_write_trylock(rw) == 0;
+}
+
+static inline void arch_read_unlock(arch_rwlock_t *rw)
+{
+ long tmp;
+
+ __asm__ __volatile__(
+ "# read_unlock\n\t"
+ PPC_RELEASE_BARRIER
+"1: lwarx %0,0,%1\n\
+ addic %0,%0,-1\n"
+" stwcx. %0,0,%1\n\
+ bne- 1b"
+ : "=&r"(tmp)
+ : "r"(&rw->lock)
+ : "cr0", "xer", "memory");
+}
+
+static inline void arch_write_unlock(arch_rwlock_t *rw)
+{
+ __asm__ __volatile__("# write_unlock\n\t"
+ PPC_RELEASE_BARRIER: : :"memory");
+ rw->lock = 0;
+}
+
+#define arch_spin_relax(lock) spin_yield(lock)
+#define arch_read_relax(lock) rw_yield(lock)
+#define arch_write_relax(lock) rw_yield(lock)
+
+#endif /* _ASM_POWERPC_SIMPLE_SPINLOCK_H */
diff --git a/arch/powerpc/include/asm/simple_spinlock_types.h b/arch/powerpc/include/asm/simple_spinlock_types.h
new file mode 100644
index 000000000000..08243338069d
--- /dev/null
+++ b/arch/powerpc/include/asm/simple_spinlock_types.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_SIMPLE_SPINLOCK_TYPES_H
+#define _ASM_POWERPC_SIMPLE_SPINLOCK_TYPES_H
+
+#ifndef __LINUX_SPINLOCK_TYPES_RAW_H
+# error "please don't include this file directly"
+#endif
+
+typedef struct {
+ volatile unsigned int slock;
+} arch_spinlock_t;
+
+#define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
+
+typedef struct {
+ volatile signed int lock;
+} arch_rwlock_t;
+
+#define __ARCH_RW_LOCK_UNLOCKED { 0 }
+
+#endif /* _ASM_POWERPC_SIMPLE_SPINLOCK_TYPES_H */
diff --git a/arch/powerpc/include/asm/slice.h b/arch/powerpc/include/asm/slice.h
deleted file mode 100644
index c6f466f4c241..000000000000
--- a/arch/powerpc/include/asm/slice.h
+++ /dev/null
@@ -1,48 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_POWERPC_SLICE_H
-#define _ASM_POWERPC_SLICE_H
-
-#ifdef CONFIG_PPC_BOOK3S_64
-#include <asm/book3s/64/slice.h>
-#elif defined(CONFIG_PPC_MMU_NOHASH_32)
-#include <asm/nohash/32/slice.h>
-#endif
-
-#ifndef __ASSEMBLY__
-
-struct mm_struct;
-
-#ifdef CONFIG_PPC_MM_SLICES
-
-#ifdef CONFIG_HUGETLB_PAGE
-#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
-#endif
-#define HAVE_ARCH_UNMAPPED_AREA
-#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
-
-unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
- unsigned long flags, unsigned int psize,
- int topdown);
-
-unsigned int get_slice_psize(struct mm_struct *mm, unsigned long addr);
-
-void slice_set_range_psize(struct mm_struct *mm, unsigned long start,
- unsigned long len, unsigned int psize);
-
-void slice_init_new_context_exec(struct mm_struct *mm);
-void slice_setup_new_exec(void);
-
-#else /* CONFIG_PPC_MM_SLICES */
-
-static inline void slice_init_new_context_exec(struct mm_struct *mm) {}
-
-static inline unsigned int get_slice_psize(struct mm_struct *mm, unsigned long addr)
-{
- return 0;
-}
-
-#endif /* CONFIG_PPC_MM_SLICES */
-
-#endif /* __ASSEMBLY__ */
-
-#endif /* _ASM_POWERPC_SLICE_H */
diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
index 49a25e2400f2..f63505d74932 100644
--- a/arch/powerpc/include/asm/smp.h
+++ b/arch/powerpc/include/asm/smp.h
@@ -28,9 +28,14 @@
extern int boot_cpuid;
extern int spinning_secondaries;
extern u32 *cpu_to_phys_id;
+extern bool coregroup_enabled;
-extern void cpu_die(void);
extern int cpu_to_chip_id(int cpu);
+extern int *chip_id_lookup_table;
+
+DECLARE_PER_CPU(cpumask_var_t, thread_group_l1_cache_map);
+DECLARE_PER_CPU(cpumask_var_t, thread_group_l2_cache_map);
+DECLARE_PER_CPU(cpumask_var_t, thread_group_l3_cache_map);
#ifdef CONFIG_SMP
@@ -50,8 +55,14 @@ struct smp_ops_t {
int (*cpu_disable)(void);
void (*cpu_die)(unsigned int nr);
int (*cpu_bootable)(unsigned int nr);
+#ifdef CONFIG_HOTPLUG_CPU
+ void (*cpu_offline_self)(void);
+#endif
};
+extern struct task_struct *secondary_current;
+
+void start_secondary(void *unused);
extern int smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us);
extern int smp_send_safe_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us);
extern void smp_send_debugger_break(void);
@@ -79,22 +90,7 @@ int is_cpu_dead(unsigned int cpu);
/* 32-bit */
extern int smp_hw_index[];
-/*
- * This is particularly ugly: it appears we can't actually get the definition
- * of task_struct here, but we need access to the CPU this task is running on.
- * Instead of using task_struct we're using _TASK_CPU which is extracted from
- * asm-offsets.h by kbuild to get the current processor ID.
- *
- * This also needs to be safeguarded when building asm-offsets.s because at
- * that time _TASK_CPU is not defined yet. It could have been guarded by
- * _TASK_CPU itself, but we want the build to fail if _TASK_CPU is missing
- * when building something else than asm-offsets.s
- */
-#ifdef GENERATING_ASM_OFFSETS
-#define raw_smp_processor_id() (0)
-#else
-#define raw_smp_processor_id() (*(unsigned int *)((void *)current + _TASK_CPU))
-#endif
+#define raw_smp_processor_id() (current_thread_info()->cpu)
#define hard_smp_processor_id() (smp_hw_index[smp_processor_id()])
static inline int get_hard_smp_processor_id(int cpu)
@@ -135,6 +131,21 @@ static inline struct cpumask *cpu_smallcore_mask(int cpu)
extern int cpu_to_core_id(int cpu);
+extern bool has_big_cores;
+extern bool thread_group_shares_l2;
+extern bool thread_group_shares_l3;
+
+#define cpu_smt_mask cpu_smt_mask
+#ifdef CONFIG_SCHED_SMT
+static inline const struct cpumask *cpu_smt_mask(int cpu)
+{
+ if (has_big_cores)
+ return per_cpu(cpu_smallcore_map, cpu);
+
+ return per_cpu(cpu_sibling_map, cpu);
+}
+#endif /* CONFIG_SCHED_SMT */
+
/* Since OpenPIC has only 4 IPIs, we use slightly different message numbers.
*
* Make sure this matches openpic_request_IPIs in open_pic.c, or what shows up
@@ -176,8 +187,8 @@ extern void __cpu_die(unsigned int cpu);
/* for UP */
#define hard_smp_processor_id() get_hard_smp_processor_id(0)
#define smp_setup_cpu_maps()
-static inline void inhibit_secondary_onlining(void) {}
-static inline void uninhibit_secondary_onlining(void) {}
+#define thread_group_shares_l2 0
+#define thread_group_shares_l3 0
static inline const struct cpumask *cpu_sibling_mask(int cpu)
{
return cpumask_of(cpu);
@@ -188,6 +199,10 @@ static inline const struct cpumask *cpu_smallcore_mask(int cpu)
return cpumask_of(cpu);
}
+static inline const struct cpumask *cpu_l2_cache_mask(int cpu)
+{
+ return cpumask_of(cpu);
+}
#endif /* CONFIG_SMP */
#ifdef CONFIG_PPC64
@@ -219,7 +234,7 @@ static inline void set_hard_smp_processor_id(int cpu, int phys)
#if defined(CONFIG_PPC64) && (defined(CONFIG_SMP) || defined(CONFIG_KEXEC_CORE))
extern void smp_release_cpus(void);
#else
-static inline void smp_release_cpus(void) { };
+static inline void smp_release_cpus(void) { }
#endif
extern int smt_enabled_at_boot;
@@ -243,7 +258,6 @@ extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
* 64-bit but defining them all here doesn't harm
*/
extern void generic_secondary_smp_init(void);
-extern void generic_secondary_thread_init(void);
extern unsigned long __secondary_hold_spinloop;
extern unsigned long __secondary_hold_acknowledge;
extern char __secondary_hold;
diff --git a/arch/powerpc/include/asm/smu.h b/arch/powerpc/include/asm/smu.h
index 8dff086c0cab..2ac6ab903023 100644
--- a/arch/powerpc/include/asm/smu.h
+++ b/arch/powerpc/include/asm/smu.h
@@ -108,7 +108,7 @@
/*
* i2c commands
*
- * To issue an i2c command, first is to send a parameter block to the
+ * To issue an i2c command, first is to send a parameter block to
* the SMU. This is a command of type 0x9a with 9 bytes of header
* eventually followed by data for a write:
*
@@ -456,7 +456,7 @@ extern void smu_poll(void);
/*
* Init routine, presence check....
*/
-extern int smu_init(void);
+int __init smu_init(void);
extern int smu_present(void);
struct platform_device;
extern struct platform_device *smu_get_ofdev(void);
diff --git a/arch/powerpc/include/asm/sparsemem.h b/arch/powerpc/include/asm/sparsemem.h
index 3192d454a733..d072866842e4 100644
--- a/arch/powerpc/include/asm/sparsemem.h
+++ b/arch/powerpc/include/asm/sparsemem.h
@@ -13,14 +13,9 @@
#endif /* CONFIG_SPARSEMEM */
#ifdef CONFIG_MEMORY_HOTPLUG
-extern int create_section_mapping(unsigned long start, unsigned long end, int nid);
extern int remove_section_mapping(unsigned long start, unsigned long end);
-
-#ifdef CONFIG_PPC_BOOK3S_64
-extern int resize_hpt_for_hotplug(unsigned long new_mem_size);
-#else
-static inline int resize_hpt_for_hotplug(unsigned long new_mem_size) { return 0; }
-#endif
+extern int memory_add_physaddr_to_nid(u64 start);
+#define memory_add_physaddr_to_nid memory_add_physaddr_to_nid
#ifdef CONFIG_NUMA
extern int hot_add_scn_to_nid(unsigned long scn_addr);
@@ -31,6 +26,5 @@ static inline int hot_add_scn_to_nid(unsigned long scn_addr)
}
#endif /* CONFIG_NUMA */
#endif /* CONFIG_MEMORY_HOTPLUG */
-
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_SPARSEMEM_H */
diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
index 860228e917dc..bd75872a6334 100644
--- a/arch/powerpc/include/asm/spinlock.h
+++ b/arch/powerpc/include/asm/spinlock.h
@@ -3,316 +3,19 @@
#define __ASM_SPINLOCK_H
#ifdef __KERNEL__
-/*
- * Simple spin lock operations.
- *
- * Copyright (C) 2001-2004 Paul Mackerras <paulus@au.ibm.com>, IBM
- * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
- * Copyright (C) 2002 Dave Engebretsen <engebret@us.ibm.com>, IBM
- * Rework to support virtual processors
- *
- * Type of int is used as a full 64b word is not necessary.
- *
- * (the type definitions are in asm/spinlock_types.h)
- */
-#include <linux/jump_label.h>
-#include <linux/irqflags.h>
-#ifdef CONFIG_PPC64
-#include <asm/paca.h>
-#include <asm/hvcall.h>
-#endif
-#include <asm/synch.h>
-#include <asm/ppc-opcode.h>
-#include <asm/asm-405.h>
-
-#ifdef CONFIG_PPC64
-/* use 0x800000yy when locked, where yy == CPU number */
-#ifdef __BIG_ENDIAN__
-#define LOCK_TOKEN (*(u32 *)(&get_paca()->lock_token))
-#else
-#define LOCK_TOKEN (*(u32 *)(&get_paca()->paca_index))
-#endif
-#else
-#define LOCK_TOKEN 1
-#endif
-
-#ifdef CONFIG_PPC_PSERIES
-DECLARE_STATIC_KEY_FALSE(shared_processor);
-
-#define vcpu_is_preempted vcpu_is_preempted
-static inline bool vcpu_is_preempted(int cpu)
-{
- if (!static_branch_unlikely(&shared_processor))
- return false;
- return !!(be32_to_cpu(lppaca_of(cpu).yield_count) & 1);
-}
-#endif
-
-static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
-{
- return lock.slock == 0;
-}
-
-static inline int arch_spin_is_locked(arch_spinlock_t *lock)
-{
- smp_mb();
- return !arch_spin_value_unlocked(*lock);
-}
-
-/*
- * This returns the old value in the lock, so we succeeded
- * in getting the lock if the return value is 0.
- */
-static inline unsigned long __arch_spin_trylock(arch_spinlock_t *lock)
-{
- unsigned long tmp, token;
-
- token = LOCK_TOKEN;
- __asm__ __volatile__(
-"1: " PPC_LWARX(%0,0,%2,1) "\n\
- cmpwi 0,%0,0\n\
- bne- 2f\n\
- stwcx. %1,0,%2\n\
- bne- 1b\n"
- PPC_ACQUIRE_BARRIER
-"2:"
- : "=&r" (tmp)
- : "r" (token), "r" (&lock->slock)
- : "cr0", "memory");
-
- return tmp;
-}
-
-static inline int arch_spin_trylock(arch_spinlock_t *lock)
-{
- return __arch_spin_trylock(lock) == 0;
-}
-
-/*
- * On a system with shared processors (that is, where a physical
- * processor is multiplexed between several virtual processors),
- * there is no point spinning on a lock if the holder of the lock
- * isn't currently scheduled on a physical processor. Instead
- * we detect this situation and ask the hypervisor to give the
- * rest of our timeslice to the lock holder.
- *
- * So that we can tell which virtual processor is holding a lock,
- * we put 0x80000000 | smp_processor_id() in the lock when it is
- * held. Conveniently, we have a word in the paca that holds this
- * value.
- */
-
-#if defined(CONFIG_PPC_SPLPAR)
-/* We only yield to the hypervisor if we are in shared processor mode */
-void splpar_spin_yield(arch_spinlock_t *lock);
-void splpar_rw_yield(arch_rwlock_t *lock);
-#else /* SPLPAR */
-static inline void splpar_spin_yield(arch_spinlock_t *lock) {};
-static inline void splpar_rw_yield(arch_rwlock_t *lock) {};
-#endif
-
-static inline bool is_shared_processor(void)
-{
-#ifdef CONFIG_PPC_SPLPAR
- return static_branch_unlikely(&shared_processor);
+#ifdef CONFIG_PPC_QUEUED_SPINLOCKS
+#include <asm/qspinlock.h>
+#include <asm/qrwlock.h>
#else
- return false;
+#include <asm/simple_spinlock.h>
#endif
-}
-
-static inline void spin_yield(arch_spinlock_t *lock)
-{
- if (is_shared_processor())
- splpar_spin_yield(lock);
- else
- barrier();
-}
-
-static inline void rw_yield(arch_rwlock_t *lock)
-{
- if (is_shared_processor())
- splpar_rw_yield(lock);
- else
- barrier();
-}
-
-static inline void arch_spin_lock(arch_spinlock_t *lock)
-{
- while (1) {
- if (likely(__arch_spin_trylock(lock) == 0))
- break;
- do {
- HMT_low();
- if (is_shared_processor())
- splpar_spin_yield(lock);
- } while (unlikely(lock->slock != 0));
- HMT_medium();
- }
-}
-static inline
-void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
-{
- unsigned long flags_dis;
-
- while (1) {
- if (likely(__arch_spin_trylock(lock) == 0))
- break;
- local_save_flags(flags_dis);
- local_irq_restore(flags);
- do {
- HMT_low();
- if (is_shared_processor())
- splpar_spin_yield(lock);
- } while (unlikely(lock->slock != 0));
- HMT_medium();
- local_irq_restore(flags_dis);
- }
-}
-#define arch_spin_lock_flags arch_spin_lock_flags
-
-static inline void arch_spin_unlock(arch_spinlock_t *lock)
-{
- __asm__ __volatile__("# arch_spin_unlock\n\t"
- PPC_RELEASE_BARRIER: : :"memory");
- lock->slock = 0;
-}
-
-/*
- * Read-write spinlocks, allowing multiple readers
- * but only one writer.
- *
- * NOTE! it is quite common to have readers in interrupts
- * but no interrupt writers. For those circumstances we
- * can "mix" irq-safe locks - any writer needs to get a
- * irq-safe write-lock, but readers can get non-irqsafe
- * read-locks.
- */
+/* See include/linux/spinlock.h */
+#define smp_mb__after_spinlock() smp_mb()
-#ifdef CONFIG_PPC64
-#define __DO_SIGN_EXTEND "extsw %0,%0\n"
-#define WRLOCK_TOKEN LOCK_TOKEN /* it's negative */
-#else
-#define __DO_SIGN_EXTEND
-#define WRLOCK_TOKEN (-1)
+#ifndef CONFIG_PARAVIRT_SPINLOCKS
+static inline void pv_spinlocks_init(void) { }
#endif
-/*
- * This returns the old value in the lock + 1,
- * so we got a read lock if the return value is > 0.
- */
-static inline long __arch_read_trylock(arch_rwlock_t *rw)
-{
- long tmp;
-
- __asm__ __volatile__(
-"1: " PPC_LWARX(%0,0,%1,1) "\n"
- __DO_SIGN_EXTEND
-" addic. %0,%0,1\n\
- ble- 2f\n"
- PPC405_ERR77(0,%1)
-" stwcx. %0,0,%1\n\
- bne- 1b\n"
- PPC_ACQUIRE_BARRIER
-"2:" : "=&r" (tmp)
- : "r" (&rw->lock)
- : "cr0", "xer", "memory");
-
- return tmp;
-}
-
-/*
- * This returns the old value in the lock,
- * so we got the write lock if the return value is 0.
- */
-static inline long __arch_write_trylock(arch_rwlock_t *rw)
-{
- long tmp, token;
-
- token = WRLOCK_TOKEN;
- __asm__ __volatile__(
-"1: " PPC_LWARX(%0,0,%2,1) "\n\
- cmpwi 0,%0,0\n\
- bne- 2f\n"
- PPC405_ERR77(0,%1)
-" stwcx. %1,0,%2\n\
- bne- 1b\n"
- PPC_ACQUIRE_BARRIER
-"2:" : "=&r" (tmp)
- : "r" (token), "r" (&rw->lock)
- : "cr0", "memory");
-
- return tmp;
-}
-
-static inline void arch_read_lock(arch_rwlock_t *rw)
-{
- while (1) {
- if (likely(__arch_read_trylock(rw) > 0))
- break;
- do {
- HMT_low();
- if (is_shared_processor())
- splpar_rw_yield(rw);
- } while (unlikely(rw->lock < 0));
- HMT_medium();
- }
-}
-
-static inline void arch_write_lock(arch_rwlock_t *rw)
-{
- while (1) {
- if (likely(__arch_write_trylock(rw) == 0))
- break;
- do {
- HMT_low();
- if (is_shared_processor())
- splpar_rw_yield(rw);
- } while (unlikely(rw->lock != 0));
- HMT_medium();
- }
-}
-
-static inline int arch_read_trylock(arch_rwlock_t *rw)
-{
- return __arch_read_trylock(rw) > 0;
-}
-
-static inline int arch_write_trylock(arch_rwlock_t *rw)
-{
- return __arch_write_trylock(rw) == 0;
-}
-
-static inline void arch_read_unlock(arch_rwlock_t *rw)
-{
- long tmp;
-
- __asm__ __volatile__(
- "# read_unlock\n\t"
- PPC_RELEASE_BARRIER
-"1: lwarx %0,0,%1\n\
- addic %0,%0,-1\n"
- PPC405_ERR77(0,%1)
-" stwcx. %0,0,%1\n\
- bne- 1b"
- : "=&r"(tmp)
- : "r"(&rw->lock)
- : "cr0", "xer", "memory");
-}
-
-static inline void arch_write_unlock(arch_rwlock_t *rw)
-{
- __asm__ __volatile__("# write_unlock\n\t"
- PPC_RELEASE_BARRIER: : :"memory");
- rw->lock = 0;
-}
-
-#define arch_spin_relax(lock) spin_yield(lock)
-#define arch_read_relax(lock) rw_yield(lock)
-#define arch_write_relax(lock) rw_yield(lock)
-
-/* See include/linux/spinlock.h */
-#define smp_mb__after_spinlock() smp_mb()
-
#endif /* __KERNEL__ */
#endif /* __ASM_SPINLOCK_H */
diff --git a/arch/powerpc/include/asm/spinlock_types.h b/arch/powerpc/include/asm/spinlock_types.h
index 87adaf13b7e8..d5f8a74ed2e8 100644
--- a/arch/powerpc/include/asm/spinlock_types.h
+++ b/arch/powerpc/include/asm/spinlock_types.h
@@ -2,20 +2,15 @@
#ifndef _ASM_POWERPC_SPINLOCK_TYPES_H
#define _ASM_POWERPC_SPINLOCK_TYPES_H
-#ifndef __LINUX_SPINLOCK_TYPES_H
+#ifndef __LINUX_SPINLOCK_TYPES_RAW_H
# error "please don't include this file directly"
#endif
-typedef struct {
- volatile unsigned int slock;
-} arch_spinlock_t;
-
-#define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
-
-typedef struct {
- volatile signed int lock;
-} arch_rwlock_t;
-
-#define __ARCH_RW_LOCK_UNLOCKED { 0 }
+#ifdef CONFIG_PPC_QUEUED_SPINLOCKS
+#include <asm-generic/qspinlock_types.h>
+#include <asm-generic/qrwlock_types.h>
+#else
+#include <asm/simple_spinlock_types.h>
+#endif
#endif
diff --git a/arch/powerpc/include/asm/spu.h b/arch/powerpc/include/asm/spu.h
index 9666491bcb8a..96ad4510c895 100644
--- a/arch/powerpc/include/asm/spu.h
+++ b/arch/powerpc/include/asm/spu.h
@@ -201,20 +201,6 @@ int spu_64k_pages_available(void);
struct mm_struct;
extern void spu_flush_all_slbs(struct mm_struct *mm);
-/* This interface allows a profiler (e.g., OProfile) to store a ref
- * to spu context information that it creates. This caching technique
- * avoids the need to recreate this information after a save/restore operation.
- *
- * Assumes the caller has already incremented the ref count to
- * profile_info; then spu_context_destroy must call kref_put
- * on prof_info_kref.
- */
-void spu_set_profile_private_kref(struct spu_context *ctx,
- struct kref *prof_info_kref,
- void ( * prof_info_release) (struct kref *kref));
-
-void *spu_get_profile_private_kref(struct spu_context *ctx);
-
/* system callbacks from the SPU */
struct spu_syscall_block {
u64 nr_ret;
@@ -263,27 +249,8 @@ void unregister_spu_syscalls(struct spufs_calls *calls);
int spu_add_dev_attr(struct device_attribute *attr);
void spu_remove_dev_attr(struct device_attribute *attr);
-int spu_add_dev_attr_group(struct attribute_group *attrs);
-void spu_remove_dev_attr_group(struct attribute_group *attrs);
-
-/*
- * Notifier blocks:
- *
- * oprofile can get notified when a context switch is performed
- * on an spe. The notifer function that gets called is passed
- * a pointer to the SPU structure as well as the object-id that
- * identifies the binary running on that SPU now.
- *
- * For a context save, the object-id that is passed is zero,
- * identifying that the kernel will run from that moment on.
- *
- * For a context restore, the object-id is the value written
- * to object-id spufs file from user space and the notifer
- * function can assume that spu->ctx is valid.
- */
-struct notifier_block;
-int spu_switch_event_register(struct notifier_block * n);
-int spu_switch_event_unregister(struct notifier_block * n);
+int spu_add_dev_attr_group(const struct attribute_group *attrs);
+void spu_remove_dev_attr_group(const struct attribute_group *attrs);
extern void notify_spus_active(void);
extern void do_notify_spus_active(void);
diff --git a/arch/powerpc/include/asm/sstep.h b/arch/powerpc/include/asm/sstep.h
index 769f055509c9..50950deedb87 100644
--- a/arch/powerpc/include/asm/sstep.h
+++ b/arch/powerpc/include/asm/sstep.h
@@ -2,6 +2,7 @@
/*
* Copyright (C) 2004 Paul Mackerras <paulus@au.ibm.com>, IBM
*/
+#include <asm/inst.h>
struct pt_regs;
@@ -12,12 +13,11 @@ struct pt_regs;
* we don't allow putting a breakpoint on an mtmsrd instruction.
* Similarly we don't allow breakpoints on rfid instructions.
* These macros tell us if an instruction is a mtmsrd or rfid.
- * Note that IS_MTMSRD returns true for both an mtmsr (32-bit)
- * and an mtmsrd (64-bit).
+ * Note that these return true for both mtmsr/rfi (32-bit)
+ * and mtmsrd/rfid (64-bit).
*/
-#define IS_MTMSRD(instr) (((instr) & 0xfc0007be) == 0x7c000124)
-#define IS_RFID(instr) (((instr) & 0xfc0007fe) == 0x4c000024)
-#define IS_RFI(instr) (((instr) & 0xfc0007fe) == 0x4c000064)
+#define IS_MTMSRD(instr) ((ppc_inst_val(instr) & 0xfc0007be) == 0x7c000124)
+#define IS_RFID(instr) ((ppc_inst_val(instr) & 0xfc0007be) == 0x4c000024)
enum instruction_type {
COMPUTE, /* arith/logical/CR op, etc. */
@@ -39,6 +39,7 @@ enum instruction_type {
CACHEOP,
BARRIER,
SYSCALL,
+ SYSCALL_VECTORED_0,
MFMSR,
MTMSR,
RFI,
@@ -48,6 +49,8 @@ enum instruction_type {
#define INSTR_TYPE_MASK 0x1f
+#define OP_IS_LOAD(type) ((LOAD <= (type) && (type) <= LOAD_VSX) || (type) == LARX)
+#define OP_IS_STORE(type) ((STORE <= (type) && (type) <= STORE_VSX) || (type) == STCX)
#define OP_IS_LOAD_STORE(type) (LOAD <= (type) && (type) <= STCX)
/* Compute flags, ORed in with type */
@@ -89,14 +92,24 @@ enum instruction_type {
#define VSX_LDLEFT 4 /* load VSX register from left */
#define VSX_CHECK_VEC 8 /* check MSR_VEC not MSR_VSX for reg >= 32 */
+/* Prefixed flag, ORed in with type */
+#define PREFIXED 0x800
+
/* Size field in type word */
#define SIZE(n) ((n) << 12)
#define GETSIZE(w) ((w) >> 12)
#define GETTYPE(t) ((t) & INSTR_TYPE_MASK)
+#define GETLENGTH(t) (((t) & PREFIXED) ? 8 : 4)
#define MKOP(t, f, s) ((t) | (f) | SIZE(s))
+/* Prefix instruction operands */
+#define GET_PREFIX_RA(i) (((i) >> 16) & 0x1f)
+#define GET_PREFIX_R(i) ((i) & (1ul << 20))
+
+extern s32 patch__exec_instr;
+
struct instruction_op {
int type;
int reg;
@@ -132,7 +145,7 @@ union vsx_reg {
* otherwise.
*/
extern int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
- unsigned int instr);
+ ppc_inst_t instr);
/*
* Emulate an instruction that can be executed just by updating
@@ -149,7 +162,7 @@ void emulate_update_regs(struct pt_regs *reg, struct instruction_op *op);
* 0 if it could not be emulated, or -1 for an instruction that
* should not be emulated (rfid, mtmsrd clearing MSR_RI, etc.).
*/
-extern int emulate_step(struct pt_regs *regs, unsigned int instr);
+int emulate_step(struct pt_regs *regs, ppc_inst_t instr);
/*
* Emulate a load or store instruction by reading/writing the
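
The analyse_instr()/emulate_step() prototypes now take a ppc_inst_t so that ISA v3.1 prefixed instructions (an 8-byte prefix + suffix pair) can be represented; the PREFIXED flag, GETLENGTH() and the GET_PREFIX_* helpers exist for the same reason. A small illustration of how a caller is expected to use them (the helpers below are illustrative, not part of the patch):

    /* Advance NIP past whatever analyse_instr() just decoded. */
    static unsigned long next_nip(unsigned long nip, const struct instruction_op *op)
    {
            return nip + GETLENGTH(op->type);   /* 8 for prefixed, 4 otherwise */
    }

    /* Test the R bit of a 4-byte prefix word (GET_PREFIX_RA() extracts the
     * base register field in the same way). */
    static bool prefix_is_pcrel(unsigned int prefix_word)
    {
            return GET_PREFIX_R(prefix_word) != 0;   /* set for PC-relative forms */
    }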
diff --git a/arch/powerpc/include/asm/static_call.h b/arch/powerpc/include/asm/static_call.h
new file mode 100644
index 000000000000..de1018cc522b
--- /dev/null
+++ b/arch/powerpc/include/asm/static_call.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_STATIC_CALL_H
+#define _ASM_POWERPC_STATIC_CALL_H
+
+#define __PPC_SCT(name, inst) \
+ asm(".pushsection .text, \"ax\" \n" \
+ ".align 5 \n" \
+ ".globl " STATIC_CALL_TRAMP_STR(name) " \n" \
+ STATIC_CALL_TRAMP_STR(name) ": \n" \
+ inst " \n" \
+ " lis 12,2f@ha \n" \
+ " lwz 12,2f@l(12) \n" \
+ " mtctr 12 \n" \
+ " bctr \n" \
+ "1: li 3, 0 \n" \
+ " blr \n" \
+ "2: .long 0 \n" \
+ ".type " STATIC_CALL_TRAMP_STR(name) ", @function \n" \
+ ".size " STATIC_CALL_TRAMP_STR(name) ", . - " STATIC_CALL_TRAMP_STR(name) " \n" \
+ ".popsection \n")
+
+#define PPC_SCT_RET0 20 /* Offset of label 1 */
+#define PPC_SCT_DATA 28 /* Offset of label 2 */
+
+#define ARCH_DEFINE_STATIC_CALL_TRAMP(name, func) __PPC_SCT(name, "b " #func)
+#define ARCH_DEFINE_STATIC_CALL_NULL_TRAMP(name) __PPC_SCT(name, "blr")
+#define ARCH_DEFINE_STATIC_CALL_RET0_TRAMP(name) __PPC_SCT(name, "b .+20")
+
+#endif /* _ASM_POWERPC_STATIC_CALL_H */
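
The two offset constants fall straight out of the trampoline layout: every slot emitted by __PPC_SCT() is a fixed 4-byte instruction, so counting from the start gives the positions that static_call patching needs (the per-offset annotation below is derived by counting; it is not spelled out in the file):

    /*
     * offset  contents                  role
     *   0     b <func> / blr / b .+20   first instruction, patched per call type
     *   4     lis  r12, 2f@ha           |
     *   8     lwz  r12, 2f@l(r12)       | long-branch path: load the target
     *  12     mtctr r12                 | from the data slot and jump to it
     *  16     bctr                      |
     *  20  1: li   r3, 0                PPC_SCT_RET0: canned "return 0"
     *  24     blr
     *  28  2: .long 0                   PPC_SCT_DATA: patched target address
     */

The RET0 trampoline's "b .+20" therefore lands exactly on the li/blr pair at offset 20.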
diff --git a/arch/powerpc/include/asm/string.h b/arch/powerpc/include/asm/string.h
index b72692702f35..2aa0e31e6884 100644
--- a/arch/powerpc/include/asm/string.h
+++ b/arch/powerpc/include/asm/string.h
@@ -28,7 +28,7 @@ extern void * memcpy(void *,const void *,__kernel_size_t);
extern void * memmove(void *,const void *,__kernel_size_t);
extern int memcmp(const void *,const void *,__kernel_size_t);
extern void * memchr(const void *,int,__kernel_size_t);
-extern void * memcpy_flushcache(void *,const void *,__kernel_size_t);
+void memcpy_flushcache(void *dest, const void *src, size_t size);
void *__memset(void *s, int c, __kernel_size_t count);
void *__memcpy(void *to, const void *from, __kernel_size_t n);
@@ -53,9 +53,7 @@ void *__memmove(void *to, const void *from, __kernel_size_t n);
#ifndef CONFIG_KASAN
#define __HAVE_ARCH_MEMSET32
#define __HAVE_ARCH_MEMSET64
-#define __HAVE_ARCH_MEMCPY_MCSAFE
-extern int memcpy_mcsafe(void *dst, const void *src, __kernel_size_t sz);
extern void *__memset16(uint16_t *, uint16_t v, __kernel_size_t);
extern void *__memset32(uint32_t *, uint32_t v, __kernel_size_t);
extern void *__memset64(uint64_t *, uint64_t v, __kernel_size_t);
diff --git a/arch/powerpc/include/asm/svm.h b/arch/powerpc/include/asm/svm.h
index 85580b30aba4..a02bd54b8948 100644
--- a/arch/powerpc/include/asm/svm.h
+++ b/arch/powerpc/include/asm/svm.h
@@ -10,6 +10,8 @@
#ifdef CONFIG_PPC_SVM
+#include <asm/reg.h>
+
static inline bool is_secure_guest(void)
{
return mfmsr() & MSR_S;
diff --git a/arch/powerpc/include/asm/swiotlb.h b/arch/powerpc/include/asm/swiotlb.h
index 3c1a1cd16128..4203b5e0a88e 100644
--- a/arch/powerpc/include/asm/swiotlb.h
+++ b/arch/powerpc/include/asm/swiotlb.h
@@ -9,6 +9,7 @@
#include <linux/swiotlb.h>
extern unsigned int ppc_swiotlb_enable;
+extern unsigned int ppc_swiotlb_flags;
#ifdef CONFIG_SWIOTLB
void swiotlb_detect_4g(void);
diff --git a/arch/powerpc/include/asm/switch_to.h b/arch/powerpc/include/asm/switch_to.h
index 5b03d8a82409..aee25e3ebf96 100644
--- a/arch/powerpc/include/asm/switch_to.h
+++ b/arch/powerpc/include/asm/switch_to.h
@@ -5,6 +5,7 @@
#ifndef _ASM_POWERPC_SWITCH_TO_H
#define _ASM_POWERPC_SWITCH_TO_H
+#include <linux/sched.h>
#include <asm/reg.h>
struct thread_struct;
@@ -22,6 +23,16 @@ extern void switch_booke_debug_regs(struct debug_reg *new_debug);
extern int emulate_altivec(struct pt_regs *);
+#ifdef CONFIG_PPC_BOOK3S_64
+void restore_math(struct pt_regs *regs);
+#else
+static inline void restore_math(struct pt_regs *regs)
+{
+}
+#endif
+
+void restore_tm_state(struct pt_regs *regs);
+
extern void flush_all_to_thread(struct task_struct *);
extern void giveup_all(struct task_struct *);
@@ -51,6 +62,15 @@ static inline void disable_kernel_altivec(void)
#else
static inline void save_altivec(struct task_struct *t) { }
static inline void __giveup_altivec(struct task_struct *t) { }
+static inline void enable_kernel_altivec(void)
+{
+ BUILD_BUG();
+}
+
+static inline void disable_kernel_altivec(void)
+{
+ BUILD_BUG();
+}
#endif
#ifdef CONFIG_VSX
@@ -60,6 +80,16 @@ static inline void disable_kernel_vsx(void)
{
msr_check_and_clear(MSR_FP|MSR_VEC|MSR_VSX);
}
+#else
+static inline void enable_kernel_vsx(void)
+{
+ BUILD_BUG();
+}
+
+static inline void disable_kernel_vsx(void)
+{
+ BUILD_BUG();
+}
#endif
#ifdef CONFIG_SPE
@@ -91,7 +121,8 @@ static inline void clear_task_ebb(struct task_struct *t)
#endif
}
-extern int set_thread_uses_vas(void);
+void kvmppc_save_user_regs(void);
+void kvmppc_save_current_sprs(void);
extern int set_thread_tidr(struct task_struct *t);
diff --git a/arch/powerpc/include/asm/synch.h b/arch/powerpc/include/asm/synch.h
index aca70fb43147..b0b4c64870d7 100644
--- a/arch/powerpc/include/asm/synch.h
+++ b/arch/powerpc/include/asm/synch.h
@@ -3,8 +3,9 @@
#define _ASM_POWERPC_SYNCH_H
#ifdef __KERNEL__
+#include <asm/cputable.h>
#include <asm/feature-fixups.h>
-#include <asm/asm-const.h>
+#include <asm/ppc-opcode.h>
#ifndef __ASSEMBLY__
extern unsigned int __start___lwsync_fixup, __stop___lwsync_fixup;
@@ -13,18 +14,37 @@ extern void do_lwsync_fixups(unsigned long value, void *fixup_start,
static inline void eieio(void)
{
- __asm__ __volatile__ ("eieio" : : : "memory");
+ if (IS_ENABLED(CONFIG_BOOKE))
+ __asm__ __volatile__ ("mbar" : : : "memory");
+ else
+ __asm__ __volatile__ ("eieio" : : : "memory");
}
static inline void isync(void)
{
__asm__ __volatile__ ("isync" : : : "memory");
}
+
+static inline void ppc_after_tlbiel_barrier(void)
+{
+ asm volatile("ptesync": : :"memory");
+ /*
+ * POWER9, POWER10 need a cp_abort after tlbiel to ensure the copy is
+ * invalidated correctly. If this is not done, the paste can take data
+ * from the physical address that was translated at copy time.
+ *
+ * POWER9 in practice does not need this, because address spaces with
+ * accelerators mapped will use tlbie (which does invalidate the copy)
+ * to invalidate translations. It's not possible to limit POWER10 this
+ * way due to local copy-paste.
+ */
+ asm volatile(ASM_FTR_IFSET(PPC_CP_ABORT, "", %0) : : "i" (CPU_FTR_ARCH_31) : "memory");
+}
#endif /* __ASSEMBLY__ */
#if defined(__powerpc64__)
# define LWSYNC lwsync
-#elif defined(CONFIG_E500)
+#elif defined(CONFIG_PPC_E500)
# define LWSYNC \
START_LWSYNC_SECTION(96); \
sync; \
diff --git a/arch/powerpc/include/asm/syscall.h b/arch/powerpc/include/asm/syscall.h
index 38d62acfdce7..3dd36c5e334a 100644
--- a/arch/powerpc/include/asm/syscall.h
+++ b/arch/powerpc/include/asm/syscall.h
@@ -14,9 +14,16 @@
#include <linux/sched.h>
#include <linux/thread_info.h>
+#ifdef CONFIG_ARCH_HAS_SYSCALL_WRAPPER
+typedef long (*syscall_fn)(const struct pt_regs *);
+#else
+typedef long (*syscall_fn)(unsigned long, unsigned long, unsigned long,
+ unsigned long, unsigned long, unsigned long);
+#endif
+
/* ftrace syscalls requires exporting the sys_call_table */
-extern const unsigned long sys_call_table[];
-extern const unsigned long compat_sys_call_table[];
+extern const syscall_fn sys_call_table[];
+extern const syscall_fn compat_sys_call_table[];
static inline int syscall_get_nr(struct task_struct *task, struct pt_regs *regs)
{
@@ -26,7 +33,10 @@ static inline int syscall_get_nr(struct task_struct *task, struct pt_regs *regs)
* This is important for seccomp so that compat tasks can set r0 = -1
* to reject the syscall.
*/
- return TRAP(regs) == 0xc00 ? regs->gpr[0] : -1;
+ if (trap_is_syscall(regs))
+ return regs->gpr[0];
+ else
+ return -1;
}
static inline void syscall_rollback(struct task_struct *task,
@@ -38,11 +48,17 @@ static inline void syscall_rollback(struct task_struct *task,
static inline long syscall_get_error(struct task_struct *task,
struct pt_regs *regs)
{
- /*
- * If the system call failed,
- * regs->gpr[3] contains a positive ERRORCODE.
- */
- return (regs->ccr & 0x10000000UL) ? -regs->gpr[3] : 0;
+ if (trap_is_scv(regs)) {
+ unsigned long error = regs->gpr[3];
+
+ return IS_ERR_VALUE(error) ? error : 0;
+ } else {
+ /*
+ * If the system call failed,
+ * regs->gpr[3] contains a positive ERRORCODE.
+ */
+ return (regs->ccr & 0x10000000UL) ? -regs->gpr[3] : 0;
+ }
}
static inline long syscall_get_return_value(struct task_struct *task,
@@ -55,18 +71,22 @@ static inline void syscall_set_return_value(struct task_struct *task,
struct pt_regs *regs,
int error, long val)
{
- /*
- * In the general case it's not obvious that we must deal with CCR
- * here, as the syscall exit path will also do that for us. However
- * there are some places, eg. the signal code, which check ccr to
- * decide if the value in r3 is actually an error.
- */
- if (error) {
- regs->ccr |= 0x10000000L;
- regs->gpr[3] = error;
+ if (trap_is_scv(regs)) {
+ regs->gpr[3] = (long) error ?: val;
} else {
- regs->ccr &= ~0x10000000L;
- regs->gpr[3] = val;
+ /*
+ * In the general case it's not obvious that we must deal with
+ * CCR here, as the syscall exit path will also do that for us.
+ * However there are some places, eg. the signal code, which
+ * check ccr to decide if the value in r3 is actually an error.
+ */
+ if (error) {
+ regs->ccr |= 0x10000000L;
+ regs->gpr[3] = error;
+ } else {
+ regs->ccr &= ~0x10000000L;
+ regs->gpr[3] = val;
+ }
}
}
@@ -77,10 +97,9 @@ static inline void syscall_get_arguments(struct task_struct *task,
unsigned long val, mask = -1UL;
unsigned int n = 6;
-#ifdef CONFIG_COMPAT
- if (test_tsk_thread_flag(task, TIF_32BIT))
+ if (is_tsk_32bit_task(task))
mask = 0xffffffff;
-#endif
+
while (n--) {
if (n == 0)
val = regs->orig_gpr3;
@@ -91,28 +110,13 @@ static inline void syscall_get_arguments(struct task_struct *task,
}
}
-static inline void syscall_set_arguments(struct task_struct *task,
- struct pt_regs *regs,
- const unsigned long *args)
-{
- memcpy(&regs->gpr[3], args, 6 * sizeof(args[0]));
-
- /* Also copy the first argument into orig_gpr3 */
- regs->orig_gpr3 = args[0];
-}
-
static inline int syscall_get_arch(struct task_struct *task)
{
- int arch;
-
- if (IS_ENABLED(CONFIG_PPC64) && !test_tsk_thread_flag(task, TIF_32BIT))
- arch = AUDIT_ARCH_PPC64;
+ if (is_tsk_32bit_task(task))
+ return AUDIT_ARCH_PPC;
+ else if (IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN))
+ return AUDIT_ARCH_PPC64LE;
else
- arch = AUDIT_ARCH_PPC;
-
-#ifdef __LITTLE_ENDIAN__
- arch |= __AUDIT_ARCH_LE;
-#endif
- return arch;
+ return AUDIT_ARCH_PPC64;
}
#endif /* _ASM_SYSCALL_H */
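
The syscall_fn typedef documents the calling-convention split that CONFIG_ARCH_HAS_SYSCALL_WRAPPER introduces: wrapped entry points take the register frame and unpack their own arguments, unwrapped ones still take up to six longs. A sketch of how a dispatcher uses the now-typed table (the dispatch function itself is illustrative, it is not part of this header):

    static long dispatch_syscall(struct pt_regs *regs, unsigned long nr)
    {
            syscall_fn fn = sys_call_table[nr];     /* bounds check omitted in this sketch */

    #ifdef CONFIG_ARCH_HAS_SYSCALL_WRAPPER
            return fn(regs);                        /* wrapper reads gpr[3..8] itself */
    #else
            return fn(regs->gpr[3], regs->gpr[4], regs->gpr[5],
                      regs->gpr[6], regs->gpr[7], regs->gpr[8]);
    #endif
    }

The scv-aware error handling above follows from the two entry points' ABIs: plain sc signals failure with CR0.SO and a positive errno in r3, while scv returns a negative errno directly, so IS_ERR_VALUE() is the right test there.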
diff --git a/arch/powerpc/include/asm/syscall_wrapper.h b/arch/powerpc/include/asm/syscall_wrapper.h
new file mode 100644
index 000000000000..67486c67e8a2
--- /dev/null
+++ b/arch/powerpc/include/asm/syscall_wrapper.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * syscall_wrapper.h - powerpc specific wrappers to syscall definitions
+ *
+ * Based on arch/{x86,arm64}/include/asm/syscall_wrapper.h
+ */
+
+#ifndef __ASM_POWERPC_SYSCALL_WRAPPER_H
+#define __ASM_POWERPC_SYSCALL_WRAPPER_H
+
+struct pt_regs;
+
+#define SC_POWERPC_REGS_TO_ARGS(x, ...) \
+ __MAP(x,__SC_ARGS \
+ ,,regs->gpr[3],,regs->gpr[4],,regs->gpr[5] \
+ ,,regs->gpr[6],,regs->gpr[7],,regs->gpr[8])
+
+#define __SYSCALL_DEFINEx(x, name, ...) \
+ long sys##name(const struct pt_regs *regs); \
+ ALLOW_ERROR_INJECTION(sys##name, ERRNO); \
+ static long __se_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)); \
+ static inline long __do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)); \
+ long sys##name(const struct pt_regs *regs) \
+ { \
+ return __se_sys##name(SC_POWERPC_REGS_TO_ARGS(x,__VA_ARGS__)); \
+ } \
+ static long __se_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)) \
+ { \
+ long ret = __do_sys##name(__MAP(x,__SC_CAST,__VA_ARGS__)); \
+ __MAP(x,__SC_TEST,__VA_ARGS__); \
+ __PROTECT(x, ret,__MAP(x,__SC_ARGS,__VA_ARGS__)); \
+ return ret; \
+ } \
+ static inline long __do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__))
+
+#define SYSCALL_DEFINE0(sname) \
+ SYSCALL_METADATA(_##sname, 0); \
+ long sys_##sname(const struct pt_regs *__unused); \
+ ALLOW_ERROR_INJECTION(sys_##sname, ERRNO); \
+ long sys_##sname(const struct pt_regs *__unused)
+
+#define COND_SYSCALL(name) \
+ long sys_##name(const struct pt_regs *regs); \
+ long __weak sys_##name(const struct pt_regs *regs) \
+ { \
+ return sys_ni_syscall(); \
+ }
+
+#endif // __ASM_POWERPC_SYSCALL_WRAPPER_H
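
To make the macro concrete, this is approximately what SYSCALL_DEFINE2(dup2, unsigned int, oldfd, unsigned int, newfd) expands to with this header in place (hand-expanded and simplified: the metadata, error-injection and __SC_TEST/__PROTECT pieces are omitted):

    long sys_dup2(const struct pt_regs *regs);
    static long __se_sys_dup2(long oldfd, long newfd);
    static inline long __do_sys_dup2(unsigned int oldfd, unsigned int newfd);

    long sys_dup2(const struct pt_regs *regs)
    {
            /* arguments come from the saved user registers, not from C parameters */
            return __se_sys_dup2(regs->gpr[3], regs->gpr[4]);
    }

    static long __se_sys_dup2(long oldfd, long newfd)
    {
            return __do_sys_dup2((unsigned int)oldfd, (unsigned int)newfd);
    }

    static inline long __do_sys_dup2(unsigned int oldfd, unsigned int newfd)
    {
            /* the body written inside SYSCALL_DEFINE2() goes here */
    }

Only sys_dup2() has external linkage, which is exactly the prototype shape the sys_call_table entries now expect.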
diff --git a/arch/powerpc/include/asm/syscalls.h b/arch/powerpc/include/asm/syscalls.h
index 398171fdcd9f..6d51b007b59e 100644
--- a/arch/powerpc/include/asm/syscalls.h
+++ b/arch/powerpc/include/asm/syscalls.h
@@ -6,17 +6,156 @@
#include <linux/compiler.h>
#include <linux/linkage.h>
#include <linux/types.h>
+#include <linux/compat.h>
+
+#include <asm/syscall.h>
+#ifdef CONFIG_PPC64
+#include <asm/syscalls_32.h>
+#endif
+#include <asm/unistd.h>
+#include <asm/ucontext.h>
+
+#ifndef CONFIG_ARCH_HAS_SYSCALL_WRAPPER
+long sys_ni_syscall(void);
+#else
+long sys_ni_syscall(const struct pt_regs *regs);
+#endif
struct rtas_args;
-asmlinkage long sys_mmap(unsigned long addr, size_t len,
- unsigned long prot, unsigned long flags,
- unsigned long fd, off_t offset);
-asmlinkage long sys_mmap2(unsigned long addr, size_t len,
- unsigned long prot, unsigned long flags,
- unsigned long fd, unsigned long pgoff);
-asmlinkage long ppc64_personality(unsigned long personality);
-asmlinkage long sys_rtas(struct rtas_args __user *uargs);
+/*
+ * long long munging:
+ * The 32 bit ABI passes long longs in an odd even register pair.
+ * High and low parts are swapped depending on endian mode,
+ * so define a macro (similar to mips linux32) to handle that.
+ */
+#ifdef __LITTLE_ENDIAN__
+#define merge_64(low, high) (((u64)high << 32) | low)
+#else
+#define merge_64(high, low) (((u64)high << 32) | low)
+#endif
+
+/*
+ * PowerPC architecture-specific syscalls
+ */
+
+#ifndef CONFIG_ARCH_HAS_SYSCALL_WRAPPER
+
+long sys_rtas(struct rtas_args __user *uargs);
+
+#ifdef CONFIG_PPC64
+long sys_ppc64_personality(unsigned long personality);
+#ifdef CONFIG_COMPAT
+long compat_sys_ppc64_personality(unsigned long personality);
+#endif /* CONFIG_COMPAT */
+#endif /* CONFIG_PPC64 */
+
+long sys_swapcontext(struct ucontext __user *old_ctx,
+ struct ucontext __user *new_ctx, long ctx_size);
+long sys_mmap(unsigned long addr, size_t len,
+ unsigned long prot, unsigned long flags,
+ unsigned long fd, off_t offset);
+long sys_mmap2(unsigned long addr, size_t len,
+ unsigned long prot, unsigned long flags,
+ unsigned long fd, unsigned long pgoff);
+long sys_switch_endian(void);
+
+#ifdef CONFIG_PPC32
+long sys_sigreturn(void);
+long sys_debug_setcontext(struct ucontext __user *ctx, int ndbg,
+ struct sig_dbg_op __user *dbg);
+#endif
+
+long sys_rt_sigreturn(void);
+
+long sys_subpage_prot(unsigned long addr,
+ unsigned long len, u32 __user *map);
+
+#ifdef CONFIG_COMPAT
+long compat_sys_swapcontext(struct ucontext32 __user *old_ctx,
+ struct ucontext32 __user *new_ctx,
+ int ctx_size);
+long compat_sys_old_getrlimit(unsigned int resource,
+ struct compat_rlimit __user *rlim);
+long compat_sys_sigreturn(void);
+long compat_sys_rt_sigreturn(void);
+#endif /* CONFIG_COMPAT */
+
+/*
+ * Architecture specific signatures required by long long munging:
+ * The 32 bit ABI passes long longs in an odd even register pair.
+ * The following signatures provide a machine long parameter for
+ * each register that will be supplied. The implementation is
+ * responsible for combining parameter pairs.
+ */
+
+#ifdef CONFIG_PPC32
+long sys_ppc_pread64(unsigned int fd,
+ char __user *ubuf, compat_size_t count,
+ u32 reg6, u32 pos1, u32 pos2);
+long sys_ppc_pwrite64(unsigned int fd,
+ const char __user *ubuf, compat_size_t count,
+ u32 reg6, u32 pos1, u32 pos2);
+long sys_ppc_readahead(int fd, u32 r4,
+ u32 offset1, u32 offset2, u32 count);
+long sys_ppc_truncate64(const char __user *path, u32 reg4,
+ unsigned long len1, unsigned long len2);
+long sys_ppc_ftruncate64(unsigned int fd, u32 reg4,
+ unsigned long len1, unsigned long len2);
+long sys_ppc32_fadvise64(int fd, u32 unused, u32 offset1, u32 offset2,
+ size_t len, int advice);
+long sys_ppc_sync_file_range2(int fd, unsigned int flags,
+ unsigned int offset1,
+ unsigned int offset2,
+ unsigned int nbytes1,
+ unsigned int nbytes2);
+long sys_ppc_fallocate(int fd, int mode, u32 offset1, u32 offset2,
+ u32 len1, u32 len2);
+#endif
+#ifdef CONFIG_COMPAT
+long compat_sys_mmap2(unsigned long addr, size_t len,
+ unsigned long prot, unsigned long flags,
+ unsigned long fd, unsigned long pgoff);
+long compat_sys_ppc_pread64(unsigned int fd,
+ char __user *ubuf, compat_size_t count,
+ u32 reg6, u32 pos1, u32 pos2);
+long compat_sys_ppc_pwrite64(unsigned int fd,
+ const char __user *ubuf, compat_size_t count,
+ u32 reg6, u32 pos1, u32 pos2);
+long compat_sys_ppc_readahead(int fd, u32 r4,
+ u32 offset1, u32 offset2, u32 count);
+long compat_sys_ppc_truncate64(const char __user *path, u32 reg4,
+ unsigned long len1, unsigned long len2);
+long compat_sys_ppc_ftruncate64(unsigned int fd, u32 reg4,
+ unsigned long len1, unsigned long len2);
+long compat_sys_ppc32_fadvise64(int fd, u32 unused, u32 offset1, u32 offset2,
+ size_t len, int advice);
+long compat_sys_ppc_sync_file_range2(int fd, unsigned int flags,
+ unsigned int offset1,
+ unsigned int offset2,
+ unsigned int nbytes1,
+ unsigned int nbytes2);
+#endif /* CONFIG_COMPAT */
+
+#if defined(CONFIG_PPC32) || defined(CONFIG_COMPAT)
+long sys_ppc_fadvise64_64(int fd, int advice,
+ u32 offset_high, u32 offset_low,
+ u32 len_high, u32 len_low);
+#endif
+
+#else
+
+#define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, native)
+#define __SYSCALL(nr, entry) \
+ long entry(const struct pt_regs *regs);
+
+#ifdef CONFIG_PPC64
+#include <asm/syscall_table_64.h>
+#else
+#include <asm/syscall_table_32.h>
+#endif /* CONFIG_PPC64 */
+
+#endif /* CONFIG_ARCH_HAS_SYSCALL_WRAPPER */
#endif /* __KERNEL__ */
#endif /* __ASM_POWERPC_SYSCALLS_H */
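
merge_64() is what the "long long munging" prototypes above are built around: a 64-bit argument from 32-bit userspace arrives as two 32-bit registers whose high/low order depends on endianness, and the macro reassembles them. A hedged sketch of the usual handler shape (the body shown is the conventional pattern, not copied from this patch):

    long sys_ppc_truncate64(const char __user *path, u32 reg4,
                            unsigned long len1, unsigned long len2)
    {
            /* reg4 is only there so the 64-bit half-pair stays in the
             * odd/even register pair the 32-bit ABI requires */
            return ksys_truncate(path, merge_64(len1, len2));
    }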
diff --git a/arch/powerpc/kernel/ppc32.h b/arch/powerpc/include/asm/syscalls_32.h
index 2346f8c7ff2e..749255568be9 100644
--- a/arch/powerpc/kernel/ppc32.h
+++ b/arch/powerpc/include/asm/syscalls_32.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
-#ifndef _PPC64_PPC32_H
-#define _PPC64_PPC32_H
+#ifndef _ASM_POWERPC_SYSCALLS_32_H
+#define _ASM_POWERPC_SYSCALLS_32_H
#include <linux/compat.h>
#include <asm/siginfo.h>
@@ -57,4 +57,4 @@ struct ucontext32 {
struct mcontext32 uc_mcontext;
};
-#endif /* _PPC64_PPC32_H */
+#endif // _ASM_POWERPC_SYSCALLS_32_H
diff --git a/arch/powerpc/include/asm/task_size_64.h b/arch/powerpc/include/asm/task_size_64.h
index c993482237ed..5a709951c901 100644
--- a/arch/powerpc/include/asm/task_size_64.h
+++ b/arch/powerpc/include/asm/task_size_64.h
@@ -44,11 +44,7 @@
*/
#define TASK_SIZE_USER32 (0x0000000100000000UL - (1 * PAGE_SIZE))
-#define TASK_SIZE_OF(tsk) \
- (test_tsk_thread_flag(tsk, TIF_32BIT) ? TASK_SIZE_USER32 : \
- TASK_SIZE_USER64)
-
-#define TASK_SIZE TASK_SIZE_OF(current)
+#define TASK_SIZE (is_32bit_task() ? TASK_SIZE_USER32 : TASK_SIZE_USER64)
#define TASK_UNMAPPED_BASE_USER32 (PAGE_ALIGN(TASK_SIZE_USER32 / 4))
#define TASK_UNMAPPED_BASE_USER64 (PAGE_ALIGN(DEFAULT_MAP_WINDOW_USER64 / 4))
@@ -76,4 +72,12 @@
#define STACK_TOP_MAX TASK_SIZE_USER64
#define STACK_TOP (is_32bit_task() ? STACK_TOP_USER32 : STACK_TOP_USER64)
+#define arch_get_mmap_base(addr, base) \
+ (((addr) > DEFAULT_MAP_WINDOW) ? (base) + TASK_SIZE - DEFAULT_MAP_WINDOW : (base))
+
+#define arch_get_mmap_end(addr, len, flags) \
+ (((addr) > DEFAULT_MAP_WINDOW) || \
+ (((flags) & MAP_FIXED) && ((addr) + (len) > DEFAULT_MAP_WINDOW)) ? TASK_SIZE : \
+ DEFAULT_MAP_WINDOW)
+
#endif /* _ASM_POWERPC_TASK_SIZE_64_H */
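
arch_get_mmap_end()/arch_get_mmap_base() implement the "stay below the default map window unless the caller asks for more" policy in the generic mmap search. A few worked cases, assuming the usual 128 TiB DEFAULT_MAP_WINDOW (the constant's value is an assumption here, not part of the hunk):

    /* hint below the window: the search stays capped at the window */
    arch_get_mmap_end(0x10000, 4096, 0)                    /* == DEFAULT_MAP_WINDOW */

    /* hint above the window: the full address space opens up */
    arch_get_mmap_end(140UL << 40, 4096, 0)                /* == TASK_SIZE */

    /* a MAP_FIXED request crossing the boundary also opens it up */
    arch_get_mmap_end(127UL << 40, 2UL << 40, MAP_FIXED)   /* == TASK_SIZE */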
diff --git a/arch/powerpc/include/asm/tce.h b/arch/powerpc/include/asm/tce.h
index db5fc2f2262d..0c34d2756d92 100644
--- a/arch/powerpc/include/asm/tce.h
+++ b/arch/powerpc/include/asm/tce.h
@@ -19,15 +19,7 @@
#define TCE_VB 0
#define TCE_PCI 1
-/* TCE page size is 4096 bytes (1 << 12) */
-
-#define TCE_SHIFT 12
-#define TCE_PAGE_SIZE (1 << TCE_SHIFT)
-
#define TCE_ENTRY_SIZE 8 /* each TCE is 64 bits */
-
-#define TCE_RPN_MASK 0xfffffffffful /* 40-bit RPN (4K pages) */
-#define TCE_RPN_SHIFT 12
#define TCE_VALID 0x800 /* TCE valid */
#define TCE_ALLIO 0x400 /* TCE valid for all lpars */
#define TCE_PCI_WRITE 0x2 /* write from PCI allowed */
diff --git a/arch/powerpc/include/asm/termios.h b/arch/powerpc/include/asm/termios.h
deleted file mode 100644
index 205de8f8a9d3..000000000000
--- a/arch/powerpc/include/asm/termios.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Liberally adapted from alpha/termios.h. In particular, the c_cc[]
- * fields have been reordered so that termio & termios share the
- * common subset in the same order (for brain dead programs that don't
- * know or care about the differences).
- */
-#ifndef _ASM_POWERPC_TERMIOS_H
-#define _ASM_POWERPC_TERMIOS_H
-
-#include <uapi/asm/termios.h>
-
-/* ^C ^\ del ^U ^D 1 0 0 0 0 ^W ^R ^Z ^Q ^S ^V ^U */
-#define INIT_C_CC "\003\034\177\025\004\001\000\000\000\000\027\022\032\021\023\026\025"
-
-#include <asm-generic/termios-base.h>
-
-#endif /* _ASM_POWERPC_TERMIOS_H */
diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
index a2270749b282..af58f1ed3952 100644
--- a/arch/powerpc/include/asm/thread_info.h
+++ b/arch/powerpc/include/asm/thread_info.h
@@ -14,10 +14,16 @@
#ifdef __KERNEL__
-#if defined(CONFIG_VMAP_STACK) && CONFIG_THREAD_SHIFT < PAGE_SHIFT
+#ifdef CONFIG_KASAN
+#define MIN_THREAD_SHIFT (CONFIG_THREAD_SHIFT + 1)
+#else
+#define MIN_THREAD_SHIFT CONFIG_THREAD_SHIFT
+#endif
+
+#if defined(CONFIG_VMAP_STACK) && MIN_THREAD_SHIFT < PAGE_SHIFT
#define THREAD_SHIFT PAGE_SHIFT
#else
-#define THREAD_SHIFT CONFIG_THREAD_SHIFT
+#define THREAD_SHIFT MIN_THREAD_SHIFT
#endif
#define THREAD_SIZE (1 << THREAD_SHIFT)
@@ -38,7 +44,6 @@
#ifndef __ASSEMBLY__
#include <linux/cache.h>
#include <asm/processor.h>
-#include <asm/page.h>
#include <asm/accounting.h>
#define SLB_PRELOAD_NR 16U
@@ -48,8 +53,11 @@
struct thread_info {
int preempt_count; /* 0 => preemptable,
<0 => BUG */
+#ifdef CONFIG_SMP
+ unsigned int cpu;
+#endif
unsigned long local_flags; /* private flags for thread */
-#ifdef CONFIG_LIVEPATCH
+#ifdef CONFIG_LIVEPATCH_64
unsigned long *livepatch_sp;
#endif
#if defined(CONFIG_VIRT_CPU_ACCOUNTING_NATIVE) && defined(CONFIG_PPC32)
@@ -77,10 +85,8 @@ struct thread_info {
/* how to get the thread information struct from C */
extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
-#ifdef CONFIG_PPC_BOOK3S_64
void arch_setup_new_exec(void);
#define arch_setup_new_exec arch_setup_new_exec
-#endif
#endif /* __ASSEMBLY__ */
@@ -90,13 +96,12 @@ void arch_setup_new_exec(void);
#define TIF_SYSCALL_TRACE 0 /* syscall trace active */
#define TIF_SIGPENDING 1 /* signal pending */
#define TIF_NEED_RESCHED 2 /* rescheduling necessary */
-#define TIF_FSCHECK 3 /* Check FS is USER_DS on return */
+#define TIF_NOTIFY_SIGNAL 3 /* signal notifications exist */
#define TIF_SYSCALL_EMU 4 /* syscall emulation active */
#define TIF_RESTORE_TM 5 /* need to restore TM FP/VEC/VSX */
#define TIF_PATCH_PENDING 6 /* pending live patching update */
#define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
#define TIF_SINGLESTEP 8 /* singlestepping active */
-#define TIF_NOHZ 9 /* in adaptive nohz mode */
#define TIF_SECCOMP 10 /* secure computing */
#define TIF_RESTOREALL 11 /* Restore all regs (implies NOERROR) */
#define TIF_NOERROR 12 /* Force successful syscall return */
@@ -116,6 +121,7 @@ void arch_setup_new_exec(void);
#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
+#define _TIF_NOTIFY_SIGNAL (1<<TIF_NOTIFY_SIGNAL)
#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
#define _TIF_32BIT (1<<TIF_32BIT)
#define _TIF_RESTORE_TM (1<<TIF_RESTORE_TM)
@@ -129,17 +135,15 @@ void arch_setup_new_exec(void);
#define _TIF_UPROBE (1<<TIF_UPROBE)
#define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
#define _TIF_EMULATE_STACK_STORE (1<<TIF_EMULATE_STACK_STORE)
-#define _TIF_NOHZ (1<<TIF_NOHZ)
-#define _TIF_FSCHECK (1<<TIF_FSCHECK)
#define _TIF_SYSCALL_EMU (1<<TIF_SYSCALL_EMU)
#define _TIF_SYSCALL_DOTRACE (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
_TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \
- _TIF_NOHZ | _TIF_SYSCALL_EMU)
+ _TIF_SYSCALL_EMU)
#define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
_TIF_NOTIFY_RESUME | _TIF_UPROBE | \
_TIF_RESTORE_TM | _TIF_PATCH_PENDING | \
- _TIF_FSCHECK)
+ _TIF_NOTIFY_SIGNAL)
#define _TIF_PERSYSCALL_MASK (_TIF_RESTOREALL|_TIF_NOERROR)
/* Bits in local_flags */
@@ -156,16 +160,24 @@ void arch_setup_new_exec(void);
#ifndef __ASSEMBLY__
+static inline void clear_thread_local_flags(unsigned int flags)
+{
+ struct thread_info *ti = current_thread_info();
+ ti->local_flags &= ~flags;
+}
+
static inline bool test_thread_local_flags(unsigned int flags)
{
struct thread_info *ti = current_thread_info();
return (ti->local_flags & flags) != 0;
}
-#ifdef CONFIG_PPC64
+#ifdef CONFIG_COMPAT
#define is_32bit_task() (test_thread_flag(TIF_32BIT))
+#define is_tsk_32bit_task(tsk) (test_tsk_thread_flag(tsk, TIF_32BIT))
#else
-#define is_32bit_task() (1)
+#define is_32bit_task() (IS_ENABLED(CONFIG_PPC32))
+#define is_tsk_32bit_task(tsk) (IS_ENABLED(CONFIG_PPC32))
#endif
#if defined(CONFIG_PPC64)
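
The MIN_THREAD_SHIFT indirection gives KASAN kernels one extra bit of stack (the instrumentation's redzones and larger frames need the room) while keeping the VMAP_STACK page-rounding rule intact. With the usual CONFIG_THREAD_SHIFT=14 64-bit default (that default is an assumption for the sake of the example):

    /* !KASAN:                         THREAD_SHIFT = 14  -> 16 KiB stacks */
    /* KASAN:                          THREAD_SHIFT = 15  -> 32 KiB stacks */
    /* KASAN + VMAP_STACK, 64K pages:  MIN_THREAD_SHIFT(15) < PAGE_SHIFT(16)
                                       -> THREAD_SHIFT = 16 -> 64 KiB stacks */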
diff --git a/arch/powerpc/include/asm/time.h b/arch/powerpc/include/asm/time.h
index 08dbe3e6831c..9f50766c4623 100644
--- a/arch/powerpc/include/asm/time.h
+++ b/arch/powerpc/include/asm/time.h
@@ -15,16 +15,19 @@
#include <asm/processor.h>
#include <asm/cpu_has_feature.h>
+#include <asm/vdso/timebase.h>
/* time.c */
+extern u64 decrementer_max;
+
extern unsigned long tb_ticks_per_jiffy;
extern unsigned long tb_ticks_per_usec;
extern unsigned long tb_ticks_per_sec;
extern struct clock_event_device decrementer_clockevent;
+extern u64 decrementer_max;
extern void generic_calibrate_decr(void);
-extern void hdec_interrupt(struct pt_regs *regs);
/* Some sane defaults: 125 MHz timebase, 1GHz processor */
extern unsigned long ppc_proc_freq;
@@ -39,98 +42,12 @@ struct div_result {
u64 result_low;
};
-/* Accessor functions for the timebase (RTC on 601) registers. */
-/* If one day CONFIG_POWER is added just define __USE_RTC as 1 */
-#define __USE_RTC() (IS_ENABLED(CONFIG_PPC_BOOK3S_601))
-
-#ifdef CONFIG_PPC64
-
-/* For compatibility, get_tbl() is defined as get_tb() on ppc64 */
-#define get_tbl get_tb
-
-#else
-
-static inline unsigned long get_tbl(void)
-{
-#if defined(CONFIG_403GCX)
- unsigned long tbl;
- asm volatile("mfspr %0, 0x3dd" : "=r" (tbl));
- return tbl;
-#else
- return mftbl();
-#endif
-}
-
-static inline unsigned int get_tbu(void)
-{
-#ifdef CONFIG_403GCX
- unsigned int tbu;
- asm volatile("mfspr %0, 0x3dc" : "=r" (tbu));
- return tbu;
-#else
- return mftbu();
-#endif
-}
-#endif /* !CONFIG_PPC64 */
-
-static inline unsigned int get_rtcl(void)
-{
- unsigned int rtcl;
-
- asm volatile("mfrtcl %0" : "=r" (rtcl));
- return rtcl;
-}
-
-static inline u64 get_rtc(void)
-{
- unsigned int hi, lo, hi2;
-
- do {
- asm volatile("mfrtcu %0; mfrtcl %1; mfrtcu %2"
- : "=r" (hi), "=r" (lo), "=r" (hi2));
- } while (hi2 != hi);
- return (u64)hi * 1000000000 + lo;
-}
-
static inline u64 get_vtb(void)
{
-#ifdef CONFIG_PPC_BOOK3S_64
if (cpu_has_feature(CPU_FTR_ARCH_207S))
return mfspr(SPRN_VTB);
-#endif
- return 0;
-}
-
-#ifdef CONFIG_PPC64
-static inline u64 get_tb(void)
-{
- return mftb();
-}
-#else /* CONFIG_PPC64 */
-static inline u64 get_tb(void)
-{
- unsigned int tbhi, tblo, tbhi2;
-
- do {
- tbhi = get_tbu();
- tblo = get_tbl();
- tbhi2 = get_tbu();
- } while (tbhi != tbhi2);
- return ((u64)tbhi << 32) | tblo;
-}
-#endif /* !CONFIG_PPC64 */
-
-static inline u64 get_tb_or_rtc(void)
-{
- return __USE_RTC() ? get_rtc() : get_tb();
-}
-
-static inline void set_tb(unsigned int upper, unsigned int lower)
-{
- mtspr(SPRN_TBWL, 0);
- mtspr(SPRN_TBWU, upper);
- mtspr(SPRN_TBWL, lower);
+ return 0;
}
/* Accessor functions for the decrementer register.
@@ -141,11 +58,10 @@ static inline void set_tb(unsigned int upper, unsigned int lower)
*/
static inline u64 get_dec(void)
{
-#if defined(CONFIG_40x)
- return (mfspr(SPRN_PIT));
-#else
- return (mfspr(SPRN_DEC));
-#endif
+ if (IS_ENABLED(CONFIG_40x))
+ return mfspr(SPRN_PIT);
+
+ return mfspr(SPRN_DEC);
}
/*
@@ -155,23 +71,17 @@ static inline u64 get_dec(void)
*/
static inline void set_dec(u64 val)
{
-#if defined(CONFIG_40x)
- mtspr(SPRN_PIT, (u32) val);
-#else
-#ifndef CONFIG_BOOKE
- --val;
-#endif
- mtspr(SPRN_DEC, val);
-#endif /* not 40x */
+ if (IS_ENABLED(CONFIG_40x))
+ mtspr(SPRN_PIT, (u32)val);
+ else if (IS_ENABLED(CONFIG_BOOKE))
+ mtspr(SPRN_DEC, val);
+ else
+ mtspr(SPRN_DEC, val - 1);
}
static inline unsigned long tb_ticks_since(unsigned long tstamp)
{
- if (__USE_RTC()) {
- int delta = get_rtcl() - (unsigned int) tstamp;
- return delta < 0 ? delta + 1000000000 : delta;
- }
- return get_tbl() - tstamp;
+ return mftb() - tstamp;
}
#define mulhwu(x,y) \
@@ -192,8 +102,23 @@ extern void __init time_init(void);
DECLARE_PER_CPU(u64, decrementers_next_tb);
+static inline u64 timer_get_next_tb(void)
+{
+ return __this_cpu_read(decrementers_next_tb);
+}
+
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+void timer_rearm_host_dec(u64 now);
+#endif
+
/* Convert timebase ticks to nanoseconds */
unsigned long long tb_to_ns(unsigned long long tb_ticks);
+void timer_broadcast_interrupt(void);
+
+/* SPLPAR and VIRT_CPU_ACCOUNTING_NATIVE */
+void pseries_accumulate_stolen_time(void);
+u64 pseries_calculate_stolen_time(u64 stop_tb);
+
#endif /* __KERNEL__ */
#endif /* __POWERPC_TIME_H */
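
The set_dec()/get_dec() rewrite is a pure #ifdef-to-IS_ENABLED() conversion, but the preserved val - 1 on the classic (non-BookE, non-40x) path is worth spelling out: those decrementers raise the exception on the transition to a negative value, whereas BookE parts interrupt when the count reaches zero and 40x uses the PIT instead (that first point is architectural background, not stated in the hunk). A worked example:

    /*
     * set_dec(1000) on a classic Book3S core:
     *   mtspr(SPRN_DEC, 999)
     *   DEC: 999, 998, ..., 1, 0, -1   <- exception on the 0 -> -1 step,
     *   i.e. the interrupt arrives exactly 1000 timebase ticks later.
     * On BookE, set_dec(1000) writes 1000 and the interrupt fires at zero,
     * again 1000 ticks later, so callers see identical behaviour.
     */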
diff --git a/arch/powerpc/include/asm/timex.h b/arch/powerpc/include/asm/timex.h
index d2d2c4bd8435..14b4489de52c 100644
--- a/arch/powerpc/include/asm/timex.h
+++ b/arch/powerpc/include/asm/timex.h
@@ -9,7 +9,7 @@
*/
#include <asm/cputable.h>
-#include <asm/reg.h>
+#include <asm/vdso/timebase.h>
#define CLOCK_TICK_RATE 1024000 /* Underlying HZ */
@@ -17,11 +17,9 @@ typedef unsigned long cycles_t;
static inline cycles_t get_cycles(void)
{
- if (IS_ENABLED(CONFIG_BOOK3S_601))
- return 0;
-
return mftb();
}
+#define get_cycles get_cycles
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_TIMEX_H */
diff --git a/arch/powerpc/include/asm/tlb.h b/arch/powerpc/include/asm/tlb.h
index 7f3a8b902325..b3de6102a907 100644
--- a/arch/powerpc/include/asm/tlb.h
+++ b/arch/powerpc/include/asm/tlb.h
@@ -10,9 +10,8 @@
#ifdef __KERNEL__
#ifndef __powerpc64__
-#include <asm/pgtable.h>
+#include <linux/pgtable.h>
#endif
-#include <asm/pgalloc.h>
#ifndef __powerpc64__
#include <asm/page.h>
#include <asm/mmu.h>
@@ -20,8 +19,6 @@
#include <linux/pagemap.h>
-#define tlb_start_vma(tlb, vma) do { } while (0)
-#define tlb_end_vma(tlb, vma) do { } while (0)
#define __tlb_remove_tlb_entry __tlb_remove_tlb_entry
#define tlb_flush tlb_flush
@@ -41,9 +38,6 @@ extern void tlb_flush(struct mmu_gather *tlb);
/* Get the generic bits... */
#include <asm-generic/tlb.h>
-extern void flush_hash_entry(struct mm_struct *mm, pte_t *ptep,
- unsigned long address);
-
static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep,
unsigned long address)
{
@@ -67,19 +61,6 @@ static inline int mm_is_thread_local(struct mm_struct *mm)
return false;
return cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm));
}
-static inline void mm_reset_thread_local(struct mm_struct *mm)
-{
- WARN_ON(atomic_read(&mm->context.copros) > 0);
- /*
- * It's possible for mm_access to take a reference on mm_users to
- * access the remote mm from another thread, but it's not allowed
- * to set mm_cpumask, so mm_users may be > 1 here.
- */
- WARN_ON(current->mm != mm);
- atomic_set(&mm->context.active_cpus, 1);
- cpumask_clear(mm_cpumask(mm));
- cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
-}
#else /* CONFIG_PPC_BOOK3S_64 */
static inline int mm_is_thread_local(struct mm_struct *mm)
{
@@ -100,5 +81,11 @@ static inline int mm_is_thread_local(struct mm_struct *mm)
}
#endif
+#define arch_supports_page_table_move arch_supports_page_table_move
+static inline bool arch_supports_page_table_move(void)
+{
+ return radix_enabled();
+}
+
#endif /* __KERNEL__ */
#endif /* __ASM_POWERPC_TLB_H */
diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h
index 2f7e1ea5089e..8a4d4f4d9749 100644
--- a/arch/powerpc/include/asm/topology.h
+++ b/arch/powerpc/include/asm/topology.h
@@ -6,6 +6,7 @@
struct device;
struct device_node;
+struct drmem_lmb;
#ifdef CONFIG_NUMA
@@ -35,7 +36,7 @@ static inline int pcibus_to_node(struct pci_bus *bus)
cpu_all_mask : \
cpumask_of_node(pcibus_to_node(bus)))
-extern int cpu_distance(__be32 *cpu1_assoc, __be32 *cpu2_assoc);
+int cpu_relative_distance(__be32 *cpu1_assoc, __be32 *cpu2_assoc);
extern int __node_distance(int, int);
#define node_distance(a, b) __node_distance(a, b)
@@ -43,7 +44,6 @@ extern void __init dump_numa_cpu_topology(void);
extern int sysfs_add_device_to_node(struct device *dev, int nid);
extern void sysfs_remove_device_from_node(struct device *dev, int nid);
-extern int numa_update_cpu_topology(bool cpus_locked);
static inline void update_numa_cpu_lookup_table(unsigned int cpu, int node)
{
@@ -62,6 +62,15 @@ static inline int early_cpu_to_node(int cpu)
*/
return (nid < 0) ? 0 : nid;
}
+
+int of_drconf_to_nid_single(struct drmem_lmb *lmb);
+void update_numa_distance(struct device_node *node);
+
+extern void map_cpu_to_node(int cpu, int node);
+#ifdef CONFIG_HOTPLUG_CPU
+extern void unmap_cpu_from_node(unsigned long cpu);
+#endif /* CONFIG_HOTPLUG_CPU */
+
#else
static inline int early_cpu_to_node(int cpu) { return 0; }
@@ -78,52 +87,43 @@ static inline void sysfs_remove_device_from_node(struct device *dev,
{
}
-static inline int numa_update_cpu_topology(bool cpus_locked)
+static inline void update_numa_cpu_lookup_table(unsigned int cpu, int node) {}
+
+static inline int cpu_relative_distance(__be32 *cpu1_assoc, __be32 *cpu2_assoc)
{
return 0;
}
-static inline void update_numa_cpu_lookup_table(unsigned int cpu, int node) {}
-
-static inline int cpu_distance(__be32 *cpu1_assoc, __be32 *cpu2_assoc)
+static inline int of_drconf_to_nid_single(struct drmem_lmb *lmb)
{
- return 0;
+ return first_online_node;
}
+static inline void update_numa_distance(struct device_node *node) {}
+
+#ifdef CONFIG_SMP
+static inline void map_cpu_to_node(int cpu, int node) {}
+#ifdef CONFIG_HOTPLUG_CPU
+static inline void unmap_cpu_from_node(unsigned long cpu) {}
+#endif /* CONFIG_HOTPLUG_CPU */
+#endif /* CONFIG_SMP */
+
#endif /* CONFIG_NUMA */
#if defined(CONFIG_NUMA) && defined(CONFIG_PPC_SPLPAR)
-extern int start_topology_update(void);
-extern int stop_topology_update(void);
-extern int prrn_is_enabled(void);
-extern int find_and_online_cpu_nid(int cpu);
-extern int timed_topology_update(int nsecs);
-extern void __init shared_proc_topology_init(void);
+void find_and_update_cpu_nid(int cpu);
+extern int cpu_to_coregroup_id(int cpu);
#else
-static inline int start_topology_update(void)
-{
- return 0;
-}
-static inline int stop_topology_update(void)
-{
- return 0;
-}
-static inline int prrn_is_enabled(void)
-{
- return 0;
-}
-static inline int find_and_online_cpu_nid(int cpu)
-{
- return 0;
-}
-static inline int timed_topology_update(int nsecs)
+static inline void find_and_update_cpu_nid(int cpu) {}
+static inline int cpu_to_coregroup_id(int cpu)
{
+#ifdef CONFIG_SMP
+ return cpu_to_core_id(cpu);
+#else
return 0;
+#endif
}
-#ifdef CONFIG_SMP
-static inline void shared_proc_topology_init(void) {}
-#endif
#endif /* CONFIG_NUMA && CONFIG_PPC_SPLPAR */
#include <asm-generic/topology.h>
@@ -135,11 +135,11 @@ static inline void shared_proc_topology_init(void) {}
#include <asm/smp.h>
#define topology_physical_package_id(cpu) (cpu_to_chip_id(cpu))
+
#define topology_sibling_cpumask(cpu) (per_cpu(cpu_sibling_map, cpu))
#define topology_core_cpumask(cpu) (per_cpu(cpu_core_map, cpu))
#define topology_core_id(cpu) (cpu_to_core_id(cpu))
-int dlpar_cpu_readd(int cpu);
#endif
#endif
diff --git a/arch/powerpc/include/asm/types.h b/arch/powerpc/include/asm/types.h
index f1630c553efe..93157a661dcc 100644
--- a/arch/powerpc/include/asm/types.h
+++ b/arch/powerpc/include/asm/types.h
@@ -11,24 +11,10 @@
#include <uapi/asm/types.h>
-#ifdef __powerpc64__
-#if defined(_CALL_ELF) && _CALL_ELF == 2
-#define PPC64_ELF_ABI_v2
-#else
-#define PPC64_ELF_ABI_v1
-#endif
-#endif /* __powerpc64__ */
-
#ifndef __ASSEMBLY__
typedef __vector128 vector128;
-typedef struct {
- unsigned long entry;
- unsigned long toc;
- unsigned long env;
-} func_descr_t;
-
#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_TYPES_H */
diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
index 2f500debae21..3ddc65c63a49 100644
--- a/arch/powerpc/include/asm/uaccess.h
+++ b/arch/powerpc/include/asm/uaccess.h
@@ -2,69 +2,17 @@
#ifndef _ARCH_POWERPC_UACCESS_H
#define _ARCH_POWERPC_UACCESS_H
-#include <asm/ppc_asm.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/extable.h>
#include <asm/kup.h>
-/*
- * The fs value determines whether argument validity checking should be
- * performed or not. If get_fs() == USER_DS, checking is performed, with
- * get_fs() == KERNEL_DS, checking is bypassed.
- *
- * For historical reasons, these macros are grossly misnamed.
- *
- * The fs/ds values are now the highest legal address in the "segment".
- * This simplifies the checking in the routines below.
- */
-
-#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
-
-#define KERNEL_DS MAKE_MM_SEG(~0UL)
#ifdef __powerpc64__
/* We use TASK_SIZE_USER64 as TASK_SIZE is not constant */
-#define USER_DS MAKE_MM_SEG(TASK_SIZE_USER64 - 1)
-#else
-#define USER_DS MAKE_MM_SEG(TASK_SIZE - 1)
-#endif
-
-#define get_fs() (current->thread.addr_limit)
-
-static inline void set_fs(mm_segment_t fs)
-{
- current->thread.addr_limit = fs;
- /* On user-mode return check addr_limit (fs) is correct */
- set_thread_flag(TIF_FSCHECK);
-}
-
-#define segment_eq(a, b) ((a).seg == (b).seg)
-
-#define user_addr_max() (get_fs().seg)
-
-#ifdef __powerpc64__
-/*
- * This check is sufficient because there is a large enough
- * gap between user addresses and the kernel addresses
- */
-#define __access_ok(addr, size, segment) \
- (((addr) <= (segment).seg) && ((size) <= (segment).seg))
-
-#else
-
-static inline int __access_ok(unsigned long addr, unsigned long size,
- mm_segment_t seg)
-{
- if (addr > seg.seg)
- return 0;
- return (size == 0 || size - 1 <= seg.seg - addr);
-}
-
+#define TASK_SIZE_MAX TASK_SIZE_USER64
#endif
-#define access_ok(addr, size) \
- (__chk_user_ptr(addr), \
- __access_ok((__force unsigned long)(addr), (size), get_fs()))
+#include <asm-generic/access_ok.h>
/*
* These are the main single-value transfer routines. They automatically
@@ -85,127 +33,93 @@ static inline int __access_ok(unsigned long addr, unsigned long size,
* exception handling means that it's no longer "just"...)
*
*/
-#define get_user(x, ptr) \
- __get_user_check((x), (ptr), sizeof(*(ptr)))
-#define put_user(x, ptr) \
- __put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
-
-#define __get_user(x, ptr) \
- __get_user_nocheck((x), (ptr), sizeof(*(ptr)), true)
-#define __put_user(x, ptr) \
- __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), true)
-
-#define __get_user_allowed(x, ptr) \
- __get_user_nocheck((x), (ptr), sizeof(*(ptr)), false)
-#define __put_user_allowed(x, ptr) \
- __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), false)
-
-#define __get_user_inatomic(x, ptr) \
- __get_user_nosleep((x), (ptr), sizeof(*(ptr)))
-#define __put_user_inatomic(x, ptr) \
- __put_user_nosleep((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
+#define __put_user(x, ptr) \
+({ \
+ long __pu_err; \
+ __typeof__(*(ptr)) __user *__pu_addr = (ptr); \
+ __typeof__(*(ptr)) __pu_val = (__typeof__(*(ptr)))(x); \
+ __typeof__(sizeof(*(ptr))) __pu_size = sizeof(*(ptr)); \
+ \
+ might_fault(); \
+ do { \
+ __label__ __pu_failed; \
+ \
+ allow_write_to_user(__pu_addr, __pu_size); \
+ __put_user_size_goto(__pu_val, __pu_addr, __pu_size, __pu_failed); \
+ prevent_write_to_user(__pu_addr, __pu_size); \
+ __pu_err = 0; \
+ break; \
+ \
+__pu_failed: \
+ prevent_write_to_user(__pu_addr, __pu_size); \
+ __pu_err = -EFAULT; \
+ } while (0); \
+ \
+ __pu_err; \
+})
-extern long __put_user_bad(void);
+#define put_user(x, ptr) \
+({ \
+ __typeof__(*(ptr)) __user *_pu_addr = (ptr); \
+ \
+ access_ok(_pu_addr, sizeof(*(ptr))) ? \
+ __put_user(x, _pu_addr) : -EFAULT; \
+})
/*
* We don't tell gcc that we are accessing memory, but this is OK
* because we do not write to any memory gcc knows about, so there
* are no aliasing issues.
*/
-#define __put_user_asm(x, addr, err, op) \
- __asm__ __volatile__( \
- "1: " op " %1,0(%2) # put_user\n" \
- "2:\n" \
- ".section .fixup,\"ax\"\n" \
- "3: li %0,%3\n" \
- " b 2b\n" \
- ".previous\n" \
- EX_TABLE(1b, 3b) \
- : "=r" (err) \
- : "r" (x), "b" (addr), "i" (-EFAULT), "0" (err))
+#define __put_user_asm_goto(x, addr, label, op) \
+ asm_volatile_goto( \
+ "1: " op "%U1%X1 %0,%1 # put_user\n" \
+ EX_TABLE(1b, %l2) \
+ : \
+ : "r" (x), "m<>" (*addr) \
+ : \
+ : label)
#ifdef __powerpc64__
-#define __put_user_asm2(x, ptr, retval) \
- __put_user_asm(x, ptr, retval, "std")
+#define __put_user_asm2_goto(x, ptr, label) \
+ __put_user_asm_goto(x, ptr, label, "std")
#else /* __powerpc64__ */
-#define __put_user_asm2(x, addr, err) \
- __asm__ __volatile__( \
- "1: stw %1,0(%2)\n" \
- "2: stw %1+1,4(%2)\n" \
- "3:\n" \
- ".section .fixup,\"ax\"\n" \
- "4: li %0,%3\n" \
- " b 3b\n" \
- ".previous\n" \
- EX_TABLE(1b, 4b) \
- EX_TABLE(2b, 4b) \
- : "=r" (err) \
- : "r" (x), "b" (addr), "i" (-EFAULT), "0" (err))
+#define __put_user_asm2_goto(x, addr, label) \
+ asm_volatile_goto( \
+ "1: stw%X1 %0, %1\n" \
+ "2: stw%X1 %L0, %L1\n" \
+ EX_TABLE(1b, %l2) \
+ EX_TABLE(2b, %l2) \
+ : \
+ : "r" (x), "m" (*addr) \
+ : \
+ : label)
#endif /* __powerpc64__ */
-#define __put_user_size_allowed(x, ptr, size, retval) \
+#define __put_user_size_goto(x, ptr, size, label) \
do { \
- retval = 0; \
+ __typeof__(*(ptr)) __user *__pus_addr = (ptr); \
+ \
switch (size) { \
- case 1: __put_user_asm(x, ptr, retval, "stb"); break; \
- case 2: __put_user_asm(x, ptr, retval, "sth"); break; \
- case 4: __put_user_asm(x, ptr, retval, "stw"); break; \
- case 8: __put_user_asm2(x, ptr, retval); break; \
- default: __put_user_bad(); \
+ case 1: __put_user_asm_goto(x, __pus_addr, label, "stb"); break; \
+ case 2: __put_user_asm_goto(x, __pus_addr, label, "sth"); break; \
+ case 4: __put_user_asm_goto(x, __pus_addr, label, "stw"); break; \
+ case 8: __put_user_asm2_goto(x, __pus_addr, label); break; \
+ default: BUILD_BUG(); \
} \
} while (0)
-#define __put_user_size(x, ptr, size, retval) \
-do { \
- allow_write_to_user(ptr, size); \
- __put_user_size_allowed(x, ptr, size, retval); \
- prevent_write_to_user(ptr, size); \
-} while (0)
-
-#define __put_user_nocheck(x, ptr, size, do_allow) \
-({ \
- long __pu_err; \
- __typeof__(*(ptr)) __user *__pu_addr = (ptr); \
- if (!is_kernel_addr((unsigned long)__pu_addr)) \
- might_fault(); \
- __chk_user_ptr(ptr); \
- if (do_allow) \
- __put_user_size((x), __pu_addr, (size), __pu_err); \
- else \
- __put_user_size_allowed((x), __pu_addr, (size), __pu_err); \
- __pu_err; \
-})
-
-#define __put_user_check(x, ptr, size) \
-({ \
- long __pu_err = -EFAULT; \
- __typeof__(*(ptr)) __user *__pu_addr = (ptr); \
- might_fault(); \
- if (access_ok(__pu_addr, size)) \
- __put_user_size((x), __pu_addr, (size), __pu_err); \
- __pu_err; \
-})
-
-#define __put_user_nosleep(x, ptr, size) \
-({ \
- long __pu_err; \
- __typeof__(*(ptr)) __user *__pu_addr = (ptr); \
- __chk_user_ptr(ptr); \
- __put_user_size((x), __pu_addr, (size), __pu_err); \
- __pu_err; \
-})
-
-
-extern long __get_user_bad(void);
-
/*
* This does an atomic 128 byte aligned load from userspace.
* Upto caller to do enable_kernel_vmx() before calling!
*/
#define __get_user_atomic_128_aligned(kaddr, uaddr, err) \
__asm__ __volatile__( \
+ ".machine push\n" \
+ ".machine altivec\n" \
"1: lvx 0,0,%1 # get user\n" \
" stvx 0,0,%2 # put kernel\n" \
+ ".machine pop\n" \
"2:\n" \
".section .fixup,\"ax\"\n" \
"3: li %0,%3\n" \
@@ -215,9 +129,62 @@ extern long __get_user_bad(void);
: "=r" (err) \
: "b" (uaddr), "b" (kaddr), "i" (-EFAULT), "0" (err))
+#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
+
+#define __get_user_asm_goto(x, addr, label, op) \
+ asm_volatile_goto( \
+ "1: "op"%U1%X1 %0, %1 # get_user\n" \
+ EX_TABLE(1b, %l2) \
+ : "=r" (x) \
+ : "m<>" (*addr) \
+ : \
+ : label)
+
+#ifdef __powerpc64__
+#define __get_user_asm2_goto(x, addr, label) \
+ __get_user_asm_goto(x, addr, label, "ld")
+#else /* __powerpc64__ */
+#define __get_user_asm2_goto(x, addr, label) \
+ asm_volatile_goto( \
+ "1: lwz%X1 %0, %1\n" \
+ "2: lwz%X1 %L0, %L1\n" \
+ EX_TABLE(1b, %l2) \
+ EX_TABLE(2b, %l2) \
+ : "=&r" (x) \
+ : "m" (*addr) \
+ : \
+ : label)
+#endif /* __powerpc64__ */
+
+#define __get_user_size_goto(x, ptr, size, label) \
+do { \
+ BUILD_BUG_ON(size > sizeof(x)); \
+ switch (size) { \
+ case 1: __get_user_asm_goto(x, (u8 __user *)ptr, label, "lbz"); break; \
+ case 2: __get_user_asm_goto(x, (u16 __user *)ptr, label, "lhz"); break; \
+ case 4: __get_user_asm_goto(x, (u32 __user *)ptr, label, "lwz"); break; \
+ case 8: __get_user_asm2_goto(x, (u64 __user *)ptr, label); break; \
+ default: x = 0; BUILD_BUG(); \
+ } \
+} while (0)
+
+#define __get_user_size_allowed(x, ptr, size, retval) \
+do { \
+ __label__ __gus_failed; \
+ \
+ __get_user_size_goto(x, ptr, size, __gus_failed); \
+ retval = 0; \
+ break; \
+__gus_failed: \
+ x = 0; \
+ retval = -EFAULT; \
+} while (0)
+
+#else /* CONFIG_CC_HAS_ASM_GOTO_OUTPUT */
+
#define __get_user_asm(x, addr, err, op) \
__asm__ __volatile__( \
- "1: "op" %1,0(%2) # get_user\n" \
+ "1: "op"%U2%X2 %1, %2 # get_user\n" \
"2:\n" \
".section .fixup,\"ax\"\n" \
"3: li %0,%3\n" \
@@ -226,7 +193,7 @@ extern long __get_user_bad(void);
".previous\n" \
EX_TABLE(1b, 3b) \
: "=r" (err), "=r" (x) \
- : "b" (addr), "i" (-EFAULT), "0" (err))
+ : "m<>" (*addr), "i" (-EFAULT), "0" (err))
#ifdef __powerpc64__
#define __get_user_asm2(x, addr, err) \
@@ -234,8 +201,8 @@ extern long __get_user_bad(void);
#else /* __powerpc64__ */
#define __get_user_asm2(x, addr, err) \
__asm__ __volatile__( \
- "1: lwz %1,0(%2)\n" \
- "2: lwz %1+1,4(%2)\n" \
+ "1: lwz%X2 %1, %2\n" \
+ "2: lwz%X2 %L1, %L2\n" \
"3:\n" \
".section .fixup,\"ax\"\n" \
"4: li %0,%3\n" \
@@ -246,31 +213,33 @@ extern long __get_user_bad(void);
EX_TABLE(1b, 4b) \
EX_TABLE(2b, 4b) \
: "=r" (err), "=&r" (x) \
- : "b" (addr), "i" (-EFAULT), "0" (err))
+ : "m" (*addr), "i" (-EFAULT), "0" (err))
#endif /* __powerpc64__ */
#define __get_user_size_allowed(x, ptr, size, retval) \
do { \
retval = 0; \
- __chk_user_ptr(ptr); \
- if (size > sizeof(x)) \
- (x) = __get_user_bad(); \
+ BUILD_BUG_ON(size > sizeof(x)); \
switch (size) { \
- case 1: __get_user_asm(x, ptr, retval, "lbz"); break; \
- case 2: __get_user_asm(x, ptr, retval, "lhz"); break; \
- case 4: __get_user_asm(x, ptr, retval, "lwz"); break; \
- case 8: __get_user_asm2(x, ptr, retval); break; \
- default: (x) = __get_user_bad(); \
+ case 1: __get_user_asm(x, (u8 __user *)ptr, retval, "lbz"); break; \
+ case 2: __get_user_asm(x, (u16 __user *)ptr, retval, "lhz"); break; \
+ case 4: __get_user_asm(x, (u32 __user *)ptr, retval, "lwz"); break; \
+ case 8: __get_user_asm2(x, (u64 __user *)ptr, retval); break; \
+ default: x = 0; BUILD_BUG(); \
} \
} while (0)
-#define __get_user_size(x, ptr, size, retval) \
+#define __get_user_size_goto(x, ptr, size, label) \
do { \
- allow_read_from_user(ptr, size); \
- __get_user_size_allowed(x, ptr, size, retval); \
- prevent_read_from_user(ptr, size); \
+ long __gus_retval; \
+ \
+ __get_user_size_allowed(x, ptr, size, __gus_retval); \
+ if (__gus_retval) \
+ goto label; \
} while (0)
+#endif /* CONFIG_CC_HAS_ASM_GOTO_OUTPUT */
+
/*
* This is a type: either unsigned long, if the argument fits into
* that type, or otherwise unsigned long long.
@@ -278,50 +247,31 @@ do { \
#define __long_type(x) \
__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
-#define __get_user_nocheck(x, ptr, size, do_allow) \
+#define __get_user(x, ptr) \
({ \
long __gu_err; \
__long_type(*(ptr)) __gu_val; \
__typeof__(*(ptr)) __user *__gu_addr = (ptr); \
- __chk_user_ptr(ptr); \
- if (!is_kernel_addr((unsigned long)__gu_addr)) \
- might_fault(); \
- barrier_nospec(); \
- if (do_allow) \
- __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
- else \
- __get_user_size_allowed(__gu_val, __gu_addr, (size), __gu_err); \
+ __typeof__(sizeof(*(ptr))) __gu_size = sizeof(*(ptr)); \
+ \
+ might_fault(); \
+ allow_read_from_user(__gu_addr, __gu_size); \
+ __get_user_size_allowed(__gu_val, __gu_addr, __gu_size, __gu_err); \
+ prevent_read_from_user(__gu_addr, __gu_size); \
(x) = (__typeof__(*(ptr)))__gu_val; \
+ \
__gu_err; \
})
-#define __get_user_check(x, ptr, size) \
+#define get_user(x, ptr) \
({ \
- long __gu_err = -EFAULT; \
- __long_type(*(ptr)) __gu_val = 0; \
- __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
- might_fault(); \
- if (access_ok(__gu_addr, (size))) { \
- barrier_nospec(); \
- __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
- } \
- (x) = (__force __typeof__(*(ptr)))__gu_val; \
- __gu_err; \
-})
-
-#define __get_user_nosleep(x, ptr, size) \
-({ \
- long __gu_err; \
- __long_type(*(ptr)) __gu_val; \
- __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
- __chk_user_ptr(ptr); \
- barrier_nospec(); \
- __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
- (x) = (__force __typeof__(*(ptr)))__gu_val; \
- __gu_err; \
+ __typeof__(*(ptr)) __user *_gu_addr = (ptr); \
+ \
+ access_ok(_gu_addr, sizeof(*(ptr))) ? \
+ __get_user(x, _gu_addr) : \
+ ((x) = (__force __typeof__(*(ptr)))0, -EFAULT); \
})
-
/* more complex routines */
extern unsigned long __copy_tofrom_user(void __user *to,
@@ -333,7 +283,6 @@ raw_copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
unsigned long ret;
- barrier_nospec();
allow_read_write_user(to, from, n);
ret = __copy_tofrom_user(to, from, n);
prevent_read_write_user(to, from, n);
@@ -345,32 +294,7 @@ static inline unsigned long raw_copy_from_user(void *to,
const void __user *from, unsigned long n)
{
unsigned long ret;
- if (__builtin_constant_p(n) && (n <= 8)) {
- ret = 1;
-
- switch (n) {
- case 1:
- barrier_nospec();
- __get_user_size(*(u8 *)to, from, 1, ret);
- break;
- case 2:
- barrier_nospec();
- __get_user_size(*(u16 *)to, from, 2, ret);
- break;
- case 4:
- barrier_nospec();
- __get_user_size(*(u32 *)to, from, 4, ret);
- break;
- case 8:
- barrier_nospec();
- __get_user_size(*(u64 *)to, from, 8, ret);
- break;
- }
- if (ret == 0)
- return 0;
- }
- barrier_nospec();
allow_read_from_user(from, n);
ret = __copy_tofrom_user((__force void __user *)to, from, n);
prevent_read_from_user(from, n);
@@ -378,79 +302,63 @@ static inline unsigned long raw_copy_from_user(void *to,
}
static inline unsigned long
-raw_copy_to_user_allowed(void __user *to, const void *from, unsigned long n)
-{
- if (__builtin_constant_p(n) && (n <= 8)) {
- unsigned long ret = 1;
-
- switch (n) {
- case 1:
- __put_user_size_allowed(*(u8 *)from, (u8 __user *)to, 1, ret);
- break;
- case 2:
- __put_user_size_allowed(*(u16 *)from, (u16 __user *)to, 2, ret);
- break;
- case 4:
- __put_user_size_allowed(*(u32 *)from, (u32 __user *)to, 4, ret);
- break;
- case 8:
- __put_user_size_allowed(*(u64 *)from, (u64 __user *)to, 8, ret);
- break;
- }
- if (ret == 0)
- return 0;
- }
-
- return __copy_tofrom_user(to, (__force const void __user *)from, n);
-}
-
-static inline unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
unsigned long ret;
allow_write_to_user(to, n);
- ret = raw_copy_to_user_allowed(to, from, n);
+ ret = __copy_tofrom_user(to, (__force const void __user *)from, n);
prevent_write_to_user(to, n);
return ret;
}
-static __always_inline unsigned long __must_check
-copy_to_user_mcsafe(void __user *to, const void *from, unsigned long n)
-{
- if (likely(check_copy_size(from, n, true))) {
- if (access_ok(to, n)) {
- allow_write_to_user(to, n);
- n = memcpy_mcsafe((void *)to, from, n);
- prevent_write_to_user(to, n);
- }
- }
-
- return n;
-}
-
unsigned long __arch_clear_user(void __user *addr, unsigned long size);
-static inline unsigned long clear_user(void __user *addr, unsigned long size)
+static inline unsigned long __clear_user(void __user *addr, unsigned long size)
{
- unsigned long ret = size;
+ unsigned long ret;
+
might_fault();
- if (likely(access_ok(addr, size))) {
- allow_write_to_user(addr, size);
- ret = __arch_clear_user(addr, size);
- prevent_write_to_user(addr, size);
- }
+ allow_write_to_user(addr, size);
+ ret = __arch_clear_user(addr, size);
+ prevent_write_to_user(addr, size);
return ret;
}
-static inline unsigned long __clear_user(void __user *addr, unsigned long size)
+static inline unsigned long clear_user(void __user *addr, unsigned long size)
{
- return clear_user(addr, size);
+ return likely(access_ok(addr, size)) ? __clear_user(addr, size) : size;
}
extern long strncpy_from_user(char *dst, const char __user *src, long count);
extern __must_check long strnlen_user(const char __user *str, long n);
+#ifdef CONFIG_ARCH_HAS_COPY_MC
+unsigned long __must_check
+copy_mc_generic(void *to, const void *from, unsigned long size);
+
+static inline unsigned long __must_check
+copy_mc_to_kernel(void *to, const void *from, unsigned long size)
+{
+ return copy_mc_generic(to, from, size);
+}
+#define copy_mc_to_kernel copy_mc_to_kernel
+
+static inline unsigned long __must_check
+copy_mc_to_user(void __user *to, const void *from, unsigned long n)
+{
+ if (check_copy_size(from, n, true)) {
+ if (access_ok(to, n)) {
+ allow_write_to_user(to, n);
+ n = copy_mc_generic((void *)to, from, n);
+ prevent_write_to_user(to, n);
+ }
+ }
+
+ return n;
+}
+#endif
+
extern long __copy_from_user_flushcache(void *dst, const void __user *src,
unsigned size);
extern void memcpy_page_flushcache(char *to, struct page *page, size_t offset,
@@ -460,6 +368,9 @@ static __must_check inline bool user_access_begin(const void __user *ptr, size_t
{
if (unlikely(!access_ok(ptr, len)))
return false;
+
+ might_fault();
+
allow_read_write_user((void __user *)ptr, ptr, len);
return true;
}
@@ -468,10 +379,93 @@ static __must_check inline bool user_access_begin(const void __user *ptr, size_t
#define user_access_save prevent_user_access_return
#define user_access_restore restore_user_access
-#define unsafe_op_wrap(op, err) do { if (unlikely(op)) goto err; } while (0)
-#define unsafe_get_user(x, p, e) unsafe_op_wrap(__get_user_allowed(x, p), e)
-#define unsafe_put_user(x, p, e) unsafe_op_wrap(__put_user_allowed(x, p), e)
+static __must_check inline bool
+user_read_access_begin(const void __user *ptr, size_t len)
+{
+ if (unlikely(!access_ok(ptr, len)))
+ return false;
+
+ might_fault();
+
+ allow_read_from_user(ptr, len);
+ return true;
+}
+#define user_read_access_begin user_read_access_begin
+#define user_read_access_end prevent_current_read_from_user
+
+static __must_check inline bool
+user_write_access_begin(const void __user *ptr, size_t len)
+{
+ if (unlikely(!access_ok(ptr, len)))
+ return false;
+
+ might_fault();
+
+ allow_write_to_user((void __user *)ptr, len);
+ return true;
+}
+#define user_write_access_begin user_write_access_begin
+#define user_write_access_end prevent_current_write_to_user
+
+#define unsafe_get_user(x, p, e) do { \
+ __long_type(*(p)) __gu_val; \
+ __typeof__(*(p)) __user *__gu_addr = (p); \
+ \
+ __get_user_size_goto(__gu_val, __gu_addr, sizeof(*(p)), e); \
+ (x) = (__typeof__(*(p)))__gu_val; \
+} while (0)
+
+#define unsafe_put_user(x, p, e) \
+ __put_user_size_goto((__typeof__(*(p)))(x), (p), sizeof(*(p)), e)
+
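
The usual calling pattern for the explicit access windows and the goto-based unsafe accessors above, shown as a hedged sketch; the function name, pointer and error label are illustrative.

static int read_user_word(const unsigned long __user *uptr, unsigned long *val)
{
	if (!user_read_access_begin(uptr, sizeof(*uptr)))
		return -EFAULT;

	unsafe_get_user(*val, uptr, Efault);	/* branches to Efault on a fault */
	user_read_access_end();
	return 0;

Efault:
	user_read_access_end();
	return -EFAULT;
}
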
+#define unsafe_copy_from_user(d, s, l, e) \
+do { \
+ u8 *_dst = (u8 *)(d); \
+ const u8 __user *_src = (const u8 __user *)(s); \
+ size_t _len = (l); \
+ int _i; \
+ \
+ for (_i = 0; _i < (_len & ~(sizeof(u64) - 1)); _i += sizeof(u64)) \
+ unsafe_get_user(*(u64 *)(_dst + _i), (u64 __user *)(_src + _i), e); \
+ if (_len & 4) { \
+ unsafe_get_user(*(u32 *)(_dst + _i), (u32 __user *)(_src + _i), e); \
+ _i += 4; \
+ } \
+ if (_len & 2) { \
+ unsafe_get_user(*(u16 *)(_dst + _i), (u16 __user *)(_src + _i), e); \
+ _i += 2; \
+ } \
+ if (_len & 1) \
+ unsafe_get_user(*(u8 *)(_dst + _i), (u8 __user *)(_src + _i), e); \
+} while (0)
+
#define unsafe_copy_to_user(d, s, l, e) \
- unsafe_op_wrap(raw_copy_to_user_allowed(d, s, l), e)
+do { \
+ u8 __user *_dst = (u8 __user *)(d); \
+ const u8 *_src = (const u8 *)(s); \
+ size_t _len = (l); \
+ int _i; \
+ \
+ for (_i = 0; _i < (_len & ~(sizeof(u64) - 1)); _i += sizeof(u64)) \
+ unsafe_put_user(*(u64 *)(_src + _i), (u64 __user *)(_dst + _i), e); \
+ if (_len & 4) { \
+ unsafe_put_user(*(u32*)(_src + _i), (u32 __user *)(_dst + _i), e); \
+ _i += 4; \
+ } \
+ if (_len & 2) { \
+ unsafe_put_user(*(u16*)(_src + _i), (u16 __user *)(_dst + _i), e); \
+ _i += 2; \
+ } \
+ if (_len & 1) \
+ unsafe_put_user(*(u8*)(_src + _i), (u8 __user *)(_dst + _i), e); \
+} while (0)
+
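
A companion sketch for the chunked copy-out helper, run inside an explicit write-access window; the record type and names are hypothetical.

struct my_record {	/* hypothetical example type */
	u32 id;
	u64 stamp;
};

static int push_record(struct my_record __user *uptr, const struct my_record *rec)
{
	if (!user_write_access_begin(uptr, sizeof(*rec)))
		return -EFAULT;

	unsafe_copy_to_user(uptr, rec, sizeof(*rec), Efault);
	user_write_access_end();
	return 0;

Efault:
	user_write_access_end();
	return -EFAULT;
}
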
+#define __get_kernel_nofault(dst, src, type, err_label) \
+ __get_user_size_goto(*((type *)(dst)), \
+ (__force type __user *)(src), sizeof(type), err_label)
+
+#define __put_kernel_nofault(dst, src, type, err_label) \
+ __put_user_size_goto(*((type *)(src)), \
+ (__force type __user *)(dst), sizeof(type), err_label)
#endif /* _ARCH_POWERPC_UACCESS_H */
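
The two *_kernel_nofault hooks above back the generic copy_from_kernel_nofault() / copy_to_kernel_nofault() helpers. A minimal sketch of a consumer, with an illustrative name:

static unsigned long peek_kernel_word(const unsigned long *addr)
{
	unsigned long val = 0;

	/* Returns non-zero instead of oopsing if 'addr' faults */
	if (copy_from_kernel_nofault(&val, addr, sizeof(val)))
		return 0;

	return val;
}
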
diff --git a/arch/powerpc/include/asm/udbg.h b/arch/powerpc/include/asm/udbg.h
index 0ea9e70ed78b..b1f094728b35 100644
--- a/arch/powerpc/include/asm/udbg.h
+++ b/arch/powerpc/include/asm/udbg.h
@@ -15,44 +15,44 @@ extern void (*udbg_flush)(void);
extern int (*udbg_getc)(void);
extern int (*udbg_getc_poll)(void);
-extern void udbg_puts(const char *s);
-extern int udbg_write(const char *s, int n);
+void udbg_puts(const char *s);
+int udbg_write(const char *s, int n);
-extern void register_early_udbg_console(void);
-extern void udbg_printf(const char *fmt, ...)
+void register_early_udbg_console(void);
+void udbg_printf(const char *fmt, ...)
__attribute__ ((format (printf, 1, 2)));
-extern void udbg_progress(char *s, unsigned short hex);
+void udbg_progress(char *s, unsigned short hex);
-extern void udbg_uart_init_mmio(void __iomem *addr, unsigned int stride);
-extern void udbg_uart_init_pio(unsigned long port, unsigned int stride);
+void __init udbg_uart_init_mmio(void __iomem *addr, unsigned int stride);
+void __init udbg_uart_init_pio(unsigned long port, unsigned int stride);
-extern void udbg_uart_setup(unsigned int speed, unsigned int clock);
-extern unsigned int udbg_probe_uart_speed(unsigned int clock);
+void __init udbg_uart_setup(unsigned int speed, unsigned int clock);
+unsigned int __init udbg_probe_uart_speed(unsigned int clock);
struct device_node;
-extern void udbg_scc_init(int force_scc);
-extern int udbg_adb_init(int force_btext);
-extern void udbg_adb_init_early(void);
-
-extern void __init udbg_early_init(void);
-extern void __init udbg_init_debug_lpar(void);
-extern void __init udbg_init_debug_lpar_hvsi(void);
-extern void __init udbg_init_pmac_realmode(void);
-extern void __init udbg_init_maple_realmode(void);
-extern void __init udbg_init_pas_realmode(void);
-extern void __init udbg_init_rtas_panel(void);
-extern void __init udbg_init_rtas_console(void);
-extern void __init udbg_init_debug_beat(void);
-extern void __init udbg_init_btext(void);
-extern void __init udbg_init_44x_as1(void);
-extern void __init udbg_init_40x_realmode(void);
-extern void __init udbg_init_cpm(void);
-extern void __init udbg_init_usbgecko(void);
-extern void __init udbg_init_memcons(void);
-extern void __init udbg_init_ehv_bc(void);
-extern void __init udbg_init_ps3gelic(void);
-extern void __init udbg_init_debug_opal_raw(void);
-extern void __init udbg_init_debug_opal_hvsi(void);
+void __init udbg_scc_init(int force_scc);
+int udbg_adb_init(int force_btext);
+void udbg_adb_init_early(void);
+
+void __init udbg_early_init(void);
+void __init udbg_init_debug_lpar(void);
+void __init udbg_init_debug_lpar_hvsi(void);
+void __init udbg_init_pmac_realmode(void);
+void __init udbg_init_maple_realmode(void);
+void __init udbg_init_pas_realmode(void);
+void __init udbg_init_rtas_panel(void);
+void __init udbg_init_rtas_console(void);
+void __init udbg_init_btext(void);
+void __init udbg_init_44x_as1(void);
+void __init udbg_init_40x_realmode(void);
+void __init udbg_init_cpm(void);
+void __init udbg_init_usbgecko(void);
+void __init udbg_init_memcons(void);
+void __init udbg_init_ehv_bc(void);
+void __init udbg_init_ps3gelic(void);
+void __init udbg_init_debug_opal_raw(void);
+void __init udbg_init_debug_opal_hvsi(void);
+void __init udbg_init_debug_16550(void);
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_UDBG_H */
diff --git a/arch/powerpc/include/asm/unaligned.h b/arch/powerpc/include/asm/unaligned.h
deleted file mode 100644
index ce69c5eff95e..000000000000
--- a/arch/powerpc/include/asm/unaligned.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_POWERPC_UNALIGNED_H
-#define _ASM_POWERPC_UNALIGNED_H
-
-#ifdef __KERNEL__
-
-/*
- * The PowerPC can do unaligned accesses itself based on its endian mode.
- */
-#include <linux/unaligned/access_ok.h>
-#include <linux/unaligned/generic.h>
-
-#ifdef __LITTLE_ENDIAN__
-#define get_unaligned __get_unaligned_le
-#define put_unaligned __put_unaligned_le
-#else
-#define get_unaligned __get_unaligned_be
-#define put_unaligned __put_unaligned_be
-#endif
-
-#endif /* __KERNEL__ */
-#endif /* _ASM_POWERPC_UNALIGNED_H */
diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h
index b0720c7c3fcf..659a996c75aa 100644
--- a/arch/powerpc/include/asm/unistd.h
+++ b/arch/powerpc/include/asm/unistd.h
@@ -9,8 +9,6 @@
#define NR_syscalls __NR_syscalls
-#define __NR__exit __NR_exit
-
#ifndef __ASSEMBLY__
#include <linux/types.h>
@@ -31,6 +29,7 @@
#define __ARCH_WANT_SYS_SOCKETCALL
#define __ARCH_WANT_SYS_FADVISE64
#define __ARCH_WANT_SYS_GETPGRP
+#define __ARCH_WANT_SYS_LLSEEK
#define __ARCH_WANT_SYS_NICE
#define __ARCH_WANT_SYS_OLD_GETRLIMIT
#define __ARCH_WANT_SYS_OLD_UNAME
@@ -39,11 +38,14 @@
#define __ARCH_WANT_SYS_SIGPROCMASK
#ifdef CONFIG_PPC32
#define __ARCH_WANT_OLD_STAT
+#define __ARCH_WANT_SYS_OLD_SELECT
#endif
#ifdef CONFIG_PPC64
#define __ARCH_WANT_SYS_TIME
#define __ARCH_WANT_SYS_UTIME
#define __ARCH_WANT_SYS_NEWFSTATAT
+#define __ARCH_WANT_COMPAT_STAT
+#define __ARCH_WANT_COMPAT_FALLOCATE
#define __ARCH_WANT_COMPAT_SYS_SENDFILE
#endif
#define __ARCH_WANT_SYS_FORK
diff --git a/arch/powerpc/include/asm/uprobes.h b/arch/powerpc/include/asm/uprobes.h
index 2bbdf27d09b5..4fea116d3d37 100644
--- a/arch/powerpc/include/asm/uprobes.h
+++ b/arch/powerpc/include/asm/uprobes.h
@@ -12,9 +12,9 @@
#include <linux/notifier.h>
#include <asm/probes.h>
-typedef ppc_opcode_t uprobe_opcode_t;
+typedef u32 uprobe_opcode_t;
-#define MAX_UINSN_BYTES 4
+#define MAX_UINSN_BYTES 8
#define UPROBE_XOL_SLOT_BYTES (MAX_UINSN_BYTES)
/* The following alias is needed for reference from arch-agnostic code */
@@ -23,8 +23,8 @@ typedef ppc_opcode_t uprobe_opcode_t;
struct arch_uprobe {
union {
- u32 insn;
- u32 ixol;
+ u32 insn[2];
+ u32 ixol[2];
};
};
diff --git a/arch/powerpc/include/asm/user.h b/arch/powerpc/include/asm/user.h
index 99443b8594e7..7fae7e597ba4 100644
--- a/arch/powerpc/include/asm/user.h
+++ b/arch/powerpc/include/asm/user.h
@@ -44,9 +44,4 @@ struct user {
char u_comm[32]; /* user command name */
};
-#define NBPG PAGE_SIZE
-#define UPAGES 1
-#define HOST_TEXT_START_ADDR (u.start_code)
-#define HOST_DATA_START_ADDR (u.start_data)
-#define HOST_STACK_END_ADDR (u.start_stack + u.u_ssize * NBPG)
#endif /* _ASM_POWERPC_USER_H */
diff --git a/arch/powerpc/include/asm/vas.h b/arch/powerpc/include/asm/vas.h
index f93e6b0f5c84..c36f71e01c0f 100644
--- a/arch/powerpc/include/asm/vas.h
+++ b/arch/powerpc/include/asm/vas.h
@@ -5,8 +5,10 @@
#ifndef _ASM_POWERPC_VAS_H
#define _ASM_POWERPC_VAS_H
-
-struct vas_window;
+#include <linux/sched/mm.h>
+#include <linux/mmu_context.h>
+#include <asm/icswx.h>
+#include <uapi/asm/vas-api.h>
/*
* Min and max FIFO sizes are based on Version 1.05 Section 3.1.4.25
@@ -28,6 +30,16 @@ struct vas_window;
#define VAS_THRESH_FIFO_GT_EIGHTH_FULL 3
/*
+ * VAS window Linux status bits
+ */
+#define VAS_WIN_ACTIVE 0x0 /* Used in platform independent */
+ /* vas mmap() */
+/* Window is closed in the hypervisor due to lost credit */
+#define VAS_WIN_NO_CRED_CLOSE 0x00000001
+/* Window is closed due to migration */
+#define VAS_WIN_MIGRATE_CLOSE 0x00000002
+
+/*
* Get/Set bit fields
*/
#define GET_FIELD(m, v) (((v) & (m)) >> MASK_LSH(m))
@@ -49,10 +61,72 @@ enum vas_cop_type {
};
/*
+ * User space VAS windows are opened by tasks and take references
+ * to pid and mm until windows are closed.
+ * Stores pid, mm, and tgid for each window.
+ */
+struct vas_user_win_ref {
+ struct pid *pid; /* PID of owner */
+ struct pid *tgid; /* Thread group ID of owner */
+ struct mm_struct *mm; /* Linux process mm_struct */
+ struct mutex mmap_mutex; /* protects paste address mmap() */
+ /* with DLPAR close/open windows */
+ struct vm_area_struct *vma; /* Save VMA and used in DLPAR ops */
+};
+
+/*
+ * Common VAS window struct on PowerNV and PowerVM
+ */
+struct vas_window {
+ u32 winid;
+ u32 wcreds_max; /* Window credits */
+ u32 status; /* Window status used in OS */
+ enum vas_cop_type cop;
+ struct vas_user_win_ref task_ref;
+ char *dbgname;
+ struct dentry *dbgdir;
+};
+
+/*
+ * User space window operations used for PowerNV and PowerVM
+ */
+struct vas_user_win_ops {
+ struct vas_window * (*open_win)(int vas_id, u64 flags,
+ enum vas_cop_type);
+ u64 (*paste_addr)(struct vas_window *);
+ int (*close_win)(struct vas_window *);
+};
+
+static inline void put_vas_user_win_ref(struct vas_user_win_ref *ref)
+{
+ /* Drop references to pid, tgid, and mm */
+ put_pid(ref->pid);
+ put_pid(ref->tgid);
+ if (ref->mm)
+ mmdrop(ref->mm);
+}
+
+static inline void vas_user_win_add_mm_context(struct vas_user_win_ref *ref)
+{
+ mm_context_add_vas_window(ref->mm);
+ /*
+ * Even a process that has no foreign real address mapping can
+ * use an unpaired COPY instruction (to no real effect). Issue
+ * CP_ABORT to clear any pending COPY and prevent a covert
+ * channel.
+ *
+ * __switch_to() will issue CP_ABORT on future context switches
+ * if process / thread has any open VAS window (Use
+ * current->mm->context.vas_windows).
+ */
+ asm volatile(PPC_CP_ABORT);
+}
+
+/*
* Receive window attributes specified by the (in-kernel) owner of window.
*/
struct vas_rx_win_attr {
- void *rx_fifo;
+ u64 rx_fifo;
int rx_fifo_size;
int wcreds_max;
@@ -86,7 +160,6 @@ struct vas_tx_win_attr {
int wcreds_max;
int lpid;
int pidr; /* hardware PID (from SPRN_PID) */
- int pid; /* linux process id */
int pswid;
int rsvd_txbuf_count;
int tc_mode;
@@ -101,6 +174,7 @@ struct vas_tx_win_attr {
bool rx_win_ord_mode;
};
+#ifdef CONFIG_PPC_POWERNV
/*
* Helper to map a chip id to VAS id.
 * For POWER9, this is a 1:1 mapping. In the future this may be a 1:N
@@ -163,4 +237,58 @@ int vas_copy_crb(void *crb, int offset);
*/
int vas_paste_crb(struct vas_window *win, int offset, bool re);
+int vas_register_api_powernv(struct module *mod, enum vas_cop_type cop_type,
+ const char *name);
+void vas_unregister_api_powernv(void);
+#endif
+
+#ifdef CONFIG_PPC_PSERIES
+
+/* VAS Capabilities */
+#define VAS_GZIP_QOS_FEAT 0x1
+#define VAS_GZIP_DEF_FEAT 0x2
+#define VAS_GZIP_QOS_FEAT_BIT PPC_BIT(VAS_GZIP_QOS_FEAT) /* Bit 1 */
+#define VAS_GZIP_DEF_FEAT_BIT PPC_BIT(VAS_GZIP_DEF_FEAT) /* Bit 2 */
+
+/* NX Capabilities */
+#define VAS_NX_GZIP_FEAT 0x1
+#define VAS_NX_GZIP_FEAT_BIT PPC_BIT(VAS_NX_GZIP_FEAT) /* Bit 1 */
+
+/*
+ * These structs are used to retrieve overall VAS capabilities that
+ * the hypervisor provides.
+ */
+struct hv_vas_all_caps {
+ __be64 descriptor;
+ __be64 feat_type;
+} __packed __aligned(0x1000);
+
+struct vas_all_caps {
+ u64 descriptor;
+ u64 feat_type;
+};
+
+int h_query_vas_capabilities(const u64 hcall, u8 query_type, u64 result);
+int vas_register_api_pseries(struct module *mod,
+ enum vas_cop_type cop_type, const char *name);
+void vas_unregister_api_pseries(void);
+#endif
+
+/*
+ * Register / unregister a coprocessor type with the VAS API, which is
+ * exported to user space. Applications can use this API to open / close
+ * windows that send / receive requests directly to the coprocessor.
+ *
+ * Only the NX GZIP coprocessor type is supported now, but this API can
+ * be used for others in the future.
+ */
+int vas_register_coproc_api(struct module *mod, enum vas_cop_type cop_type,
+ const char *name,
+ const struct vas_user_win_ops *vops);
+void vas_unregister_coproc_api(void);
+
+int get_vas_user_win_ref(struct vas_user_win_ref *task_ref);
+void vas_update_csb(struct coprocessor_request_block *crb,
+ struct vas_user_win_ref *task_ref);
+void vas_dump_crb(struct coprocessor_request_block *crb);
#endif /* __ASM_POWERPC_VAS_H */
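
A hedged sketch of how a coprocessor driver might hook into the user-space VAS API declared above. Only vas_register_coproc_api(), vas_unregister_coproc_api() and struct vas_user_win_ops come from this patch; the ops implementations, the module boilerplate and the VAS_COP_TYPE_GZIP value are assumed to exist elsewhere.

#include <linux/module.h>
#include <asm/vas.h>

static struct vas_window *nx_gzip_open_win(int vas_id, u64 flags,
					   enum vas_cop_type cop);	/* hypothetical */
static u64 nx_gzip_paste_addr(struct vas_window *win);			/* hypothetical */
static int nx_gzip_close_win(struct vas_window *win);			/* hypothetical */

static const struct vas_user_win_ops nx_gzip_win_ops = {
	.open_win	= nx_gzip_open_win,
	.paste_addr	= nx_gzip_paste_addr,
	.close_win	= nx_gzip_close_win,
};

static int __init nx_gzip_vas_init(void)
{
	/* Exposes an "nx-gzip" window open/close interface to user space */
	return vas_register_coproc_api(THIS_MODULE, VAS_COP_TYPE_GZIP,
				       "nx-gzip", &nx_gzip_win_ops);
}

static void __exit nx_gzip_vas_exit(void)
{
	vas_unregister_coproc_api();
}
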
diff --git a/arch/powerpc/include/asm/vdso.h b/arch/powerpc/include/asm/vdso.h
index b5e1f8f8a05c..7650b6ce14c8 100644
--- a/arch/powerpc/include/asm/vdso.h
+++ b/arch/powerpc/include/asm/vdso.h
@@ -1,52 +1,28 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __PPC64_VDSO_H__
-#define __PPC64_VDSO_H__
+#ifndef _ASM_POWERPC_VDSO_H
+#define _ASM_POWERPC_VDSO_H
-#ifdef __KERNEL__
-
-/* Default link addresses for the vDSOs */
-#define VDSO32_LBASE 0x0
-#define VDSO64_LBASE 0x0
+#define VDSO_VERSION_STRING LINUX_2.6.15
-/* Default map addresses for 32bit vDSO */
-#define VDSO32_MBASE 0x100000
+#ifndef __ASSEMBLY__
-#define VDSO_VERSION_STRING LINUX_2.6.15
+#ifdef CONFIG_PPC64
+#include <generated/vdso64-offsets.h>
+#endif
-/* Define if 64 bits VDSO has procedure descriptors */
-#undef VDS64_HAS_DESCRIPTORS
+#ifdef CONFIG_VDSO32
+#include <generated/vdso32-offsets.h>
+#endif
-#ifndef __ASSEMBLY__
+#define VDSO64_SYMBOL(base, name) ((unsigned long)(base) + (vdso64_offset_##name))
-/* Offsets relative to thread->vdso_base */
-extern unsigned long vdso64_rt_sigtramp;
-extern unsigned long vdso32_sigtramp;
-extern unsigned long vdso32_rt_sigtramp;
+#define VDSO32_SYMBOL(base, name) ((unsigned long)(base) + (vdso32_offset_##name))
int vdso_getcpu_init(void);
#else /* __ASSEMBLY__ */
#ifdef __VDSO64__
-#ifdef VDS64_HAS_DESCRIPTORS
-#define V_FUNCTION_BEGIN(name) \
- .globl name; \
- .section ".opd","a"; \
- .align 3; \
- name: \
- .quad .name,.TOC.@tocbase,0; \
- .previous; \
- .globl .name; \
- .type .name,@function; \
- .name: \
-
-#define V_FUNCTION_END(name) \
- .size .name,.-.name;
-
-#define V_LOCAL_FUNC(name) (.name)
-
-#else /* VDS64_HAS_DESCRIPTORS */
-
#define V_FUNCTION_BEGIN(name) \
.globl name; \
name: \
@@ -55,8 +31,6 @@ int vdso_getcpu_init(void);
.size name,.-name;
#define V_LOCAL_FUNC(name) (name)
-
-#endif /* VDS64_HAS_DESCRIPTORS */
#endif /* __VDSO64__ */
#ifdef __VDSO32__
@@ -75,6 +49,4 @@ int vdso_getcpu_init(void);
#endif /* __ASSEMBLY__ */
-#endif /* __KERNEL__ */
-
-#endif /* __PPC64_VDSO_H__ */
+#endif /* _ASM_POWERPC_VDSO_H */
diff --git a/arch/powerpc/include/asm/vdso/clocksource.h b/arch/powerpc/include/asm/vdso/clocksource.h
new file mode 100644
index 000000000000..c1ba56b82ee5
--- /dev/null
+++ b/arch/powerpc/include/asm/vdso/clocksource.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_VDSO_CLOCKSOURCE_H
+#define _ASM_POWERPC_VDSO_CLOCKSOURCE_H
+
+#define VDSO_ARCH_CLOCKMODES VDSO_CLOCKMODE_ARCHTIMER
+
+#endif
diff --git a/arch/powerpc/include/asm/vdso/gettimeofday.h b/arch/powerpc/include/asm/vdso/gettimeofday.h
new file mode 100644
index 000000000000..f0a4cf01e85c
--- /dev/null
+++ b/arch/powerpc/include/asm/vdso/gettimeofday.h
@@ -0,0 +1,160 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_VDSO_GETTIMEOFDAY_H
+#define _ASM_POWERPC_VDSO_GETTIMEOFDAY_H
+
+#ifndef __ASSEMBLY__
+
+#include <asm/page.h>
+#include <asm/vdso/timebase.h>
+#include <asm/barrier.h>
+#include <asm/unistd.h>
+#include <uapi/linux/time.h>
+
+#define VDSO_HAS_CLOCK_GETRES 1
+
+#define VDSO_HAS_TIME 1
+
+static __always_inline int do_syscall_2(const unsigned long _r0, const unsigned long _r3,
+ const unsigned long _r4)
+{
+ register long r0 asm("r0") = _r0;
+ register unsigned long r3 asm("r3") = _r3;
+ register unsigned long r4 asm("r4") = _r4;
+ register int ret asm ("r3");
+
+ asm volatile(
+ " sc\n"
+ " bns+ 1f\n"
+ " neg %0, %0\n"
+ "1:\n"
+ : "=r" (ret), "+r" (r4), "+r" (r0)
+ : "r" (r3)
+ : "memory", "r5", "r6", "r7", "r8", "r9", "r10", "r11", "r12", "cr0", "ctr");
+
+ return ret;
+}
+
+static __always_inline
+int gettimeofday_fallback(struct __kernel_old_timeval *_tv, struct timezone *_tz)
+{
+ return do_syscall_2(__NR_gettimeofday, (unsigned long)_tv, (unsigned long)_tz);
+}
+
+#ifdef __powerpc64__
+
+static __always_inline
+int clock_gettime_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
+{
+ return do_syscall_2(__NR_clock_gettime, _clkid, (unsigned long)_ts);
+}
+
+static __always_inline
+int clock_getres_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
+{
+ return do_syscall_2(__NR_clock_getres, _clkid, (unsigned long)_ts);
+}
+
+#else
+
+#define BUILD_VDSO32 1
+
+static __always_inline
+int clock_gettime_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
+{
+ return do_syscall_2(__NR_clock_gettime64, _clkid, (unsigned long)_ts);
+}
+
+static __always_inline
+int clock_getres_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
+{
+ return do_syscall_2(__NR_clock_getres_time64, _clkid, (unsigned long)_ts);
+}
+
+static __always_inline
+int clock_gettime32_fallback(clockid_t _clkid, struct old_timespec32 *_ts)
+{
+ return do_syscall_2(__NR_clock_gettime, _clkid, (unsigned long)_ts);
+}
+
+static __always_inline
+int clock_getres32_fallback(clockid_t _clkid, struct old_timespec32 *_ts)
+{
+ return do_syscall_2(__NR_clock_getres, _clkid, (unsigned long)_ts);
+}
+#endif
+
+static __always_inline u64 __arch_get_hw_counter(s32 clock_mode,
+ const struct vdso_data *vd)
+{
+ return get_tb();
+}
+
+const struct vdso_data *__arch_get_vdso_data(void);
+
+#ifdef CONFIG_TIME_NS
+static __always_inline
+const struct vdso_data *__arch_get_timens_vdso_data(const struct vdso_data *vd)
+{
+ return (void *)vd + PAGE_SIZE;
+}
+#endif
+
+static inline bool vdso_clocksource_ok(const struct vdso_data *vd)
+{
+ return true;
+}
+#define vdso_clocksource_ok vdso_clocksource_ok
+
+/*
+ * powerpc specific delta calculation.
+ *
+ * This variant removes the masking of the subtraction because the
+ * clocksource mask of all VDSO capable clocksources on powerpc is U64_MAX
+ * which would result in a pointless operation. The compiler cannot
+ * optimize it away as the mask comes from the vdso data and is not compile
+ * time constant.
+ */
+static __always_inline u64 vdso_calc_delta(u64 cycles, u64 last, u64 mask, u32 mult)
+{
+ return (cycles - last) * mult;
+}
+#define vdso_calc_delta vdso_calc_delta
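
To make the shortcut concrete, a worked note (not part of the patch):

/*
 * The generic code computes  ns = ((cycles - last) & mask) * mult  and
 * applies the >> shift afterwards in the caller. With mask == U64_MAX
 * the AND is a no-op, so the powerpc variant keeps only the multiply;
 * the result is identical, just without the dead masking instruction.
 */
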
+
+#ifndef __powerpc64__
+static __always_inline u64 vdso_shift_ns(u64 ns, unsigned long shift)
+{
+ u32 hi = ns >> 32;
+ u32 lo = ns;
+
+ lo >>= shift;
+ lo |= hi << (32 - shift);
+ hi >>= shift;
+
+ if (likely(hi == 0))
+ return lo;
+
+ return ((u64)hi << 32) | lo;
+}
+#define vdso_shift_ns vdso_shift_ns
+#endif
+
+#ifdef __powerpc64__
+int __c_kernel_clock_gettime(clockid_t clock, struct __kernel_timespec *ts,
+ const struct vdso_data *vd);
+int __c_kernel_clock_getres(clockid_t clock_id, struct __kernel_timespec *res,
+ const struct vdso_data *vd);
+#else
+int __c_kernel_clock_gettime(clockid_t clock, struct old_timespec32 *ts,
+ const struct vdso_data *vd);
+int __c_kernel_clock_gettime64(clockid_t clock, struct __kernel_timespec *ts,
+ const struct vdso_data *vd);
+int __c_kernel_clock_getres(clockid_t clock_id, struct old_timespec32 *res,
+ const struct vdso_data *vd);
+#endif
+int __c_kernel_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz,
+ const struct vdso_data *vd);
+__kernel_old_time_t __c_kernel_time(__kernel_old_time_t *time,
+ const struct vdso_data *vd);
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_POWERPC_VDSO_GETTIMEOFDAY_H */
diff --git a/arch/powerpc/include/asm/vdso/processor.h b/arch/powerpc/include/asm/vdso/processor.h
new file mode 100644
index 000000000000..80d13207c568
--- /dev/null
+++ b/arch/powerpc/include/asm/vdso/processor.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef _ASM_POWERPC_VDSO_PROCESSOR_H
+#define _ASM_POWERPC_VDSO_PROCESSOR_H
+
+#ifndef __ASSEMBLY__
+
+/* Macros for adjusting thread priority (hardware multi-threading) */
+#ifdef CONFIG_PPC64
+#define HMT_very_low() asm volatile("or 31, 31, 31 # very low priority")
+#define HMT_low() asm volatile("or 1, 1, 1 # low priority")
+#define HMT_medium_low() asm volatile("or 6, 6, 6 # medium low priority")
+#define HMT_medium() asm volatile("or 2, 2, 2 # medium priority")
+#define HMT_medium_high() asm volatile("or 5, 5, 5 # medium high priority")
+#define HMT_high() asm volatile("or 3, 3, 3 # high priority")
+#else
+#define HMT_very_low()
+#define HMT_low()
+#define HMT_medium_low()
+#define HMT_medium()
+#define HMT_medium_high()
+#define HMT_high()
+#endif
+
+#ifdef CONFIG_PPC64
+#define cpu_relax() \
+ asm volatile(ASM_FTR_IFCLR( \
+ /* Pre-POWER10 uses low ; medium priority nops */ \
+ "or 1,1,1 ; or 2,2,2", \
+ /* POWER10 onward uses pause_short (wait 2,0) */ \
+ PPC_WAIT(2, 0), \
+ %0) :: "i" (CPU_FTR_ARCH_31) : "memory")
+#else
+#define cpu_relax() barrier()
+#endif
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_POWERPC_VDSO_PROCESSOR_H */
diff --git a/arch/powerpc/include/asm/vdso/timebase.h b/arch/powerpc/include/asm/vdso/timebase.h
new file mode 100644
index 000000000000..e9245f86a46c
--- /dev/null
+++ b/arch/powerpc/include/asm/vdso/timebase.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Common timebase prototypes and such for all ppc machines.
+ */
+
+#ifndef _ASM_POWERPC_VDSO_TIMEBASE_H
+#define _ASM_POWERPC_VDSO_TIMEBASE_H
+
+#include <asm/reg.h>
+
+/*
+ * We use __powerpc64__ here because we want the compat VDSO to use the 32-bit
+ * version below in the else case of the ifdef.
+ */
+#if defined(__powerpc64__) && (defined(CONFIG_PPC_CELL) || defined(CONFIG_PPC_E500))
+#define mftb() ({unsigned long rval; \
+ asm volatile( \
+ "90: mfspr %0, %2;\n" \
+ ASM_FTR_IFSET( \
+ "97: cmpwi %0,0;\n" \
+ " beq- 90b;\n", "", %1) \
+ : "=r" (rval) \
+ : "i" (CPU_FTR_CELL_TB_BUG), "i" (SPRN_TBRL) : "cr0"); \
+ rval;})
+#elif defined(CONFIG_PPC_8xx)
+#define mftb() ({unsigned long rval; \
+ asm volatile("mftbl %0" : "=r" (rval)); rval;})
+#else
+#define mftb() ({unsigned long rval; \
+ asm volatile("mfspr %0, %1" : \
+ "=r" (rval) : "i" (SPRN_TBRL)); rval;})
+#endif /* !CONFIG_PPC_CELL */
+
+#if defined(CONFIG_PPC_8xx)
+#define mftbu() ({unsigned long rval; \
+ asm volatile("mftbu %0" : "=r" (rval)); rval;})
+#else
+#define mftbu() ({unsigned long rval; \
+ asm volatile("mfspr %0, %1" : "=r" (rval) : \
+ "i" (SPRN_TBRU)); rval;})
+#endif
+
+#define mttbl(v) asm volatile("mttbl %0":: "r"(v))
+#define mttbu(v) asm volatile("mttbu %0":: "r"(v))
+
+static __always_inline u64 get_tb(void)
+{
+ unsigned int tbhi, tblo, tbhi2;
+
+ /*
+ * We use __powerpc64__ here not CONFIG_PPC64 because we want the compat
+ * VDSO to use the 32-bit compatible version in the while loop below.
+ */
+ if (__is_defined(__powerpc64__))
+ return mftb();
+
+ do {
+ tbhi = mftbu();
+ tblo = mftb();
+ tbhi2 = mftbu();
+ } while (tbhi != tbhi2);
+
+ return ((u64)tbhi << 32) | tblo;
+}
+
+static inline void set_tb(unsigned int upper, unsigned int lower)
+{
+ mtspr(SPRN_TBWL, 0);
+ mtspr(SPRN_TBWU, upper);
+ mtspr(SPRN_TBWL, lower);
+}
+
+#endif /* _ASM_POWERPC_VDSO_TIMEBASE_H */
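
A hedged sketch of turning a timebase delta read via get_tb() into nanoseconds; tb_ticks_per_sec is the kernel's timebase-frequency variable from <asm/time.h>, the helper name is hypothetical, and the interval is assumed short enough that the multiplication does not overflow 64 bits.

#include <linux/math64.h>

static inline u64 tb_delta_to_ns(u64 start_tb, u64 end_tb)
{
	/* ticks * (ns per second) / (ticks per second) */
	return div_u64((end_tb - start_tb) * NSEC_PER_SEC, tb_ticks_per_sec);
}
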
diff --git a/arch/powerpc/include/asm/vdso/vsyscall.h b/arch/powerpc/include/asm/vdso/vsyscall.h
new file mode 100644
index 000000000000..48cf23f1e273
--- /dev/null
+++ b/arch/powerpc/include/asm/vdso/vsyscall.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_VDSO_VSYSCALL_H
+#define _ASM_POWERPC_VDSO_VSYSCALL_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/timekeeper_internal.h>
+#include <asm/vdso_datapage.h>
+
+/*
+ * Update the vDSO data page to keep in sync with kernel timekeeping.
+ */
+static __always_inline
+struct vdso_data *__arch_get_k_vdso_data(void)
+{
+ return vdso_data->data;
+}
+#define __arch_get_k_vdso_data __arch_get_k_vdso_data
+
+/* The asm-generic header needs to be included after the definitions above */
+#include <asm-generic/vdso/vsyscall.h>
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _ASM_POWERPC_VDSO_VSYSCALL_H */
diff --git a/arch/powerpc/include/asm/vdso_datapage.h b/arch/powerpc/include/asm/vdso_datapage.h
index b9ef6cf50ea5..a585c8e538ff 100644
--- a/arch/powerpc/include/asm/vdso_datapage.h
+++ b/arch/powerpc/include/asm/vdso_datapage.h
@@ -36,6 +36,7 @@
#include <linux/unistd.h>
#include <linux/time.h>
+#include <vdso/datapage.h>
#define SYSCALL_MAP_SIZE ((NR_syscalls + 31) / 32)
@@ -45,7 +46,7 @@
#ifdef CONFIG_PPC64
-struct vdso_data {
+struct vdso_arch_data {
__u8 eye_catcher[16]; /* Eyecatcher: SYSTEMCFG:PPC64 0x00 */
struct { /* Systemcfg version numbers */
__u32 major; /* Major number 0x10 */
@@ -59,13 +60,13 @@ struct vdso_data {
__u32 processor; /* Processor type 0x1C */
__u64 processorCount; /* # of physical processors 0x20 */
__u64 physicalMemorySize; /* Size of real memory(B) 0x28 */
- __u64 tb_orig_stamp; /* Timebase at boot 0x30 */
+ __u64 tb_orig_stamp; /* (NU) Timebase at boot 0x30 */
__u64 tb_ticks_per_sec; /* Timebase tics / sec 0x38 */
- __u64 tb_to_xs; /* Inverse of TB to 2^20 0x40 */
- __u64 stamp_xsec; /* 0x48 */
- __u64 tb_update_count; /* Timebase atomicity ctr 0x50 */
- __u32 tz_minuteswest; /* Minutes west of Greenwich 0x58 */
- __u32 tz_dsttime; /* Type of dst correction 0x5C */
+ __u64 tb_to_xs; /* (NU) Inverse of TB to 2^20 0x40 */
+ __u64 stamp_xsec; /* (NU) 0x48 */
+ __u64 tb_update_count; /* (NU) Timebase atomicity ctr 0x50 */
+ __u32 tz_minuteswest; /* (NU) Min. west of Greenwich 0x58 */
+ __u32 tz_dsttime; /* (NU) Type of dst correction 0x5C */
__u32 dcache_size; /* L1 d-cache size 0x60 */
__u32 dcache_line_size; /* L1 d-cache line size 0x64 */
__u32 icache_size; /* L1 i-cache size 0x68 */
@@ -78,14 +79,10 @@ struct vdso_data {
__u32 icache_block_size; /* L1 i-cache block size */
__u32 dcache_log_block_size; /* L1 d-cache log block size */
__u32 icache_log_block_size; /* L1 i-cache log block size */
- __u32 stamp_sec_fraction; /* fractional seconds of stamp_xtime */
- __s32 wtom_clock_nsec; /* Wall to monotonic clock nsec */
- __s64 wtom_clock_sec; /* Wall to monotonic clock sec */
- __s64 stamp_xtime_sec; /* xtime secs as at tb_orig_stamp */
- __s64 stamp_xtime_nsec; /* xtime nsecs as at tb_orig_stamp */
- __u32 hrtimer_res; /* hrtimer resolution */
- __u32 syscall_map_64[SYSCALL_MAP_SIZE]; /* map of syscalls */
- __u32 syscall_map_32[SYSCALL_MAP_SIZE]; /* map of syscalls */
+ __u32 syscall_map[SYSCALL_MAP_SIZE]; /* Map of syscalls */
+ __u32 compat_syscall_map[SYSCALL_MAP_SIZE]; /* Map of compat syscalls */
+
+ struct vdso_data data[CS_BASES];
};
#else /* CONFIG_PPC64 */
@@ -93,35 +90,25 @@ struct vdso_data {
/*
* And here is the simpler 32 bits version
*/
-struct vdso_data {
- __u64 tb_orig_stamp; /* Timebase at boot 0x30 */
+struct vdso_arch_data {
__u64 tb_ticks_per_sec; /* Timebase tics / sec 0x38 */
- __u64 tb_to_xs; /* Inverse of TB to 2^20 0x40 */
- __u64 stamp_xsec; /* 0x48 */
- __u32 tb_update_count; /* Timebase atomicity ctr 0x50 */
- __u32 tz_minuteswest; /* Minutes west of Greenwich 0x58 */
- __u32 tz_dsttime; /* Type of dst correction 0x5C */
- __s32 wtom_clock_sec; /* Wall to monotonic clock */
- __s32 wtom_clock_nsec;
- __s32 stamp_xtime_sec; /* xtime seconds as at tb_orig_stamp */
- __s32 stamp_xtime_nsec; /* xtime nsecs as at tb_orig_stamp */
- __u32 stamp_sec_fraction; /* fractional seconds of stamp_xtime */
- __u32 hrtimer_res; /* hrtimer resolution */
- __u32 syscall_map_32[SYSCALL_MAP_SIZE]; /* map of syscalls */
+ __u32 syscall_map[SYSCALL_MAP_SIZE]; /* Map of syscalls */
+ __u32 compat_syscall_map[0]; /* No compat syscalls on PPC32 */
+ struct vdso_data data[CS_BASES];
};
#endif /* CONFIG_PPC64 */
-extern struct vdso_data *vdso_data;
+extern struct vdso_arch_data *vdso_data;
#else /* __ASSEMBLY__ */
-.macro get_datapage ptr, tmp
+.macro get_datapage ptr
bcl 20, 31, .+4
+999:
mflr \ptr
- addi \ptr, \ptr, (__kernel_datapage_offset - (.-4))@l
- lwz \tmp, 0(\ptr)
- add \ptr, \tmp, \ptr
+ addis \ptr, \ptr, (_vdso_datapage - 999b)@ha
+ addi \ptr, \ptr, (_vdso_datapage - 999b)@l
.endm
#endif /* __ASSEMBLY__ */
diff --git a/arch/powerpc/include/asm/vermagic.h b/arch/powerpc/include/asm/vermagic.h
new file mode 100644
index 000000000000..b054a8576e5d
--- /dev/null
+++ b/arch/powerpc/include/asm/vermagic.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_VERMAGIC_H
+#define _ASM_VERMAGIC_H
+
+#ifdef CONFIG_MPROFILE_KERNEL
+#define MODULE_ARCH_VERMAGIC_FTRACE "mprofile-kernel "
+#else
+#define MODULE_ARCH_VERMAGIC_FTRACE ""
+#endif
+
+#ifdef CONFIG_RELOCATABLE
+#define MODULE_ARCH_VERMAGIC_RELOCATABLE "relocatable "
+#else
+#define MODULE_ARCH_VERMAGIC_RELOCATABLE ""
+#endif
+
+#define MODULE_ARCH_VERMAGIC \
+ MODULE_ARCH_VERMAGIC_FTRACE MODULE_ARCH_VERMAGIC_RELOCATABLE
+
+#endif /* _ASM_VERMAGIC_H */
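
For illustration only (not part of the patch), the two fragments simply concatenate:

/*
 * CONFIG_MPROFILE_KERNEL=y, CONFIG_RELOCATABLE=y  ->  "mprofile-kernel relocatable "
 * CONFIG_MPROFILE_KERNEL=n, CONFIG_RELOCATABLE=y  ->  "relocatable "
 * CONFIG_MPROFILE_KERNEL=n, CONFIG_RELOCATABLE=n  ->  ""
 *
 * A module whose vermagic suffix differs from the running kernel's is
 * rejected at load time.
 */
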
diff --git a/arch/powerpc/include/asm/vio.h b/arch/powerpc/include/asm/vio.h
index 0cf52746531b..e7479a4abf96 100644
--- a/arch/powerpc/include/asm/vio.h
+++ b/arch/powerpc/include/asm/vio.h
@@ -113,7 +113,8 @@ struct vio_driver {
const char *name;
const struct vio_device_id *id_table;
int (*probe)(struct vio_dev *dev, const struct vio_device_id *id);
- int (*remove)(struct vio_dev *dev);
+ void (*remove)(struct vio_dev *dev);
+ void (*shutdown)(struct vio_dev *dev);
/* A driver must have a get_desired_dma() function to
* be loaded in a CMO environment if it uses DMA.
*/
diff --git a/arch/powerpc/include/asm/vmalloc.h b/arch/powerpc/include/asm/vmalloc.h
index b992dfaaa161..4c69ece52a31 100644
--- a/arch/powerpc/include/asm/vmalloc.h
+++ b/arch/powerpc/include/asm/vmalloc.h
@@ -1,4 +1,24 @@
#ifndef _ASM_POWERPC_VMALLOC_H
#define _ASM_POWERPC_VMALLOC_H
+#include <asm/mmu.h>
+#include <asm/page.h>
+
+#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
+
+#define arch_vmap_pud_supported arch_vmap_pud_supported
+static inline bool arch_vmap_pud_supported(pgprot_t prot)
+{
+ /* HPT does not cope with large pages in the vmalloc area */
+ return radix_enabled();
+}
+
+#define arch_vmap_pmd_supported arch_vmap_pmd_supported
+static inline bool arch_vmap_pmd_supported(pgprot_t prot)
+{
+ return radix_enabled();
+}
+
+#endif
+
#endif /* _ASM_POWERPC_VMALLOC_H */
diff --git a/arch/powerpc/include/asm/word-at-a-time.h b/arch/powerpc/include/asm/word-at-a-time.h
index f3f4710d4ff5..46c31fb8748d 100644
--- a/arch/powerpc/include/asm/word-at-a-time.h
+++ b/arch/powerpc/include/asm/word-at-a-time.h
@@ -7,7 +7,7 @@
#include <linux/kernel.h>
#include <asm/asm-compat.h>
-#include <asm/ppc_asm.h>
+#include <asm/extable.h>
#ifdef __BIG_ENDIAN__
diff --git a/arch/powerpc/include/asm/xics.h b/arch/powerpc/include/asm/xics.h
index 8e903b3f9c24..89090485bec1 100644
--- a/arch/powerpc/include/asm/xics.h
+++ b/arch/powerpc/include/asm/xics.h
@@ -38,13 +38,13 @@ static inline int icp_native_init(void) { return -ENODEV; }
/* PAPR ICP */
#ifdef CONFIG_PPC_ICP_HV
-extern int icp_hv_init(void);
+int __init icp_hv_init(void);
#else
static inline int icp_hv_init(void) { return -ENODEV; }
#endif
#ifdef CONFIG_PPC_POWERNV
-extern int icp_opal_init(void);
+int __init icp_opal_init(void);
extern void icp_opal_flush_interrupt(void);
#else
static inline int icp_opal_init(void) { return -ENODEV; }
@@ -65,8 +65,12 @@ struct icp_ops {
extern const struct icp_ops *icp_ops;
+#ifdef CONFIG_PPC_ICS_NATIVE
/* Native ICS */
extern int ics_native_init(void);
+#else
+static inline int ics_native_init(void) { return -ENODEV; }
+#endif
/* RTAS ICS */
#ifdef CONFIG_PPC_ICS_RTAS
@@ -85,10 +89,11 @@ static inline int ics_opal_init(void) { return -ENODEV; }
/* ICS instance, hooked up to chip_data of an irq */
struct ics {
struct list_head link;
- int (*map)(struct ics *ics, unsigned int virq);
+ int (*check)(struct ics *ics, unsigned int hwirq);
void (*mask_unknown)(struct ics *ics, unsigned long vec);
long (*get_server)(struct ics *ics, unsigned long vec);
int (*host_match)(struct ics *ics, struct device_node *node);
+ struct irq_chip *chip;
char data[];
};
@@ -154,7 +159,6 @@ extern void xics_setup_cpu(void);
extern void xics_update_irq_servers(void);
extern void xics_set_cpu_giq(unsigned int gserver, unsigned int join);
extern void xics_mask_unknown_vec(unsigned int vec);
-extern irqreturn_t xics_ipi_dispatch(int cpu);
extern void xics_smp_probe(void);
extern void xics_register_ics(struct ics *ics);
extern void xics_teardown_cpu(void);
diff --git a/arch/powerpc/include/asm/xilinx_intc.h b/arch/powerpc/include/asm/xilinx_intc.h
deleted file mode 100644
index ca9aa162fb09..000000000000
--- a/arch/powerpc/include/asm/xilinx_intc.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Xilinx intc external definitions
- *
- * Copyright 2007 Secret Lab Technologies Ltd.
- */
-#ifndef _ASM_POWERPC_XILINX_INTC_H
-#define _ASM_POWERPC_XILINX_INTC_H
-
-#ifdef __KERNEL__
-
-extern void __init xilinx_intc_init_tree(void);
-extern unsigned int xintc_get_irq(void);
-
-#endif /* __KERNEL__ */
-#endif /* _ASM_POWERPC_XILINX_INTC_H */
diff --git a/arch/powerpc/include/asm/xilinx_pci.h b/arch/powerpc/include/asm/xilinx_pci.h
deleted file mode 100644
index 7a8275caf6af..000000000000
--- a/arch/powerpc/include/asm/xilinx_pci.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * Xilinx pci external definitions
- *
- * Copyright 2009 Roderick Colenbrander
- * Copyright 2009 Secret Lab Technologies Ltd.
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
- */
-
-#ifndef INCLUDE_XILINX_PCI
-#define INCLUDE_XILINX_PCI
-
-#ifdef CONFIG_XILINX_PCI
-extern void __init xilinx_pci_init(void);
-#else
-static inline void __init xilinx_pci_init(void) { return; }
-#endif
-
-#endif /* INCLUDE_XILINX_PCI */
diff --git a/arch/powerpc/include/asm/xive-regs.h b/arch/powerpc/include/asm/xive-regs.h
index 33aee7490cbb..cf8bb6ac4463 100644
--- a/arch/powerpc/include/asm/xive-regs.h
+++ b/arch/powerpc/include/asm/xive-regs.h
@@ -37,6 +37,14 @@
#define XIVE_ESB_SET_PQ_10 0xe00 /* Load */
#define XIVE_ESB_SET_PQ_11 0xf00 /* Load */
+/*
+ * Load-after-store ordering
+ *
+ * Adding this offset to the load address will enforce
+ * load-after-store ordering. This is required to use StoreEOI.
+ */
+#define XIVE_ESB_LD_ST_MO 0x40 /* Load-after-store ordering */
+
#define XIVE_ESB_VAL_P 0x2
#define XIVE_ESB_VAL_Q 0x1
#define XIVE_ESB_INVALID 0xFF
@@ -72,10 +80,13 @@
#define TM_QW0W2_VU PPC_BIT32(0)
#define TM_QW0W2_LOGIC_SERV PPC_BITMASK32(1,31) // XX 2,31 ?
#define TM_QW1W2_VO PPC_BIT32(0)
+#define TM_QW1W2_HO PPC_BIT32(1) /* P10 XIVE2 */
#define TM_QW1W2_OS_CAM PPC_BITMASK32(8,31)
#define TM_QW2W2_VP PPC_BIT32(0)
+#define TM_QW2W2_HP PPC_BIT32(1) /* P10 XIVE2 */
#define TM_QW2W2_POOL_CAM PPC_BITMASK32(8,31)
#define TM_QW3W2_VT PPC_BIT32(0)
+#define TM_QW3W2_HT PPC_BIT32(1) /* P10 XIVE2 */
#define TM_QW3W2_LP PPC_BIT32(6)
#define TM_QW3W2_LE PPC_BIT32(7)
#define TM_QW3W2_T PPC_BIT32(31)
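
A hedged sketch of how the new offset is meant to be combined with an ESB MMIO load when StoreEOI is in use; the helper, its parameters and the use of in_be64() from <asm/io.h> are illustrative, not taken from this patch.

static u8 esb_load_pq(void __iomem *esb_mmio, u32 offset, bool store_eoi)
{
	/* StoreEOI requires this load to be ordered after the EOI store */
	if (store_eoi)
		offset |= XIVE_ESB_LD_ST_MO;

	return in_be64(esb_mmio + offset) & (XIVE_ESB_VAL_P | XIVE_ESB_VAL_Q);
}
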
diff --git a/arch/powerpc/include/asm/xive.h b/arch/powerpc/include/asm/xive.h
index 93f982dbb3d4..92930b0b5d0e 100644
--- a/arch/powerpc/include/asm/xive.h
+++ b/arch/powerpc/include/asm/xive.h
@@ -5,6 +5,8 @@
#ifndef _ASM_POWERPC_XIVE_H
#define _ASM_POWERPC_XIVE_H
+#include <asm/opal-api.h>
+
#define XIVE_INVALID_VP 0xffffffff
#ifdef CONFIG_PPC_XIVE
@@ -58,13 +60,13 @@ struct xive_irq_data {
};
#define XIVE_IRQ_FLAG_STORE_EOI 0x01
#define XIVE_IRQ_FLAG_LSI 0x02
-#define XIVE_IRQ_FLAG_SHIFT_BUG 0x04
-#define XIVE_IRQ_FLAG_MASK_FW 0x08
-#define XIVE_IRQ_FLAG_EOI_FW 0x10
+/* #define XIVE_IRQ_FLAG_SHIFT_BUG 0x04 */ /* P9 DD1.0 workaround */
+/* #define XIVE_IRQ_FLAG_MASK_FW 0x08 */ /* P9 DD1.0 workaround */
+/* #define XIVE_IRQ_FLAG_EOI_FW 0x10 */ /* P9 DD1.0 workaround */
#define XIVE_IRQ_FLAG_H_INT_ESB 0x20
/* Special flag set by KVM for escalation interrupts */
-#define XIVE_IRQ_NO_EOI 0x80
+#define XIVE_IRQ_FLAG_NO_EOI 0x80
#define XIVE_INVALID_CHIP_ID -1
@@ -100,6 +102,7 @@ void xive_flush_interrupt(void);
/* xmon hook */
void xmon_xive_do_dump(int cpu);
int xmon_xive_get_irq_config(u32 hw_irq, struct irq_data *d);
+void xmon_xive_get_irq_all(void);
/* APIs used by KVM */
u32 xive_native_default_eq_shift(void);
@@ -108,7 +111,7 @@ void xive_native_free_vp_block(u32 vp_base);
int xive_native_populate_irq_data(u32 hw_irq,
struct xive_irq_data *data);
void xive_cleanup_irq_data(struct xive_irq_data *xd);
-u32 xive_native_alloc_irq(void);
+void xive_irq_free_data(unsigned int virq);
void xive_native_free_irq(u32 irq);
int xive_native_configure_irq(u32 hw_irq, u32 target, u8 prio, u32 sw_irq);
@@ -123,6 +126,7 @@ int xive_native_enable_vp(u32 vp_id, bool single_escalation);
int xive_native_disable_vp(u32 vp_id);
int xive_native_get_vp_info(u32 vp_id, u32 *out_cam_id, u32 *out_chip_id);
bool xive_native_has_single_escalation(void);
+bool xive_native_has_save_restore(void);
int xive_native_get_queue_info(u32 vp_id, uint32_t prio,
u64 *out_qpage,
@@ -137,6 +141,12 @@ int xive_native_set_queue_state(u32 vp_id, uint32_t prio, u32 qtoggle,
u32 qindex);
int xive_native_get_vp_state(u32 vp_id, u64 *out_state);
bool xive_native_has_queue_state_support(void);
+extern u32 xive_native_alloc_irq_on_chip(u32 chip_id);
+
+static inline u32 xive_native_alloc_irq(void)
+{
+ return xive_native_alloc_irq_on_chip(OPAL_XIVE_ANY_CHIP);
+}
#else
@@ -148,7 +158,6 @@ static inline void xive_smp_probe(void) { }
static inline int xive_smp_prepare_cpu(unsigned int cpu) { return -EINVAL; }
static inline void xive_smp_setup_cpu(void) { }
static inline void xive_smp_disable_cpu(void) { }
-static inline void xive_kexec_teardown_cpu(int secondary) { }
static inline void xive_shutdown(void) { }
static inline void xive_flush_interrupt(void) { }
diff --git a/arch/powerpc/include/asm/xmon.h b/arch/powerpc/include/asm/xmon.h
index 454a7fc6113b..f2d44b44f46c 100644
--- a/arch/powerpc/include/asm/xmon.h
+++ b/arch/powerpc/include/asm/xmon.h
@@ -12,13 +12,13 @@
#ifdef CONFIG_XMON
extern void xmon_setup(void);
-extern void xmon_register_spus(struct list_head *list);
+void __init xmon_register_spus(struct list_head *list);
struct pt_regs;
extern int xmon(struct pt_regs *excp);
extern irqreturn_t xmon_irq(int, void *);
#else
-static inline void xmon_setup(void) { };
-static inline void xmon_register_spus(struct list_head *list) { };
+static inline void xmon_setup(void) { }
+static inline void xmon_register_spus(struct list_head *list) { }
#endif
#if defined(CONFIG_XMON) && defined(CONFIG_SMP)
diff --git a/arch/powerpc/include/asm/xor_altivec.h b/arch/powerpc/include/asm/xor_altivec.h
index 6ca923510b59..294620a25f80 100644
--- a/arch/powerpc/include/asm/xor_altivec.h
+++ b/arch/powerpc/include/asm/xor_altivec.h
@@ -3,17 +3,20 @@
#define _ASM_POWERPC_XOR_ALTIVEC_H
#ifdef CONFIG_ALTIVEC
-
-void xor_altivec_2(unsigned long bytes, unsigned long *v1_in,
- unsigned long *v2_in);
-void xor_altivec_3(unsigned long bytes, unsigned long *v1_in,
- unsigned long *v2_in, unsigned long *v3_in);
-void xor_altivec_4(unsigned long bytes, unsigned long *v1_in,
- unsigned long *v2_in, unsigned long *v3_in,
- unsigned long *v4_in);
-void xor_altivec_5(unsigned long bytes, unsigned long *v1_in,
- unsigned long *v2_in, unsigned long *v3_in,
- unsigned long *v4_in, unsigned long *v5_in);
+void xor_altivec_2(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2);
+void xor_altivec_3(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3);
+void xor_altivec_4(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4);
+void xor_altivec_5(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4,
+ const unsigned long * __restrict p5);
#endif
#endif /* _ASM_POWERPC_XOR_ALTIVEC_H */
diff --git a/arch/powerpc/include/uapi/asm/auxvec.h b/arch/powerpc/include/uapi/asm/auxvec.h
index 7af21dc0e320..aa7c16215453 100644
--- a/arch/powerpc/include/uapi/asm/auxvec.h
+++ b/arch/powerpc/include/uapi/asm/auxvec.h
@@ -48,6 +48,8 @@
#define AT_L3_CACHESIZE 46
#define AT_L3_CACHEGEOMETRY 47
-#define AT_VECTOR_SIZE_ARCH 14 /* entries in ARCH_DLINFO */
+#define AT_MINSIGSTKSZ 51 /* stack needed for signal delivery */
+
+#define AT_VECTOR_SIZE_ARCH 15 /* entries in ARCH_DLINFO */
#endif
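
A user-space sketch (assuming glibc's getauxval()) of consuming the new aux vector entry; the helper name is illustrative.

#include <sys/auxv.h>
#include <signal.h>

static size_t min_sigstack_size(void)
{
	unsigned long v = getauxval(AT_MINSIGSTKSZ);

	/* Fall back to the legacy constant when the kernel does not provide it */
	return v ? (size_t)v : (size_t)MINSIGSTKSZ;
}
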
diff --git a/arch/powerpc/include/uapi/asm/bpf_perf_event.h b/arch/powerpc/include/uapi/asm/bpf_perf_event.h
deleted file mode 100644
index 5e1e648aeec4..000000000000
--- a/arch/powerpc/include/uapi/asm/bpf_perf_event.h
+++ /dev/null
@@ -1,9 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-#ifndef _UAPI__ASM_BPF_PERF_EVENT_H__
-#define _UAPI__ASM_BPF_PERF_EVENT_H__
-
-#include <asm/ptrace.h>
-
-typedef struct user_pt_regs bpf_user_pt_regs_t;
-
-#endif /* _UAPI__ASM_BPF_PERF_EVENT_H__ */
diff --git a/arch/powerpc/include/uapi/asm/cputable.h b/arch/powerpc/include/uapi/asm/cputable.h
index 540592034740..731b97dc2d15 100644
--- a/arch/powerpc/include/uapi/asm/cputable.h
+++ b/arch/powerpc/include/uapi/asm/cputable.h
@@ -50,6 +50,8 @@
#define PPC_FEATURE2_DARN 0x00200000 /* darn random number insn */
#define PPC_FEATURE2_SCV 0x00100000 /* scv syscall */
#define PPC_FEATURE2_HTM_NO_SUSPEND 0x00080000 /* TM w/out suspended state */
+#define PPC_FEATURE2_ARCH_3_1 0x00040000 /* ISA 3.1 */
+#define PPC_FEATURE2_MMA 0x00020000 /* Matrix Multiply Assist */
/*
* IMPORTANT!
diff --git a/arch/powerpc/include/uapi/asm/elf.h b/arch/powerpc/include/uapi/asm/elf.h
index 860c59291bfc..308857123a08 100644
--- a/arch/powerpc/include/uapi/asm/elf.h
+++ b/arch/powerpc/include/uapi/asm/elf.h
@@ -289,12 +289,4 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[ELF_NVSRHALFREG];
/* Keep this the last entry. */
#define R_PPC64_NUM 253
-/* There's actually a third entry here, but it's unused */
-struct ppc64_opd_entry
-{
- unsigned long funcaddr;
- unsigned long r2;
-};
-
-
#endif /* _UAPI_ASM_POWERPC_ELF_H */
diff --git a/arch/powerpc/include/uapi/asm/errno.h b/arch/powerpc/include/uapi/asm/errno.h
index cc79856896a1..4ba87de32be0 100644
--- a/arch/powerpc/include/uapi/asm/errno.h
+++ b/arch/powerpc/include/uapi/asm/errno.h
@@ -2,6 +2,7 @@
#ifndef _ASM_POWERPC_ERRNO_H
#define _ASM_POWERPC_ERRNO_H
+#undef EDEADLOCK
#include <asm-generic/errno.h>
#undef EDEADLOCK
diff --git a/arch/powerpc/include/uapi/asm/kvm.h b/arch/powerpc/include/uapi/asm/kvm.h
index 264e266a85bf..9f18fa090f1f 100644
--- a/arch/powerpc/include/uapi/asm/kvm.h
+++ b/arch/powerpc/include/uapi/asm/kvm.h
@@ -640,6 +640,13 @@ struct kvm_ppc_cpu_char {
#define KVM_REG_PPC_ONLINE (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xbf)
#define KVM_REG_PPC_PTCR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xc0)
+/* POWER10 registers */
+#define KVM_REG_PPC_MMCR3 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xc1)
+#define KVM_REG_PPC_SIER2 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xc2)
+#define KVM_REG_PPC_SIER3 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xc3)
+#define KVM_REG_PPC_DAWR1 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xc4)
+#define KVM_REG_PPC_DAWRX1 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xc5)
+
/* Transactional Memory checkpointed state:
* This is all GPRs, all VSX regs and a subset of SPRs
*/
diff --git a/arch/powerpc/include/uapi/asm/kvm_para.h b/arch/powerpc/include/uapi/asm/kvm_para.h
index be48c2215fa2..a809b1b44ddf 100644
--- a/arch/powerpc/include/uapi/asm/kvm_para.h
+++ b/arch/powerpc/include/uapi/asm/kvm_para.h
@@ -31,7 +31,7 @@
* Struct fields are always 32 or 64 bit aligned, depending on them being 32
* or 64 bit wide respectively.
*
- * See Documentation/virt/kvm/ppc-pv.txt
+ * See Documentation/virt/kvm/ppc-pv.rst
*/
struct kvm_vcpu_arch_shared {
__u64 scratch1;
diff --git a/arch/powerpc/include/uapi/asm/papr_pdsm.h b/arch/powerpc/include/uapi/asm/papr_pdsm.h
new file mode 100644
index 000000000000..17439925045c
--- /dev/null
+++ b/arch/powerpc/include/uapi/asm/papr_pdsm.h
@@ -0,0 +1,165 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * PAPR nvDimm Specific Methods (PDSM) and structs for libndctl
+ *
+ * (C) Copyright IBM 2020
+ *
+ * Author: Vaibhav Jain <vaibhav@linux.ibm.com>
+ */
+
+#ifndef _UAPI_ASM_POWERPC_PAPR_PDSM_H_
+#define _UAPI_ASM_POWERPC_PAPR_PDSM_H_
+
+#include <linux/types.h>
+#include <linux/ndctl.h>
+
+/*
+ * PDSM Envelope:
+ *
+ * The ioctl ND_CMD_CALL exchanges data between user space and the kernel
+ * via an envelope that consists of two header sections and a payload
+ * section, as illustrated below:
+ * +-----------------+---------------+---------------------------+
+ * | 64-Bytes | 8-Bytes | Max 184-Bytes |
+ * +-----------------+---------------+---------------------------+
+ * | ND-HEADER | PDSM-HEADER | PDSM-PAYLOAD |
+ * +-----------------+---------------+---------------------------+
+ * | nd_family | | |
+ * | nd_size_out | cmd_status | |
+ * | nd_size_in | reserved | nd_pdsm_payload |
+ * | nd_command | payload --> | |
+ * | nd_fw_size | | |
+ * | nd_payload ---> | | |
+ * +-----------------+---------------+---------------------------+
+ *
+ * ND Header:
+ * This is the generic libnvdimm header described as 'struct nd_cmd_pkg'
+ * which is interpreted by libnvdimm before being passed on to papr_scm. Important
+ * member fields used are:
+ * 'nd_family' : (In) NVDIMM_FAMILY_PAPR_SCM
+ * 'nd_size_in' : (In) PDSM-HEADER + PDSM-IN-PAYLOAD (usually 0)
+ * 'nd_size_out' : (In) PDSM-HEADER + PDSM-RETURN-PAYLOAD
+ * 'nd_command' : (In) One of PAPR_PDSM_XXX
+ * 'nd_fw_size' : (Out) PDSM-HEADER + size of actual payload returned
+ *
+ * PDSM Header:
+ * This is the papr-scm specific header that precedes the payload. It is
+ * defined as 'struct nd_pkg_pdsm'. The following fields are available in
+ * this header:
+ *
+ * 'cmd_status' : (Out) Errors if any encountered while servicing PDSM.
+ * 'reserved' : Not used, reserved for future and should be set to 0.
+ * 'payload' : A union of all the possible payload structs
+ *
+ * PDSM Payload:
+ *
+ * The layout of the PDSM Payload is defined by various structs shared between
+ * papr_scm and libndctl so that the contents of the payload can be
+ * interpreted. As such it is defined as a union of all possible payload
+ * structs, 'union nd_pdsm_payload'. Based on the value of
+ * 'nd_cmd_pkg.nd_command' the appropriate member of the union is accessed.
+ */
+
+/* Max payload size that we can handle */
+#define ND_PDSM_PAYLOAD_MAX_SIZE 184
+
+/* Size of the PDSM header that precedes the payload */
+#define ND_PDSM_HDR_SIZE \
+ (sizeof(struct nd_pkg_pdsm) - ND_PDSM_PAYLOAD_MAX_SIZE)
+
+/* Various nvdimm health indicators */
+#define PAPR_PDSM_DIMM_HEALTHY 0
+#define PAPR_PDSM_DIMM_UNHEALTHY 1
+#define PAPR_PDSM_DIMM_CRITICAL 2
+#define PAPR_PDSM_DIMM_FATAL 3
+
+/* struct nd_papr_pdsm_health.extension_flags field flags */
+
+/* Indicate that the 'dimm_fuel_gauge' field is valid */
+#define PDSM_DIMM_HEALTH_RUN_GAUGE_VALID 1
+
+/* Indicate that the 'dimm_dsc' field is valid */
+#define PDSM_DIMM_DSC_VALID 2
+
+/*
+ * Struct exchanged between kernel & ndctl for PAPR_PDSM_HEALTH.
+ * Various flags indicate the health status of the dimm.
+ *
+ * extension_flags : Any extension fields present in the struct.
+ * dimm_unarmed : Dimm not armed, so contents won't persist.
+ * dimm_bad_shutdown : Previous shutdown did not persist contents.
+ * dimm_bad_restore : Contents from previous shutdown weren't restored.
+ * dimm_scrubbed : Contents of the dimm have been scrubbed.
+ * dimm_locked : Contents of the dimm can't be modified until CEC reboot.
+ * dimm_encrypted : Contents of the dimm are encrypted.
+ * dimm_health : Dimm health indicator. One of PAPR_PDSM_DIMM_XXXX.
+ * dimm_fuel_gauge : Life remaining of DIMM as a percentage from 0-100.
+ */
+struct nd_papr_pdsm_health {
+ union {
+ struct {
+ __u32 extension_flags;
+ __u8 dimm_unarmed;
+ __u8 dimm_bad_shutdown;
+ __u8 dimm_bad_restore;
+ __u8 dimm_scrubbed;
+ __u8 dimm_locked;
+ __u8 dimm_encrypted;
+ __u16 dimm_health;
+
+ /* Extension flag PDSM_DIMM_HEALTH_RUN_GAUGE_VALID */
+ __u16 dimm_fuel_gauge;
+
+ /* Extension flag PDSM_DIMM_DSC_VALID */
+ __u64 dimm_dsc;
+ };
+ __u8 buf[ND_PDSM_PAYLOAD_MAX_SIZE];
+ };
+};
+
+/* Flags for injecting specific smart errors */
+#define PDSM_SMART_INJECT_HEALTH_FATAL (1 << 0)
+#define PDSM_SMART_INJECT_BAD_SHUTDOWN (1 << 1)
+
+struct nd_papr_pdsm_smart_inject {
+ union {
+ struct {
+ /* One or more of PDSM_SMART_INJECT_ */
+ __u32 flags;
+ __u8 fatal_enable;
+ __u8 unsafe_shutdown_enable;
+ };
+ __u8 buf[ND_PDSM_PAYLOAD_MAX_SIZE];
+ };
+};
+
+/*
+ * Methods to be embedded in ND_CMD_CALL request. These are sent to the kernel
+ * via 'nd_cmd_pkg.nd_command' member of the ioctl struct
+ */
+enum papr_pdsm {
+ PAPR_PDSM_MIN = 0x0,
+ PAPR_PDSM_HEALTH,
+ PAPR_PDSM_SMART_INJECT,
+ PAPR_PDSM_MAX,
+};
+
+/* Maximal union that can hold all possible payload types */
+union nd_pdsm_payload {
+ struct nd_papr_pdsm_health health;
+ struct nd_papr_pdsm_smart_inject smart_inject;
+ __u8 buf[ND_PDSM_PAYLOAD_MAX_SIZE];
+} __packed;
+
+/*
+ * PDSM header + payload expected with the ND_CMD_CALL ioctl from libnvdimm.
+ * The valid member of the 'payload' union is identified via
+ * 'nd_cmd_pkg.nd_command', which always precedes this struct when it is
+ * sent to papr_scm via the ND_CMD_CALL interface.
+ */
+struct nd_pkg_pdsm {
+ __s32 cmd_status; /* Out: Sub-cmd status returned back */
+ __u16 reserved[2]; /* Ignored and to be set as '0' */
+ union nd_pdsm_payload payload;
+} __packed;
+
+#endif /* _UAPI_ASM_POWERPC_PAPR_PDSM_H_ */
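
A hedged user-space sketch of driving PAPR_PDSM_HEALTH through the ND_CMD_CALL envelope described above. 'dimm_fd' is assumed to be an open file descriptor for the corresponding nmemX device, and NVDIMM_FAMILY_PAPR_SCM / ND_IOCTL_CALL come from <linux/ndctl.h> as referenced in the header comment; error handling is minimal.

#include <string.h>
#include <sys/ioctl.h>
#include <linux/ndctl.h>
#include <asm/papr_pdsm.h>

static int query_dimm_health(int dimm_fd, struct nd_papr_pdsm_health *out)
{
	/* ND header immediately followed by the PDSM header + payload */
	unsigned char buf[sizeof(struct nd_cmd_pkg) + sizeof(struct nd_pkg_pdsm)];
	struct nd_cmd_pkg *hdr = (struct nd_cmd_pkg *)buf;
	struct nd_pkg_pdsm *pdsm = (struct nd_pkg_pdsm *)hdr->nd_payload;

	memset(buf, 0, sizeof(buf));
	hdr->nd_family   = NVDIMM_FAMILY_PAPR_SCM;
	hdr->nd_command  = PAPR_PDSM_HEALTH;
	hdr->nd_size_in  = ND_PDSM_HDR_SIZE;			/* no input payload */
	hdr->nd_size_out = ND_PDSM_HDR_SIZE + sizeof(*out);	/* header + health struct */

	if (ioctl(dimm_fd, ND_IOCTL_CALL, buf) < 0)
		return -1;
	if (pdsm->cmd_status)
		return pdsm->cmd_status;

	*out = pdsm->payload.health;
	return 0;
}
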
diff --git a/arch/powerpc/include/uapi/asm/perf_regs.h b/arch/powerpc/include/uapi/asm/perf_regs.h
index f599064dd8dc..749a2e3af89e 100644
--- a/arch/powerpc/include/uapi/asm/perf_regs.h
+++ b/arch/powerpc/include/uapi/asm/perf_regs.h
@@ -48,6 +48,48 @@ enum perf_event_powerpc_regs {
PERF_REG_POWERPC_DSISR,
PERF_REG_POWERPC_SIER,
PERF_REG_POWERPC_MMCRA,
- PERF_REG_POWERPC_MAX,
+ /* Extended registers */
+ PERF_REG_POWERPC_MMCR0,
+ PERF_REG_POWERPC_MMCR1,
+ PERF_REG_POWERPC_MMCR2,
+ PERF_REG_POWERPC_MMCR3,
+ PERF_REG_POWERPC_SIER2,
+ PERF_REG_POWERPC_SIER3,
+ PERF_REG_POWERPC_PMC1,
+ PERF_REG_POWERPC_PMC2,
+ PERF_REG_POWERPC_PMC3,
+ PERF_REG_POWERPC_PMC4,
+ PERF_REG_POWERPC_PMC5,
+ PERF_REG_POWERPC_PMC6,
+ PERF_REG_POWERPC_SDAR,
+ PERF_REG_POWERPC_SIAR,
+ /* Max mask value for interrupt regs w/o extended regs */
+ PERF_REG_POWERPC_MAX = PERF_REG_POWERPC_MMCRA + 1,
+ /* Max mask value for interrupt regs including extended regs */
+ PERF_REG_EXTENDED_MAX = PERF_REG_POWERPC_SIAR + 1,
};
+
+#define PERF_REG_PMU_MASK ((1ULL << PERF_REG_POWERPC_MAX) - 1)
+
+/*
+ * PERF_REG_EXTENDED_MASK value for CPU_FTR_ARCH_300
+ * includes 11 SPRs from MMCR0 to SIAR, excluding the
+ * unsupported SPRs MMCR3, SIER2 and SIER3.
+ */
+#define PERF_REG_PMU_MASK_300 \
+ ((1ULL << PERF_REG_POWERPC_MMCR0) | (1ULL << PERF_REG_POWERPC_MMCR1) | \
+ (1ULL << PERF_REG_POWERPC_MMCR2) | (1ULL << PERF_REG_POWERPC_PMC1) | \
+ (1ULL << PERF_REG_POWERPC_PMC2) | (1ULL << PERF_REG_POWERPC_PMC3) | \
+ (1ULL << PERF_REG_POWERPC_PMC4) | (1ULL << PERF_REG_POWERPC_PMC5) | \
+ (1ULL << PERF_REG_POWERPC_PMC6) | (1ULL << PERF_REG_POWERPC_SDAR) | \
+ (1ULL << PERF_REG_POWERPC_SIAR))
+
+/*
+ * PERF_REG_EXTENDED_MASK value for CPU_FTR_ARCH_31
+ * includes 14 SPRs from MMCR0 to SIAR.
+ */
+#define PERF_REG_PMU_MASK_31 \
+ (PERF_REG_PMU_MASK_300 | (1ULL << PERF_REG_POWERPC_MMCR3) | \
+ (1ULL << PERF_REG_POWERPC_SIER2) | (1ULL << PERF_REG_POWERPC_SIER3))
+
#endif /* _UAPI_ASM_POWERPC_PERF_REGS_H */
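
A user-space sketch of requesting the extended register set when opening a sampling event on an ISA 3.1 machine; only the two attr fields shown are relevant here and the helper name is illustrative.

#include <linux/perf_event.h>
#include <asm/perf_regs.h>

static void want_extended_regs(struct perf_event_attr *attr)
{
	attr->sample_type      |= PERF_SAMPLE_REGS_INTR;
	attr->sample_regs_intr  = PERF_REG_PMU_MASK_31;	/* MMCR0 .. SIAR */
}
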
diff --git a/arch/powerpc/include/uapi/asm/posix_types.h b/arch/powerpc/include/uapi/asm/posix_types.h
index f698400e4bb0..9c0342312544 100644
--- a/arch/powerpc/include/uapi/asm/posix_types.h
+++ b/arch/powerpc/include/uapi/asm/posix_types.h
@@ -12,11 +12,6 @@
typedef unsigned long __kernel_old_dev_t;
#define __kernel_old_dev_t __kernel_old_dev_t
#else
-typedef unsigned int __kernel_size_t;
-typedef int __kernel_ssize_t;
-typedef long __kernel_ptrdiff_t;
-#define __kernel_size_t __kernel_size_t
-
typedef short __kernel_ipc_pid_t;
#define __kernel_ipc_pid_t __kernel_ipc_pid_t
#endif
diff --git a/arch/powerpc/include/uapi/asm/ptrace.h b/arch/powerpc/include/uapi/asm/ptrace.h
index f5f1ccc740fc..7004cfea3f5f 100644
--- a/arch/powerpc/include/uapi/asm/ptrace.h
+++ b/arch/powerpc/include/uapi/asm/ptrace.h
@@ -222,6 +222,7 @@ struct ppc_debug_info {
#define PPC_DEBUG_FEATURE_DATA_BP_RANGE 0x0000000000000004
#define PPC_DEBUG_FEATURE_DATA_BP_MASK 0x0000000000000008
#define PPC_DEBUG_FEATURE_DATA_BP_DAWR 0x0000000000000010
+#define PPC_DEBUG_FEATURE_DATA_BP_ARCH_31 0x0000000000000020
#ifndef __ASSEMBLY__
diff --git a/arch/powerpc/include/uapi/asm/shmbuf.h b/arch/powerpc/include/uapi/asm/shmbuf.h
index 00422b2f3c63..439a3a02ba64 100644
--- a/arch/powerpc/include/uapi/asm/shmbuf.h
+++ b/arch/powerpc/include/uapi/asm/shmbuf.h
@@ -2,6 +2,9 @@
#ifndef _ASM_POWERPC_SHMBUF_H
#define _ASM_POWERPC_SHMBUF_H
+#include <asm/ipcbuf.h>
+#include <asm/posix_types.h>
+
/*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -34,7 +37,7 @@ struct shmid64_ds {
unsigned long shm_ctime; /* last change time */
unsigned long __unused4;
#endif
- size_t shm_segsz; /* size of segment (bytes) */
+ __kernel_size_t shm_segsz; /* size of segment (bytes) */
__kernel_pid_t shm_cpid; /* pid of creator */
__kernel_pid_t shm_lpid; /* pid of last operator */
unsigned long shm_nattch; /* no. of current attaches */
diff --git a/arch/powerpc/include/uapi/asm/signal.h b/arch/powerpc/include/uapi/asm/signal.h
index 85b0a7aa43e7..a5dfe84f50ab 100644
--- a/arch/powerpc/include/uapi/asm/signal.h
+++ b/arch/powerpc/include/uapi/asm/signal.h
@@ -60,34 +60,15 @@ typedef struct {
#define SIGRTMIN 32
#define SIGRTMAX _NSIG
-/*
- * SA_FLAGS values:
- *
- * SA_ONSTACK is not currently supported, but will allow sigaltstack(2).
- * SA_RESTART flag to get restarting signals (which were the default long ago)
- * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop.
- * SA_RESETHAND clears the handler when the signal is delivered.
- * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies.
- * SA_NODEFER prevents the current signal from being masked in the handler.
- *
- * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single
- * Unix names RESETHAND and NODEFER respectively.
- */
-#define SA_NOCLDSTOP 0x00000001U
-#define SA_NOCLDWAIT 0x00000002U
-#define SA_SIGINFO 0x00000004U
-#define SA_ONSTACK 0x08000000U
-#define SA_RESTART 0x10000000U
-#define SA_NODEFER 0x40000000U
-#define SA_RESETHAND 0x80000000U
-
-#define SA_NOMASK SA_NODEFER
-#define SA_ONESHOT SA_RESETHAND
-
#define SA_RESTORER 0x04000000U
+#ifdef __powerpc64__
+#define MINSIGSTKSZ 8192
+#define SIGSTKSZ 32768
+#else
#define MINSIGSTKSZ 2048
#define SIGSTKSZ 8192
+#endif
#include <asm-generic/signal-defs.h>
@@ -110,7 +91,7 @@ struct sigaction {
typedef struct sigaltstack {
void __user *ss_sp;
int ss_flags;
- size_t ss_size;
+ __kernel_size_t ss_size;
} stack_t;
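A brief illustration of why the 32-/64-bit split above matters: code that sizes its alternate signal stack from these macros picks up the larger ppc64 values at compile time without any source change.

#include <signal.h>
#include <stdlib.h>

static int install_altstack(void)
{
	stack_t ss = {
		.ss_sp = malloc(SIGSTKSZ),	/* 32 KB on ppc64, 8 KB on ppc32 */
		.ss_size = SIGSTKSZ,
		.ss_flags = 0,
	};

	return ss.ss_sp ? sigaltstack(&ss, NULL) : -1;
}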
diff --git a/arch/powerpc/include/uapi/asm/stat.h b/arch/powerpc/include/uapi/asm/stat.h
index 7871055e5e32..d50901664239 100644
--- a/arch/powerpc/include/uapi/asm/stat.h
+++ b/arch/powerpc/include/uapi/asm/stat.h
@@ -29,16 +29,16 @@ struct __old_kernel_stat {
struct stat {
unsigned long st_dev;
- ino_t st_ino;
+ __kernel_ino_t st_ino;
#ifdef __powerpc64__
unsigned long st_nlink;
- mode_t st_mode;
+ __kernel_mode_t st_mode;
#else
- mode_t st_mode;
+ __kernel_mode_t st_mode;
unsigned short st_nlink;
#endif
- uid_t st_uid;
- gid_t st_gid;
+ __kernel_uid32_t st_uid;
+ __kernel_gid32_t st_gid;
unsigned long st_rdev;
long st_size;
unsigned long st_blksize;
diff --git a/arch/powerpc/include/uapi/asm/termbits.h b/arch/powerpc/include/uapi/asm/termbits.h
index ed18bc61f63d..21dc86dcb2f1 100644
--- a/arch/powerpc/include/uapi/asm/termbits.h
+++ b/arch/powerpc/include/uapi/asm/termbits.h
@@ -9,8 +9,8 @@
* 2 of the License, or (at your option) any later version.
*/
-typedef unsigned char cc_t;
-typedef unsigned int speed_t;
+#include <asm-generic/termbits-common.h>
+
typedef unsigned int tcflag_t;
/*
@@ -64,115 +64,72 @@ struct ktermios {
#define VDISCARD 16
/* c_iflag bits */
-#define IGNBRK 0000001
-#define BRKINT 0000002
-#define IGNPAR 0000004
-#define PARMRK 0000010
-#define INPCK 0000020
-#define ISTRIP 0000040
-#define INLCR 0000100
-#define IGNCR 0000200
-#define ICRNL 0000400
-#define IXON 0001000
-#define IXOFF 0002000
-#define IXANY 0004000
-#define IUCLC 0010000
-#define IMAXBEL 0020000
-#define IUTF8 0040000
+#define IXON 0x0200
+#define IXOFF 0x0400
+#define IUCLC 0x1000
+#define IMAXBEL 0x2000
+#define IUTF8 0x4000
/* c_oflag bits */
-#define OPOST 0000001
-#define ONLCR 0000002
-#define OLCUC 0000004
-
-#define OCRNL 0000010
-#define ONOCR 0000020
-#define ONLRET 0000040
-
-#define OFILL 00000100
-#define OFDEL 00000200
-#define NLDLY 00001400
-#define NL0 00000000
-#define NL1 00000400
-#define NL2 00001000
-#define NL3 00001400
-#define TABDLY 00006000
-#define TAB0 00000000
-#define TAB1 00002000
-#define TAB2 00004000
-#define TAB3 00006000
-#define XTABS 00006000 /* required by POSIX to == TAB3 */
-#define CRDLY 00030000
-#define CR0 00000000
-#define CR1 00010000
-#define CR2 00020000
-#define CR3 00030000
-#define FFDLY 00040000
-#define FF0 00000000
-#define FF1 00040000
-#define BSDLY 00100000
-#define BS0 00000000
-#define BS1 00100000
-#define VTDLY 00200000
-#define VT0 00000000
-#define VT1 00200000
+#define ONLCR 0x00002
+#define OLCUC 0x00004
+#define NLDLY 0x00300
+#define NL0 0x00000
+#define NL1 0x00100
+#define NL2 0x00200
+#define NL3 0x00300
+#define TABDLY 0x00c00
+#define TAB0 0x00000
+#define TAB1 0x00400
+#define TAB2 0x00800
+#define TAB3 0x00c00
+#define XTABS 0x00c00 /* required by POSIX to == TAB3 */
+#define CRDLY 0x03000
+#define CR0 0x00000
+#define CR1 0x01000
+#define CR2 0x02000
+#define CR3 0x03000
+#define FFDLY 0x04000
+#define FF0 0x00000
+#define FF1 0x04000
+#define BSDLY 0x08000
+#define BS0 0x00000
+#define BS1 0x08000
+#define VTDLY 0x10000
+#define VT0 0x00000
+#define VT1 0x10000
/* c_cflag bit meaning */
-#define CBAUD 0000377
-#define B0 0000000 /* hang up */
-#define B50 0000001
-#define B75 0000002
-#define B110 0000003
-#define B134 0000004
-#define B150 0000005
-#define B200 0000006
-#define B300 0000007
-#define B600 0000010
-#define B1200 0000011
-#define B1800 0000012
-#define B2400 0000013
-#define B4800 0000014
-#define B9600 0000015
-#define B19200 0000016
-#define B38400 0000017
-#define EXTA B19200
-#define EXTB B38400
-#define CBAUDEX 0000000
-#define B57600 00020
-#define B115200 00021
-#define B230400 00022
-#define B460800 00023
-#define B500000 00024
-#define B576000 00025
-#define B921600 00026
-#define B1000000 00027
-#define B1152000 00030
-#define B1500000 00031
-#define B2000000 00032
-#define B2500000 00033
-#define B3000000 00034
-#define B3500000 00035
-#define B4000000 00036
-#define BOTHER 00037
-
-#define CIBAUD 077600000
-#define IBSHIFT 16 /* Shift from CBAUD to CIBAUD */
-
-#define CSIZE 00001400
-#define CS5 00000000
-#define CS6 00000400
-#define CS7 00001000
-#define CS8 00001400
-
-#define CSTOPB 00002000
-#define CREAD 00004000
-#define PARENB 00010000
-#define PARODD 00020000
-#define HUPCL 00040000
-
-#define CLOCAL 00100000
-#define CMSPAR 010000000000 /* mark or space (stick) parity */
-#define CRTSCTS 020000000000 /* flow control */
+#define CBAUD 0x000000ff
+#define CBAUDEX 0x00000000
+#define BOTHER 0x0000001f
+#define B57600 0x00000010
+#define B115200 0x00000011
+#define B230400 0x00000012
+#define B460800 0x00000013
+#define B500000 0x00000014
+#define B576000 0x00000015
+#define B921600 0x00000016
+#define B1000000 0x00000017
+#define B1152000 0x00000018
+#define B1500000 0x00000019
+#define B2000000 0x0000001a
+#define B2500000 0x0000001b
+#define B3000000 0x0000001c
+#define B3500000 0x0000001d
+#define B4000000 0x0000001e
+#define CSIZE 0x00000300
+#define CS5 0x00000000
+#define CS6 0x00000100
+#define CS7 0x00000200
+#define CS8 0x00000300
+#define CSTOPB 0x00000400
+#define CREAD 0x00000800
+#define PARENB 0x00001000
+#define PARODD 0x00002000
+#define HUPCL 0x00004000
+#define CLOCAL 0x00008000
+#define CIBAUD 0x00ff0000
/* c_lflag bits */
#define ISIG 0x00000080
@@ -192,17 +149,6 @@ struct ktermios {
#define IEXTEN 0x00000400
#define EXTPROC 0x10000000
-/* Values for the ACTION argument to `tcflow'. */
-#define TCOOFF 0
-#define TCOON 1
-#define TCIOFF 2
-#define TCION 3
-
-/* Values for the QUEUE_SELECTOR argument to `tcflush'. */
-#define TCIFLUSH 0
-#define TCOFLUSH 1
-#define TCIOFLUSH 2
-
/* Values for the OPTIONAL_ACTIONS argument to `tcsetattr'. */
#define TCSANOW 0
#define TCSADRAIN 1
diff --git a/arch/powerpc/include/uapi/asm/vas-api.h b/arch/powerpc/include/uapi/asm/vas-api.h
new file mode 100644
index 000000000000..7c81301ecdba
--- /dev/null
+++ b/arch/powerpc/include/uapi/asm/vas-api.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
+/*
+ * Copyright 2019 IBM Corp.
+ */
+
+#ifndef _UAPI_MISC_VAS_H
+#define _UAPI_MISC_VAS_H
+
+#include <linux/types.h>
+
+#include <asm/ioctl.h>
+
+#define VAS_MAGIC 'v'
+#define VAS_TX_WIN_OPEN _IOW(VAS_MAGIC, 0x20, struct vas_tx_win_open_attr)
+
+/* Flags to VAS TX open window ioctl */
+/* To allocate a window with QoS credit, otherwise use default credit */
+#define VAS_TX_WIN_FLAG_QOS_CREDIT 0x0000000000000001
+
+struct vas_tx_win_open_attr {
+ __u32 version;
+ __s16 vas_id; /* specific instance of vas or -1 for default */
+ __u16 reserved1;
+ __u64 flags;
+ __u64 reserved2[6];
+};
+
+#endif /* _UAPI_MISC_VAS_H */
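A rough consumer-side sketch of this new interface (not part of the patch): the device node path below is an assumption for illustration only, since the actual node is created by whichever coprocessor driver (for example NX-GZIP) exposes the VAS window ioctl.

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <asm/vas-api.h>

static int open_vas_tx_window(const char *devpath)
{
	struct vas_tx_win_open_attr attr = {
		.version = 1,
		.vas_id = -1,	/* let the kernel choose a VAS instance */
		.flags = 0,	/* or VAS_TX_WIN_FLAG_QOS_CREDIT */
	};
	int fd = open(devpath, O_RDWR);

	if (fd < 0)
		return -1;

	if (ioctl(fd, VAS_TX_WIN_OPEN, &attr) < 0) {
		close(fd);
		return -1;
	}

	return fd;	/* the send window is now bound to this fd */
}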
diff --git a/arch/powerpc/kernel/.gitignore b/arch/powerpc/kernel/.gitignore
index 67ebd3003c05..d71179d3ffe9 100644
--- a/arch/powerpc/kernel/.gitignore
+++ b/arch/powerpc/kernel/.gitignore
@@ -1,2 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
prom_init_check
vmlinux.lds
diff --git a/arch/powerpc/kernel/fsl_booke_entry_mapping.S b/arch/powerpc/kernel/85xx_entry_mapping.S
index 8bccce6544b5..dedc17fac8f8 100644
--- a/arch/powerpc/kernel/fsl_booke_entry_mapping.S
+++ b/arch/powerpc/kernel/85xx_entry_mapping.S
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* 1. Find the index of the entry we're executing in */
- bl invstr /* Find our address */
+ bcl 20,31,$+4 /* Find our address */
invstr: mflr r6 /* Make it accessible */
mfmsr r7
rlwinm r4,r7,27,31,31 /* extract MSR[IS] */
@@ -85,7 +85,7 @@ skpinv: addi r6,r6,1 /* Increment */
addi r6,r6,10
slw r6,r8,r6 /* convert to mask */
- bl 1f /* Find our address */
+ bcl 20,31,$+4 /* Find our address */
1: mflr r7
mfspr r8,SPRN_MAS3
@@ -117,7 +117,7 @@ skpinv: addi r6,r6,1 /* Increment */
xori r6,r4,1
slwi r6,r6,5 /* setup new context with other address space */
- bl 1f /* Find our address */
+ bcl 20,31,$+4 /* Find our address */
1: mflr r9
rlwimi r7,r9,0,20,31
addi r7,r7,(2f - 1b)
@@ -207,7 +207,7 @@ next_tlb_setup:
lis r7,MSR_KERNEL@h
ori r7,r7,MSR_KERNEL@l
- bl 1f /* Find our address */
+ bcl 20,31,$+4 /* Find our address */
1: mflr r9
rlwimi r6,r9,0,20,31
addi r6,r6,(2f - 1b)
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 78a1b22d4fd8..9b6146056e48 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -3,8 +3,6 @@
# Makefile for the linux kernel.
#
-CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"'
-
ifdef CONFIG_PPC64
CFLAGS_prom_init.o += $(NO_MINIMAL_TOC)
endif
@@ -13,14 +11,16 @@ CFLAGS_prom_init.o += -fPIC
CFLAGS_btext.o += -fPIC
endif
+CFLAGS_early_32.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
CFLAGS_cputable.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
CFLAGS_prom_init.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
CFLAGS_btext.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
CFLAGS_prom.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
-CFLAGS_prom_init.o += $(call cc-option, -fno-stack-protector)
+CFLAGS_prom_init.o += -fno-stack-protector
CFLAGS_prom_init.o += -DDISABLE_BRANCH_PROFILING
CFLAGS_prom_init.o += -ffreestanding
+CFLAGS_prom_init.o += $(call cc-option, -ftrivial-auto-var-init=uninitialized)
ifdef CONFIG_FUNCTION_TRACER
# Do not trace early boot code
@@ -34,6 +34,19 @@ KASAN_SANITIZE_early_32.o := n
KASAN_SANITIZE_cputable.o := n
KASAN_SANITIZE_prom_init.o := n
KASAN_SANITIZE_btext.o := n
+KASAN_SANITIZE_paca.o := n
+KASAN_SANITIZE_setup_64.o := n
+KASAN_SANITIZE_mce.o := n
+KASAN_SANITIZE_mce_power.o := n
+KASAN_SANITIZE_udbg.o := n
+KASAN_SANITIZE_udbg_16550.o := n
+
+# we have to be particularly careful in ppc64 to exclude code that
+# runs with translations off, as we cannot access the shadow with
+# translations off. However, ppc32 can sanitize this.
+ifdef CONFIG_PPC64
+KASAN_SANITIZE_traps.o := n
+endif
ifdef CONFIG_KASAN
CFLAGS_early_32.o += -DDISABLE_BRANCH_PROFILING
@@ -41,70 +54,80 @@ CFLAGS_cputable.o += -DDISABLE_BRANCH_PROFILING
CFLAGS_btext.o += -DDISABLE_BRANCH_PROFILING
endif
-obj-y := cputable.o ptrace.o syscalls.o \
- irq.o align.o signal_32.o pmc.o vdso.o \
+#ifdef CONFIG_RANDOMIZE_KSTACK_OFFSET
+# Remove stack protector to avoid triggering unneeded stack canary
+# checks due to randomize_kstack_offset.
+CFLAGS_REMOVE_syscall.o = -fstack-protector -fstack-protector-strong
+CFLAGS_syscall.o += -fno-stack-protector
+#endif
+
+obj-y := cputable.o syscalls.o \
+ irq.o align.o signal_$(BITS).o pmc.o vdso.o \
process.o systbl.o idle.o \
signal.o sysfs.o cacheinfo.o time.o \
prom.o traps.o setup-common.o \
udbg.o misc.o io.o misc_$(BITS).o \
- of_platform.o prom_parse.o
-obj-$(CONFIG_PPC64) += setup_64.o sys_ppc32.o \
- signal_64.o ptrace32.o \
- paca.o nvram_64.o firmware.o note.o
-obj-$(CONFIG_VDSO32) += vdso32/
+ of_platform.o prom_parse.o firmware.o \
+ hw_breakpoint_constraints.o interrupt.o \
+ kdebugfs.o stacktrace.o syscall.o
+obj-y += ptrace/
+obj-$(CONFIG_PPC64) += setup_64.o irq_64.o\
+ paca.o nvram_64.o note.o
+obj-$(CONFIG_PPC32) += sys_ppc32.o
+obj-$(CONFIG_COMPAT) += sys_ppc32.o signal_32.o
+obj-$(CONFIG_VDSO32) += vdso32_wrapper.o
obj-$(CONFIG_PPC_WATCHDOG) += watchdog.o
obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
obj-$(CONFIG_PPC_DAWR) += dawr.o
obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_ppc970.o cpu_setup_pa6t.o
obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_power.o
obj-$(CONFIG_PPC_BOOK3S_64) += mce.o mce_power.o
-obj-$(CONFIG_PPC_BOOK3E_64) += exceptions-64e.o idle_book3e.o
+obj-$(CONFIG_PPC_BOOK3E_64) += exceptions-64e.o idle_64e.o
obj-$(CONFIG_PPC_BARRIER_NOSPEC) += security.o
-obj-$(CONFIG_PPC64) += vdso64/
+obj-$(CONFIG_PPC64) += vdso64_wrapper.o
obj-$(CONFIG_ALTIVEC) += vecemu.o
obj-$(CONFIG_PPC_BOOK3S_IDLE) += idle_book3s.o
procfs-y := proc_powerpc.o
obj-$(CONFIG_PROC_FS) += $(procfs-y)
rtaspci-$(CONFIG_PPC64)-$(CONFIG_PCI) := rtas_pci.o
-obj-$(CONFIG_PPC_RTAS) += rtas.o rtas-rtc.o $(rtaspci-y-y)
+obj-$(CONFIG_PPC_RTAS) += rtas_entry.o rtas.o rtas-rtc.o $(rtaspci-y-y)
obj-$(CONFIG_PPC_RTAS_DAEMON) += rtasd.o
obj-$(CONFIG_RTAS_FLASH) += rtas_flash.o
obj-$(CONFIG_RTAS_PROC) += rtas-proc.o
obj-$(CONFIG_PPC_DT_CPU_FTRS) += dt_cpu_ftrs.o
-obj-$(CONFIG_EEH) += eeh.o eeh_pe.o eeh_dev.o eeh_cache.o \
+obj-$(CONFIG_EEH) += eeh.o eeh_pe.o eeh_cache.o \
eeh_driver.o eeh_event.o eeh_sysfs.o
obj-$(CONFIG_GENERIC_TBSYNC) += smp-tbsync.o
obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
obj-$(CONFIG_FA_DUMP) += fadump.o
obj-$(CONFIG_PRESERVE_FA_DUMP) += fadump.o
-ifdef CONFIG_PPC32
-obj-$(CONFIG_E500) += idle_e500.o
-endif
+obj-$(CONFIG_PPC_85xx) += idle_85xx.o
obj-$(CONFIG_PPC_BOOK3S_32) += idle_6xx.o l2cr_6xx.o cpu_setup_6xx.o
obj-$(CONFIG_TAU) += tau_6xx.o
obj-$(CONFIG_HIBERNATION) += swsusp.o suspend.o
-ifdef CONFIG_FSL_BOOKE
-obj-$(CONFIG_HIBERNATION) += swsusp_booke.o
+ifdef CONFIG_PPC_85xx
+obj-$(CONFIG_HIBERNATION) += swsusp_85xx.o
else
obj-$(CONFIG_HIBERNATION) += swsusp_$(BITS).o
endif
obj64-$(CONFIG_HIBERNATION) += swsusp_asm64.o
obj-$(CONFIG_MODULES) += module.o module_$(BITS).o
obj-$(CONFIG_44x) += cpu_setup_44x.o
-obj-$(CONFIG_PPC_FSL_BOOK3E) += cpu_setup_fsl_booke.o
+obj-$(CONFIG_PPC_E500) += cpu_setup_e500.o
obj-$(CONFIG_PPC_DOORBELL) += dbell.o
obj-$(CONFIG_JUMP_LABEL) += jump_label.o
-extra-y := head_$(BITS).o
-extra-$(CONFIG_40x) := head_40x.o
-extra-$(CONFIG_44x) := head_44x.o
-extra-$(CONFIG_FSL_BOOKE) := head_fsl_booke.o
-extra-$(CONFIG_PPC_8xx) := head_8xx.o
+obj-$(CONFIG_PPC64) += head_64.o
+obj-$(CONFIG_PPC_BOOK3S_32) += head_book3s_32.o
+obj-$(CONFIG_40x) += head_40x.o
+obj-$(CONFIG_44x) += head_44x.o
+obj-$(CONFIG_PPC_8xx) += head_8xx.o
+obj-$(CONFIG_PPC_85xx) += head_85xx.o
extra-y += vmlinux.lds
obj-$(CONFIG_RELOCATABLE) += reloc_$(BITS).o
-obj-$(CONFIG_PPC32) += entry_32.o setup_32.o early_32.o
+obj-$(CONFIG_PPC32) += entry_32.o setup_32.o early_32.o static_call.o
obj-$(CONFIG_PPC64) += dma-iommu.o iommu.o
obj-$(CONFIG_KGDB) += kgdb.o
obj-$(CONFIG_BOOTX_TEXT) += btext.o
@@ -114,7 +137,6 @@ obj-$(CONFIG_OPTPROBES) += optprobes.o optprobes_head.o
obj-$(CONFIG_KPROBES_ON_FTRACE) += kprobes-ftrace.o
obj-$(CONFIG_UPROBES) += uprobes.o
obj-$(CONFIG_PPC_UDBG_16550) += legacy_serial.o udbg_16550.o
-obj-$(CONFIG_STACKTRACE) += stacktrace.o
obj-$(CONFIG_SWIOTLB) += dma-swiotlb.o
obj-$(CONFIG_ARCH_HAS_DMA_SET_MASK) += dma-mask.o
@@ -162,6 +184,9 @@ UBSAN_SANITIZE_kprobes.o := n
GCOV_PROFILE_kprobes-ftrace.o := n
KCOV_INSTRUMENT_kprobes-ftrace.o := n
UBSAN_SANITIZE_kprobes-ftrace.o := n
+GCOV_PROFILE_syscall_64.o := n
+KCOV_INSTRUMENT_syscall_64.o := n
+UBSAN_SANITIZE_syscall_64.o := n
UBSAN_SANITIZE_vdso.o := n
# Necessary for booting with kcov enabled on book3e machines
@@ -169,10 +194,13 @@ KCOV_INSTRUMENT_cputable.o := n
KCOV_INSTRUMENT_setup_64.o := n
KCOV_INSTRUMENT_paca.o := n
-extra-$(CONFIG_PPC_FPU) += fpu.o
-extra-$(CONFIG_ALTIVEC) += vector.o
-extra-$(CONFIG_PPC64) += entry_64.o
-extra-$(CONFIG_PPC_OF_BOOT_TRAMPOLINE) += prom_init.o
+CFLAGS_setup_64.o += -fno-stack-protector
+CFLAGS_paca.o += -fno-stack-protector
+
+obj-$(CONFIG_PPC_FPU) += fpu.o
+obj-$(CONFIG_ALTIVEC) += vector.o
+obj-$(CONFIG_PPC64) += entry_64.o
+obj-$(CONFIG_PPC_OF_BOOT_TRAMPOLINE) += prom_init.o
extra-$(CONFIG_PPC_OF_BOOT_TRAMPOLINE) += prom_init_check
@@ -184,3 +212,10 @@ $(obj)/prom_init_check: $(src)/prom_init_check.sh $(obj)/prom_init.o FORCE
targets += prom_init_check
clean-files := vmlinux.lds
+
+# Force dependency (incbin is bad)
+$(obj)/vdso32_wrapper.o : $(obj)/vdso/vdso32.so.dbg
+$(obj)/vdso64_wrapper.o : $(obj)/vdso/vdso64.so.dbg
+
+# for cleaning
+subdir- += vdso
diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c
index 92045ed64976..3e37ece06739 100644
--- a/arch/powerpc/kernel/align.c
+++ b/arch/powerpc/kernel/align.c
@@ -24,6 +24,7 @@
#include <asm/disassemble.h>
#include <asm/cpu_has_feature.h>
#include <asm/sstep.h>
+#include <asm/inst.h>
struct aligninfo {
unsigned char len;
@@ -104,9 +105,8 @@ static struct aligninfo spe_aligninfo[32] = {
* so we don't need the address swizzling.
*/
static int emulate_spe(struct pt_regs *regs, unsigned int reg,
- unsigned int instr)
+ ppc_inst_t ppc_instr)
{
- int ret;
union {
u64 ll;
u32 w[2];
@@ -115,8 +115,9 @@ static int emulate_spe(struct pt_regs *regs, unsigned int reg,
} data, temp;
unsigned char __user *p, *addr;
unsigned long *evr = &current->thread.evr[reg];
- unsigned int nb, flags;
+ unsigned int nb, flags, instr;
+ instr = ppc_inst_val(ppc_instr);
instr = (instr >> 1) & 0x1f;
/* DAR has the operand effective address */
@@ -125,11 +126,6 @@ static int emulate_spe(struct pt_regs *regs, unsigned int reg,
nb = spe_aligninfo[instr].len;
flags = spe_aligninfo[instr].flags;
- /* Verify the address of the operand */
- if (unlikely(user_mode(regs) &&
- !access_ok(addr, nb)))
- return -EFAULT;
-
/* userland only */
if (unlikely(!user_mode(regs)))
return 0;
@@ -167,26 +163,27 @@ static int emulate_spe(struct pt_regs *regs, unsigned int reg,
}
} else {
temp.ll = data.ll = 0;
- ret = 0;
p = addr;
+ if (!user_read_access_begin(addr, nb))
+ return -EFAULT;
+
switch (nb) {
case 8:
- ret |= __get_user_inatomic(temp.v[0], p++);
- ret |= __get_user_inatomic(temp.v[1], p++);
- ret |= __get_user_inatomic(temp.v[2], p++);
- ret |= __get_user_inatomic(temp.v[3], p++);
- /* fall through */
+ unsafe_get_user(temp.v[0], p++, Efault_read);
+ unsafe_get_user(temp.v[1], p++, Efault_read);
+ unsafe_get_user(temp.v[2], p++, Efault_read);
+ unsafe_get_user(temp.v[3], p++, Efault_read);
+ fallthrough;
case 4:
- ret |= __get_user_inatomic(temp.v[4], p++);
- ret |= __get_user_inatomic(temp.v[5], p++);
- /* fall through */
+ unsafe_get_user(temp.v[4], p++, Efault_read);
+ unsafe_get_user(temp.v[5], p++, Efault_read);
+ fallthrough;
case 2:
- ret |= __get_user_inatomic(temp.v[6], p++);
- ret |= __get_user_inatomic(temp.v[7], p++);
- if (unlikely(ret))
- return -EFAULT;
+ unsafe_get_user(temp.v[6], p++, Efault_read);
+ unsafe_get_user(temp.v[7], p++, Efault_read);
}
+ user_read_access_end();
switch (instr) {
case EVLDD:
@@ -253,31 +250,41 @@ static int emulate_spe(struct pt_regs *regs, unsigned int reg,
/* Store result to memory or update registers */
if (flags & ST) {
- ret = 0;
p = addr;
+
+ if (!user_write_access_begin(addr, nb))
+ return -EFAULT;
+
switch (nb) {
case 8:
- ret |= __put_user_inatomic(data.v[0], p++);
- ret |= __put_user_inatomic(data.v[1], p++);
- ret |= __put_user_inatomic(data.v[2], p++);
- ret |= __put_user_inatomic(data.v[3], p++);
- /* fall through */
+ unsafe_put_user(data.v[0], p++, Efault_write);
+ unsafe_put_user(data.v[1], p++, Efault_write);
+ unsafe_put_user(data.v[2], p++, Efault_write);
+ unsafe_put_user(data.v[3], p++, Efault_write);
+ fallthrough;
case 4:
- ret |= __put_user_inatomic(data.v[4], p++);
- ret |= __put_user_inatomic(data.v[5], p++);
- /* fall through */
+ unsafe_put_user(data.v[4], p++, Efault_write);
+ unsafe_put_user(data.v[5], p++, Efault_write);
+ fallthrough;
case 2:
- ret |= __put_user_inatomic(data.v[6], p++);
- ret |= __put_user_inatomic(data.v[7], p++);
+ unsafe_put_user(data.v[6], p++, Efault_write);
+ unsafe_put_user(data.v[7], p++, Efault_write);
}
- if (unlikely(ret))
- return -EFAULT;
+ user_write_access_end();
} else {
*evr = data.w[0];
regs->gpr[reg] = data.w[1];
}
return 1;
+
+Efault_read:
+ user_read_access_end();
+ return -EFAULT;
+
+Efault_write:
+ user_write_access_end();
+ return -EFAULT;
}
#endif /* CONFIG_SPE */
@@ -293,28 +300,27 @@ static int emulate_spe(struct pt_regs *regs, unsigned int reg,
int fix_alignment(struct pt_regs *regs)
{
- unsigned int instr;
+ ppc_inst_t instr;
struct instruction_op op;
int r, type;
- /*
- * We require a complete register set, if not, then our assembly
- * is broken
- */
- CHECK_FULL_REGS(regs);
+ if (is_kernel_addr(regs->nip))
+ r = copy_inst_from_kernel_nofault(&instr, (void *)regs->nip);
+ else
+ r = __get_user_instr(instr, (void __user *)regs->nip);
- if (unlikely(__get_user(instr, (unsigned int __user *)regs->nip)))
+ if (unlikely(r))
return -EFAULT;
if ((regs->msr & MSR_LE) != (MSR_KERNEL & MSR_LE)) {
/* We don't handle PPC little-endian any more... */
if (cpu_has_feature(CPU_FTR_PPC_LE))
return -EIO;
- instr = swab32(instr);
+ instr = ppc_inst_swab(instr);
}
#ifdef CONFIG_SPE
- if ((instr >> 26) == 0x4) {
- int reg = (instr >> 21) & 0x1f;
+ if (ppc_inst_primary_opcode(instr) == 0x4) {
+ int reg = (ppc_inst_val(instr) >> 21) & 0x1f;
PPC_WARN_ALIGNMENT(spe, regs);
return emulate_spe(regs, reg, instr);
}
@@ -331,7 +337,7 @@ int fix_alignment(struct pt_regs *regs)
* when pasting to a co-processor. Furthermore, paste_last is the
* synchronisation point for preceding copy/paste sequences.
*/
- if ((instr & 0xfc0006fe) == (PPC_INST_COPY & 0xfc0006fe))
+ if ((ppc_inst_val(instr) & 0xfc0006fe) == (PPC_INST_COPY & 0xfc0006fe))
return -EIO;
r = analyse_instr(&op, regs, instr);
@@ -343,6 +349,7 @@ int fix_alignment(struct pt_regs *regs)
if (op.type != CACHEOP + DCBZ)
return -EINVAL;
PPC_WARN_ALIGNMENT(dcbz, regs);
+ WARN_ON_ONCE(!user_mode(regs));
r = emulate_dcbz(op.ea, regs);
} else {
if (type == LARX || type == STCX)
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index fcf24a365fc0..4ce2a4aa3985 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -9,8 +9,6 @@
* #defines from the assembly-language output.
*/
-#define GENERATING_ASM_OFFSETS /* asm/smp.h */
-
#include <linux/compat.h>
#include <linux/signal.h>
#include <linux/sched.h>
@@ -30,7 +28,6 @@
#include <asm/io.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
@@ -57,12 +54,12 @@
#endif
#ifdef CONFIG_PPC32
-#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
+#ifdef CONFIG_BOOKE_OR_40x
#include "head_booke.h"
#endif
#endif
-#if defined(CONFIG_PPC_FSL_BOOK3E)
+#if defined(CONFIG_PPC_E500)
#include "../mm/mmu_decl.h"
#endif
@@ -70,6 +67,10 @@
#include <asm/fixmap.h>
#endif
+#ifdef CONFIG_XMON
+#include "../xmon/xmon_bpts.h"
+#endif
+
#define STACK_PT_REGS_OFFSET(sym, val) \
DEFINE(sym, STACK_FRAME_OVERHEAD + offsetof(struct pt_regs, val))
@@ -83,22 +84,17 @@ int main(void)
OFFSET(PACA_CANARY, paca_struct, canary);
#endif
#endif
- OFFSET(MMCONTEXTID, mm_struct, context.id);
-#ifdef CONFIG_PPC64
- DEFINE(SIGSEGV, SIGSEGV);
- DEFINE(NMI_MASK, NMI_MASK);
-#else
- OFFSET(KSP_LIMIT, thread_struct, ksp_limit);
+#ifdef CONFIG_PPC32
#ifdef CONFIG_PPC_RTAS
OFFSET(RTAS_SP, thread_struct, rtas_sp);
#endif
#endif /* CONFIG_PPC64 */
OFFSET(TASK_STACK, task_struct, stack);
#ifdef CONFIG_SMP
- OFFSET(TASK_CPU, task_struct, cpu);
+ OFFSET(TASK_CPU, task_struct, thread_info.cpu);
#endif
-#ifdef CONFIG_LIVEPATCH
+#ifdef CONFIG_LIVEPATCH_64
OFFSET(TI_livepatch_sp, thread_info, livepatch_sp);
#endif
@@ -107,15 +103,16 @@ int main(void)
#ifdef CONFIG_BOOKE
OFFSET(THREAD_NORMSAVES, thread_struct, normsave[0]);
#endif
+#ifdef CONFIG_PPC_FPU
OFFSET(THREAD_FPEXC_MODE, thread_struct, fpexc_mode);
OFFSET(THREAD_FPSTATE, thread_struct, fp_state.fpr);
OFFSET(THREAD_FPSAVEAREA, thread_struct, fp_save_area);
+#endif
OFFSET(FPSTATE_FPSCR, thread_fp_state, fpscr);
OFFSET(THREAD_LOAD_FP, thread_struct, load_fp);
#ifdef CONFIG_ALTIVEC
OFFSET(THREAD_VRSTATE, thread_struct, vr_state.vr);
OFFSET(THREAD_VRSAVEAREA, thread_struct, vr_save_area);
- OFFSET(THREAD_VRSAVE, thread_struct, vrsave);
OFFSET(THREAD_USED_VR, thread_struct, used_vr);
OFFSET(VRSTATE_VSCR, thread_vr_state, vscr);
OFFSET(THREAD_LOAD_VEC, thread_struct, load_vec);
@@ -127,7 +124,6 @@ int main(void)
OFFSET(KSP_VSID, thread_struct, ksp_vsid);
#else /* CONFIG_PPC64 */
OFFSET(PGDIR, thread_struct, pgdir);
-#ifdef CONFIG_VMAP_STACK
OFFSET(SRR0, thread_struct, srr0);
OFFSET(SRR1, thread_struct, srr1);
OFFSET(DAR, thread_struct, dar);
@@ -143,27 +139,20 @@ int main(void)
OFFSET(THR11, thread_struct, r11);
OFFSET(THLR, thread_struct, lr);
OFFSET(THCTR, thread_struct, ctr);
-#endif
+ OFFSET(THSR0, thread_struct, sr0);
#endif
#ifdef CONFIG_SPE
OFFSET(THREAD_EVR0, thread_struct, evr[0]);
OFFSET(THREAD_ACC, thread_struct, acc);
- OFFSET(THREAD_SPEFSCR, thread_struct, spefscr);
OFFSET(THREAD_USED_SPE, thread_struct, used_spe);
#endif /* CONFIG_SPE */
#endif /* CONFIG_PPC64 */
-#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
- OFFSET(THREAD_DBCR0, thread_struct, debug.dbcr0);
-#endif
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
OFFSET(THREAD_KVM_SVCPU, thread_struct, kvm_shadow_vcpu);
#endif
#if defined(CONFIG_KVM) && defined(CONFIG_BOOKE)
OFFSET(THREAD_KVM_VCPU, thread_struct, kvm_vcpu);
#endif
-#if defined(CONFIG_PPC_BOOK3S_32) && defined(CONFIG_PPC_KUAP)
- OFFSET(KUAP, thread_struct, kuap);
-#endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
OFFSET(PACATMSCRATCH, paca_struct, tm_scratch);
@@ -173,6 +162,7 @@ int main(void)
OFFSET(THREAD_TM_TAR, thread_struct, tm_tar);
OFFSET(THREAD_TM_PPR, thread_struct, tm_ppr);
OFFSET(THREAD_TM_DSCR, thread_struct, tm_dscr);
+ OFFSET(THREAD_TM_AMR, thread_struct, tm_amr);
OFFSET(PT_CKPT_REGS, thread_struct, ckpt_regs);
OFFSET(THREAD_CKVRSTATE, thread_struct, ckvr_state.vr);
OFFSET(THREAD_CKVRSAVE, thread_struct, ckvrsave);
@@ -182,19 +172,12 @@ int main(void)
sizeof(struct pt_regs) + 16);
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
- OFFSET(TI_FLAGS, thread_info, flags);
OFFSET(TI_LOCAL_FLAGS, thread_info, local_flags);
- OFFSET(TI_PREEMPT, thread_info, preempt_count);
#ifdef CONFIG_PPC64
OFFSET(DCACHEL1BLOCKSIZE, ppc64_caches, l1d.block_size);
OFFSET(DCACHEL1LOGBLOCKSIZE, ppc64_caches, l1d.log_block_size);
- OFFSET(DCACHEL1BLOCKSPERPAGE, ppc64_caches, l1d.blocks_per_page);
- OFFSET(ICACHEL1BLOCKSIZE, ppc64_caches, l1i.block_size);
- OFFSET(ICACHEL1LOGBLOCKSIZE, ppc64_caches, l1i.log_block_size);
- OFFSET(ICACHEL1BLOCKSPERPAGE, ppc64_caches, l1i.blocks_per_page);
/* paca */
- DEFINE(PACA_SIZE, sizeof(struct paca_struct));
OFFSET(PACAPACAINDEX, paca_struct, paca_index);
OFFSET(PACAPROCSTART, paca_struct, cpu_start);
OFFSET(PACAKSAVE, paca_struct, kstack);
@@ -206,20 +189,15 @@ int main(void)
OFFSET(PACATOC, paca_struct, kernel_toc);
OFFSET(PACAKBASE, paca_struct, kernelbase);
OFFSET(PACAKMSR, paca_struct, kernel_msr);
+#ifdef CONFIG_PPC_BOOK3S_64
+ OFFSET(PACAHSRR_VALID, paca_struct, hsrr_valid);
+ OFFSET(PACASRR_VALID, paca_struct, srr_valid);
+#endif
OFFSET(PACAIRQSOFTMASK, paca_struct, irq_soft_mask);
OFFSET(PACAIRQHAPPENED, paca_struct, irq_happened);
OFFSET(PACA_FTRACE_ENABLED, paca_struct, ftrace_enabled);
-#ifdef CONFIG_PPC_BOOK3S
- OFFSET(PACACONTEXTID, paca_struct, mm_ctx_id);
-#ifdef CONFIG_PPC_MM_SLICES
- OFFSET(PACALOWSLICESPSIZE, paca_struct, mm_ctx_low_slices_psize);
- OFFSET(PACAHIGHSLICEPSIZE, paca_struct, mm_ctx_high_slices_psize);
- OFFSET(PACA_SLB_ADDR_LIMIT, paca_struct, mm_ctx_slb_addr_limit);
- DEFINE(MMUPSIZEDEFSIZE, sizeof(struct mmu_psize_def));
-#endif /* CONFIG_PPC_MM_SLICES */
-#endif
-#ifdef CONFIG_PPC_BOOK3E
+#ifdef CONFIG_PPC_BOOK3E_64
OFFSET(PACAPGD, paca_struct, pgd);
OFFSET(PACA_KERNELPGD, paca_struct, kernel_pgd);
OFFSET(PACA_EXGEN, paca_struct, exgen);
@@ -235,36 +213,23 @@ int main(void)
OFFSET(TCD_ESEL_NEXT, tlb_core_data, esel_next);
OFFSET(TCD_ESEL_MAX, tlb_core_data, esel_max);
OFFSET(TCD_ESEL_FIRST, tlb_core_data, esel_first);
-#endif /* CONFIG_PPC_BOOK3E */
+#endif /* CONFIG_PPC_BOOK3E_64 */
#ifdef CONFIG_PPC_BOOK3S_64
- OFFSET(PACASLBCACHE, paca_struct, slb_cache);
- OFFSET(PACASLBCACHEPTR, paca_struct, slb_cache_ptr);
- OFFSET(PACASTABRR, paca_struct, stab_rr);
- OFFSET(PACAVMALLOCSLLP, paca_struct, vmalloc_sllp);
-#ifdef CONFIG_PPC_MM_SLICES
- OFFSET(MMUPSIZESLLP, mmu_psize_def, sllp);
-#else
- OFFSET(PACACONTEXTSLLP, paca_struct, mm_ctx_sllp);
-#endif /* CONFIG_PPC_MM_SLICES */
OFFSET(PACA_EXGEN, paca_struct, exgen);
OFFSET(PACA_EXMC, paca_struct, exmc);
- OFFSET(PACA_EXSLB, paca_struct, exslb);
OFFSET(PACA_EXNMI, paca_struct, exnmi);
-#ifdef CONFIG_PPC_PSERIES
- OFFSET(PACALPPACAPTR, paca_struct, lppaca_ptr);
-#endif
+#ifdef CONFIG_PPC_64S_HASH_MMU
OFFSET(PACA_SLBSHADOWPTR, paca_struct, slb_shadow_ptr);
OFFSET(SLBSHADOW_STACKVSID, slb_shadow, save_area[SLB_NUM_BOLTED - 1].vsid);
OFFSET(SLBSHADOW_STACKESID, slb_shadow, save_area[SLB_NUM_BOLTED - 1].esid);
OFFSET(SLBSHADOW_SAVEAREA, slb_shadow, save_area);
+#endif
OFFSET(LPPACA_PMCINUSE, lppaca, pmcregs_in_use);
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
OFFSET(PACA_PMCINUSE, paca_struct, pmcregs_in_use);
#endif
- OFFSET(LPPACA_DTLIDX, lppaca, dtl_idx);
OFFSET(LPPACA_YIELDCOUNT, lppaca, yield_count);
- OFFSET(PACA_DTL_RIDX, paca_struct, dtl_ridx);
#endif /* CONFIG_PPC_BOOK3S_64 */
OFFSET(PACAEMERGSP, paca_struct, emergency_sp);
#ifdef CONFIG_PPC_BOOK3S_64
@@ -280,21 +245,14 @@ int main(void)
OFFSET(PACAHWCPUID, paca_struct, hw_cpu_id);
OFFSET(PACAKEXECSTATE, paca_struct, kexec_state);
OFFSET(PACA_DSCR_DEFAULT, paca_struct, dscr_default);
- OFFSET(ACCOUNT_STARTTIME, paca_struct, accounting.starttime);
- OFFSET(ACCOUNT_STARTTIME_USER, paca_struct, accounting.starttime_user);
- OFFSET(ACCOUNT_USER_TIME, paca_struct, accounting.utime);
- OFFSET(ACCOUNT_SYSTEM_TIME, paca_struct, accounting.stime);
-#ifdef CONFIG_PPC_BOOK3E
+#ifdef CONFIG_PPC64
+ OFFSET(PACA_EXIT_SAVE_R1, paca_struct, exit_save_r1);
+#endif
+#ifdef CONFIG_PPC_BOOK3E_64
OFFSET(PACA_TRAP_SAVE, paca_struct, trap_save);
#endif
OFFSET(PACA_SPRG_VDSO, paca_struct, sprg_vdso);
#else /* CONFIG_PPC64 */
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
- OFFSET(ACCOUNT_STARTTIME, thread_info, accounting.starttime);
- OFFSET(ACCOUNT_STARTTIME_USER, thread_info, accounting.starttime_user);
- OFFSET(ACCOUNT_USER_TIME, thread_info, accounting.utime);
- OFFSET(ACCOUNT_SYSTEM_TIME, thread_info, accounting.stime);
-#endif
#endif /* CONFIG_PPC64 */
/* RTAS */
@@ -303,7 +261,7 @@ int main(void)
/* Interrupt register frame */
DEFINE(INT_FRAME_SIZE, STACK_INT_FRAME_SIZE);
- DEFINE(SWITCH_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs));
+ DEFINE(SWITCH_FRAME_SIZE, STACK_FRAME_WITH_PT_REGS);
STACK_PT_REGS_OFFSET(GPR0, gpr[0]);
STACK_PT_REGS_OFFSET(GPR1, gpr[1]);
STACK_PT_REGS_OFFSET(GPR2, gpr[2]);
@@ -318,9 +276,6 @@ int main(void)
STACK_PT_REGS_OFFSET(GPR11, gpr[11]);
STACK_PT_REGS_OFFSET(GPR12, gpr[12]);
STACK_PT_REGS_OFFSET(GPR13, gpr[13]);
-#ifndef CONFIG_PPC64
- STACK_PT_REGS_OFFSET(GPR14, gpr[14]);
-#endif /* CONFIG_PPC64 */
/*
* Note: these symbols include _ because they overlap with special
* register names
@@ -332,52 +287,38 @@ int main(void)
STACK_PT_REGS_OFFSET(_CCR, ccr);
STACK_PT_REGS_OFFSET(_XER, xer);
STACK_PT_REGS_OFFSET(_DAR, dar);
+ STACK_PT_REGS_OFFSET(_DEAR, dear);
STACK_PT_REGS_OFFSET(_DSISR, dsisr);
+ STACK_PT_REGS_OFFSET(_ESR, esr);
STACK_PT_REGS_OFFSET(ORIG_GPR3, orig_gpr3);
STACK_PT_REGS_OFFSET(RESULT, result);
STACK_PT_REGS_OFFSET(_TRAP, trap);
-#ifndef CONFIG_PPC64
- /*
- * The PowerPC 400-class & Book-E processors have neither the DAR
- * nor the DSISR SPRs. Hence, we overload them to hold the similar
- * DEAR and ESR SPRs for such processors. For critical interrupts
- * we use them to hold SRR0 and SRR1.
- */
- STACK_PT_REGS_OFFSET(_DEAR, dar);
- STACK_PT_REGS_OFFSET(_ESR, dsisr);
-#else /* CONFIG_PPC64 */
+#ifdef CONFIG_PPC64
STACK_PT_REGS_OFFSET(SOFTE, softe);
STACK_PT_REGS_OFFSET(_PPR, ppr);
-#endif /* CONFIG_PPC64 */
+#endif
-#ifdef CONFIG_PPC_KUAP
- STACK_PT_REGS_OFFSET(STACK_REGS_KUAP, kuap);
+#ifdef CONFIG_PPC_PKEY
+ STACK_PT_REGS_OFFSET(STACK_REGS_AMR, amr);
+ STACK_PT_REGS_OFFSET(STACK_REGS_IAMR, iamr);
#endif
-#if defined(CONFIG_PPC32)
-#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
- DEFINE(EXC_LVL_SIZE, STACK_EXC_LVL_FRAME_SIZE);
- DEFINE(MAS0, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas0));
+#if defined(CONFIG_PPC32) && defined(CONFIG_BOOKE)
+ STACK_PT_REGS_OFFSET(MAS0, mas0);
/* we overload MMUCR for 44x on MAS0 since they are mutually exclusive */
- DEFINE(MMUCR, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas0));
- DEFINE(MAS1, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas1));
- DEFINE(MAS2, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas2));
- DEFINE(MAS3, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas3));
- DEFINE(MAS6, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas6));
- DEFINE(MAS7, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas7));
- DEFINE(_SRR0, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, srr0));
- DEFINE(_SRR1, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, srr1));
- DEFINE(_CSRR0, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, csrr0));
- DEFINE(_CSRR1, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, csrr1));
- DEFINE(_DSRR0, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, dsrr0));
- DEFINE(_DSRR1, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, dsrr1));
- DEFINE(SAVED_KSP_LIMIT, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, saved_ksp_limit));
+ STACK_PT_REGS_OFFSET(MMUCR, mas0);
+ STACK_PT_REGS_OFFSET(MAS1, mas1);
+ STACK_PT_REGS_OFFSET(MAS2, mas2);
+ STACK_PT_REGS_OFFSET(MAS3, mas3);
+ STACK_PT_REGS_OFFSET(MAS6, mas6);
+ STACK_PT_REGS_OFFSET(MAS7, mas7);
+ STACK_PT_REGS_OFFSET(_SRR0, srr0);
+ STACK_PT_REGS_OFFSET(_SRR1, srr1);
+ STACK_PT_REGS_OFFSET(_CSRR0, csrr0);
+ STACK_PT_REGS_OFFSET(_CSRR1, csrr1);
+ STACK_PT_REGS_OFFSET(_DSRR0, dsrr0);
+ STACK_PT_REGS_OFFSET(_DSRR1, dsrr1);
#endif
-#endif
-
-#ifndef CONFIG_PPC64
- OFFSET(MM_PGD, mm_struct, pgd);
-#endif /* ! CONFIG_PPC64 */
/* About the CPU features table */
OFFSET(CPU_SPEC_FEATURES, cpu_spec, cpu_features);
@@ -394,59 +335,23 @@ int main(void)
#endif /* ! CONFIG_PPC64 */
/* datapage offsets for use by vdso */
- OFFSET(CFG_TB_ORIG_STAMP, vdso_data, tb_orig_stamp);
- OFFSET(CFG_TB_TICKS_PER_SEC, vdso_data, tb_ticks_per_sec);
- OFFSET(CFG_TB_TO_XS, vdso_data, tb_to_xs);
- OFFSET(CFG_TB_UPDATE_COUNT, vdso_data, tb_update_count);
- OFFSET(CFG_TZ_MINUTEWEST, vdso_data, tz_minuteswest);
- OFFSET(CFG_TZ_DSTTIME, vdso_data, tz_dsttime);
- OFFSET(CFG_SYSCALL_MAP32, vdso_data, syscall_map_32);
- OFFSET(WTOM_CLOCK_SEC, vdso_data, wtom_clock_sec);
- OFFSET(WTOM_CLOCK_NSEC, vdso_data, wtom_clock_nsec);
- OFFSET(STAMP_XTIME_SEC, vdso_data, stamp_xtime_sec);
- OFFSET(STAMP_XTIME_NSEC, vdso_data, stamp_xtime_nsec);
- OFFSET(STAMP_SEC_FRAC, vdso_data, stamp_sec_fraction);
- OFFSET(CLOCK_HRTIMER_RES, vdso_data, hrtimer_res);
+ OFFSET(VDSO_DATA_OFFSET, vdso_arch_data, data);
+ OFFSET(CFG_TB_TICKS_PER_SEC, vdso_arch_data, tb_ticks_per_sec);
#ifdef CONFIG_PPC64
- OFFSET(CFG_ICACHE_BLOCKSZ, vdso_data, icache_block_size);
- OFFSET(CFG_DCACHE_BLOCKSZ, vdso_data, dcache_block_size);
- OFFSET(CFG_ICACHE_LOGBLOCKSZ, vdso_data, icache_log_block_size);
- OFFSET(CFG_DCACHE_LOGBLOCKSZ, vdso_data, dcache_log_block_size);
- OFFSET(CFG_SYSCALL_MAP64, vdso_data, syscall_map_64);
- OFFSET(TVAL64_TV_SEC, __kernel_old_timeval, tv_sec);
- OFFSET(TVAL64_TV_USEC, __kernel_old_timeval, tv_usec);
-#endif
- OFFSET(TSPC64_TV_SEC, __kernel_timespec, tv_sec);
- OFFSET(TSPC64_TV_NSEC, __kernel_timespec, tv_nsec);
- OFFSET(TVAL32_TV_SEC, old_timeval32, tv_sec);
- OFFSET(TVAL32_TV_USEC, old_timeval32, tv_usec);
- OFFSET(TSPC32_TV_SEC, old_timespec32, tv_sec);
- OFFSET(TSPC32_TV_NSEC, old_timespec32, tv_nsec);
- /* timeval/timezone offsets for use by vdso */
- OFFSET(TZONE_TZ_MINWEST, timezone, tz_minuteswest);
- OFFSET(TZONE_TZ_DSTTIME, timezone, tz_dsttime);
-
- /* Other bits used by the vdso */
- DEFINE(CLOCK_REALTIME, CLOCK_REALTIME);
- DEFINE(CLOCK_MONOTONIC, CLOCK_MONOTONIC);
- DEFINE(CLOCK_REALTIME_COARSE, CLOCK_REALTIME_COARSE);
- DEFINE(CLOCK_MONOTONIC_COARSE, CLOCK_MONOTONIC_COARSE);
- DEFINE(CLOCK_MAX, CLOCK_TAI);
- DEFINE(NSEC_PER_SEC, NSEC_PER_SEC);
- DEFINE(EINVAL, EINVAL);
- DEFINE(KTIME_LOW_RES, KTIME_LOW_RES);
+ OFFSET(CFG_ICACHE_BLOCKSZ, vdso_arch_data, icache_block_size);
+ OFFSET(CFG_DCACHE_BLOCKSZ, vdso_arch_data, dcache_block_size);
+ OFFSET(CFG_ICACHE_LOGBLOCKSZ, vdso_arch_data, icache_log_block_size);
+ OFFSET(CFG_DCACHE_LOGBLOCKSZ, vdso_arch_data, dcache_log_block_size);
+ OFFSET(CFG_SYSCALL_MAP64, vdso_arch_data, syscall_map);
+ OFFSET(CFG_SYSCALL_MAP32, vdso_arch_data, compat_syscall_map);
+#else
+ OFFSET(CFG_SYSCALL_MAP32, vdso_arch_data, syscall_map);
+#endif
#ifdef CONFIG_BUG
DEFINE(BUG_ENTRY_SIZE, sizeof(struct bug_entry));
#endif
-#ifdef CONFIG_PPC_BOOK3S_64
- DEFINE(PGD_TABLE_SIZE, (sizeof(pgd_t) << max(RADIX_PGD_INDEX_SIZE, H_PGD_INDEX_SIZE)));
-#else
- DEFINE(PGD_TABLE_SIZE, PGD_TABLE_SIZE);
-#endif
- DEFINE(PTE_SIZE, sizeof(pte_t));
-
#ifdef CONFIG_KVM
OFFSET(VCPU_HOST_STACK, kvm_vcpu, arch.host_stack);
OFFSET(VCPU_HOST_PID, kvm_vcpu, arch.host_pid);
@@ -474,7 +379,7 @@ int main(void)
OFFSET(VCPU_SPRG2, kvm_vcpu, arch.shregs.sprg2);
OFFSET(VCPU_SPRG3, kvm_vcpu, arch.shregs.sprg3);
#endif
-#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
+#ifdef CONFIG_KVM_BOOK3S_HV_P8_TIMING
OFFSET(VCPU_TB_RMENTRY, kvm_vcpu, arch.rm_entry);
OFFSET(VCPU_TB_RMINTR, kvm_vcpu, arch.rm_intr);
OFFSET(VCPU_TB_RMEXIT, kvm_vcpu, arch.rm_exit);
@@ -518,11 +423,9 @@ int main(void)
OFFSET(KVM_HOST_LPID, kvm, arch.host_lpid);
OFFSET(KVM_HOST_LPCR, kvm, arch.host_lpcr);
OFFSET(KVM_HOST_SDR1, kvm, arch.host_sdr1);
- OFFSET(KVM_NEED_FLUSH, kvm, arch.need_tlb_flush.bits);
OFFSET(KVM_ENABLED_HCALLS, kvm, arch.enabled_hcalls);
OFFSET(KVM_VRMA_SLB_V, kvm, arch.vrma_slb_v);
OFFSET(KVM_RADIX, kvm, arch.radix);
- OFFSET(KVM_FWNMI, kvm, arch.fwnmi_enabled);
OFFSET(KVM_SECURE_GUEST, kvm, arch.secure_guest);
OFFSET(VCPU_DSISR, kvm_vcpu, arch.shregs.dsisr);
OFFSET(VCPU_DAR, kvm_vcpu, arch.shregs.dar);
@@ -544,11 +447,12 @@ int main(void)
OFFSET(VCPU_CTRL, kvm_vcpu, arch.ctrl);
OFFSET(VCPU_DABR, kvm_vcpu, arch.dabr);
OFFSET(VCPU_DABRX, kvm_vcpu, arch.dabrx);
- OFFSET(VCPU_DAWR, kvm_vcpu, arch.dawr);
- OFFSET(VCPU_DAWRX, kvm_vcpu, arch.dawrx);
+ OFFSET(VCPU_DAWR0, kvm_vcpu, arch.dawr0);
+ OFFSET(VCPU_DAWRX0, kvm_vcpu, arch.dawrx0);
+ OFFSET(VCPU_DAWR1, kvm_vcpu, arch.dawr1);
+ OFFSET(VCPU_DAWRX1, kvm_vcpu, arch.dawrx1);
OFFSET(VCPU_CIABR, kvm_vcpu, arch.ciabr);
OFFSET(VCPU_HFLAGS, kvm_vcpu, arch.hflags);
- OFFSET(VCPU_DEC, kvm_vcpu, arch.dec);
OFFSET(VCPU_DEC_EXPIRES, kvm_vcpu, arch.dec_expires);
OFFSET(VCPU_PENDING_EXC, kvm_vcpu, arch.pending_exceptions);
OFFSET(VCPU_CEDED, kvm_vcpu, arch.ceded);
@@ -556,8 +460,9 @@ int main(void)
OFFSET(VCPU_IRQ_PENDING, kvm_vcpu, arch.irq_pending);
OFFSET(VCPU_DBELL_REQ, kvm_vcpu, arch.doorbell_request);
OFFSET(VCPU_MMCR, kvm_vcpu, arch.mmcr);
+ OFFSET(VCPU_MMCRA, kvm_vcpu, arch.mmcra);
+ OFFSET(VCPU_MMCRS, kvm_vcpu, arch.mmcrs);
OFFSET(VCPU_PMC, kvm_vcpu, arch.pmc);
- OFFSET(VCPU_SPMC, kvm_vcpu, arch.spmc);
OFFSET(VCPU_SIAR, kvm_vcpu, arch.siar);
OFFSET(VCPU_SDAR, kvm_vcpu, arch.sdar);
OFFSET(VCPU_SIER, kvm_vcpu, arch.sier);
@@ -566,7 +471,6 @@ int main(void)
OFFSET(VCPU_SLB_NR, kvm_vcpu, arch.slb_nr);
OFFSET(VCPU_FAULT_DSISR, kvm_vcpu, arch.fault_dsisr);
OFFSET(VCPU_FAULT_DAR, kvm_vcpu, arch.fault_dar);
- OFFSET(VCPU_FAULT_GPA, kvm_vcpu, arch.fault_gpa);
OFFSET(VCPU_INTR_MSR, kvm_vcpu, arch.intr_msr);
OFFSET(VCPU_LAST_INST, kvm_vcpu, arch.last_inst);
OFFSET(VCPU_TRAP, kvm_vcpu, arch.trap);
@@ -678,13 +582,10 @@ int main(void)
HSTATE_FIELD(HSTATE_HWTHREAD_STATE, hwthread_state);
HSTATE_FIELD(HSTATE_KVM_VCPU, kvm_vcpu);
HSTATE_FIELD(HSTATE_KVM_VCORE, kvm_vcore);
- HSTATE_FIELD(HSTATE_XICS_PHYS, xics_phys);
HSTATE_FIELD(HSTATE_XIVE_TIMA_PHYS, xive_tima_phys);
HSTATE_FIELD(HSTATE_XIVE_TIMA_VIRT, xive_tima_virt);
- HSTATE_FIELD(HSTATE_SAVED_XIRR, saved_xirr);
HSTATE_FIELD(HSTATE_HOST_IPI, host_ipi);
HSTATE_FIELD(HSTATE_PTID, ptid);
- HSTATE_FIELD(HSTATE_TID, tid);
HSTATE_FIELD(HSTATE_FAKE_SUSPEND, fake_suspend);
HSTATE_FIELD(HSTATE_MMCR0, host_mmcr[0]);
HSTATE_FIELD(HSTATE_MMCR1, host_mmcr[1]);
@@ -693,6 +594,9 @@ int main(void)
HSTATE_FIELD(HSTATE_SDAR, host_mmcr[4]);
HSTATE_FIELD(HSTATE_MMCR2, host_mmcr[5]);
HSTATE_FIELD(HSTATE_SIER, host_mmcr[6]);
+ HSTATE_FIELD(HSTATE_MMCR3, host_mmcr[7]);
+ HSTATE_FIELD(HSTATE_SIER2, host_mmcr[8]);
+ HSTATE_FIELD(HSTATE_SIER3, host_mmcr[9]);
HSTATE_FIELD(HSTATE_PMC1, host_pmc[0]);
HSTATE_FIELD(HSTATE_PMC2, host_pmc[1]);
HSTATE_FIELD(HSTATE_PMC3, host_pmc[2]);
@@ -711,8 +615,6 @@ int main(void)
OFFSET(KVM_SPLIT_LDBAR, kvm_split_mode, ldbar);
OFFSET(KVM_SPLIT_DO_NAP, kvm_split_mode, do_nap);
OFFSET(KVM_SPLIT_NAPPED, kvm_split_mode, napped);
- OFFSET(KVM_SPLIT_DO_SET, kvm_split_mode, do_set);
- OFFSET(KVM_SPLIT_DO_RESTORE, kvm_split_mode, do_restore);
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
#ifdef CONFIG_PPC_BOOK3S_64
@@ -749,7 +651,7 @@ int main(void)
DEFINE(PGD_T_LOG2, PGD_T_LOG2);
DEFINE(PTE_T_LOG2, PTE_T_LOG2);
#endif
-#ifdef CONFIG_PPC_FSL_BOOK3E
+#ifdef CONFIG_PPC_E500
DEFINE(TLBCAM_SIZE, sizeof(struct tlbcam));
OFFSET(TLBCAM_MAS0, tlbcam, MAS0);
OFFSET(TLBCAM_MAS1, tlbcam, MAS1);
@@ -789,11 +691,14 @@ int main(void)
#endif
DEFINE(PPC_DBELL_SERVER, PPC_DBELL_SERVER);
- DEFINE(PPC_DBELL_MSGTYPE, PPC_DBELL_MSGTYPE);
#ifdef CONFIG_PPC_8xx
DEFINE(VIRT_IMMR_BASE, (u64)__fix_to_virt(FIX_IMMR_BASE));
#endif
+#ifdef CONFIG_XMON
+ DEFINE(BPT_SIZE, BPT_SIZE);
+#endif
+
return 0;
}
diff --git a/arch/powerpc/kernel/audit.c b/arch/powerpc/kernel/audit.c
index a2dddd7f3d09..1bcfca5fdf67 100644
--- a/arch/powerpc/kernel/audit.c
+++ b/arch/powerpc/kernel/audit.c
@@ -47,15 +47,17 @@ int audit_classify_syscall(int abi, unsigned syscall)
#endif
switch(syscall) {
case __NR_open:
- return 2;
+ return AUDITSC_OPEN;
case __NR_openat:
- return 3;
+ return AUDITSC_OPENAT;
case __NR_socketcall:
- return 4;
+ return AUDITSC_SOCKETCALL;
case __NR_execve:
- return 5;
+ return AUDITSC_EXECVE;
+ case __NR_openat2:
+ return AUDITSC_OPENAT2;
default:
- return 0;
+ return AUDITSC_NATIVE;
}
}
diff --git a/arch/powerpc/kernel/btext.c b/arch/powerpc/kernel/btext.c
index 6dfceaa820e4..2769889219bf 100644
--- a/arch/powerpc/kernel/btext.c
+++ b/arch/powerpc/kernel/btext.c
@@ -9,13 +9,13 @@
#include <linux/init.h>
#include <linux/export.h>
#include <linux/memblock.h>
+#include <linux/pgtable.h>
+#include <linux/of.h>
#include <asm/sections.h>
-#include <asm/prom.h>
#include <asm/btext.h>
#include <asm/page.h>
#include <asm/mmu.h>
-#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/udbg.h>
@@ -26,7 +26,7 @@
static void scrollscreen(void);
#endif
-#define __force_data __attribute__((__section__(".data")))
+#define __force_data __section(".data")
static int g_loc_X __force_data;
static int g_loc_Y __force_data;
@@ -45,8 +45,7 @@ unsigned long disp_BAT[2] __initdata = {0, 0};
static unsigned char vga_font[cmapsz];
-int boot_text_mapped __force_data = 0;
-int force_printk_to_btext = 0;
+static int boot_text_mapped __force_data;
extern void rmci_on(void);
extern void rmci_off(void);
@@ -74,7 +73,7 @@ static inline void rmci_maybe_off(void)
* the display during identify_machine() and MMU_Init()
*
* The display is mapped to virtual address 0xD0000000, rather
- * than 1:1, because some some CHRP machines put the frame buffer
+ * than 1:1, because some CHRP machines put the frame buffer
* in the region starting at 0xC0000000 (PAGE_OFFSET).
* This mapping is temporary and will disappear as soon as the
* setup done by MMU_Init() is applied.
@@ -95,19 +94,10 @@ void __init btext_prepare_BAT(void)
boot_text_mapped = 0;
return;
}
- if (PVR_VER(mfspr(SPRN_PVR)) != 1) {
- /* 603, 604, G3, G4, ... */
- lowbits = addr & ~0xFF000000UL;
- addr &= 0xFF000000UL;
- disp_BAT[0] = vaddr | (BL_16M<<2) | 2;
- disp_BAT[1] = addr | (_PAGE_NO_CACHE | _PAGE_GUARDED | BPP_RW);
- } else {
- /* 601 */
- lowbits = addr & ~0xFF800000UL;
- addr &= 0xFF800000UL;
- disp_BAT[0] = vaddr | (_PAGE_NO_CACHE | PP_RWXX) | 4;
- disp_BAT[1] = addr | BL_8M | 0x40;
- }
+ lowbits = addr & ~0xFF000000UL;
+ addr &= 0xFF000000UL;
+ disp_BAT[0] = vaddr | (BL_16M<<2) | 2;
+ disp_BAT[1] = addr | (_PAGE_NO_CACHE | _PAGE_GUARDED | BPP_RW);
logicalDisplayBase = (void *) (vaddr + lowbits);
}
#endif
@@ -170,7 +160,7 @@ void btext_map(void)
boot_text_mapped = 1;
}
-static int btext_initialize(struct device_node *np)
+static int __init btext_initialize(struct device_node *np)
{
unsigned int width, height, depth, pitch;
unsigned long address = 0;
@@ -250,8 +240,10 @@ int __init btext_find_display(int allow_nonstdout)
rc = btext_initialize(np);
printk("result: %d\n", rc);
}
- if (rc == 0)
+ if (rc == 0) {
+ of_node_put(np);
break;
+ }
}
return rc;
}
@@ -299,7 +291,7 @@ void btext_update_display(unsigned long phys, int width, int height,
}
EXPORT_SYMBOL(btext_update_display);
-void btext_clearscreen(void)
+void __init btext_clearscreen(void)
{
unsigned int *base = (unsigned int *)calc_base(0, 0);
unsigned long width = ((dispDeviceRect[2] - dispDeviceRect[0]) *
@@ -317,7 +309,7 @@ void btext_clearscreen(void)
rmci_maybe_off();
}
-void btext_flushscreen(void)
+void __init btext_flushscreen(void)
{
unsigned int *base = (unsigned int *)calc_base(0, 0);
unsigned long width = ((dispDeviceRect[2] - dispDeviceRect[0]) *
@@ -336,7 +328,7 @@ void btext_flushscreen(void)
__asm__ __volatile__ ("sync" ::: "memory");
}
-void btext_flushline(void)
+void __init btext_flushline(void)
{
unsigned int *base = (unsigned int *)calc_base(0, g_loc_Y << 4);
unsigned long width = ((dispDeviceRect[2] - dispDeviceRect[0]) *
@@ -551,7 +543,7 @@ void btext_drawstring(const char *c)
btext_drawchar(*c++);
}
-void btext_drawtext(const char *c, unsigned int len)
+void __init btext_drawtext(const char *c, unsigned int len)
{
if (!boot_text_mapped)
return;
@@ -559,7 +551,7 @@ void btext_drawtext(const char *c, unsigned int len)
btext_drawchar(*c++);
}
-void btext_drawhex(unsigned long v)
+void __init btext_drawhex(unsigned long v)
{
if (!boot_text_mapped)
return;
diff --git a/arch/powerpc/kernel/cacheinfo.c b/arch/powerpc/kernel/cacheinfo.c
index 470336277c67..f502337dd37d 100644
--- a/arch/powerpc/kernel/cacheinfo.c
+++ b/arch/powerpc/kernel/cacheinfo.c
@@ -7,6 +7,8 @@
* Author: Nathan Lynch
*/
+#define pr_fmt(fmt) "cacheinfo: " fmt
+
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/kernel.h>
@@ -16,7 +18,6 @@
#include <linux/of.h>
#include <linux/percpu.h>
#include <linux/slab.h>
-#include <asm/prom.h>
#include <asm/cputhreads.h>
#include <asm/smp.h>
@@ -118,6 +119,7 @@ struct cache {
struct cpumask shared_cpu_map; /* online CPUs using this cache */
int type; /* split cache disambiguation */
int level; /* level not explicit in device tree */
+ int group_id; /* id of the group of threads that share this cache */
struct list_head list; /* global list of cache objects */
struct cache *next_local; /* next cache of >= level */
};
@@ -140,22 +142,24 @@ static const char *cache_type_string(const struct cache *cache)
}
static void cache_init(struct cache *cache, int type, int level,
- struct device_node *ofnode)
+ struct device_node *ofnode, int group_id)
{
cache->type = type;
cache->level = level;
cache->ofnode = of_node_get(ofnode);
+ cache->group_id = group_id;
INIT_LIST_HEAD(&cache->list);
list_add(&cache->list, &cache_list);
}
-static struct cache *new_cache(int type, int level, struct device_node *ofnode)
+static struct cache *new_cache(int type, int level,
+ struct device_node *ofnode, int group_id)
{
struct cache *cache;
cache = kzalloc(sizeof(*cache), GFP_KERNEL);
if (cache)
- cache_init(cache, type, level, ofnode);
+ cache_init(cache, type, level, ofnode, group_id);
return cache;
}
@@ -166,7 +170,7 @@ static void release_cache_debugcheck(struct cache *cache)
list_for_each_entry(iter, &cache_list, list)
WARN_ONCE(iter->next_local == cache,
- "cache for %pOF(%s) refers to cache for %pOF(%s)\n",
+ "cache for %pOFP(%s) refers to cache for %pOFP(%s)\n",
iter->ofnode,
cache_type_string(iter),
cache->ofnode,
@@ -178,7 +182,7 @@ static void release_cache(struct cache *cache)
if (!cache)
return;
- pr_debug("freeing L%d %s cache for %pOF\n", cache->level,
+ pr_debug("freeing L%d %s cache for %pOFP\n", cache->level,
cache_type_string(cache), cache->ofnode);
release_cache_debugcheck(cache);
@@ -193,7 +197,7 @@ static void cache_cpu_set(struct cache *cache, int cpu)
while (next) {
WARN_ONCE(cpumask_test_cpu(cpu, &next->shared_cpu_map),
- "CPU %i already accounted in %pOF(%s)\n",
+ "CPU %i already accounted in %pOFP(%s)\n",
cpu, next->ofnode,
cache_type_string(next));
cpumask_set_cpu(cpu, &next->shared_cpu_map);
@@ -307,20 +311,24 @@ static struct cache *cache_find_first_sibling(struct cache *cache)
return cache;
list_for_each_entry(iter, &cache_list, list)
- if (iter->ofnode == cache->ofnode && iter->next_local == cache)
+ if (iter->ofnode == cache->ofnode &&
+ iter->group_id == cache->group_id &&
+ iter->next_local == cache)
return iter;
return cache;
}
-/* return the first cache on a local list matching node */
-static struct cache *cache_lookup_by_node(const struct device_node *node)
+/* return the first cache on a local list matching node and thread-group id */
+static struct cache *cache_lookup_by_node_group(const struct device_node *node,
+ int group_id)
{
struct cache *cache = NULL;
struct cache *iter;
list_for_each_entry(iter, &cache_list, list) {
- if (iter->ofnode != node)
+ if (iter->ofnode != node ||
+ iter->group_id != group_id)
continue;
cache = cache_find_first_sibling(iter);
break;
@@ -350,23 +358,24 @@ static int cache_is_unified_d(const struct device_node *np)
CACHE_TYPE_UNIFIED_D : CACHE_TYPE_UNIFIED;
}
-static struct cache *cache_do_one_devnode_unified(struct device_node *node, int level)
+static struct cache *cache_do_one_devnode_unified(struct device_node *node, int group_id,
+ int level)
{
- pr_debug("creating L%d ucache for %pOF\n", level, node);
+ pr_debug("creating L%d ucache for %pOFP\n", level, node);
- return new_cache(cache_is_unified_d(node), level, node);
+ return new_cache(cache_is_unified_d(node), level, node, group_id);
}
-static struct cache *cache_do_one_devnode_split(struct device_node *node,
+static struct cache *cache_do_one_devnode_split(struct device_node *node, int group_id,
int level)
{
struct cache *dcache, *icache;
- pr_debug("creating L%d dcache and icache for %pOF\n", level,
+ pr_debug("creating L%d dcache and icache for %pOFP\n", level,
node);
- dcache = new_cache(CACHE_TYPE_DATA, level, node);
- icache = new_cache(CACHE_TYPE_INSTRUCTION, level, node);
+ dcache = new_cache(CACHE_TYPE_DATA, level, node, group_id);
+ icache = new_cache(CACHE_TYPE_INSTRUCTION, level, node, group_id);
if (!dcache || !icache)
goto err;
@@ -380,31 +389,32 @@ err:
return NULL;
}
-static struct cache *cache_do_one_devnode(struct device_node *node, int level)
+static struct cache *cache_do_one_devnode(struct device_node *node, int group_id, int level)
{
struct cache *cache;
if (cache_node_is_unified(node))
- cache = cache_do_one_devnode_unified(node, level);
+ cache = cache_do_one_devnode_unified(node, group_id, level);
else
- cache = cache_do_one_devnode_split(node, level);
+ cache = cache_do_one_devnode_split(node, group_id, level);
return cache;
}
static struct cache *cache_lookup_or_instantiate(struct device_node *node,
+ int group_id,
int level)
{
struct cache *cache;
- cache = cache_lookup_by_node(node);
+ cache = cache_lookup_by_node_group(node, group_id);
WARN_ONCE(cache && cache->level != level,
"cache level mismatch on lookup (got %d, expected %d)\n",
cache->level, level);
if (!cache)
- cache = cache_do_one_devnode(node, level);
+ cache = cache_do_one_devnode(node, group_id, level);
return cache;
}
@@ -418,15 +428,53 @@ static void link_cache_lists(struct cache *smaller, struct cache *bigger)
}
smaller->next_local = bigger;
+
+ /*
+ * The cache->next_local list sorts by level ascending:
+ * L1d -> L1i -> L2 -> L3 ...
+ */
+ WARN_ONCE((smaller->level == 1 && bigger->level > 2) ||
+ (smaller->level > 1 && bigger->level != smaller->level + 1),
+ "linking L%i cache %pOFP to L%i cache %pOFP; skipped a level?\n",
+ smaller->level, smaller->ofnode, bigger->level, bigger->ofnode);
}
static void do_subsidiary_caches_debugcheck(struct cache *cache)
{
- WARN_ON_ONCE(cache->level != 1);
- WARN_ON_ONCE(!of_node_is_type(cache->ofnode, "cpu"));
+ WARN_ONCE(cache->level != 1,
+ "instantiating cache chain from L%d %s cache for "
+ "%pOFP instead of an L1\n", cache->level,
+ cache_type_string(cache), cache->ofnode);
+ WARN_ONCE(!of_node_is_type(cache->ofnode, "cpu"),
+ "instantiating cache chain from node %pOFP of type '%s' "
+ "instead of a cpu node\n", cache->ofnode,
+ of_node_get_device_type(cache->ofnode));
}
-static void do_subsidiary_caches(struct cache *cache)
+/*
+ * If sub-groups of threads in a core containing @cpu_id share the
+ * L@level-cache (information obtained via "ibm,thread-groups"
+ * device-tree property), then we identify the group by the first
+ * thread-sibling in the group. We define this to be the group-id.
+ *
+ * In the absence of any thread-group information for L@level-cache,
+ * this function returns -1.
+ */
+static int get_group_id(unsigned int cpu_id, int level)
+{
+ if (has_big_cores && level == 1)
+ return cpumask_first(per_cpu(thread_group_l1_cache_map,
+ cpu_id));
+ else if (thread_group_shares_l2 && level == 2)
+ return cpumask_first(per_cpu(thread_group_l2_cache_map,
+ cpu_id));
+ else if (thread_group_shares_l3 && level == 3)
+ return cpumask_first(per_cpu(thread_group_l3_cache_map,
+ cpu_id));
+ return -1;
+}
+
+static void do_subsidiary_caches(struct cache *cache, unsigned int cpu_id)
{
struct device_node *subcache_node;
int level = cache->level;
@@ -435,9 +483,11 @@ static void do_subsidiary_caches(struct cache *cache)
while ((subcache_node = of_find_next_cache_node(cache->ofnode))) {
struct cache *subcache;
+ int group_id;
level++;
- subcache = cache_lookup_or_instantiate(subcache_node, level);
+ group_id = get_group_id(cpu_id, level);
+ subcache = cache_lookup_or_instantiate(subcache_node, group_id, level);
of_node_put(subcache_node);
if (!subcache)
break;
@@ -451,6 +501,7 @@ static struct cache *cache_chain_instantiate(unsigned int cpu_id)
{
struct device_node *cpu_node;
struct cache *cpu_cache = NULL;
+ int group_id;
pr_debug("creating cache object(s) for CPU %i\n", cpu_id);
@@ -459,11 +510,13 @@ static struct cache *cache_chain_instantiate(unsigned int cpu_id)
if (!cpu_node)
goto out;
- cpu_cache = cache_lookup_or_instantiate(cpu_node, 1);
+ group_id = get_group_id(cpu_id, 1);
+
+ cpu_cache = cache_lookup_or_instantiate(cpu_node, group_id, 1);
if (!cpu_cache)
goto out;
- do_subsidiary_caches(cpu_cache);
+ do_subsidiary_caches(cpu_cache, cpu_id);
cache_cpu_set(cpu_cache, cpu_id);
out:
@@ -624,66 +677,49 @@ static ssize_t level_show(struct kobject *k, struct kobj_attribute *attr, char *
static struct kobj_attribute cache_level_attr =
__ATTR(level, 0444, level_show, NULL);
-static unsigned int index_dir_to_cpu(struct cache_index_dir *index)
-{
- struct kobject *index_dir_kobj = &index->kobj;
- struct kobject *cache_dir_kobj = index_dir_kobj->parent;
- struct kobject *cpu_dev_kobj = cache_dir_kobj->parent;
- struct device *dev = kobj_to_dev(cpu_dev_kobj);
-
- return dev->id;
-}
-
-/*
- * On big-core systems, each core has two groups of CPUs each of which
- * has its own L1-cache. The thread-siblings which share l1-cache with
- * @cpu can be obtained via cpu_smallcore_mask().
- */
-static const struct cpumask *get_big_core_shared_cpu_map(int cpu, struct cache *cache)
-{
- if (cache->level == 1)
- return cpu_smallcore_mask(cpu);
-
- return &cache->shared_cpu_map;
-}
-
-static ssize_t shared_cpu_map_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
+static ssize_t
+show_shared_cpumap(struct kobject *k, struct kobj_attribute *attr, char *buf, bool list)
{
struct cache_index_dir *index;
struct cache *cache;
const struct cpumask *mask;
- int ret, cpu;
index = kobj_to_cache_index_dir(k);
cache = index->cache;
- if (has_big_cores) {
- cpu = index_dir_to_cpu(index);
- mask = get_big_core_shared_cpu_map(cpu, cache);
- } else {
- mask = &cache->shared_cpu_map;
- }
+ mask = &cache->shared_cpu_map;
- ret = scnprintf(buf, PAGE_SIZE - 1, "%*pb\n",
- cpumask_pr_args(mask));
- buf[ret++] = '\n';
- buf[ret] = '\0';
- return ret;
+ return cpumap_print_to_pagebuf(list, buf, mask);
+}
+
+static ssize_t shared_cpu_map_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
+{
+ return show_shared_cpumap(k, attr, buf, false);
+}
+
+static ssize_t shared_cpu_list_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
+{
+ return show_shared_cpumap(k, attr, buf, true);
}
static struct kobj_attribute cache_shared_cpu_map_attr =
__ATTR(shared_cpu_map, 0444, shared_cpu_map_show, NULL);
+static struct kobj_attribute cache_shared_cpu_list_attr =
+ __ATTR(shared_cpu_list, 0444, shared_cpu_list_show, NULL);
+
/* Attributes which should always be created -- the kobject/sysfs core
- * does this automatically via kobj_type->default_attrs. This is the
+ * does this automatically via kobj_type->default_groups. This is the
* minimum data required to uniquely identify a cache.
*/
static struct attribute *cache_index_default_attrs[] = {
&cache_type_attr.attr,
&cache_level_attr.attr,
&cache_shared_cpu_map_attr.attr,
+ &cache_shared_cpu_list_attr.attr,
NULL,
};
+ATTRIBUTE_GROUPS(cache_index_default);
/* Attributes which should be created if the cache device node has the
* right properties -- see cacheinfo_create_index_opt_attrs
@@ -702,7 +738,7 @@ static const struct sysfs_ops cache_index_ops = {
static struct kobj_type cache_index_type = {
.release = cache_index_release,
.sysfs_ops = &cache_index_ops,
- .default_attrs = cache_index_default_attrs,
+ .default_groups = cache_index_default_groups,
};
static void cacheinfo_create_index_opt_attrs(struct cache_index_dir *dir)
@@ -733,13 +769,13 @@ static void cacheinfo_create_index_opt_attrs(struct cache_index_dir *dir)
rc = attr->show(&dir->kobj, attr, buf);
if (rc <= 0) {
pr_debug("not creating %s attribute for "
- "%pOF(%s) (rc = %zd)\n",
+ "%pOFP(%s) (rc = %zd)\n",
attr->attr.name, cache->ofnode,
cache_type, rc);
continue;
}
if (sysfs_create_file(&dir->kobj, &attr->attr))
- pr_debug("could not create %s attribute for %pOF(%s)\n",
+ pr_debug("could not create %s attribute for %pOFP(%s)\n",
attr->attr.name, cache->ofnode, cache_type);
}
@@ -810,13 +846,15 @@ static struct cache *cache_lookup_by_cpu(unsigned int cpu_id)
{
struct device_node *cpu_node;
struct cache *cache;
+ int group_id;
cpu_node = of_get_cpu_node(cpu_id, NULL);
WARN_ONCE(!cpu_node, "no OF node found for CPU %i\n", cpu_id);
if (!cpu_node)
return NULL;
- cache = cache_lookup_by_node(cpu_node);
+ group_id = get_group_id(cpu_id, 1);
+ cache = cache_lookup_by_node_group(cpu_node, group_id);
of_node_put(cpu_node);
return cache;
@@ -855,7 +893,7 @@ static void cache_cpu_clear(struct cache *cache, int cpu)
struct cache *next = cache->next_local;
WARN_ONCE(!cpumask_test_cpu(cpu, &cache->shared_cpu_map),
- "CPU %i not accounted in %pOF(%s)\n",
+ "CPU %i not accounted in %pOFP(%s)\n",
cpu, cache->ofnode,
cache_type_string(cache));
diff --git a/arch/powerpc/kernel/compat_audit.c b/arch/powerpc/kernel/compat_audit.c
index 55c6ccda0a85..d92ffe4e5dc1 100644
--- a/arch/powerpc/kernel/compat_audit.c
+++ b/arch/powerpc/kernel/compat_audit.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#undef __powerpc64__
+#include <linux/audit_arch.h>
#include <asm/unistd.h>
unsigned ppc32_dir_class[] = {
@@ -31,14 +32,16 @@ int ppc32_classify_syscall(unsigned syscall)
{
switch(syscall) {
case __NR_open:
- return 2;
+ return AUDITSC_OPEN;
case __NR_openat:
- return 3;
+ return AUDITSC_OPENAT;
case __NR_socketcall:
- return 4;
+ return AUDITSC_SOCKETCALL;
case __NR_execve:
- return 5;
+ return AUDITSC_EXECVE;
+ case __NR_openat2:
+ return AUDITSC_OPENAT2;
default:
- return 1;
+ return AUDITSC_COMPAT;
}
}
diff --git a/arch/powerpc/kernel/cpu_setup_6xx.S b/arch/powerpc/kernel/cpu_setup_6xx.S
index f6517f67265a..f8b5ff64b604 100644
--- a/arch/powerpc/kernel/cpu_setup_6xx.S
+++ b/arch/powerpc/kernel/cpu_setup_6xx.S
@@ -288,6 +288,7 @@ _GLOBAL(__init_fpu_registers)
mtmsr r10
isync
blr
+_ASM_NOKPROBE_SYMBOL(__init_fpu_registers)
/* Definitions for the table use to save CPU states */
@@ -483,4 +484,5 @@ _GLOBAL(__restore_cpu_setup)
1:
mtcr r7
blr
+_ASM_NOKPROBE_SYMBOL(__restore_cpu_setup)
diff --git a/arch/powerpc/kernel/cpu_setup_fsl_booke.S b/arch/powerpc/kernel/cpu_setup_e500.S
index 1d308780e0d3..2ab25161b0ad 100644
--- a/arch/powerpc/kernel/cpu_setup_fsl_booke.S
+++ b/arch/powerpc/kernel/cpu_setup_e500.S
@@ -12,7 +12,7 @@
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/ppc_asm.h>
-#include <asm/nohash/mmu-book3e.h>
+#include <asm/nohash/mmu-e500.h>
#include <asm/asm-offsets.h>
#include <asm/mpc85xx.h>
@@ -108,16 +108,7 @@ _GLOBAL(__setup_cpu_e6500)
#endif /* CONFIG_PPC_E500MC */
#ifdef CONFIG_PPC32
-#ifdef CONFIG_E200
-_GLOBAL(__setup_cpu_e200)
- /* enable dedicated debug exception handling resources (Debug APU) */
- mfspr r3,SPRN_HID0
- ori r3,r3,HID0_DAPUEN@l
- mtspr SPRN_HID0,r3
- b __setup_e200_ivors
-#endif /* CONFIG_E200 */
-
-#ifdef CONFIG_E500
+#ifdef CONFIG_PPC_E500
#ifndef CONFIG_PPC_E500MC
_GLOBAL(__setup_cpu_e500v1)
_GLOBAL(__setup_cpu_e500v2)
@@ -165,7 +156,7 @@ _GLOBAL(__setup_cpu_e5500)
mtlr r5
blr
#endif /* CONFIG_PPC_E500MC */
-#endif /* CONFIG_E500 */
+#endif /* CONFIG_PPC_E500 */
#endif /* CONFIG_PPC32 */
#ifdef CONFIG_PPC_BOOK3E_64
diff --git a/arch/powerpc/kernel/cpu_setup_power.S b/arch/powerpc/kernel/cpu_setup_power.S
deleted file mode 100644
index a460298c7ddb..000000000000
--- a/arch/powerpc/kernel/cpu_setup_power.S
+++ /dev/null
@@ -1,219 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * This file contains low level CPU setup functions.
- * Copyright (C) 2003 Benjamin Herrenschmidt (benh@kernel.crashing.org)
- */
-
-#include <asm/processor.h>
-#include <asm/page.h>
-#include <asm/cputable.h>
-#include <asm/ppc_asm.h>
-#include <asm/asm-offsets.h>
-#include <asm/cache.h>
-#include <asm/book3s/64/mmu-hash.h>
-
-/* Entry: r3 = crap, r4 = ptr to cputable entry
- *
- * Note that we can be called twice for pseudo-PVRs
- */
-_GLOBAL(__setup_cpu_power7)
- mflr r11
- bl __init_hvmode_206
- mtlr r11
- beqlr
- li r0,0
- mtspr SPRN_LPID,r0
- LOAD_REG_IMMEDIATE(r0, PCR_MASK)
- mtspr SPRN_PCR,r0
- mfspr r3,SPRN_LPCR
- li r4,(LPCR_LPES1 >> LPCR_LPES_SH)
- bl __init_LPCR_ISA206
- mtlr r11
- blr
-
-_GLOBAL(__restore_cpu_power7)
- mflr r11
- mfmsr r3
- rldicl. r0,r3,4,63
- beqlr
- li r0,0
- mtspr SPRN_LPID,r0
- LOAD_REG_IMMEDIATE(r0, PCR_MASK)
- mtspr SPRN_PCR,r0
- mfspr r3,SPRN_LPCR
- li r4,(LPCR_LPES1 >> LPCR_LPES_SH)
- bl __init_LPCR_ISA206
- mtlr r11
- blr
-
-_GLOBAL(__setup_cpu_power8)
- mflr r11
- bl __init_FSCR
- bl __init_PMU
- bl __init_PMU_ISA207
- bl __init_hvmode_206
- mtlr r11
- beqlr
- li r0,0
- mtspr SPRN_LPID,r0
- LOAD_REG_IMMEDIATE(r0, PCR_MASK)
- mtspr SPRN_PCR,r0
- mfspr r3,SPRN_LPCR
- ori r3, r3, LPCR_PECEDH
- li r4,0 /* LPES = 0 */
- bl __init_LPCR_ISA206
- bl __init_HFSCR
- bl __init_PMU_HV
- bl __init_PMU_HV_ISA207
- mtlr r11
- blr
-
-_GLOBAL(__restore_cpu_power8)
- mflr r11
- bl __init_FSCR
- bl __init_PMU
- bl __init_PMU_ISA207
- mfmsr r3
- rldicl. r0,r3,4,63
- mtlr r11
- beqlr
- li r0,0
- mtspr SPRN_LPID,r0
- LOAD_REG_IMMEDIATE(r0, PCR_MASK)
- mtspr SPRN_PCR,r0
- mfspr r3,SPRN_LPCR
- ori r3, r3, LPCR_PECEDH
- li r4,0 /* LPES = 0 */
- bl __init_LPCR_ISA206
- bl __init_HFSCR
- bl __init_PMU_HV
- bl __init_PMU_HV_ISA207
- mtlr r11
- blr
-
-_GLOBAL(__setup_cpu_power9)
- mflr r11
- bl __init_FSCR
- bl __init_PMU
- bl __init_hvmode_206
- mtlr r11
- beqlr
- li r0,0
- mtspr SPRN_PSSCR,r0
- mtspr SPRN_LPID,r0
- mtspr SPRN_PID,r0
- LOAD_REG_IMMEDIATE(r0, PCR_MASK)
- mtspr SPRN_PCR,r0
- mfspr r3,SPRN_LPCR
- LOAD_REG_IMMEDIATE(r4, LPCR_PECEDH | LPCR_PECE_HVEE | LPCR_HVICE | LPCR_HEIC)
- or r3, r3, r4
- LOAD_REG_IMMEDIATE(r4, LPCR_UPRT | LPCR_HR)
- andc r3, r3, r4
- li r4,0 /* LPES = 0 */
- bl __init_LPCR_ISA300
- bl __init_HFSCR
- bl __init_PMU_HV
- mtlr r11
- blr
-
-_GLOBAL(__restore_cpu_power9)
- mflr r11
- bl __init_FSCR
- bl __init_PMU
- mfmsr r3
- rldicl. r0,r3,4,63
- mtlr r11
- beqlr
- li r0,0
- mtspr SPRN_PSSCR,r0
- mtspr SPRN_LPID,r0
- mtspr SPRN_PID,r0
- LOAD_REG_IMMEDIATE(r0, PCR_MASK)
- mtspr SPRN_PCR,r0
- mfspr r3,SPRN_LPCR
- LOAD_REG_IMMEDIATE(r4, LPCR_PECEDH | LPCR_PECE_HVEE | LPCR_HVICE | LPCR_HEIC)
- or r3, r3, r4
- LOAD_REG_IMMEDIATE(r4, LPCR_UPRT | LPCR_HR)
- andc r3, r3, r4
- li r4,0 /* LPES = 0 */
- bl __init_LPCR_ISA300
- bl __init_HFSCR
- bl __init_PMU_HV
- mtlr r11
- blr
-
-__init_hvmode_206:
- /* Disable CPU_FTR_HVMODE and exit if MSR:HV is not set */
- mfmsr r3
- rldicl. r0,r3,4,63
- bnelr
- ld r5,CPU_SPEC_FEATURES(r4)
- LOAD_REG_IMMEDIATE(r6,CPU_FTR_HVMODE | CPU_FTR_P9_TM_HV_ASSIST)
- andc r5,r5,r6
- std r5,CPU_SPEC_FEATURES(r4)
- blr
-
-__init_LPCR_ISA206:
- /* Setup a sane LPCR:
- * Called with initial LPCR in R3 and desired LPES 2-bit value in R4
- *
- * LPES = 0b01 (HSRR0/1 used for 0x500)
- * PECE = 0b111
- * DPFD = 4
- * HDICE = 0
- * VC = 0b100 (VPM0=1, VPM1=0, ISL=0)
- * VRMASD = 0b10000 (L=1, LP=00)
- *
- * Other bits untouched for now
- */
- li r5,0x10
- rldimi r3,r5, LPCR_VRMASD_SH, 64-LPCR_VRMASD_SH-5
-
- /* POWER9 has no VRMASD */
-__init_LPCR_ISA300:
- rldimi r3,r4, LPCR_LPES_SH, 64-LPCR_LPES_SH-2
- ori r3,r3,(LPCR_PECE0|LPCR_PECE1|LPCR_PECE2)
- li r5,4
- rldimi r3,r5, LPCR_DPFD_SH, 64-LPCR_DPFD_SH-3
- clrrdi r3,r3,1 /* clear HDICE */
- li r5,4
- rldimi r3,r5, LPCR_VC_SH, 0
- mtspr SPRN_LPCR,r3
- isync
- blr
-
-__init_FSCR:
- mfspr r3,SPRN_FSCR
- ori r3,r3,FSCR_TAR|FSCR_DSCR|FSCR_EBB
- mtspr SPRN_FSCR,r3
- blr
-
-__init_HFSCR:
- mfspr r3,SPRN_HFSCR
- ori r3,r3,HFSCR_TAR|HFSCR_TM|HFSCR_BHRB|HFSCR_PM|\
- HFSCR_DSCR|HFSCR_VECVSX|HFSCR_FP|HFSCR_EBB|HFSCR_MSGP
- mtspr SPRN_HFSCR,r3
- blr
-
-__init_PMU_HV:
- li r5,0
- mtspr SPRN_MMCRC,r5
- blr
-
-__init_PMU_HV_ISA207:
- li r5,0
- mtspr SPRN_MMCRH,r5
- blr
-
-__init_PMU:
- li r5,0
- mtspr SPRN_MMCRA,r5
- mtspr SPRN_MMCR0,r5
- mtspr SPRN_MMCR1,r5
- mtspr SPRN_MMCR2,r5
- blr
-
-__init_PMU_ISA207:
- li r5,0
- mtspr SPRN_MMCRS,r5
- blr
diff --git a/arch/powerpc/kernel/cpu_setup_power.c b/arch/powerpc/kernel/cpu_setup_power.c
new file mode 100644
index 000000000000..097c033668f0
--- /dev/null
+++ b/arch/powerpc/kernel/cpu_setup_power.c
@@ -0,0 +1,280 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright 2020, Jordan Niethe, IBM Corporation.
+ *
+ * This file contains low level CPU setup functions.
+ * Originally written in assembly by Benjamin Herrenschmidt & various other
+ * authors.
+ */
+
+#include <asm/reg.h>
+#include <asm/synch.h>
+#include <linux/bitops.h>
+#include <asm/cputable.h>
+#include <asm/cpu_setup.h>
+
+/* Disable CPU_FTR_HVMODE and return false if MSR:HV is not set */
+static bool init_hvmode_206(struct cpu_spec *t)
+{
+ u64 msr;
+
+ msr = mfmsr();
+ if (msr & MSR_HV)
+ return true;
+
+ t->cpu_features &= ~(CPU_FTR_HVMODE | CPU_FTR_P9_TM_HV_ASSIST);
+ return false;
+}
+
+static void init_LPCR_ISA300(u64 lpcr, u64 lpes)
+{
+ /* POWER9 has no VRMASD */
+ lpcr |= (lpes << LPCR_LPES_SH) & LPCR_LPES;
+ lpcr |= LPCR_PECE0|LPCR_PECE1|LPCR_PECE2;
+ lpcr |= (4ull << LPCR_DPFD_SH) & LPCR_DPFD;
+ lpcr &= ~LPCR_HDICE; /* clear HDICE */
+ lpcr |= (4ull << LPCR_VC_SH);
+ mtspr(SPRN_LPCR, lpcr);
+ isync();
+}
+
+/*
+ * Setup a sane LPCR:
+ * Called with initial LPCR and desired LPES 2-bit value
+ *
+ * LPES = 0b01 (HSRR0/1 used for 0x500)
+ * PECE = 0b111
+ * DPFD = 4
+ * HDICE = 0
+ * VC = 0b100 (VPM0=1, VPM1=0, ISL=0)
+ * VRMASD = 0b10000 (L=1, LP=00)
+ *
+ * Other bits untouched for now
+ */
+static void init_LPCR_ISA206(u64 lpcr, u64 lpes)
+{
+ lpcr |= (0x10ull << LPCR_VRMASD_SH) & LPCR_VRMASD;
+ init_LPCR_ISA300(lpcr, lpes);
+}
+
+static void init_FSCR(void)
+{
+ u64 fscr;
+
+ fscr = mfspr(SPRN_FSCR);
+ fscr |= FSCR_TAR|FSCR_EBB;
+ mtspr(SPRN_FSCR, fscr);
+}
+
+static void init_FSCR_power9(void)
+{
+ u64 fscr;
+
+ fscr = mfspr(SPRN_FSCR);
+ fscr |= FSCR_SCV;
+ mtspr(SPRN_FSCR, fscr);
+ init_FSCR();
+}
+
+static void init_FSCR_power10(void)
+{
+ u64 fscr;
+
+ fscr = mfspr(SPRN_FSCR);
+ fscr |= FSCR_PREFIX;
+ mtspr(SPRN_FSCR, fscr);
+ init_FSCR_power9();
+}
+
+static void init_HFSCR(void)
+{
+ u64 hfscr;
+
+ hfscr = mfspr(SPRN_HFSCR);
+ hfscr |= HFSCR_TAR|HFSCR_TM|HFSCR_BHRB|HFSCR_PM|HFSCR_DSCR|\
+ HFSCR_VECVSX|HFSCR_FP|HFSCR_EBB|HFSCR_MSGP;
+ mtspr(SPRN_HFSCR, hfscr);
+}
+
+static void init_PMU_HV(void)
+{
+ mtspr(SPRN_MMCRC, 0);
+}
+
+static void init_PMU_HV_ISA207(void)
+{
+ mtspr(SPRN_MMCRH, 0);
+}
+
+static void init_PMU(void)
+{
+ mtspr(SPRN_MMCRA, 0);
+ mtspr(SPRN_MMCR0, MMCR0_FC);
+ mtspr(SPRN_MMCR1, 0);
+ mtspr(SPRN_MMCR2, 0);
+}
+
+static void init_PMU_ISA207(void)
+{
+ mtspr(SPRN_MMCRS, 0);
+}
+
+static void init_PMU_ISA31(void)
+{
+ mtspr(SPRN_MMCR3, 0);
+ mtspr(SPRN_MMCRA, MMCRA_BHRB_DISABLE);
+ mtspr(SPRN_MMCR0, MMCR0_FC | MMCR0_PMCCEXT);
+}
+
+/*
+ * Note that we can be called twice for pseudo-PVRs.
+ * The offset parameter is not used.
+ */
+
+void __setup_cpu_power7(unsigned long offset, struct cpu_spec *t)
+{
+ if (!init_hvmode_206(t))
+ return;
+
+ mtspr(SPRN_LPID, 0);
+ mtspr(SPRN_AMOR, ~0);
+ mtspr(SPRN_PCR, PCR_MASK);
+ init_LPCR_ISA206(mfspr(SPRN_LPCR), LPCR_LPES1 >> LPCR_LPES_SH);
+}
+
+void __restore_cpu_power7(void)
+{
+ u64 msr;
+
+ msr = mfmsr();
+ if (!(msr & MSR_HV))
+ return;
+
+ mtspr(SPRN_LPID, 0);
+ mtspr(SPRN_AMOR, ~0);
+ mtspr(SPRN_PCR, PCR_MASK);
+ init_LPCR_ISA206(mfspr(SPRN_LPCR), LPCR_LPES1 >> LPCR_LPES_SH);
+}
+
+void __setup_cpu_power8(unsigned long offset, struct cpu_spec *t)
+{
+ init_FSCR();
+ init_PMU();
+ init_PMU_ISA207();
+
+ if (!init_hvmode_206(t))
+ return;
+
+ mtspr(SPRN_LPID, 0);
+ mtspr(SPRN_AMOR, ~0);
+ mtspr(SPRN_PCR, PCR_MASK);
+ init_LPCR_ISA206(mfspr(SPRN_LPCR) | LPCR_PECEDH, 0); /* LPES = 0 */
+ init_HFSCR();
+ init_PMU_HV();
+ init_PMU_HV_ISA207();
+}
+
+void __restore_cpu_power8(void)
+{
+ u64 msr;
+
+ init_FSCR();
+ init_PMU();
+ init_PMU_ISA207();
+
+ msr = mfmsr();
+ if (!(msr & MSR_HV))
+ return;
+
+ mtspr(SPRN_LPID, 0);
+ mtspr(SPRN_AMOR, ~0);
+ mtspr(SPRN_PCR, PCR_MASK);
+ init_LPCR_ISA206(mfspr(SPRN_LPCR) | LPCR_PECEDH, 0); /* LPES = 0 */
+ init_HFSCR();
+ init_PMU_HV();
+ init_PMU_HV_ISA207();
+}
+
+void __setup_cpu_power9(unsigned long offset, struct cpu_spec *t)
+{
+ init_FSCR_power9();
+ init_PMU();
+
+ if (!init_hvmode_206(t))
+ return;
+
+ mtspr(SPRN_PSSCR, 0);
+ mtspr(SPRN_LPID, 0);
+ mtspr(SPRN_PID, 0);
+ mtspr(SPRN_AMOR, ~0);
+ mtspr(SPRN_PCR, PCR_MASK);
+ init_LPCR_ISA300((mfspr(SPRN_LPCR) | LPCR_PECEDH | LPCR_PECE_HVEE |\
+ LPCR_HVICE | LPCR_HEIC) & ~(LPCR_UPRT | LPCR_HR), 0);
+ init_HFSCR();
+ init_PMU_HV();
+}
+
+void __restore_cpu_power9(void)
+{
+ u64 msr;
+
+ init_FSCR_power9();
+ init_PMU();
+
+ msr = mfmsr();
+ if (!(msr & MSR_HV))
+ return;
+
+ mtspr(SPRN_PSSCR, 0);
+ mtspr(SPRN_LPID, 0);
+ mtspr(SPRN_PID, 0);
+ mtspr(SPRN_AMOR, ~0);
+ mtspr(SPRN_PCR, PCR_MASK);
+ init_LPCR_ISA300((mfspr(SPRN_LPCR) | LPCR_PECEDH | LPCR_PECE_HVEE |\
+ LPCR_HVICE | LPCR_HEIC) & ~(LPCR_UPRT | LPCR_HR), 0);
+ init_HFSCR();
+ init_PMU_HV();
+}
+
+void __setup_cpu_power10(unsigned long offset, struct cpu_spec *t)
+{
+ init_FSCR_power10();
+ init_PMU();
+ init_PMU_ISA31();
+
+ if (!init_hvmode_206(t))
+ return;
+
+ mtspr(SPRN_PSSCR, 0);
+ mtspr(SPRN_LPID, 0);
+ mtspr(SPRN_PID, 0);
+ mtspr(SPRN_AMOR, ~0);
+ mtspr(SPRN_PCR, PCR_MASK);
+ init_LPCR_ISA300((mfspr(SPRN_LPCR) | LPCR_PECEDH | LPCR_PECE_HVEE |\
+ LPCR_HVICE | LPCR_HEIC) & ~(LPCR_UPRT | LPCR_HR), 0);
+ init_HFSCR();
+ init_PMU_HV();
+}
+
+void __restore_cpu_power10(void)
+{
+ u64 msr;
+
+ init_FSCR_power10();
+ init_PMU();
+ init_PMU_ISA31();
+
+ msr = mfmsr();
+ if (!(msr & MSR_HV))
+ return;
+
+ mtspr(SPRN_PSSCR, 0);
+ mtspr(SPRN_LPID, 0);
+ mtspr(SPRN_PID, 0);
+ mtspr(SPRN_AMOR, ~0);
+ mtspr(SPRN_PCR, PCR_MASK);
+ init_LPCR_ISA300((mfspr(SPRN_LPCR) | LPCR_PECEDH | LPCR_PECE_HVEE |\
+ LPCR_HVICE | LPCR_HEIC) & ~(LPCR_UPRT | LPCR_HR), 0);
+ init_HFSCR();
+ init_PMU_HV();
+}
diff --git a/arch/powerpc/kernel/cpu_specs.h b/arch/powerpc/kernel/cpu_specs.h
new file mode 100644
index 000000000000..85ded3f77204
--- /dev/null
+++ b/arch/powerpc/kernel/cpu_specs.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#ifdef CONFIG_40x
+#include "cpu_specs_40x.h"
+#endif
+
+#ifdef CONFIG_PPC_47x
+#include "cpu_specs_47x.h"
+#elif defined(CONFIG_44x)
+#include "cpu_specs_44x.h"
+#endif
+
+#ifdef CONFIG_PPC_8xx
+#include "cpu_specs_8xx.h"
+#endif
+
+#ifdef CONFIG_PPC_E500MC
+#include "cpu_specs_e500mc.h"
+#elif defined(CONFIG_PPC_85xx)
+#include "cpu_specs_85xx.h"
+#endif
+
+#ifdef CONFIG_PPC_BOOK3S_32
+#include "cpu_specs_book3s_32.h"
+#endif
+
+#ifdef CONFIG_PPC_BOOK3S_64
+#include "cpu_specs_book3s_64.h"
+#endif
diff --git a/arch/powerpc/kernel/cpu_specs_40x.h b/arch/powerpc/kernel/cpu_specs_40x.h
new file mode 100644
index 000000000000..a1362a75b8c8
--- /dev/null
+++ b/arch/powerpc/kernel/cpu_specs_40x.h
@@ -0,0 +1,280 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2001 Ben. Herrenschmidt (benh@kernel.crashing.org)
+ */
+
+static struct cpu_spec cpu_specs[] __initdata = {
+ { /* STB 04xxx */
+ .pvr_mask = 0xffff0000,
+ .pvr_value = 0x41810000,
+ .cpu_name = "STB04xxx",
+ .cpu_features = CPU_FTRS_40X,
+ .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU |
+ PPC_FEATURE_HAS_4xxMAC,
+ .mmu_features = MMU_FTR_TYPE_40x,
+ .icache_bsize = 32,
+ .dcache_bsize = 32,
+ .machine_check = machine_check_4xx,
+ .platform = "ppc405",
+ },
+ { /* NP405L */
+ .pvr_mask = 0xffff0000,
+ .pvr_value = 0x41610000,
+ .cpu_name = "NP405L",
+ .cpu_features = CPU_FTRS_40X,
+ .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU |
+ PPC_FEATURE_HAS_4xxMAC,
+ .mmu_features = MMU_FTR_TYPE_40x,
+ .icache_bsize = 32,
+ .dcache_bsize = 32,
+ .machine_check = machine_check_4xx,
+ .platform = "ppc405",
+ },
+ { /* NP4GS3 */
+ .pvr_mask = 0xffff0000,
+ .pvr_value = 0x40B10000,
+ .cpu_name = "NP4GS3",
+ .cpu_features = CPU_FTRS_40X,
+ .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU |
+ PPC_FEATURE_HAS_4xxMAC,
+ .mmu_features = MMU_FTR_TYPE_40x,
+ .icache_bsize = 32,
+ .dcache_bsize = 32,
+ .machine_check = machine_check_4xx,
+ .platform = "ppc405",
+ },
+ { /* NP405H */
+ .pvr_mask = 0xffff0000,
+ .pvr_value = 0x41410000,
+ .cpu_name = "NP405H",
+ .cpu_features = CPU_FTRS_40X,
+ .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU |
+ PPC_FEATURE_HAS_4xxMAC,
+ .mmu_features = MMU_FTR_TYPE_40x,
+ .icache_bsize = 32,
+ .dcache_bsize = 32,
+ .machine_check = machine_check_4xx,
+ .platform = "ppc405",
+ },
+ { /* 405GPr */
+ .pvr_mask = 0xffff0000,
+ .pvr_value = 0x50910000,
+ .cpu_name = "405GPr",
+ .cpu_features = CPU_FTRS_40X,
+ .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU |
+ PPC_FEATURE_HAS_4xxMAC,
+ .mmu_features = MMU_FTR_TYPE_40x,
+ .icache_bsize = 32,
+ .dcache_bsize = 32,
+ .machine_check = machine_check_4xx,
+ .platform = "ppc405",
+ },
+ { /* STBx25xx */
+ .pvr_mask = 0xffff0000,
+ .pvr_value = 0x51510000,
+ .cpu_name = "STBx25xx",
+ .cpu_features = CPU_FTRS_40X,
+ .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU |
+ PPC_FEATURE_HAS_4xxMAC,
+ .mmu_features = MMU_FTR_TYPE_40x,
+ .icache_bsize = 32,
+ .dcache_bsize = 32,
+ .machine_check = machine_check_4xx,
+ .platform = "ppc405",
+ },
+ { /* 405LP */
+ .pvr_mask = 0xffff0000,
+ .pvr_value = 0x41F10000,
+ .cpu_name = "405LP",
+ .cpu_features = CPU_FTRS_40X,
+ .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
+ .mmu_features = MMU_FTR_TYPE_40x,
+ .icache_bsize = 32,
+ .dcache_bsize = 32,
+ .machine_check = machine_check_4xx,
+ .platform = "ppc405",
+ },
+ { /* 405EP */
+ .pvr_mask = 0xffff0000,
+ .pvr_value = 0x51210000,
+ .cpu_name = "405EP",
+ .cpu_features = CPU_FTRS_40X,
+ .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU |
+ PPC_FEATURE_HAS_4xxMAC,
+ .mmu_features = MMU_FTR_TYPE_40x,
+ .icache_bsize = 32,
+ .dcache_bsize = 32,
+ .machine_check = machine_check_4xx,
+ .platform = "ppc405",
+ },
+ { /* 405EX Rev. A/B with Security */
+ .pvr_mask = 0xffff000f,
+ .pvr_value = 0x12910007,
+ .cpu_name = "405EX Rev. A/B",
+ .cpu_features = CPU_FTRS_40X,
+ .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU |
+ PPC_FEATURE_HAS_4xxMAC,
+ .mmu_features = MMU_FTR_TYPE_40x,
+ .icache_bsize = 32,
+ .dcache_bsize = 32,
+ .machine_check = machine_check_4xx,
+ .platform = "ppc405",
+ },
+ { /* 405EX Rev. C without Security */
+ .pvr_mask = 0xffff000f,
+ .pvr_value = 0x1291000d,
+ .cpu_name = "405EX Rev. C",
+ .cpu_features = CPU_FTRS_40X,
+ .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU |
+ PPC_FEATURE_HAS_4xxMAC,
+ .mmu_features = MMU_FTR_TYPE_40x,
+ .icache_bsize = 32,
+ .dcache_bsize = 32,
+ .machine_check = machine_check_4xx,
+ .platform = "ppc405",
+ },
+ { /* 405EX Rev. C with Security */
+ .pvr_mask = 0xffff000f,
+ .pvr_value = 0x1291000f,
+ .cpu_name = "405EX Rev. C",
+ .cpu_features = CPU_FTRS_40X,
+ .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU |
+ PPC_FEATURE_HAS_4xxMAC,
+ .mmu_features = MMU_FTR_TYPE_40x,
+ .icache_bsize = 32,
+ .dcache_bsize = 32,
+ .machine_check = machine_check_4xx,
+ .platform = "ppc405",
+ },
+ { /* 405EX Rev. D without Security */
+ .pvr_mask = 0xffff000f,
+ .pvr_value = 0x12910003,
+ .cpu_name = "405EX Rev. D",
+ .cpu_features = CPU_FTRS_40X,
+ .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU |
+ PPC_FEATURE_HAS_4xxMAC,
+ .mmu_features = MMU_FTR_TYPE_40x,
+ .icache_bsize = 32,
+ .dcache_bsize = 32,
+ .machine_check = machine_check_4xx,
+ .platform = "ppc405",
+ },
+ { /* 405EX Rev. D with Security */
+ .pvr_mask = 0xffff000f,
+ .pvr_value = 0x12910005,
+ .cpu_name = "405EX Rev. D",
+ .cpu_features = CPU_FTRS_40X,
+ .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU |
+ PPC_FEATURE_HAS_4xxMAC,
+ .mmu_features = MMU_FTR_TYPE_40x,
+ .icache_bsize = 32,
+ .dcache_bsize = 32,
+ .machine_check = machine_check_4xx,
+ .platform = "ppc405",
+ },
+ { /* 405EXr Rev. A/B without Security */
+ .pvr_mask = 0xffff000f,
+ .pvr_value = 0x12910001,
+ .cpu_name = "405EXr Rev. A/B",
+ .cpu_features = CPU_FTRS_40X,
+ .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU |
+ PPC_FEATURE_HAS_4xxMAC,
+ .mmu_features = MMU_FTR_TYPE_40x,
+ .icache_bsize = 32,
+ .dcache_bsize = 32,
+ .machine_check = machine_check_4xx,
+ .platform = "ppc405",
+ },
+ { /* 405EXr Rev. C without Security */
+ .pvr_mask = 0xffff000f,
+ .pvr_value = 0x12910009,
+ .cpu_name = "405EXr Rev. C",
+ .cpu_features = CPU_FTRS_40X,
+ .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU |
+ PPC_FEATURE_HAS_4xxMAC,
+ .mmu_features = MMU_FTR_TYPE_40x,
+ .icache_bsize = 32,
+ .dcache_bsize = 32,
+ .machine_check = machine_check_4xx,
+ .platform = "ppc405",
+ },
+ { /* 405EXr Rev. C with Security */
+ .pvr_mask = 0xffff000f,
+ .pvr_value = 0x1291000b,
+ .cpu_name = "405EXr Rev. C",
+ .cpu_features = CPU_FTRS_40X,
+ .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU |
+ PPC_FEATURE_HAS_4xxMAC,
+ .mmu_features = MMU_FTR_TYPE_40x,
+ .icache_bsize = 32,
+ .dcache_bsize = 32,
+ .machine_check = machine_check_4xx,
+ .platform = "ppc405",
+ },
+ { /* 405EXr Rev. D without Security */
+ .pvr_mask = 0xffff000f,
+ .pvr_value = 0x12910000,
+ .cpu_name = "405EXr Rev. D",
+ .cpu_features = CPU_FTRS_40X,
+ .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU |
+ PPC_FEATURE_HAS_4xxMAC,
+ .mmu_features = MMU_FTR_TYPE_40x,
+ .icache_bsize = 32,
+ .dcache_bsize = 32,
+ .machine_check = machine_check_4xx,
+ .platform = "ppc405",
+ },
+ { /* 405EXr Rev. D with Security */
+ .pvr_mask = 0xffff000f,
+ .pvr_value = 0x12910002,
+ .cpu_name = "405EXr Rev. D",
+ .cpu_features = CPU_FTRS_40X,
+ .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU |
+ PPC_FEATURE_HAS_4xxMAC,
+ .mmu_features = MMU_FTR_TYPE_40x,
+ .icache_bsize = 32,
+ .dcache_bsize = 32,
+ .machine_check = machine_check_4xx,
+ .platform = "ppc405",
+ },
+ {
+ /* 405EZ */
+ .pvr_mask = 0xffff0000,
+ .pvr_value = 0x41510000,
+ .cpu_name = "405EZ",
+ .cpu_features = CPU_FTRS_40X,
+ .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU |
+ PPC_FEATURE_HAS_4xxMAC,
+ .mmu_features = MMU_FTR_TYPE_40x,
+ .icache_bsize = 32,
+ .dcache_bsize = 32,
+ .machine_check = machine_check_4xx,
+ .platform = "ppc405",
+ },
+ { /* APM8018X */
+ .pvr_mask = 0xffff0000,
+ .pvr_value = 0x7ff11432,
+ .cpu_name = "APM8018X",
+ .cpu_features = CPU_FTRS_40X,
+ .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU |
+ PPC_FEATURE_HAS_4xxMAC,
+ .mmu_features = MMU_FTR_TYPE_40x,
+ .icache_bsize = 32,
+ .dcache_bsize = 32,
+ .machine_check = machine_check_4xx,
+ .platform = "ppc405",
+ },
+ { /* default match */
+ .pvr_mask = 0x00000000,
+ .pvr_value = 0x00000000,
+ .cpu_name = "(generic 40x PPC)",
+ .cpu_features = CPU_FTRS_40X,
+ .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU |
+ PPC_FEATURE_HAS_4xxMAC,
+ .mmu_features = MMU_FTR_TYPE_40x,
+ .icache_bsize = 32,
+ .dcache_bsize = 32,
+ .machine_check = machine_check_4xx,
+ .platform = "ppc405",
+ }
+};
diff --git a/arch/powerpc/kernel/cpu_specs_44x.h b/arch/powerpc/kernel/cpu_specs_44x.h
new file mode 100644
index 000000000000..69c4cdc0cdee
--- /dev/null
+++ b/arch/powerpc/kernel/cpu_specs_44x.h
@@ -0,0 +1,304 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2001 Ben. Herrenschmidt (benh@kernel.crashing.org)
+ */
+
+#define COMMON_USER_BOOKE (PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | \
+ PPC_FEATURE_BOOKE)
+
+static struct cpu_spec cpu_specs[] __initdata = {
+ {
+ .pvr_mask = 0xf0000fff,
+ .pvr_value = 0x40000850,
+ .cpu_name = "440GR Rev. A",
+ .cpu_features = CPU_FTRS_44X,
+ .cpu_user_features = COMMON_USER_BOOKE,
+ .mmu_features = MMU_FTR_TYPE_44x,
+ .icache_bsize = 32,
+ .dcache_bsize = 32,
+ .machine_check = machine_check_4xx,
+ .platform = "ppc440",
+ },
+ { /* Use logical PVR for 440EP (logical pvr = pvr | 0x8) */
+ .pvr_mask = 0xf0000fff,
+ .pvr_value = 0x40000858,
+ .cpu_name = "440EP Rev. A",
+ .cpu_features = CPU_FTRS_44X,
+ .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU,
+ .mmu_features = MMU_FTR_TYPE_44x,
+ .icache_bsize = 32,
+ .dcache_bsize = 32,
+ .cpu_setup = __setup_cpu_440ep,
+ .machine_check = machine_check_4xx,
+ .platform = "ppc440",
+ },
+ {
+ .pvr_mask = 0xf0000fff,
+ .pvr_value = 0x400008d3,
+ .cpu_name = "440GR Rev. B",
+ .cpu_features = CPU_FTRS_44X,
+ .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU,
+ .mmu_features = MMU_FTR_TYPE_44x,
+ .icache_bsize = 32,
+ .dcache_bsize = 32,
+ .machine_check = machine_check_4xx,
+ .platform = "ppc440",
+ },
+ { /* Matches both physical and logical PVR for 440EP (logical pvr = pvr | 0x8) */
+ .pvr_mask = 0xf0000ff7,
+ .pvr_value = 0x400008d4,
+ .cpu_name = "440EP Rev. C",
+ .cpu_features = CPU_FTRS_44X,
+ .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU,
+ .mmu_features = MMU_FTR_TYPE_44x,
+ .icache_bsize = 32,
+ .dcache_bsize = 32,
+ .cpu_setup = __setup_cpu_440ep,
+ .machine_check = machine_check_4xx,
+ .platform = "ppc440",
+ },
+ { /* Use logical PVR for 440EP (logical pvr = pvr | 0x8) */
+ .pvr_mask = 0xf0000fff,
+ .pvr_value = 0x400008db,
+ .cpu_name = "440EP Rev. B",
+ .cpu_features = CPU_FTRS_44X,
+ .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU,
+ .mmu_features = MMU_FTR_TYPE_44x,
+ .icache_bsize = 32,
+ .dcache_bsize = 32,
+ .cpu_setup = __setup_cpu_440ep,
+ .machine_check = machine_check_4xx,
+ .platform = "ppc440",
+ },
+ { /* 440GRX */
+ .pvr_mask = 0xf0000ffb,
+ .pvr_value = 0x200008D0,
+ .cpu_name = "440GRX",
+ .cpu_features = CPU_FTRS_44X,
+ .cpu_user_features = COMMON_USER_BOOKE,
+ .mmu_features = MMU_FTR_TYPE_44x,
+ .icache_bsize = 32,
+ .dcache_bsize = 32,
+ .cpu_setup = __setup_cpu_440grx,
+ .machine_check = machine_check_440A,
+ .platform = "ppc440",
+ },
+ { /* Use logical PVR for 440EPx (logical pvr = pvr | 0x8) */
+ .pvr_mask = 0xf0000ffb,
+ .pvr_value = 0x200008D8,
+ .cpu_name = "440EPX",
+ .cpu_features = CPU_FTRS_44X,
+ .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU,
+ .mmu_features = MMU_FTR_TYPE_44x,
+ .icache_bsize = 32,
+ .dcache_bsize = 32,
+ .cpu_setup = __setup_cpu_440epx,
+ .machine_check = machine_check_440A,
+ .platform = "ppc440",
+ },
+ { /* 440GP Rev. B */
+ .pvr_mask = 0xf0000fff,
+ .pvr_value = 0x40000440,
+ .cpu_name = "440GP Rev. B",
+ .cpu_features = CPU_FTRS_44X,
+ .cpu_user_features = COMMON_USER_BOOKE,
+ .mmu_features = MMU_FTR_TYPE_44x,
+ .icache_bsize = 32,
+ .dcache_bsize = 32,
+ .machine_check = machine_check_4xx,
+ .platform = "ppc440gp",
+ },
+ { /* 440GP Rev. C */
+ .pvr_mask = 0xf0000fff,
+ .pvr_value = 0x40000481,
+ .cpu_name = "440GP Rev. C",
+ .cpu_features = CPU_FTRS_44X,
+ .cpu_user_features = COMMON_USER_BOOKE,
+ .mmu_features = MMU_FTR_TYPE_44x,
+ .icache_bsize = 32,
+ .dcache_bsize = 32,
+ .machine_check = machine_check_4xx,
+ .platform = "ppc440gp",
+ },
+ { /* 440GX Rev. A */
+ .pvr_mask = 0xf0000fff,
+ .pvr_value = 0x50000850,
+ .cpu_name = "440GX Rev. A",
+ .cpu_features = CPU_FTRS_44X,
+ .cpu_user_features = COMMON_USER_BOOKE,
+ .mmu_features = MMU_FTR_TYPE_44x,
+ .icache_bsize = 32,
+ .dcache_bsize = 32,
+ .cpu_setup = __setup_cpu_440gx,
+ .machine_check = machine_check_440A,
+ .platform = "ppc440",
+ },
+ { /* 440GX Rev. B */
+ .pvr_mask = 0xf0000fff,
+ .pvr_value = 0x50000851,
+ .cpu_name = "440GX Rev. B",
+ .cpu_features = CPU_FTRS_44X,
+ .cpu_user_features = COMMON_USER_BOOKE,
+ .mmu_features = MMU_FTR_TYPE_44x,
+ .icache_bsize = 32,
+ .dcache_bsize = 32,
+ .cpu_setup = __setup_cpu_440gx,
+ .machine_check = machine_check_440A,
+ .platform = "ppc440",
+ },
+ { /* 440GX Rev. C */
+ .pvr_mask = 0xf0000fff,
+ .pvr_value = 0x50000892,
+ .cpu_name = "440GX Rev. C",
+ .cpu_features = CPU_FTRS_44X,
+ .cpu_user_features = COMMON_USER_BOOKE,
+ .mmu_features = MMU_FTR_TYPE_44x,
+ .icache_bsize = 32,
+ .dcache_bsize = 32,
+ .cpu_setup = __setup_cpu_440gx,
+ .machine_check = machine_check_440A,
+ .platform = "ppc440",
+ },
+ { /* 440GX Rev. F */
+ .pvr_mask = 0xf0000fff,
+ .pvr_value = 0x50000894,
+ .cpu_name = "440GX Rev. F",
+ .cpu_features = CPU_FTRS_44X,
+ .cpu_user_features = COMMON_USER_BOOKE,
+ .mmu_features = MMU_FTR_TYPE_44x,
+ .icache_bsize = 32,
+ .dcache_bsize = 32,
+ .cpu_setup = __setup_cpu_440gx,
+ .machine_check = machine_check_440A,
+ .platform = "ppc440",
+ },
+ { /* 440SP Rev. A */
+ .pvr_mask = 0xfff00fff,
+ .pvr_value = 0x53200891,
+ .cpu_name = "440SP Rev. A",
+ .cpu_features = CPU_FTRS_44X,
+ .cpu_user_features = COMMON_USER_BOOKE,
+ .mmu_features = MMU_FTR_TYPE_44x,
+ .icache_bsize = 32,
+ .dcache_bsize = 32,
+ .machine_check = machine_check_4xx,
+ .platform = "ppc440",
+ },
+ { /* 440SPe Rev. A */
+ .pvr_mask = 0xfff00fff,
+ .pvr_value = 0x53400890,
+ .cpu_name = "440SPe Rev. A",
+ .cpu_features = CPU_FTRS_44X,
+ .cpu_user_features = COMMON_USER_BOOKE,
+ .mmu_features = MMU_FTR_TYPE_44x,
+ .icache_bsize = 32,
+ .dcache_bsize = 32,
+ .cpu_setup = __setup_cpu_440spe,
+ .machine_check = machine_check_440A,
+ .platform = "ppc440",
+ },
+ { /* 440SPe Rev. B */
+ .pvr_mask = 0xfff00fff,
+ .pvr_value = 0x53400891,
+ .cpu_name = "440SPe Rev. B",
+ .cpu_features = CPU_FTRS_44X,
+ .cpu_user_features = COMMON_USER_BOOKE,
+ .mmu_features = MMU_FTR_TYPE_44x,
+ .icache_bsize = 32,
+ .dcache_bsize = 32,
+ .cpu_setup = __setup_cpu_440spe,
+ .machine_check = machine_check_440A,
+ .platform = "ppc440",
+ },
+ { /* 460EX */
+ .pvr_mask = 0xffff0006,
+ .pvr_value = 0x13020002,
+ .cpu_name = "460EX",
+ .cpu_features = CPU_FTRS_440x6,
+ .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU,
+ .mmu_features = MMU_FTR_TYPE_44x,
+ .icache_bsize = 32,
+ .dcache_bsize = 32,
+ .cpu_setup = __setup_cpu_460ex,
+ .machine_check = machine_check_440A,
+ .platform = "ppc440",
+ },
+ { /* 460EX Rev B */
+ .pvr_mask = 0xffff0007,
+ .pvr_value = 0x13020004,
+ .cpu_name = "460EX Rev. B",
+ .cpu_features = CPU_FTRS_440x6,
+ .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU,
+ .mmu_features = MMU_FTR_TYPE_44x,
+ .icache_bsize = 32,
+ .dcache_bsize = 32,
+ .cpu_setup = __setup_cpu_460ex,
+ .machine_check = machine_check_440A,
+ .platform = "ppc440",
+ },
+ { /* 460GT */
+ .pvr_mask = 0xffff0006,
+ .pvr_value = 0x13020000,
+ .cpu_name = "460GT",
+ .cpu_features = CPU_FTRS_440x6,
+ .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU,
+ .mmu_features = MMU_FTR_TYPE_44x,
+ .icache_bsize = 32,
+ .dcache_bsize = 32,
+ .cpu_setup = __setup_cpu_460gt,
+ .machine_check = machine_check_440A,
+ .platform = "ppc440",
+ },
+ { /* 460GT Rev B */
+ .pvr_mask = 0xffff0007,
+ .pvr_value = 0x13020005,
+ .cpu_name = "460GT Rev. B",
+ .cpu_features = CPU_FTRS_440x6,
+ .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU,
+ .mmu_features = MMU_FTR_TYPE_44x,
+ .icache_bsize = 32,
+ .dcache_bsize = 32,
+ .cpu_setup = __setup_cpu_460gt,
+ .machine_check = machine_check_440A,
+ .platform = "ppc440",
+ },
+ { /* 460SX */
+ .pvr_mask = 0xffffff00,
+ .pvr_value = 0x13541800,
+ .cpu_name = "460SX",
+ .cpu_features = CPU_FTRS_44X,
+ .cpu_user_features = COMMON_USER_BOOKE,
+ .mmu_features = MMU_FTR_TYPE_44x,
+ .icache_bsize = 32,
+ .dcache_bsize = 32,
+ .cpu_setup = __setup_cpu_460sx,
+ .machine_check = machine_check_440A,
+ .platform = "ppc440",
+ },
+ { /* 464 in APM821xx */
+ .pvr_mask = 0xfffffff0,
+ .pvr_value = 0x12C41C80,
+ .cpu_name = "APM821XX",
+ .cpu_features = CPU_FTRS_44X,
+ .cpu_user_features = COMMON_USER_BOOKE |
+ PPC_FEATURE_HAS_FPU,
+ .mmu_features = MMU_FTR_TYPE_44x,
+ .icache_bsize = 32,
+ .dcache_bsize = 32,
+ .cpu_setup = __setup_cpu_apm821xx,
+ .machine_check = machine_check_440A,
+ .platform = "ppc440",
+ },
+ { /* default match */
+ .pvr_mask = 0x00000000,
+ .pvr_value = 0x00000000,
+ .cpu_name = "(generic 44x PPC)",
+ .cpu_features = CPU_FTRS_44X,
+ .cpu_user_features = COMMON_USER_BOOKE,
+ .mmu_features = MMU_FTR_TYPE_44x,
+ .icache_bsize = 32,
+ .dcache_bsize = 32,
+ .machine_check = machine_check_4xx,
+ .platform = "ppc440",
+ }
+};
diff --git a/arch/powerpc/kernel/cpu_specs_47x.h b/arch/powerpc/kernel/cpu_specs_47x.h
new file mode 100644
index 000000000000..3143cd504a51
--- /dev/null
+++ b/arch/powerpc/kernel/cpu_specs_47x.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2001 Ben. Herrenschmidt (benh@kernel.crashing.org)
+ */
+
+#define COMMON_USER_BOOKE (PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | \
+ PPC_FEATURE_BOOKE)
+
+static struct cpu_spec cpu_specs[] __initdata = {
+ { /* 476 DD2 core */
+ .pvr_mask = 0xffffffff,
+ .pvr_value = 0x11a52080,
+ .cpu_name = "476",
+ .cpu_features = CPU_FTRS_47X | CPU_FTR_476_DD2,
+ .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU,
+ .mmu_features = MMU_FTR_TYPE_47x | MMU_FTR_USE_TLBIVAX_BCAST |
+ MMU_FTR_LOCK_BCAST_INVAL,
+ .icache_bsize = 32,
+ .dcache_bsize = 128,
+ .machine_check = machine_check_47x,
+ .platform = "ppc470",
+ },
+ { /* 476fpe */
+ .pvr_mask = 0xffff0000,
+ .pvr_value = 0x7ff50000,
+ .cpu_name = "476fpe",
+ .cpu_features = CPU_FTRS_47X | CPU_FTR_476_DD2,
+ .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU,
+ .mmu_features = MMU_FTR_TYPE_47x | MMU_FTR_USE_TLBIVAX_BCAST |
+ MMU_FTR_LOCK_BCAST_INVAL,
+ .icache_bsize = 32,
+ .dcache_bsize = 128,
+ .machine_check = machine_check_47x,
+ .platform = "ppc470",
+ },
+ { /* 476 iss */
+ .pvr_mask = 0xffff0000,
+ .pvr_value = 0x00050000,
+ .cpu_name = "476",
+ .cpu_features = CPU_FTRS_47X,
+ .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU,
+ .mmu_features = MMU_FTR_TYPE_47x | MMU_FTR_USE_TLBIVAX_BCAST |
+ MMU_FTR_LOCK_BCAST_INVAL,
+ .icache_bsize = 32,
+ .dcache_bsize = 128,
+ .machine_check = machine_check_47x,
+ .platform = "ppc470",
+ },
+ { /* 476 others */
+ .pvr_mask = 0xffff0000,
+ .pvr_value = 0x11a50000,
+ .cpu_name = "476",
+ .cpu_features = CPU_FTRS_47X,
+ .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU,
+ .mmu_features = MMU_FTR_TYPE_47x | MMU_FTR_USE_TLBIVAX_BCAST |
+ MMU_FTR_LOCK_BCAST_INVAL,
+ .icache_bsize = 32,
+ .dcache_bsize = 128,
+ .machine_check = machine_check_47x,
+ .platform = "ppc470",
+ },
+ { /* default match */
+ .pvr_mask = 0x00000000,
+ .pvr_value = 0x00000000,
+ .cpu_name = "(generic 47x PPC)",
+ .cpu_features = CPU_FTRS_47X,
+ .cpu_user_features = COMMON_USER_BOOKE,
+ .mmu_features = MMU_FTR_TYPE_47x,
+ .icache_bsize = 32,
+ .dcache_bsize = 128,
+ .machine_check = machine_check_47x,
+ .platform = "ppc470",
+ }
+};
diff --git a/arch/powerpc/kernel/cpu_specs_85xx.h b/arch/powerpc/kernel/cpu_specs_85xx.h
new file mode 100644
index 000000000000..aaae202c1a89
--- /dev/null
+++ b/arch/powerpc/kernel/cpu_specs_85xx.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2001 Ben. Herrenschmidt (benh@kernel.crashing.org)
+ */
+
+#define COMMON_USER_BOOKE (PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | \
+ PPC_FEATURE_BOOKE)
+
+static struct cpu_spec cpu_specs[] __initdata = {
+ { /* e500 */
+ .pvr_mask = 0xffff0000,
+ .pvr_value = 0x80200000,
+ .cpu_name = "e500",
+ .cpu_features = CPU_FTRS_E500,
+ .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_SPE_COMP |
+ PPC_FEATURE_HAS_EFP_SINGLE_COMP,
+ .cpu_user_features2 = PPC_FEATURE2_ISEL,
+ .mmu_features = MMU_FTR_TYPE_FSL_E,
+ .icache_bsize = 32,
+ .dcache_bsize = 32,
+ .num_pmcs = 4,
+ .cpu_setup = __setup_cpu_e500v1,
+ .machine_check = machine_check_e500,
+ .platform = "ppc8540",
+ },
+ { /* e500v2 */
+ .pvr_mask = 0xffff0000,
+ .pvr_value = 0x80210000,
+ .cpu_name = "e500v2",
+ .cpu_features = CPU_FTRS_E500_2,
+ .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_SPE_COMP |
+ PPC_FEATURE_HAS_EFP_SINGLE_COMP |
+ PPC_FEATURE_HAS_EFP_DOUBLE_COMP,
+ .cpu_user_features2 = PPC_FEATURE2_ISEL,
+ .mmu_features = MMU_FTR_TYPE_FSL_E | MMU_FTR_BIG_PHYS,
+ .icache_bsize = 32,
+ .dcache_bsize = 32,
+ .num_pmcs = 4,
+ .cpu_setup = __setup_cpu_e500v2,
+ .machine_check = machine_check_e500,
+ .platform = "ppc8548",
+ .cpu_down_flush = cpu_down_flush_e500v2,
+ },
+ { /* default match */
+ .pvr_mask = 0x00000000,
+ .pvr_value = 0x00000000,
+ .cpu_name = "(generic E500 PPC)",
+ .cpu_features = CPU_FTRS_E500,
+ .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_SPE_COMP |
+ PPC_FEATURE_HAS_EFP_SINGLE_COMP,
+ .mmu_features = MMU_FTR_TYPE_FSL_E,
+ .icache_bsize = 32,
+ .dcache_bsize = 32,
+ .machine_check = machine_check_e500,
+ .platform = "powerpc",
+ }
+};
diff --git a/arch/powerpc/kernel/cpu_specs_8xx.h b/arch/powerpc/kernel/cpu_specs_8xx.h
new file mode 100644
index 000000000000..93ddbc202ba3
--- /dev/null
+++ b/arch/powerpc/kernel/cpu_specs_8xx.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2001 Ben. Herrenschmidt (benh@kernel.crashing.org)
+ */
+
+static struct cpu_spec cpu_specs[] __initdata = {
+ { /* 8xx */
+ .pvr_mask = 0xffff0000,
+ .pvr_value = PVR_8xx,
+ .cpu_name = "8xx",
+ /*
+ * CPU_FTR_MAYBE_CAN_DOZE is possible,
+ * if the 8xx code is there....
+ */
+ .cpu_features = CPU_FTRS_8XX,
+ .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
+ .mmu_features = MMU_FTR_TYPE_8xx,
+ .icache_bsize = 16,
+ .dcache_bsize = 16,
+ .machine_check = machine_check_8xx,
+ .platform = "ppc823",
+ },
+};
diff --git a/arch/powerpc/kernel/cpu_specs_book3s_32.h b/arch/powerpc/kernel/cpu_specs_book3s_32.h
new file mode 100644
index 000000000000..3714634d194a
--- /dev/null
+++ b/arch/powerpc/kernel/cpu_specs_book3s_32.h
@@ -0,0 +1,605 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2001 Ben. Herrenschmidt (benh@kernel.crashing.org)
+ */
+
+#define COMMON_USER (PPC_FEATURE_32 | PPC_FEATURE_HAS_FPU | \
+ PPC_FEATURE_HAS_MMU)
+
+static struct cpu_spec cpu_specs[] __initdata = {
+#ifdef CONFIG_PPC_BOOK3S_603
+ { /* 603 */
+ .pvr_mask = 0xffff0000,
+ .pvr_value = 0x00030000,
+ .cpu_name = "603",
+ .cpu_features = CPU_FTRS_603,
+ .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
+ .mmu_features = 0,
+ .icache_bsize = 32,
+ .dcache_bsize = 32,
+ .cpu_setup = __setup_cpu_603,
+ .machine_check = machine_check_generic,
+ .platform = "ppc603",
+ },
+ { /* 603e */
+ .pvr_mask = 0xffff0000,
+ .pvr_value = 0x00060000,
+ .cpu_name = "603e",
+ .cpu_features = CPU_FTRS_603,
+ .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
+ .mmu_features = 0,
+ .icache_bsize = 32,
+ .dcache_bsize = 32,
+ .cpu_setup = __setup_cpu_603,
+ .machine_check = machine_check_generic,
+ .platform = "ppc603",
+ },
+ { /* 603ev */
+ .pvr_mask = 0xffff0000,
+ .pvr_value = 0x00070000,
+ .cpu_name = "603ev",
+ .cpu_features = CPU_FTRS_603,
+ .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
+ .mmu_features = 0,
+ .icache_bsize = 32,
+ .dcache_bsize = 32,
+ .cpu_setup = __setup_cpu_603,
+ .machine_check = machine_check_generic,
+ .platform = "ppc603",
+ },
+ { /* 82xx (8240, 8245, 8260 are all 603e cores) */
+ .pvr_mask = 0x7fff0000,
+ .pvr_value = 0x00810000,
+ .cpu_name = "82xx",
+ .cpu_features = CPU_FTRS_82XX,
+ .cpu_user_features = COMMON_USER,
+ .mmu_features = 0,
+ .icache_bsize = 32,
+ .dcache_bsize = 32,
+ .cpu_setup = __setup_cpu_603,
+ .machine_check = machine_check_generic,
+ .platform = "ppc603",
+ },
+ { /* All G2_LE (603e core, plus some) have the same pvr */
+ .pvr_mask = 0x7fff0000,
+ .pvr_value = 0x00820000,
+ .cpu_name = "G2_LE",
+ .cpu_features = CPU_FTRS_G2_LE,
+ .cpu_user_features = COMMON_USER,
+ .mmu_features = MMU_FTR_USE_HIGH_BATS,
+ .icache_bsize = 32,
+ .dcache_bsize = 32,
+ .cpu_setup = __setup_cpu_603,
+ .machine_check = machine_check_generic,
+ .platform = "ppc603",
+ },
+#ifdef CONFIG_PPC_83xx
+ { /* e300c1 (a 603e core, plus some) on 83xx */
+ .pvr_mask = 0x7fff0000,
+ .pvr_value = 0x00830000,
+ .cpu_name = "e300c1",
+ .cpu_features = CPU_FTRS_E300,
+ .cpu_user_features = COMMON_USER,
+ .mmu_features = MMU_FTR_USE_HIGH_BATS,
+ .icache_bsize = 32,
+ .dcache_bsize = 32,
+ .cpu_setup = __setup_cpu_603,
+ .machine_check = machine_check_83xx,
+ .platform = "ppc603",
+ },
+ { /* e300c2 (an e300c1 core, plus some, minus FPU) on 83xx */
+ .pvr_mask = 0x7fff0000,
+ .pvr_value = 0x00840000,
+ .cpu_name = "e300c2",
+ .cpu_features = CPU_FTRS_E300C2,
+ .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
+ .mmu_features = MMU_FTR_USE_HIGH_BATS | MMU_FTR_NEED_DTLB_SW_LRU,
+ .icache_bsize = 32,
+ .dcache_bsize = 32,
+ .cpu_setup = __setup_cpu_603,
+ .machine_check = machine_check_83xx,
+ .platform = "ppc603",
+ },
+ { /* e300c3 (e300c1, plus one IU, half cache size) on 83xx */
+ .pvr_mask = 0x7fff0000,
+ .pvr_value = 0x00850000,
+ .cpu_name = "e300c3",
+ .cpu_features = CPU_FTRS_E300,
+ .cpu_user_features = COMMON_USER,
+ .mmu_features = MMU_FTR_USE_HIGH_BATS | MMU_FTR_NEED_DTLB_SW_LRU,
+ .icache_bsize = 32,
+ .dcache_bsize = 32,
+ .cpu_setup = __setup_cpu_603,
+ .machine_check = machine_check_83xx,
+ .num_pmcs = 4,
+ .platform = "ppc603",
+ },
+ { /* e300c4 (e300c1, plus one IU) */
+ .pvr_mask = 0x7fff0000,
+ .pvr_value = 0x00860000,
+ .cpu_name = "e300c4",
+ .cpu_features = CPU_FTRS_E300,
+ .cpu_user_features = COMMON_USER,
+ .mmu_features = MMU_FTR_USE_HIGH_BATS | MMU_FTR_NEED_DTLB_SW_LRU,
+ .icache_bsize = 32,
+ .dcache_bsize = 32,
+ .cpu_setup = __setup_cpu_603,
+ .machine_check = machine_check_83xx,
+ .num_pmcs = 4,
+ .platform = "ppc603",
+ },
+#endif
+#endif /* CONFIG_PPC_BOOK3S_603 */
+#ifdef CONFIG_PPC_BOOK3S_604
+ { /* 604 */
+ .pvr_mask = 0xffff0000,
+ .pvr_value = 0x00040000,
+ .cpu_name = "604",
+ .cpu_features = CPU_FTRS_604,
+ .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
+ .mmu_features = MMU_FTR_HPTE_TABLE,
+ .icache_bsize = 32,
+ .dcache_bsize = 32,
+ .num_pmcs = 2,
+ .cpu_setup = __setup_cpu_604,
+ .machine_check = machine_check_generic,
+ .platform = "ppc604",
+ },
+ { /* 604e */
+ .pvr_mask = 0xfffff000,
+ .pvr_value = 0x00090000,
+ .cpu_name = "604e",
+ .cpu_features = CPU_FTRS_604,
+ .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
+ .mmu_features = MMU_FTR_HPTE_TABLE,
+ .icache_bsize = 32,
+ .dcache_bsize = 32,
+ .num_pmcs = 4,
+ .cpu_setup = __setup_cpu_604,
+ .machine_check = machine_check_generic,
+ .platform = "ppc604",
+ },
+ { /* 604r */
+ .pvr_mask = 0xffff0000,
+ .pvr_value = 0x00090000,
+ .cpu_name = "604r",
+ .cpu_features = CPU_FTRS_604,
+ .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
+ .mmu_features = MMU_FTR_HPTE_TABLE,
+ .icache_bsize = 32,
+ .dcache_bsize = 32,
+ .num_pmcs = 4,
+ .cpu_setup = __setup_cpu_604,
+ .machine_check = machine_check_generic,
+ .platform = "ppc604",
+ },
+ { /* 604ev */
+ .pvr_mask = 0xffff0000,
+ .pvr_value = 0x000a0000,
+ .cpu_name = "604ev",
+ .cpu_features = CPU_FTRS_604,
+ .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
+ .mmu_features = MMU_FTR_HPTE_TABLE,
+ .icache_bsize = 32,
+ .dcache_bsize = 32,
+ .num_pmcs = 4,
+ .cpu_setup = __setup_cpu_604,
+ .machine_check = machine_check_generic,
+ .platform = "ppc604",
+ },
+ { /* 740/750 (0x4202, don't support TAU ?) */
+ .pvr_mask = 0xffffffff,
+ .pvr_value = 0x00084202,
+ .cpu_name = "740/750",
+ .cpu_features = CPU_FTRS_740_NOTAU,
+ .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
+ .mmu_features = MMU_FTR_HPTE_TABLE,
+ .icache_bsize = 32,
+ .dcache_bsize = 32,
+ .num_pmcs = 4,
+ .cpu_setup = __setup_cpu_750,
+ .machine_check = machine_check_generic,
+ .platform = "ppc750",
+ },
+ { /* 750CX (80100 and 8010x?) */
+ .pvr_mask = 0xfffffff0,
+ .pvr_value = 0x00080100,
+ .cpu_name = "750CX",
+ .cpu_features = CPU_FTRS_750,
+ .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
+ .mmu_features = MMU_FTR_HPTE_TABLE,
+ .icache_bsize = 32,
+ .dcache_bsize = 32,
+ .num_pmcs = 4,
+ .cpu_setup = __setup_cpu_750cx,
+ .machine_check = machine_check_generic,
+ .platform = "ppc750",
+ },
+ { /* 750CX (82201 and 82202) */
+ .pvr_mask = 0xfffffff0,
+ .pvr_value = 0x00082200,
+ .cpu_name = "750CX",
+ .cpu_features = CPU_FTRS_750,
+ .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
+ .mmu_features = MMU_FTR_HPTE_TABLE,
+ .icache_bsize = 32,
+ .dcache_bsize = 32,
+ .num_pmcs = 4,
+ .pmc_type = PPC_PMC_IBM,
+ .cpu_setup = __setup_cpu_750cx,
+ .machine_check = machine_check_generic,
+ .platform = "ppc750",
+ },
+ { /* 750CXe (82214) */
+ .pvr_mask = 0xfffffff0,
+ .pvr_value = 0x00082210,
+ .cpu_name = "750CXe",
+ .cpu_features = CPU_FTRS_750,
+ .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
+ .mmu_features = MMU_FTR_HPTE_TABLE,
+ .icache_bsize = 32,
+ .dcache_bsize = 32,
+ .num_pmcs = 4,
+ .pmc_type = PPC_PMC_IBM,
+ .cpu_setup = __setup_cpu_750cx,
+ .machine_check = machine_check_generic,
+ .platform = "ppc750",
+ },
+ { /* 750CXe "Gekko" (83214) */
+ .pvr_mask = 0xffffffff,
+ .pvr_value = 0x00083214,
+ .cpu_name = "750CXe",
+ .cpu_features = CPU_FTRS_750,
+ .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
+ .mmu_features = MMU_FTR_HPTE_TABLE,
+ .icache_bsize = 32,
+ .dcache_bsize = 32,
+ .num_pmcs = 4,
+ .pmc_type = PPC_PMC_IBM,
+ .cpu_setup = __setup_cpu_750cx,
+ .machine_check = machine_check_generic,
+ .platform = "ppc750",
+ },
+ { /* 750CL (and "Broadway") */
+ .pvr_mask = 0xfffff0e0,
+ .pvr_value = 0x00087000,
+ .cpu_name = "750CL",
+ .cpu_features = CPU_FTRS_750CL,
+ .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
+ .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS,
+ .icache_bsize = 32,
+ .dcache_bsize = 32,
+ .num_pmcs = 4,
+ .pmc_type = PPC_PMC_IBM,
+ .cpu_setup = __setup_cpu_750,
+ .machine_check = machine_check_generic,
+ .platform = "ppc750",
+ },
+ { /* 745/755 */
+ .pvr_mask = 0xfffff000,
+ .pvr_value = 0x00083000,
+ .cpu_name = "745/755",
+ .cpu_features = CPU_FTRS_750,
+ .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
+ .mmu_features = MMU_FTR_HPTE_TABLE,
+ .icache_bsize = 32,
+ .dcache_bsize = 32,
+ .num_pmcs = 4,
+ .pmc_type = PPC_PMC_IBM,
+ .cpu_setup = __setup_cpu_750,
+ .machine_check = machine_check_generic,
+ .platform = "ppc750",
+ },
+ { /* 750FX rev 1.x */
+ .pvr_mask = 0xffffff00,
+ .pvr_value = 0x70000100,
+ .cpu_name = "750FX",
+ .cpu_features = CPU_FTRS_750FX1,
+ .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
+ .mmu_features = MMU_FTR_HPTE_TABLE,
+ .icache_bsize = 32,
+ .dcache_bsize = 32,
+ .num_pmcs = 4,
+ .pmc_type = PPC_PMC_IBM,
+ .cpu_setup = __setup_cpu_750,
+ .machine_check = machine_check_generic,
+ .platform = "ppc750",
+ },
+ { /* 750FX rev 2.0 must disable HID0[DPM] */
+ .pvr_mask = 0xffffffff,
+ .pvr_value = 0x70000200,
+ .cpu_name = "750FX",
+ .cpu_features = CPU_FTRS_750FX2,
+ .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
+ .mmu_features = MMU_FTR_HPTE_TABLE,
+ .icache_bsize = 32,
+ .dcache_bsize = 32,
+ .num_pmcs = 4,
+ .pmc_type = PPC_PMC_IBM,
+ .cpu_setup = __setup_cpu_750,
+ .machine_check = machine_check_generic,
+ .platform = "ppc750",
+ },
+ { /* 750FX (All revs except 2.0) */
+ .pvr_mask = 0xffff0000,
+ .pvr_value = 0x70000000,
+ .cpu_name = "750FX",
+ .cpu_features = CPU_FTRS_750FX,
+ .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
+ .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS,
+ .icache_bsize = 32,
+ .dcache_bsize = 32,
+ .num_pmcs = 4,
+ .pmc_type = PPC_PMC_IBM,
+ .cpu_setup = __setup_cpu_750fx,
+ .machine_check = machine_check_generic,
+ .platform = "ppc750",
+ },
+ { /* 750GX */
+ .pvr_mask = 0xffff0000,
+ .pvr_value = 0x70020000,
+ .cpu_name = "750GX",
+ .cpu_features = CPU_FTRS_750GX,
+ .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
+ .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS,
+ .icache_bsize = 32,
+ .dcache_bsize = 32,
+ .num_pmcs = 4,
+ .pmc_type = PPC_PMC_IBM,
+ .cpu_setup = __setup_cpu_750fx,
+ .machine_check = machine_check_generic,
+ .platform = "ppc750",
+ },
+ { /* 740/750 (L2CR bit need fixup for 740) */
+ .pvr_mask = 0xffff0000,
+ .pvr_value = 0x00080000,
+ .cpu_name = "740/750",
+ .cpu_features = CPU_FTRS_740,
+ .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
+ .mmu_features = MMU_FTR_HPTE_TABLE,
+ .icache_bsize = 32,
+ .dcache_bsize = 32,
+ .num_pmcs = 4,
+ .pmc_type = PPC_PMC_IBM,
+ .cpu_setup = __setup_cpu_750,
+ .machine_check = machine_check_generic,
+ .platform = "ppc750",
+ },
+ { /* 7400 rev 1.1 ? (no TAU) */
+ .pvr_mask = 0xffffffff,
+ .pvr_value = 0x000c1101,
+ .cpu_name = "7400 (1.1)",
+ .cpu_features = CPU_FTRS_7400_NOTAU,
+ .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP |
+ PPC_FEATURE_PPC_LE,
+ .mmu_features = MMU_FTR_HPTE_TABLE,
+ .icache_bsize = 32,
+ .dcache_bsize = 32,
+ .num_pmcs = 4,
+ .pmc_type = PPC_PMC_G4,
+ .cpu_setup = __setup_cpu_7400,
+ .machine_check = machine_check_generic,
+ .platform = "ppc7400",
+ },
+ { /* 7400 */
+ .pvr_mask = 0xffff0000,
+ .pvr_value = 0x000c0000,
+ .cpu_name = "7400",
+ .cpu_features = CPU_FTRS_7400,
+ .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP |
+ PPC_FEATURE_PPC_LE,
+ .mmu_features = MMU_FTR_HPTE_TABLE,
+ .icache_bsize = 32,
+ .dcache_bsize = 32,
+ .num_pmcs = 4,
+ .pmc_type = PPC_PMC_G4,
+ .cpu_setup = __setup_cpu_7400,
+ .machine_check = machine_check_generic,
+ .platform = "ppc7400",
+ },
+ { /* 7410 */
+ .pvr_mask = 0xffff0000,
+ .pvr_value = 0x800c0000,
+ .cpu_name = "7410",
+ .cpu_features = CPU_FTRS_7400,
+ .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP |
+ PPC_FEATURE_PPC_LE,
+ .mmu_features = MMU_FTR_HPTE_TABLE,
+ .icache_bsize = 32,
+ .dcache_bsize = 32,
+ .num_pmcs = 4,
+ .pmc_type = PPC_PMC_G4,
+ .cpu_setup = __setup_cpu_7410,
+ .machine_check = machine_check_generic,
+ .platform = "ppc7400",
+ },
+ { /* 7450 2.0 - no doze/nap */
+ .pvr_mask = 0xffffffff,
+ .pvr_value = 0x80000200,
+ .cpu_name = "7450",
+ .cpu_features = CPU_FTRS_7450_20,
+ .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP |
+ PPC_FEATURE_PPC_LE,
+ .mmu_features = MMU_FTR_HPTE_TABLE,
+ .icache_bsize = 32,
+ .dcache_bsize = 32,
+ .num_pmcs = 6,
+ .pmc_type = PPC_PMC_G4,
+ .cpu_setup = __setup_cpu_745x,
+ .machine_check = machine_check_generic,
+ .platform = "ppc7450",
+ },
+ { /* 7450 2.1 */
+ .pvr_mask = 0xffffffff,
+ .pvr_value = 0x80000201,
+ .cpu_name = "7450",
+ .cpu_features = CPU_FTRS_7450_21,
+ .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP |
+ PPC_FEATURE_PPC_LE,
+ .mmu_features = MMU_FTR_HPTE_TABLE,
+ .icache_bsize = 32,
+ .dcache_bsize = 32,
+ .num_pmcs = 6,
+ .pmc_type = PPC_PMC_G4,
+ .cpu_setup = __setup_cpu_745x,
+ .machine_check = machine_check_generic,
+ .platform = "ppc7450",
+ },
+ { /* 7450 2.3 and newer */
+ .pvr_mask = 0xffff0000,
+ .pvr_value = 0x80000000,
+ .cpu_name = "7450",
+ .cpu_features = CPU_FTRS_7450_23,
+ .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP |
+ PPC_FEATURE_PPC_LE,
+ .mmu_features = MMU_FTR_HPTE_TABLE,
+ .icache_bsize = 32,
+ .dcache_bsize = 32,
+ .num_pmcs = 6,
+ .pmc_type = PPC_PMC_G4,
+ .cpu_setup = __setup_cpu_745x,
+ .machine_check = machine_check_generic,
+ .platform = "ppc7450",
+ },
+ { /* 7455 rev 1.x */
+ .pvr_mask = 0xffffff00,
+ .pvr_value = 0x80010100,
+ .cpu_name = "7455",
+ .cpu_features = CPU_FTRS_7455_1,
+ .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP |
+ PPC_FEATURE_PPC_LE,
+ .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS,
+ .icache_bsize = 32,
+ .dcache_bsize = 32,
+ .num_pmcs = 6,
+ .pmc_type = PPC_PMC_G4,
+ .cpu_setup = __setup_cpu_745x,
+ .machine_check = machine_check_generic,
+ .platform = "ppc7450",
+ },
+ { /* 7455 rev 2.0 */
+ .pvr_mask = 0xffffffff,
+ .pvr_value = 0x80010200,
+ .cpu_name = "7455",
+ .cpu_features = CPU_FTRS_7455_20,
+ .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP |
+ PPC_FEATURE_PPC_LE,
+ .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS,
+ .icache_bsize = 32,
+ .dcache_bsize = 32,
+ .num_pmcs = 6,
+ .pmc_type = PPC_PMC_G4,
+ .cpu_setup = __setup_cpu_745x,
+ .machine_check = machine_check_generic,
+ .platform = "ppc7450",
+ },
+ { /* 7455 others */
+ .pvr_mask = 0xffff0000,
+ .pvr_value = 0x80010000,
+ .cpu_name = "7455",
+ .cpu_features = CPU_FTRS_7455,
+ .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP |
+ PPC_FEATURE_PPC_LE,
+ .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS,
+ .icache_bsize = 32,
+ .dcache_bsize = 32,
+ .num_pmcs = 6,
+ .pmc_type = PPC_PMC_G4,
+ .cpu_setup = __setup_cpu_745x,
+ .machine_check = machine_check_generic,
+ .platform = "ppc7450",
+ },
+ { /* 7447/7457 Rev 1.0 */
+ .pvr_mask = 0xffffffff,
+ .pvr_value = 0x80020100,
+ .cpu_name = "7447/7457",
+ .cpu_features = CPU_FTRS_7447_10,
+ .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP |
+ PPC_FEATURE_PPC_LE,
+ .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS,
+ .icache_bsize = 32,
+ .dcache_bsize = 32,
+ .num_pmcs = 6,
+ .pmc_type = PPC_PMC_G4,
+ .cpu_setup = __setup_cpu_745x,
+ .machine_check = machine_check_generic,
+ .platform = "ppc7450",
+ },
+ { /* 7447/7457 Rev 1.1 */
+ .pvr_mask = 0xffffffff,
+ .pvr_value = 0x80020101,
+ .cpu_name = "7447/7457",
+ .cpu_features = CPU_FTRS_7447_10,
+ .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP |
+ PPC_FEATURE_PPC_LE,
+ .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS,
+ .icache_bsize = 32,
+ .dcache_bsize = 32,
+ .num_pmcs = 6,
+ .pmc_type = PPC_PMC_G4,
+ .cpu_setup = __setup_cpu_745x,
+ .machine_check = machine_check_generic,
+ .platform = "ppc7450",
+ },
+ { /* 7447/7457 Rev 1.2 and later */
+ .pvr_mask = 0xffff0000,
+ .pvr_value = 0x80020000,
+ .cpu_name = "7447/7457",
+ .cpu_features = CPU_FTRS_7447,
+ .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP |
+ PPC_FEATURE_PPC_LE,
+ .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS,
+ .icache_bsize = 32,
+ .dcache_bsize = 32,
+ .num_pmcs = 6,
+ .pmc_type = PPC_PMC_G4,
+ .cpu_setup = __setup_cpu_745x,
+ .machine_check = machine_check_generic,
+ .platform = "ppc7450",
+ },
+ { /* 7447A */
+ .pvr_mask = 0xffff0000,
+ .pvr_value = 0x80030000,
+ .cpu_name = "7447A",
+ .cpu_features = CPU_FTRS_7447A,
+ .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP |
+ PPC_FEATURE_PPC_LE,
+ .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS,
+ .icache_bsize = 32,
+ .dcache_bsize = 32,
+ .num_pmcs = 6,
+ .pmc_type = PPC_PMC_G4,
+ .cpu_setup = __setup_cpu_745x,
+ .machine_check = machine_check_generic,
+ .platform = "ppc7450",
+ },
+ { /* 7448 */
+ .pvr_mask = 0xffff0000,
+ .pvr_value = 0x80040000,
+ .cpu_name = "7448",
+ .cpu_features = CPU_FTRS_7448,
+ .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP |
+ PPC_FEATURE_PPC_LE,
+ .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS,
+ .icache_bsize = 32,
+ .dcache_bsize = 32,
+ .num_pmcs = 6,
+ .pmc_type = PPC_PMC_G4,
+ .cpu_setup = __setup_cpu_745x,
+ .machine_check = machine_check_generic,
+ .platform = "ppc7450",
+ },
+ { /* default match, we assume split I/D cache & TB (non-601)... */
+ .pvr_mask = 0x00000000,
+ .pvr_value = 0x00000000,
+ .cpu_name = "(generic PPC)",
+ .cpu_features = CPU_FTRS_CLASSIC32,
+ .cpu_user_features = COMMON_USER,
+ .mmu_features = MMU_FTR_HPTE_TABLE,
+ .icache_bsize = 32,
+ .dcache_bsize = 32,
+ .machine_check = machine_check_generic,
+ .platform = "ppc603",
+ },
+#endif /* CONFIG_PPC_BOOK3S_604 */
+};
diff --git a/arch/powerpc/kernel/cpu_specs_book3s_64.h b/arch/powerpc/kernel/cpu_specs_book3s_64.h
new file mode 100644
index 000000000000..c370c1b804a9
--- /dev/null
+++ b/arch/powerpc/kernel/cpu_specs_book3s_64.h
@@ -0,0 +1,481 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2001 Ben. Herrenschmidt (benh@kernel.crashing.org)
+ *
+ * Modifications for ppc64:
+ * Copyright (C) 2003 Dave Engebretsen <engebret@us.ibm.com>
+ */
+
+/* NOTE:
+ * Unlike ppc32, ppc64 will only call cpu_setup() for the boot CPU, it's
+ * the responsibility of the appropriate CPU save/restore functions to
+ * eventually copy these settings over. Those save/restore aren't yet
+ * part of the cputable though. That has to be fixed for both ppc32
+ * and ppc64
+ */
+#define COMMON_USER_PPC64 (PPC_FEATURE_32 | PPC_FEATURE_HAS_FPU | \
+ PPC_FEATURE_HAS_MMU | PPC_FEATURE_64)
+#define COMMON_USER_POWER4 (COMMON_USER_PPC64 | PPC_FEATURE_POWER4)
+#define COMMON_USER_POWER5 (COMMON_USER_PPC64 | PPC_FEATURE_POWER5 |\
+ PPC_FEATURE_SMT | PPC_FEATURE_ICACHE_SNOOP)
+#define COMMON_USER_POWER5_PLUS (COMMON_USER_PPC64 | PPC_FEATURE_POWER5_PLUS|\
+ PPC_FEATURE_SMT | PPC_FEATURE_ICACHE_SNOOP)
+#define COMMON_USER_POWER6 (COMMON_USER_PPC64 | PPC_FEATURE_ARCH_2_05 |\
+ PPC_FEATURE_SMT | PPC_FEATURE_ICACHE_SNOOP | \
+ PPC_FEATURE_TRUE_LE | \
+ PPC_FEATURE_PSERIES_PERFMON_COMPAT)
+#define COMMON_USER_POWER7 (COMMON_USER_PPC64 | PPC_FEATURE_ARCH_2_06 |\
+ PPC_FEATURE_SMT | PPC_FEATURE_ICACHE_SNOOP | \
+ PPC_FEATURE_TRUE_LE | \
+ PPC_FEATURE_PSERIES_PERFMON_COMPAT)
+#define COMMON_USER2_POWER7 (PPC_FEATURE2_DSCR)
+#define COMMON_USER_POWER8 (COMMON_USER_PPC64 | PPC_FEATURE_ARCH_2_06 |\
+ PPC_FEATURE_SMT | PPC_FEATURE_ICACHE_SNOOP | \
+ PPC_FEATURE_TRUE_LE | \
+ PPC_FEATURE_PSERIES_PERFMON_COMPAT)
+#define COMMON_USER2_POWER8 (PPC_FEATURE2_ARCH_2_07 | \
+ PPC_FEATURE2_HTM_COMP | \
+ PPC_FEATURE2_HTM_NOSC_COMP | \
+ PPC_FEATURE2_DSCR | \
+ PPC_FEATURE2_ISEL | PPC_FEATURE2_TAR | \
+ PPC_FEATURE2_VEC_CRYPTO)
+#define COMMON_USER_PA6T (COMMON_USER_PPC64 | PPC_FEATURE_PA6T |\
+ PPC_FEATURE_TRUE_LE | \
+ PPC_FEATURE_HAS_ALTIVEC_COMP)
+#define COMMON_USER_POWER9 COMMON_USER_POWER8
+#define COMMON_USER2_POWER9 (COMMON_USER2_POWER8 | \
+ PPC_FEATURE2_ARCH_3_00 | \
+ PPC_FEATURE2_HAS_IEEE128 | \
+ PPC_FEATURE2_DARN | \
+ PPC_FEATURE2_SCV)
+#define COMMON_USER_POWER10 COMMON_USER_POWER9
+#define COMMON_USER2_POWER10 (PPC_FEATURE2_ARCH_3_1 | \
+ PPC_FEATURE2_MMA | \
+ PPC_FEATURE2_ARCH_3_00 | \
+ PPC_FEATURE2_HAS_IEEE128 | \
+ PPC_FEATURE2_DARN | \
+ PPC_FEATURE2_SCV | \
+ PPC_FEATURE2_ARCH_2_07 | \
+ PPC_FEATURE2_DSCR | \
+ PPC_FEATURE2_ISEL | PPC_FEATURE2_TAR | \
+ PPC_FEATURE2_VEC_CRYPTO)
+
+static struct cpu_spec cpu_specs[] __initdata = {
+ { /* PPC970 */
+ .pvr_mask = 0xffff0000,
+ .pvr_value = 0x00390000,
+ .cpu_name = "PPC970",
+ .cpu_features = CPU_FTRS_PPC970,
+ .cpu_user_features = COMMON_USER_POWER4 | PPC_FEATURE_HAS_ALTIVEC_COMP,
+ .mmu_features = MMU_FTRS_PPC970,
+ .icache_bsize = 128,
+ .dcache_bsize = 128,
+ .num_pmcs = 8,
+ .pmc_type = PPC_PMC_IBM,
+ .cpu_setup = __setup_cpu_ppc970,
+ .cpu_restore = __restore_cpu_ppc970,
+ .platform = "ppc970",
+ },
+ { /* PPC970FX */
+ .pvr_mask = 0xffff0000,
+ .pvr_value = 0x003c0000,
+ .cpu_name = "PPC970FX",
+ .cpu_features = CPU_FTRS_PPC970,
+ .cpu_user_features = COMMON_USER_POWER4 | PPC_FEATURE_HAS_ALTIVEC_COMP,
+ .mmu_features = MMU_FTRS_PPC970,
+ .icache_bsize = 128,
+ .dcache_bsize = 128,
+ .num_pmcs = 8,
+ .pmc_type = PPC_PMC_IBM,
+ .cpu_setup = __setup_cpu_ppc970,
+ .cpu_restore = __restore_cpu_ppc970,
+ .platform = "ppc970",
+ },
+ { /* PPC970MP DD1.0 - no DEEPNAP, use regular 970 init */
+ .pvr_mask = 0xffffffff,
+ .pvr_value = 0x00440100,
+ .cpu_name = "PPC970MP",
+ .cpu_features = CPU_FTRS_PPC970,
+ .cpu_user_features = COMMON_USER_POWER4 | PPC_FEATURE_HAS_ALTIVEC_COMP,
+ .mmu_features = MMU_FTRS_PPC970,
+ .icache_bsize = 128,
+ .dcache_bsize = 128,
+ .num_pmcs = 8,
+ .pmc_type = PPC_PMC_IBM,
+ .cpu_setup = __setup_cpu_ppc970,
+ .cpu_restore = __restore_cpu_ppc970,
+ .platform = "ppc970",
+ },
+ { /* PPC970MP */
+ .pvr_mask = 0xffff0000,
+ .pvr_value = 0x00440000,
+ .cpu_name = "PPC970MP",
+ .cpu_features = CPU_FTRS_PPC970,
+ .cpu_user_features = COMMON_USER_POWER4 | PPC_FEATURE_HAS_ALTIVEC_COMP,
+ .mmu_features = MMU_FTRS_PPC970,
+ .icache_bsize = 128,
+ .dcache_bsize = 128,
+ .num_pmcs = 8,
+ .pmc_type = PPC_PMC_IBM,
+ .cpu_setup = __setup_cpu_ppc970MP,
+ .cpu_restore = __restore_cpu_ppc970,
+ .platform = "ppc970",
+ },
+ { /* PPC970GX */
+ .pvr_mask = 0xffff0000,
+ .pvr_value = 0x00450000,
+ .cpu_name = "PPC970GX",
+ .cpu_features = CPU_FTRS_PPC970,
+ .cpu_user_features = COMMON_USER_POWER4 | PPC_FEATURE_HAS_ALTIVEC_COMP,
+ .mmu_features = MMU_FTRS_PPC970,
+ .icache_bsize = 128,
+ .dcache_bsize = 128,
+ .num_pmcs = 8,
+ .pmc_type = PPC_PMC_IBM,
+ .cpu_setup = __setup_cpu_ppc970,
+ .platform = "ppc970",
+ },
+ { /* Power5 GR */
+ .pvr_mask = 0xffff0000,
+ .pvr_value = 0x003a0000,
+ .cpu_name = "POWER5 (gr)",
+ .cpu_features = CPU_FTRS_POWER5,
+ .cpu_user_features = COMMON_USER_POWER5,
+ .mmu_features = MMU_FTRS_POWER5,
+ .icache_bsize = 128,
+ .dcache_bsize = 128,
+ .num_pmcs = 6,
+ .pmc_type = PPC_PMC_IBM,
+ .platform = "power5",
+ },
+ { /* Power5++ */
+ .pvr_mask = 0xffffff00,
+ .pvr_value = 0x003b0300,
+ .cpu_name = "POWER5+ (gs)",
+ .cpu_features = CPU_FTRS_POWER5,
+ .cpu_user_features = COMMON_USER_POWER5_PLUS,
+ .mmu_features = MMU_FTRS_POWER5,
+ .icache_bsize = 128,
+ .dcache_bsize = 128,
+ .num_pmcs = 6,
+ .platform = "power5+",
+ },
+ { /* Power5 GS */
+ .pvr_mask = 0xffff0000,
+ .pvr_value = 0x003b0000,
+ .cpu_name = "POWER5+ (gs)",
+ .cpu_features = CPU_FTRS_POWER5,
+ .cpu_user_features = COMMON_USER_POWER5_PLUS,
+ .mmu_features = MMU_FTRS_POWER5,
+ .icache_bsize = 128,
+ .dcache_bsize = 128,
+ .num_pmcs = 6,
+ .pmc_type = PPC_PMC_IBM,
+ .platform = "power5+",
+ },
+ { /* POWER6 in P5+ mode; 2.04-compliant processor */
+ .pvr_mask = 0xffffffff,
+ .pvr_value = 0x0f000001,
+ .cpu_name = "POWER5+",
+ .cpu_features = CPU_FTRS_POWER5,
+ .cpu_user_features = COMMON_USER_POWER5_PLUS,
+ .mmu_features = MMU_FTRS_POWER5,
+ .icache_bsize = 128,
+ .dcache_bsize = 128,
+ .platform = "power5+",
+ },
+ { /* Power6 */
+ .pvr_mask = 0xffff0000,
+ .pvr_value = 0x003e0000,
+ .cpu_name = "POWER6 (raw)",
+ .cpu_features = CPU_FTRS_POWER6,
+ .cpu_user_features = COMMON_USER_POWER6 | PPC_FEATURE_POWER6_EXT,
+ .mmu_features = MMU_FTRS_POWER6,
+ .icache_bsize = 128,
+ .dcache_bsize = 128,
+ .num_pmcs = 6,
+ .pmc_type = PPC_PMC_IBM,
+ .platform = "power6x",
+ },
+ { /* 2.05-compliant processor, i.e. Power6 "architected" mode */
+ .pvr_mask = 0xffffffff,
+ .pvr_value = 0x0f000002,
+ .cpu_name = "POWER6 (architected)",
+ .cpu_features = CPU_FTRS_POWER6,
+ .cpu_user_features = COMMON_USER_POWER6,
+ .mmu_features = MMU_FTRS_POWER6,
+ .icache_bsize = 128,
+ .dcache_bsize = 128,
+ .platform = "power6",
+ },
+ { /* 2.06-compliant processor, i.e. Power7 "architected" mode */
+ .pvr_mask = 0xffffffff,
+ .pvr_value = 0x0f000003,
+ .cpu_name = "POWER7 (architected)",
+ .cpu_features = CPU_FTRS_POWER7,
+ .cpu_user_features = COMMON_USER_POWER7,
+ .cpu_user_features2 = COMMON_USER2_POWER7,
+ .mmu_features = MMU_FTRS_POWER7,
+ .icache_bsize = 128,
+ .dcache_bsize = 128,
+ .cpu_setup = __setup_cpu_power7,
+ .cpu_restore = __restore_cpu_power7,
+ .machine_check_early = __machine_check_early_realmode_p7,
+ .platform = "power7",
+ },
+ { /* 2.07-compliant processor, i.e. Power8 "architected" mode */
+ .pvr_mask = 0xffffffff,
+ .pvr_value = 0x0f000004,
+ .cpu_name = "POWER8 (architected)",
+ .cpu_features = CPU_FTRS_POWER8,
+ .cpu_user_features = COMMON_USER_POWER8,
+ .cpu_user_features2 = COMMON_USER2_POWER8,
+ .mmu_features = MMU_FTRS_POWER8,
+ .icache_bsize = 128,
+ .dcache_bsize = 128,
+ .cpu_setup = __setup_cpu_power8,
+ .cpu_restore = __restore_cpu_power8,
+ .machine_check_early = __machine_check_early_realmode_p8,
+ .platform = "power8",
+ },
+ { /* 3.00-compliant processor, i.e. Power9 "architected" mode */
+ .pvr_mask = 0xffffffff,
+ .pvr_value = 0x0f000005,
+ .cpu_name = "POWER9 (architected)",
+ .cpu_features = CPU_FTRS_POWER9,
+ .cpu_user_features = COMMON_USER_POWER9,
+ .cpu_user_features2 = COMMON_USER2_POWER9,
+ .mmu_features = MMU_FTRS_POWER9,
+ .icache_bsize = 128,
+ .dcache_bsize = 128,
+ .cpu_setup = __setup_cpu_power9,
+ .cpu_restore = __restore_cpu_power9,
+ .platform = "power9",
+ },
+ { /* 3.1-compliant processor, i.e. Power10 "architected" mode */
+ .pvr_mask = 0xffffffff,
+ .pvr_value = 0x0f000006,
+ .cpu_name = "POWER10 (architected)",
+ .cpu_features = CPU_FTRS_POWER10,
+ .cpu_user_features = COMMON_USER_POWER10,
+ .cpu_user_features2 = COMMON_USER2_POWER10,
+ .mmu_features = MMU_FTRS_POWER10,
+ .icache_bsize = 128,
+ .dcache_bsize = 128,
+ .cpu_setup = __setup_cpu_power10,
+ .cpu_restore = __restore_cpu_power10,
+ .platform = "power10",
+ },
+ { /* Power7 */
+ .pvr_mask = 0xffff0000,
+ .pvr_value = 0x003f0000,
+ .cpu_name = "POWER7 (raw)",
+ .cpu_features = CPU_FTRS_POWER7,
+ .cpu_user_features = COMMON_USER_POWER7,
+ .cpu_user_features2 = COMMON_USER2_POWER7,
+ .mmu_features = MMU_FTRS_POWER7,
+ .icache_bsize = 128,
+ .dcache_bsize = 128,
+ .num_pmcs = 6,
+ .pmc_type = PPC_PMC_IBM,
+ .cpu_setup = __setup_cpu_power7,
+ .cpu_restore = __restore_cpu_power7,
+ .machine_check_early = __machine_check_early_realmode_p7,
+ .platform = "power7",
+ },
+ { /* Power7+ */
+ .pvr_mask = 0xffff0000,
+ .pvr_value = 0x004A0000,
+ .cpu_name = "POWER7+ (raw)",
+ .cpu_features = CPU_FTRS_POWER7,
+ .cpu_user_features = COMMON_USER_POWER7,
+ .cpu_user_features2 = COMMON_USER2_POWER7,
+ .mmu_features = MMU_FTRS_POWER7,
+ .icache_bsize = 128,
+ .dcache_bsize = 128,
+ .num_pmcs = 6,
+ .pmc_type = PPC_PMC_IBM,
+ .cpu_setup = __setup_cpu_power7,
+ .cpu_restore = __restore_cpu_power7,
+ .machine_check_early = __machine_check_early_realmode_p7,
+ .platform = "power7+",
+ },
+ { /* Power8E */
+ .pvr_mask = 0xffff0000,
+ .pvr_value = 0x004b0000,
+ .cpu_name = "POWER8E (raw)",
+ .cpu_features = CPU_FTRS_POWER8E,
+ .cpu_user_features = COMMON_USER_POWER8,
+ .cpu_user_features2 = COMMON_USER2_POWER8,
+ .mmu_features = MMU_FTRS_POWER8,
+ .icache_bsize = 128,
+ .dcache_bsize = 128,
+ .num_pmcs = 6,
+ .pmc_type = PPC_PMC_IBM,
+ .cpu_setup = __setup_cpu_power8,
+ .cpu_restore = __restore_cpu_power8,
+ .machine_check_early = __machine_check_early_realmode_p8,
+ .platform = "power8",
+ },
+ { /* Power8NVL */
+ .pvr_mask = 0xffff0000,
+ .pvr_value = 0x004c0000,
+ .cpu_name = "POWER8NVL (raw)",
+ .cpu_features = CPU_FTRS_POWER8,
+ .cpu_user_features = COMMON_USER_POWER8,
+ .cpu_user_features2 = COMMON_USER2_POWER8,
+ .mmu_features = MMU_FTRS_POWER8,
+ .icache_bsize = 128,
+ .dcache_bsize = 128,
+ .num_pmcs = 6,
+ .pmc_type = PPC_PMC_IBM,
+ .cpu_setup = __setup_cpu_power8,
+ .cpu_restore = __restore_cpu_power8,
+ .machine_check_early = __machine_check_early_realmode_p8,
+ .platform = "power8",
+ },
+ { /* Power8 */
+ .pvr_mask = 0xffff0000,
+ .pvr_value = 0x004d0000,
+ .cpu_name = "POWER8 (raw)",
+ .cpu_features = CPU_FTRS_POWER8,
+ .cpu_user_features = COMMON_USER_POWER8,
+ .cpu_user_features2 = COMMON_USER2_POWER8,
+ .mmu_features = MMU_FTRS_POWER8,
+ .icache_bsize = 128,
+ .dcache_bsize = 128,
+ .num_pmcs = 6,
+ .pmc_type = PPC_PMC_IBM,
+ .cpu_setup = __setup_cpu_power8,
+ .cpu_restore = __restore_cpu_power8,
+ .machine_check_early = __machine_check_early_realmode_p8,
+ .platform = "power8",
+ },
+ { /* Power9 DD2.0 */
+ .pvr_mask = 0xffffefff,
+ .pvr_value = 0x004e0200,
+ .cpu_name = "POWER9 (raw)",
+ .cpu_features = CPU_FTRS_POWER9_DD2_0,
+ .cpu_user_features = COMMON_USER_POWER9,
+ .cpu_user_features2 = COMMON_USER2_POWER9,
+ .mmu_features = MMU_FTRS_POWER9,
+ .icache_bsize = 128,
+ .dcache_bsize = 128,
+ .num_pmcs = 6,
+ .pmc_type = PPC_PMC_IBM,
+ .cpu_setup = __setup_cpu_power9,
+ .cpu_restore = __restore_cpu_power9,
+ .machine_check_early = __machine_check_early_realmode_p9,
+ .platform = "power9",
+ },
+ { /* Power9 DD 2.1 */
+ .pvr_mask = 0xffffefff,
+ .pvr_value = 0x004e0201,
+ .cpu_name = "POWER9 (raw)",
+ .cpu_features = CPU_FTRS_POWER9_DD2_1,
+ .cpu_user_features = COMMON_USER_POWER9,
+ .cpu_user_features2 = COMMON_USER2_POWER9,
+ .mmu_features = MMU_FTRS_POWER9,
+ .icache_bsize = 128,
+ .dcache_bsize = 128,
+ .num_pmcs = 6,
+ .pmc_type = PPC_PMC_IBM,
+ .cpu_setup = __setup_cpu_power9,
+ .cpu_restore = __restore_cpu_power9,
+ .machine_check_early = __machine_check_early_realmode_p9,
+ .platform = "power9",
+ },
+ { /* Power9 DD2.2 */
+ .pvr_mask = 0xffffefff,
+ .pvr_value = 0x004e0202,
+ .cpu_name = "POWER9 (raw)",
+ .cpu_features = CPU_FTRS_POWER9_DD2_2,
+ .cpu_user_features = COMMON_USER_POWER9,
+ .cpu_user_features2 = COMMON_USER2_POWER9,
+ .mmu_features = MMU_FTRS_POWER9,
+ .icache_bsize = 128,
+ .dcache_bsize = 128,
+ .num_pmcs = 6,
+ .pmc_type = PPC_PMC_IBM,
+ .cpu_setup = __setup_cpu_power9,
+ .cpu_restore = __restore_cpu_power9,
+ .machine_check_early = __machine_check_early_realmode_p9,
+ .platform = "power9",
+ },
+ { /* Power9 DD2.3 or later */
+ .pvr_mask = 0xffff0000,
+ .pvr_value = 0x004e0000,
+ .cpu_name = "POWER9 (raw)",
+ .cpu_features = CPU_FTRS_POWER9_DD2_3,
+ .cpu_user_features = COMMON_USER_POWER9,
+ .cpu_user_features2 = COMMON_USER2_POWER9,
+ .mmu_features = MMU_FTRS_POWER9,
+ .icache_bsize = 128,
+ .dcache_bsize = 128,
+ .num_pmcs = 6,
+ .pmc_type = PPC_PMC_IBM,
+ .cpu_setup = __setup_cpu_power9,
+ .cpu_restore = __restore_cpu_power9,
+ .machine_check_early = __machine_check_early_realmode_p9,
+ .platform = "power9",
+ },
+ { /* Power10 */
+ .pvr_mask = 0xffff0000,
+ .pvr_value = 0x00800000,
+ .cpu_name = "POWER10 (raw)",
+ .cpu_features = CPU_FTRS_POWER10,
+ .cpu_user_features = COMMON_USER_POWER10,
+ .cpu_user_features2 = COMMON_USER2_POWER10,
+ .mmu_features = MMU_FTRS_POWER10,
+ .icache_bsize = 128,
+ .dcache_bsize = 128,
+ .num_pmcs = 6,
+ .pmc_type = PPC_PMC_IBM,
+ .cpu_setup = __setup_cpu_power10,
+ .cpu_restore = __restore_cpu_power10,
+ .machine_check_early = __machine_check_early_realmode_p10,
+ .platform = "power10",
+ },
+ { /* Cell Broadband Engine */
+ .pvr_mask = 0xffff0000,
+ .pvr_value = 0x00700000,
+ .cpu_name = "Cell Broadband Engine",
+ .cpu_features = CPU_FTRS_CELL,
+ .cpu_user_features = COMMON_USER_PPC64 | PPC_FEATURE_CELL |
+ PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_SMT,
+ .mmu_features = MMU_FTRS_CELL,
+ .icache_bsize = 128,
+ .dcache_bsize = 128,
+ .num_pmcs = 4,
+ .pmc_type = PPC_PMC_IBM,
+ .platform = "ppc-cell-be",
+ },
+ { /* PA Semi PA6T */
+ .pvr_mask = 0x7fff0000,
+ .pvr_value = 0x00900000,
+ .cpu_name = "PA6T",
+ .cpu_features = CPU_FTRS_PA6T,
+ .cpu_user_features = COMMON_USER_PA6T,
+ .mmu_features = MMU_FTRS_PA6T,
+ .icache_bsize = 64,
+ .dcache_bsize = 64,
+ .num_pmcs = 6,
+ .pmc_type = PPC_PMC_PA6T,
+ .cpu_setup = __setup_cpu_pa6t,
+ .cpu_restore = __restore_cpu_pa6t,
+ .platform = "pa6t",
+ },
+ { /* default match */
+ .pvr_mask = 0x00000000,
+ .pvr_value = 0x00000000,
+ .cpu_name = "POWER5 (compatible)",
+ .cpu_features = CPU_FTRS_COMPATIBLE,
+ .cpu_user_features = COMMON_USER_PPC64,
+ .mmu_features = MMU_FTRS_POWER,
+ .icache_bsize = 128,
+ .dcache_bsize = 128,
+ .num_pmcs = 6,
+ .pmc_type = PPC_PMC_IBM,
+ .platform = "power5",
+ }
+};
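
The cpu_user_features and cpu_user_features2 masks built from the COMMON_USER_* / COMMON_USER2_* macros above are what userspace ultimately sees as AT_HWCAP and AT_HWCAP2 in the ELF auxiliary vector. A small user-space sketch that checks two of the bits appearing in this table, assuming the powerpc uapi header <asm/cputable.h> for the PPC_FEATURE* constants:

#include <stdio.h>
#include <sys/auxv.h>
#include <asm/cputable.h>	/* PPC_FEATURE_* / PPC_FEATURE2_* (powerpc uapi header) */

int main(void)
{
	unsigned long hwcap = getauxval(AT_HWCAP);
	unsigned long hwcap2 = getauxval(AT_HWCAP2);

	/*
	 * User-visible bit behind the PPC_FEATURE_HAS_ALTIVEC_COMP entries above
	 * (the _COMP variant is only non-zero when the kernel has AltiVec support).
	 */
	printf("AltiVec/VMX : %s\n",
	       (hwcap & PPC_FEATURE_HAS_ALTIVEC) ? "yes" : "no");

	/* Advertised via COMMON_USER2_POWER10 (PPC_FEATURE2_ARCH_3_1). */
	printf("ISA v3.1    : %s\n",
	       (hwcap2 & PPC_FEATURE2_ARCH_3_1) ? "yes" : "no");

	return 0;
}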
diff --git a/arch/powerpc/kernel/cpu_specs_e500mc.h b/arch/powerpc/kernel/cpu_specs_e500mc.h
new file mode 100644
index 000000000000..ceb06b109f83
--- /dev/null
+++ b/arch/powerpc/kernel/cpu_specs_e500mc.h
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2001 Ben. Herrenschmidt (benh@kernel.crashing.org)
+ *
+ * Modifications for ppc64:
+ * Copyright (C) 2003 Dave Engebretsen <engebret@us.ibm.com>
+ */
+
+#ifdef CONFIG_PPC64
+#define COMMON_USER_BOOKE (PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | \
+ PPC_FEATURE_HAS_FPU | PPC_FEATURE_64)
+#else
+#define COMMON_USER_BOOKE (PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | \
+ PPC_FEATURE_BOOKE)
+#endif
+
+static struct cpu_spec cpu_specs[] __initdata = {
+#ifdef CONFIG_PPC32
+ { /* e500mc */
+ .pvr_mask = 0xffff0000,
+ .pvr_value = 0x80230000,
+ .cpu_name = "e500mc",
+ .cpu_features = CPU_FTRS_E500MC,
+ .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU,
+ .cpu_user_features2 = PPC_FEATURE2_ISEL,
+ .mmu_features = MMU_FTR_TYPE_FSL_E | MMU_FTR_BIG_PHYS | MMU_FTR_USE_TLBILX,
+ .icache_bsize = 64,
+ .dcache_bsize = 64,
+ .num_pmcs = 4,
+ .cpu_setup = __setup_cpu_e500mc,
+ .machine_check = machine_check_e500mc,
+ .platform = "ppce500mc",
+ .cpu_down_flush = cpu_down_flush_e500mc,
+ },
+#endif /* CONFIG_PPC32 */
+ { /* e5500 */
+ .pvr_mask = 0xffff0000,
+ .pvr_value = 0x80240000,
+ .cpu_name = "e5500",
+ .cpu_features = CPU_FTRS_E5500,
+ .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU,
+ .cpu_user_features2 = PPC_FEATURE2_ISEL,
+ .mmu_features = MMU_FTR_TYPE_FSL_E | MMU_FTR_BIG_PHYS | MMU_FTR_USE_TLBILX,
+ .icache_bsize = 64,
+ .dcache_bsize = 64,
+ .num_pmcs = 4,
+ .cpu_setup = __setup_cpu_e5500,
+#ifndef CONFIG_PPC32
+ .cpu_restore = __restore_cpu_e5500,
+#endif
+ .machine_check = machine_check_e500mc,
+ .platform = "ppce5500",
+ .cpu_down_flush = cpu_down_flush_e5500,
+ },
+ { /* e6500 */
+ .pvr_mask = 0xffff0000,
+ .pvr_value = 0x80400000,
+ .cpu_name = "e6500",
+ .cpu_features = CPU_FTRS_E6500,
+ .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU |
+ PPC_FEATURE_HAS_ALTIVEC_COMP,
+ .cpu_user_features2 = PPC_FEATURE2_ISEL,
+ .mmu_features = MMU_FTR_TYPE_FSL_E | MMU_FTR_BIG_PHYS | MMU_FTR_USE_TLBILX,
+ .icache_bsize = 64,
+ .dcache_bsize = 64,
+ .num_pmcs = 6,
+ .cpu_setup = __setup_cpu_e6500,
+#ifndef CONFIG_PPC32
+ .cpu_restore = __restore_cpu_e6500,
+#endif
+ .machine_check = machine_check_e500mc,
+ .platform = "ppce6500",
+ .cpu_down_flush = cpu_down_flush_e6500,
+ },
+};
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 245be4fafe13..8a32bffefa5b 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -12,12 +12,13 @@
#include <linux/init.h>
#include <linux/export.h>
#include <linux/jump_label.h>
+#include <linux/of.h>
-#include <asm/oprofile_impl.h>
#include <asm/cputable.h>
-#include <asm/prom.h> /* for PTRRELOC on ARCH=ppc */
+#include <asm/mce.h>
#include <asm/mmu.h>
#include <asm/setup.h>
+#include <asm/cpu_setup.h>
static struct cpu_spec the_cpu_spec __read_mostly;
@@ -27,2118 +28,7 @@ EXPORT_SYMBOL(cur_cpu_spec);
/* The platform string corresponding to the real PVR */
const char *powerpc_base_platform;
-/* NOTE:
- * Unlike ppc32, ppc64 will only call this once for the boot CPU, it's
- * the responsibility of the appropriate CPU save/restore functions to
- * eventually copy these settings over. Those save/restore aren't yet
- * part of the cputable though. That has to be fixed for both ppc32
- * and ppc64
- */
-#ifdef CONFIG_PPC32
-extern void __setup_cpu_e200(unsigned long offset, struct cpu_spec* spec);
-extern void __setup_cpu_e500v1(unsigned long offset, struct cpu_spec* spec);
-extern void __setup_cpu_e500v2(unsigned long offset, struct cpu_spec* spec);
-extern void __setup_cpu_e500mc(unsigned long offset, struct cpu_spec* spec);
-extern void __setup_cpu_440ep(unsigned long offset, struct cpu_spec* spec);
-extern void __setup_cpu_440epx(unsigned long offset, struct cpu_spec* spec);
-extern void __setup_cpu_440gx(unsigned long offset, struct cpu_spec* spec);
-extern void __setup_cpu_440grx(unsigned long offset, struct cpu_spec* spec);
-extern void __setup_cpu_440spe(unsigned long offset, struct cpu_spec* spec);
-extern void __setup_cpu_440x5(unsigned long offset, struct cpu_spec* spec);
-extern void __setup_cpu_460ex(unsigned long offset, struct cpu_spec* spec);
-extern void __setup_cpu_460gt(unsigned long offset, struct cpu_spec* spec);
-extern void __setup_cpu_460sx(unsigned long offset, struct cpu_spec *spec);
-extern void __setup_cpu_apm821xx(unsigned long offset, struct cpu_spec *spec);
-extern void __setup_cpu_603(unsigned long offset, struct cpu_spec* spec);
-extern void __setup_cpu_604(unsigned long offset, struct cpu_spec* spec);
-extern void __setup_cpu_750(unsigned long offset, struct cpu_spec* spec);
-extern void __setup_cpu_750cx(unsigned long offset, struct cpu_spec* spec);
-extern void __setup_cpu_750fx(unsigned long offset, struct cpu_spec* spec);
-extern void __setup_cpu_7400(unsigned long offset, struct cpu_spec* spec);
-extern void __setup_cpu_7410(unsigned long offset, struct cpu_spec* spec);
-extern void __setup_cpu_745x(unsigned long offset, struct cpu_spec* spec);
-#endif /* CONFIG_PPC32 */
-#ifdef CONFIG_PPC64
-extern void __setup_cpu_ppc970(unsigned long offset, struct cpu_spec* spec);
-extern void __setup_cpu_ppc970MP(unsigned long offset, struct cpu_spec* spec);
-extern void __setup_cpu_pa6t(unsigned long offset, struct cpu_spec* spec);
-extern void __restore_cpu_pa6t(void);
-extern void __restore_cpu_ppc970(void);
-extern void __setup_cpu_power7(unsigned long offset, struct cpu_spec* spec);
-extern void __restore_cpu_power7(void);
-extern void __setup_cpu_power8(unsigned long offset, struct cpu_spec* spec);
-extern void __restore_cpu_power8(void);
-extern void __setup_cpu_power9(unsigned long offset, struct cpu_spec* spec);
-extern void __restore_cpu_power9(void);
-extern long __machine_check_early_realmode_p7(struct pt_regs *regs);
-extern long __machine_check_early_realmode_p8(struct pt_regs *regs);
-extern long __machine_check_early_realmode_p9(struct pt_regs *regs);
-#endif /* CONFIG_PPC64 */
-#if defined(CONFIG_E500)
-extern void __setup_cpu_e5500(unsigned long offset, struct cpu_spec* spec);
-extern void __setup_cpu_e6500(unsigned long offset, struct cpu_spec* spec);
-extern void __restore_cpu_e5500(void);
-extern void __restore_cpu_e6500(void);
-#endif /* CONFIG_E500 */
-
-/* This table only contains "desktop" CPUs, it need to be filled with embedded
- * ones as well...
- */
-#define COMMON_USER (PPC_FEATURE_32 | PPC_FEATURE_HAS_FPU | \
- PPC_FEATURE_HAS_MMU)
-#define COMMON_USER_PPC64 (COMMON_USER | PPC_FEATURE_64)
-#define COMMON_USER_POWER4 (COMMON_USER_PPC64 | PPC_FEATURE_POWER4)
-#define COMMON_USER_POWER5 (COMMON_USER_PPC64 | PPC_FEATURE_POWER5 |\
- PPC_FEATURE_SMT | PPC_FEATURE_ICACHE_SNOOP)
-#define COMMON_USER_POWER5_PLUS (COMMON_USER_PPC64 | PPC_FEATURE_POWER5_PLUS|\
- PPC_FEATURE_SMT | PPC_FEATURE_ICACHE_SNOOP)
-#define COMMON_USER_POWER6 (COMMON_USER_PPC64 | PPC_FEATURE_ARCH_2_05 |\
- PPC_FEATURE_SMT | PPC_FEATURE_ICACHE_SNOOP | \
- PPC_FEATURE_TRUE_LE | \
- PPC_FEATURE_PSERIES_PERFMON_COMPAT)
-#define COMMON_USER_POWER7 (COMMON_USER_PPC64 | PPC_FEATURE_ARCH_2_06 |\
- PPC_FEATURE_SMT | PPC_FEATURE_ICACHE_SNOOP | \
- PPC_FEATURE_TRUE_LE | \
- PPC_FEATURE_PSERIES_PERFMON_COMPAT)
-#define COMMON_USER2_POWER7 (PPC_FEATURE2_DSCR)
-#define COMMON_USER_POWER8 (COMMON_USER_PPC64 | PPC_FEATURE_ARCH_2_06 |\
- PPC_FEATURE_SMT | PPC_FEATURE_ICACHE_SNOOP | \
- PPC_FEATURE_TRUE_LE | \
- PPC_FEATURE_PSERIES_PERFMON_COMPAT)
-#define COMMON_USER2_POWER8 (PPC_FEATURE2_ARCH_2_07 | \
- PPC_FEATURE2_HTM_COMP | \
- PPC_FEATURE2_HTM_NOSC_COMP | \
- PPC_FEATURE2_DSCR | \
- PPC_FEATURE2_ISEL | PPC_FEATURE2_TAR | \
- PPC_FEATURE2_VEC_CRYPTO)
-#define COMMON_USER_PA6T (COMMON_USER_PPC64 | PPC_FEATURE_PA6T |\
- PPC_FEATURE_TRUE_LE | \
- PPC_FEATURE_HAS_ALTIVEC_COMP)
-#define COMMON_USER_POWER9 COMMON_USER_POWER8
-#define COMMON_USER2_POWER9 (COMMON_USER2_POWER8 | \
- PPC_FEATURE2_ARCH_3_00 | \
- PPC_FEATURE2_HAS_IEEE128 | \
- PPC_FEATURE2_DARN )
-
-#ifdef CONFIG_PPC_BOOK3E_64
-#define COMMON_USER_BOOKE (COMMON_USER_PPC64 | PPC_FEATURE_BOOKE)
-#else
-#define COMMON_USER_BOOKE (PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | \
- PPC_FEATURE_BOOKE)
-#endif
-
-static struct cpu_spec __initdata cpu_specs[] = {
-#ifdef CONFIG_PPC_BOOK3S_64
- { /* PPC970 */
- .pvr_mask = 0xffff0000,
- .pvr_value = 0x00390000,
- .cpu_name = "PPC970",
- .cpu_features = CPU_FTRS_PPC970,
- .cpu_user_features = COMMON_USER_POWER4 |
- PPC_FEATURE_HAS_ALTIVEC_COMP,
- .mmu_features = MMU_FTRS_PPC970,
- .icache_bsize = 128,
- .dcache_bsize = 128,
- .num_pmcs = 8,
- .pmc_type = PPC_PMC_IBM,
- .cpu_setup = __setup_cpu_ppc970,
- .cpu_restore = __restore_cpu_ppc970,
- .oprofile_cpu_type = "ppc64/970",
- .oprofile_type = PPC_OPROFILE_POWER4,
- .platform = "ppc970",
- },
- { /* PPC970FX */
- .pvr_mask = 0xffff0000,
- .pvr_value = 0x003c0000,
- .cpu_name = "PPC970FX",
- .cpu_features = CPU_FTRS_PPC970,
- .cpu_user_features = COMMON_USER_POWER4 |
- PPC_FEATURE_HAS_ALTIVEC_COMP,
- .mmu_features = MMU_FTRS_PPC970,
- .icache_bsize = 128,
- .dcache_bsize = 128,
- .num_pmcs = 8,
- .pmc_type = PPC_PMC_IBM,
- .cpu_setup = __setup_cpu_ppc970,
- .cpu_restore = __restore_cpu_ppc970,
- .oprofile_cpu_type = "ppc64/970",
- .oprofile_type = PPC_OPROFILE_POWER4,
- .platform = "ppc970",
- },
- { /* PPC970MP DD1.0 - no DEEPNAP, use regular 970 init */
- .pvr_mask = 0xffffffff,
- .pvr_value = 0x00440100,
- .cpu_name = "PPC970MP",
- .cpu_features = CPU_FTRS_PPC970,
- .cpu_user_features = COMMON_USER_POWER4 |
- PPC_FEATURE_HAS_ALTIVEC_COMP,
- .mmu_features = MMU_FTRS_PPC970,
- .icache_bsize = 128,
- .dcache_bsize = 128,
- .num_pmcs = 8,
- .pmc_type = PPC_PMC_IBM,
- .cpu_setup = __setup_cpu_ppc970,
- .cpu_restore = __restore_cpu_ppc970,
- .oprofile_cpu_type = "ppc64/970MP",
- .oprofile_type = PPC_OPROFILE_POWER4,
- .platform = "ppc970",
- },
- { /* PPC970MP */
- .pvr_mask = 0xffff0000,
- .pvr_value = 0x00440000,
- .cpu_name = "PPC970MP",
- .cpu_features = CPU_FTRS_PPC970,
- .cpu_user_features = COMMON_USER_POWER4 |
- PPC_FEATURE_HAS_ALTIVEC_COMP,
- .mmu_features = MMU_FTRS_PPC970,
- .icache_bsize = 128,
- .dcache_bsize = 128,
- .num_pmcs = 8,
- .pmc_type = PPC_PMC_IBM,
- .cpu_setup = __setup_cpu_ppc970MP,
- .cpu_restore = __restore_cpu_ppc970,
- .oprofile_cpu_type = "ppc64/970MP",
- .oprofile_type = PPC_OPROFILE_POWER4,
- .platform = "ppc970",
- },
- { /* PPC970GX */
- .pvr_mask = 0xffff0000,
- .pvr_value = 0x00450000,
- .cpu_name = "PPC970GX",
- .cpu_features = CPU_FTRS_PPC970,
- .cpu_user_features = COMMON_USER_POWER4 |
- PPC_FEATURE_HAS_ALTIVEC_COMP,
- .mmu_features = MMU_FTRS_PPC970,
- .icache_bsize = 128,
- .dcache_bsize = 128,
- .num_pmcs = 8,
- .pmc_type = PPC_PMC_IBM,
- .cpu_setup = __setup_cpu_ppc970,
- .oprofile_cpu_type = "ppc64/970",
- .oprofile_type = PPC_OPROFILE_POWER4,
- .platform = "ppc970",
- },
- { /* Power5 GR */
- .pvr_mask = 0xffff0000,
- .pvr_value = 0x003a0000,
- .cpu_name = "POWER5 (gr)",
- .cpu_features = CPU_FTRS_POWER5,
- .cpu_user_features = COMMON_USER_POWER5,
- .mmu_features = MMU_FTRS_POWER5,
- .icache_bsize = 128,
- .dcache_bsize = 128,
- .num_pmcs = 6,
- .pmc_type = PPC_PMC_IBM,
- .oprofile_cpu_type = "ppc64/power5",
- .oprofile_type = PPC_OPROFILE_POWER4,
- /* SIHV / SIPR bits are implemented on POWER4+ (GQ)
- * and above but only works on POWER5 and above
- */
- .oprofile_mmcra_sihv = MMCRA_SIHV,
- .oprofile_mmcra_sipr = MMCRA_SIPR,
- .platform = "power5",
- },
- { /* Power5++ */
- .pvr_mask = 0xffffff00,
- .pvr_value = 0x003b0300,
- .cpu_name = "POWER5+ (gs)",
- .cpu_features = CPU_FTRS_POWER5,
- .cpu_user_features = COMMON_USER_POWER5_PLUS,
- .mmu_features = MMU_FTRS_POWER5,
- .icache_bsize = 128,
- .dcache_bsize = 128,
- .num_pmcs = 6,
- .oprofile_cpu_type = "ppc64/power5++",
- .oprofile_type = PPC_OPROFILE_POWER4,
- .oprofile_mmcra_sihv = MMCRA_SIHV,
- .oprofile_mmcra_sipr = MMCRA_SIPR,
- .platform = "power5+",
- },
- { /* Power5 GS */
- .pvr_mask = 0xffff0000,
- .pvr_value = 0x003b0000,
- .cpu_name = "POWER5+ (gs)",
- .cpu_features = CPU_FTRS_POWER5,
- .cpu_user_features = COMMON_USER_POWER5_PLUS,
- .mmu_features = MMU_FTRS_POWER5,
- .icache_bsize = 128,
- .dcache_bsize = 128,
- .num_pmcs = 6,
- .pmc_type = PPC_PMC_IBM,
- .oprofile_cpu_type = "ppc64/power5+",
- .oprofile_type = PPC_OPROFILE_POWER4,
- .oprofile_mmcra_sihv = MMCRA_SIHV,
- .oprofile_mmcra_sipr = MMCRA_SIPR,
- .platform = "power5+",
- },
- { /* POWER6 in P5+ mode; 2.04-compliant processor */
- .pvr_mask = 0xffffffff,
- .pvr_value = 0x0f000001,
- .cpu_name = "POWER5+",
- .cpu_features = CPU_FTRS_POWER5,
- .cpu_user_features = COMMON_USER_POWER5_PLUS,
- .mmu_features = MMU_FTRS_POWER5,
- .icache_bsize = 128,
- .dcache_bsize = 128,
- .oprofile_cpu_type = "ppc64/ibm-compat-v1",
- .oprofile_type = PPC_OPROFILE_POWER4,
- .platform = "power5+",
- },
- { /* Power6 */
- .pvr_mask = 0xffff0000,
- .pvr_value = 0x003e0000,
- .cpu_name = "POWER6 (raw)",
- .cpu_features = CPU_FTRS_POWER6,
- .cpu_user_features = COMMON_USER_POWER6 |
- PPC_FEATURE_POWER6_EXT,
- .mmu_features = MMU_FTRS_POWER6,
- .icache_bsize = 128,
- .dcache_bsize = 128,
- .num_pmcs = 6,
- .pmc_type = PPC_PMC_IBM,
- .oprofile_cpu_type = "ppc64/power6",
- .oprofile_type = PPC_OPROFILE_POWER4,
- .oprofile_mmcra_sihv = POWER6_MMCRA_SIHV,
- .oprofile_mmcra_sipr = POWER6_MMCRA_SIPR,
- .oprofile_mmcra_clear = POWER6_MMCRA_THRM |
- POWER6_MMCRA_OTHER,
- .platform = "power6x",
- },
- { /* 2.05-compliant processor, i.e. Power6 "architected" mode */
- .pvr_mask = 0xffffffff,
- .pvr_value = 0x0f000002,
- .cpu_name = "POWER6 (architected)",
- .cpu_features = CPU_FTRS_POWER6,
- .cpu_user_features = COMMON_USER_POWER6,
- .mmu_features = MMU_FTRS_POWER6,
- .icache_bsize = 128,
- .dcache_bsize = 128,
- .oprofile_cpu_type = "ppc64/ibm-compat-v1",
- .oprofile_type = PPC_OPROFILE_POWER4,
- .platform = "power6",
- },
- { /* 2.06-compliant processor, i.e. Power7 "architected" mode */
- .pvr_mask = 0xffffffff,
- .pvr_value = 0x0f000003,
- .cpu_name = "POWER7 (architected)",
- .cpu_features = CPU_FTRS_POWER7,
- .cpu_user_features = COMMON_USER_POWER7,
- .cpu_user_features2 = COMMON_USER2_POWER7,
- .mmu_features = MMU_FTRS_POWER7,
- .icache_bsize = 128,
- .dcache_bsize = 128,
- .oprofile_type = PPC_OPROFILE_POWER4,
- .oprofile_cpu_type = "ppc64/ibm-compat-v1",
- .cpu_setup = __setup_cpu_power7,
- .cpu_restore = __restore_cpu_power7,
- .machine_check_early = __machine_check_early_realmode_p7,
- .platform = "power7",
- },
- { /* 2.07-compliant processor, i.e. Power8 "architected" mode */
- .pvr_mask = 0xffffffff,
- .pvr_value = 0x0f000004,
- .cpu_name = "POWER8 (architected)",
- .cpu_features = CPU_FTRS_POWER8,
- .cpu_user_features = COMMON_USER_POWER8,
- .cpu_user_features2 = COMMON_USER2_POWER8,
- .mmu_features = MMU_FTRS_POWER8,
- .icache_bsize = 128,
- .dcache_bsize = 128,
- .oprofile_type = PPC_OPROFILE_INVALID,
- .oprofile_cpu_type = "ppc64/ibm-compat-v1",
- .cpu_setup = __setup_cpu_power8,
- .cpu_restore = __restore_cpu_power8,
- .machine_check_early = __machine_check_early_realmode_p8,
- .platform = "power8",
- },
- { /* 3.00-compliant processor, i.e. Power9 "architected" mode */
- .pvr_mask = 0xffffffff,
- .pvr_value = 0x0f000005,
- .cpu_name = "POWER9 (architected)",
- .cpu_features = CPU_FTRS_POWER9,
- .cpu_user_features = COMMON_USER_POWER9,
- .cpu_user_features2 = COMMON_USER2_POWER9,
- .mmu_features = MMU_FTRS_POWER9,
- .icache_bsize = 128,
- .dcache_bsize = 128,
- .oprofile_type = PPC_OPROFILE_INVALID,
- .oprofile_cpu_type = "ppc64/ibm-compat-v1",
- .cpu_setup = __setup_cpu_power9,
- .cpu_restore = __restore_cpu_power9,
- .platform = "power9",
- },
- { /* Power7 */
- .pvr_mask = 0xffff0000,
- .pvr_value = 0x003f0000,
- .cpu_name = "POWER7 (raw)",
- .cpu_features = CPU_FTRS_POWER7,
- .cpu_user_features = COMMON_USER_POWER7,
- .cpu_user_features2 = COMMON_USER2_POWER7,
- .mmu_features = MMU_FTRS_POWER7,
- .icache_bsize = 128,
- .dcache_bsize = 128,
- .num_pmcs = 6,
- .pmc_type = PPC_PMC_IBM,
- .oprofile_cpu_type = "ppc64/power7",
- .oprofile_type = PPC_OPROFILE_POWER4,
- .cpu_setup = __setup_cpu_power7,
- .cpu_restore = __restore_cpu_power7,
- .machine_check_early = __machine_check_early_realmode_p7,
- .platform = "power7",
- },
- { /* Power7+ */
- .pvr_mask = 0xffff0000,
- .pvr_value = 0x004A0000,
- .cpu_name = "POWER7+ (raw)",
- .cpu_features = CPU_FTRS_POWER7,
- .cpu_user_features = COMMON_USER_POWER7,
- .cpu_user_features2 = COMMON_USER2_POWER7,
- .mmu_features = MMU_FTRS_POWER7,
- .icache_bsize = 128,
- .dcache_bsize = 128,
- .num_pmcs = 6,
- .pmc_type = PPC_PMC_IBM,
- .oprofile_cpu_type = "ppc64/power7",
- .oprofile_type = PPC_OPROFILE_POWER4,
- .cpu_setup = __setup_cpu_power7,
- .cpu_restore = __restore_cpu_power7,
- .machine_check_early = __machine_check_early_realmode_p7,
- .platform = "power7+",
- },
- { /* Power8E */
- .pvr_mask = 0xffff0000,
- .pvr_value = 0x004b0000,
- .cpu_name = "POWER8E (raw)",
- .cpu_features = CPU_FTRS_POWER8E,
- .cpu_user_features = COMMON_USER_POWER8,
- .cpu_user_features2 = COMMON_USER2_POWER8,
- .mmu_features = MMU_FTRS_POWER8,
- .icache_bsize = 128,
- .dcache_bsize = 128,
- .num_pmcs = 6,
- .pmc_type = PPC_PMC_IBM,
- .oprofile_cpu_type = "ppc64/power8",
- .oprofile_type = PPC_OPROFILE_INVALID,
- .cpu_setup = __setup_cpu_power8,
- .cpu_restore = __restore_cpu_power8,
- .machine_check_early = __machine_check_early_realmode_p8,
- .platform = "power8",
- },
- { /* Power8NVL */
- .pvr_mask = 0xffff0000,
- .pvr_value = 0x004c0000,
- .cpu_name = "POWER8NVL (raw)",
- .cpu_features = CPU_FTRS_POWER8,
- .cpu_user_features = COMMON_USER_POWER8,
- .cpu_user_features2 = COMMON_USER2_POWER8,
- .mmu_features = MMU_FTRS_POWER8,
- .icache_bsize = 128,
- .dcache_bsize = 128,
- .num_pmcs = 6,
- .pmc_type = PPC_PMC_IBM,
- .oprofile_cpu_type = "ppc64/power8",
- .oprofile_type = PPC_OPROFILE_INVALID,
- .cpu_setup = __setup_cpu_power8,
- .cpu_restore = __restore_cpu_power8,
- .machine_check_early = __machine_check_early_realmode_p8,
- .platform = "power8",
- },
- { /* Power8 */
- .pvr_mask = 0xffff0000,
- .pvr_value = 0x004d0000,
- .cpu_name = "POWER8 (raw)",
- .cpu_features = CPU_FTRS_POWER8,
- .cpu_user_features = COMMON_USER_POWER8,
- .cpu_user_features2 = COMMON_USER2_POWER8,
- .mmu_features = MMU_FTRS_POWER8,
- .icache_bsize = 128,
- .dcache_bsize = 128,
- .num_pmcs = 6,
- .pmc_type = PPC_PMC_IBM,
- .oprofile_cpu_type = "ppc64/power8",
- .oprofile_type = PPC_OPROFILE_INVALID,
- .cpu_setup = __setup_cpu_power8,
- .cpu_restore = __restore_cpu_power8,
- .machine_check_early = __machine_check_early_realmode_p8,
- .platform = "power8",
- },
- { /* Power9 DD2.0 */
- .pvr_mask = 0xffffefff,
- .pvr_value = 0x004e0200,
- .cpu_name = "POWER9 (raw)",
- .cpu_features = CPU_FTRS_POWER9_DD2_0,
- .cpu_user_features = COMMON_USER_POWER9,
- .cpu_user_features2 = COMMON_USER2_POWER9,
- .mmu_features = MMU_FTRS_POWER9,
- .icache_bsize = 128,
- .dcache_bsize = 128,
- .num_pmcs = 6,
- .pmc_type = PPC_PMC_IBM,
- .oprofile_cpu_type = "ppc64/power9",
- .oprofile_type = PPC_OPROFILE_INVALID,
- .cpu_setup = __setup_cpu_power9,
- .cpu_restore = __restore_cpu_power9,
- .machine_check_early = __machine_check_early_realmode_p9,
- .platform = "power9",
- },
- { /* Power9 DD 2.1 */
- .pvr_mask = 0xffffefff,
- .pvr_value = 0x004e0201,
- .cpu_name = "POWER9 (raw)",
- .cpu_features = CPU_FTRS_POWER9_DD2_1,
- .cpu_user_features = COMMON_USER_POWER9,
- .cpu_user_features2 = COMMON_USER2_POWER9,
- .mmu_features = MMU_FTRS_POWER9,
- .icache_bsize = 128,
- .dcache_bsize = 128,
- .num_pmcs = 6,
- .pmc_type = PPC_PMC_IBM,
- .oprofile_cpu_type = "ppc64/power9",
- .oprofile_type = PPC_OPROFILE_INVALID,
- .cpu_setup = __setup_cpu_power9,
- .cpu_restore = __restore_cpu_power9,
- .machine_check_early = __machine_check_early_realmode_p9,
- .platform = "power9",
- },
- { /* Power9 DD2.2 or later */
- .pvr_mask = 0xffff0000,
- .pvr_value = 0x004e0000,
- .cpu_name = "POWER9 (raw)",
- .cpu_features = CPU_FTRS_POWER9_DD2_2,
- .cpu_user_features = COMMON_USER_POWER9,
- .cpu_user_features2 = COMMON_USER2_POWER9,
- .mmu_features = MMU_FTRS_POWER9,
- .icache_bsize = 128,
- .dcache_bsize = 128,
- .num_pmcs = 6,
- .pmc_type = PPC_PMC_IBM,
- .oprofile_cpu_type = "ppc64/power9",
- .oprofile_type = PPC_OPROFILE_INVALID,
- .cpu_setup = __setup_cpu_power9,
- .cpu_restore = __restore_cpu_power9,
- .machine_check_early = __machine_check_early_realmode_p9,
- .platform = "power9",
- },
- { /* Cell Broadband Engine */
- .pvr_mask = 0xffff0000,
- .pvr_value = 0x00700000,
- .cpu_name = "Cell Broadband Engine",
- .cpu_features = CPU_FTRS_CELL,
- .cpu_user_features = COMMON_USER_PPC64 |
- PPC_FEATURE_CELL | PPC_FEATURE_HAS_ALTIVEC_COMP |
- PPC_FEATURE_SMT,
- .mmu_features = MMU_FTRS_CELL,
- .icache_bsize = 128,
- .dcache_bsize = 128,
- .num_pmcs = 4,
- .pmc_type = PPC_PMC_IBM,
- .oprofile_cpu_type = "ppc64/cell-be",
- .oprofile_type = PPC_OPROFILE_CELL,
- .platform = "ppc-cell-be",
- },
- { /* PA Semi PA6T */
- .pvr_mask = 0x7fff0000,
- .pvr_value = 0x00900000,
- .cpu_name = "PA6T",
- .cpu_features = CPU_FTRS_PA6T,
- .cpu_user_features = COMMON_USER_PA6T,
- .mmu_features = MMU_FTRS_PA6T,
- .icache_bsize = 64,
- .dcache_bsize = 64,
- .num_pmcs = 6,
- .pmc_type = PPC_PMC_PA6T,
- .cpu_setup = __setup_cpu_pa6t,
- .cpu_restore = __restore_cpu_pa6t,
- .oprofile_cpu_type = "ppc64/pa6t",
- .oprofile_type = PPC_OPROFILE_PA6T,
- .platform = "pa6t",
- },
- { /* default match */
- .pvr_mask = 0x00000000,
- .pvr_value = 0x00000000,
- .cpu_name = "POWER5 (compatible)",
- .cpu_features = CPU_FTRS_COMPATIBLE,
- .cpu_user_features = COMMON_USER_PPC64,
- .mmu_features = MMU_FTRS_POWER,
- .icache_bsize = 128,
- .dcache_bsize = 128,
- .num_pmcs = 6,
- .pmc_type = PPC_PMC_IBM,
- .platform = "power5",
- }
-#endif /* CONFIG_PPC_BOOK3S_64 */
-
-#ifdef CONFIG_PPC32
-#ifdef CONFIG_PPC_BOOK3S_601
- { /* 601 */
- .pvr_mask = 0xffff0000,
- .pvr_value = 0x00010000,
- .cpu_name = "601",
- .cpu_features = CPU_FTRS_PPC601,
- .cpu_user_features = COMMON_USER | PPC_FEATURE_601_INSTR |
- PPC_FEATURE_UNIFIED_CACHE | PPC_FEATURE_NO_TB,
- .mmu_features = MMU_FTR_HPTE_TABLE,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .machine_check = machine_check_generic,
- .platform = "ppc601",
- },
-#endif /* CONFIG_PPC_BOOK3S_601 */
-#ifdef CONFIG_PPC_BOOK3S_6xx
- { /* 603 */
- .pvr_mask = 0xffff0000,
- .pvr_value = 0x00030000,
- .cpu_name = "603",
- .cpu_features = CPU_FTRS_603,
- .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
- .mmu_features = 0,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .cpu_setup = __setup_cpu_603,
- .machine_check = machine_check_generic,
- .platform = "ppc603",
- },
- { /* 603e */
- .pvr_mask = 0xffff0000,
- .pvr_value = 0x00060000,
- .cpu_name = "603e",
- .cpu_features = CPU_FTRS_603,
- .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
- .mmu_features = 0,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .cpu_setup = __setup_cpu_603,
- .machine_check = machine_check_generic,
- .platform = "ppc603",
- },
- { /* 603ev */
- .pvr_mask = 0xffff0000,
- .pvr_value = 0x00070000,
- .cpu_name = "603ev",
- .cpu_features = CPU_FTRS_603,
- .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
- .mmu_features = 0,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .cpu_setup = __setup_cpu_603,
- .machine_check = machine_check_generic,
- .platform = "ppc603",
- },
- { /* 604 */
- .pvr_mask = 0xffff0000,
- .pvr_value = 0x00040000,
- .cpu_name = "604",
- .cpu_features = CPU_FTRS_604,
- .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
- .mmu_features = MMU_FTR_HPTE_TABLE,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .num_pmcs = 2,
- .cpu_setup = __setup_cpu_604,
- .machine_check = machine_check_generic,
- .platform = "ppc604",
- },
- { /* 604e */
- .pvr_mask = 0xfffff000,
- .pvr_value = 0x00090000,
- .cpu_name = "604e",
- .cpu_features = CPU_FTRS_604,
- .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
- .mmu_features = MMU_FTR_HPTE_TABLE,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .num_pmcs = 4,
- .cpu_setup = __setup_cpu_604,
- .machine_check = machine_check_generic,
- .platform = "ppc604",
- },
- { /* 604r */
- .pvr_mask = 0xffff0000,
- .pvr_value = 0x00090000,
- .cpu_name = "604r",
- .cpu_features = CPU_FTRS_604,
- .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
- .mmu_features = MMU_FTR_HPTE_TABLE,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .num_pmcs = 4,
- .cpu_setup = __setup_cpu_604,
- .machine_check = machine_check_generic,
- .platform = "ppc604",
- },
- { /* 604ev */
- .pvr_mask = 0xffff0000,
- .pvr_value = 0x000a0000,
- .cpu_name = "604ev",
- .cpu_features = CPU_FTRS_604,
- .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
- .mmu_features = MMU_FTR_HPTE_TABLE,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .num_pmcs = 4,
- .cpu_setup = __setup_cpu_604,
- .machine_check = machine_check_generic,
- .platform = "ppc604",
- },
- { /* 740/750 (0x4202, don't support TAU ?) */
- .pvr_mask = 0xffffffff,
- .pvr_value = 0x00084202,
- .cpu_name = "740/750",
- .cpu_features = CPU_FTRS_740_NOTAU,
- .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
- .mmu_features = MMU_FTR_HPTE_TABLE,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .num_pmcs = 4,
- .cpu_setup = __setup_cpu_750,
- .machine_check = machine_check_generic,
- .platform = "ppc750",
- },
- { /* 750CX (80100 and 8010x?) */
- .pvr_mask = 0xfffffff0,
- .pvr_value = 0x00080100,
- .cpu_name = "750CX",
- .cpu_features = CPU_FTRS_750,
- .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
- .mmu_features = MMU_FTR_HPTE_TABLE,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .num_pmcs = 4,
- .cpu_setup = __setup_cpu_750cx,
- .machine_check = machine_check_generic,
- .platform = "ppc750",
- },
- { /* 750CX (82201 and 82202) */
- .pvr_mask = 0xfffffff0,
- .pvr_value = 0x00082200,
- .cpu_name = "750CX",
- .cpu_features = CPU_FTRS_750,
- .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
- .mmu_features = MMU_FTR_HPTE_TABLE,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .num_pmcs = 4,
- .pmc_type = PPC_PMC_IBM,
- .cpu_setup = __setup_cpu_750cx,
- .machine_check = machine_check_generic,
- .platform = "ppc750",
- },
- { /* 750CXe (82214) */
- .pvr_mask = 0xfffffff0,
- .pvr_value = 0x00082210,
- .cpu_name = "750CXe",
- .cpu_features = CPU_FTRS_750,
- .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
- .mmu_features = MMU_FTR_HPTE_TABLE,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .num_pmcs = 4,
- .pmc_type = PPC_PMC_IBM,
- .cpu_setup = __setup_cpu_750cx,
- .machine_check = machine_check_generic,
- .platform = "ppc750",
- },
- { /* 750CXe "Gekko" (83214) */
- .pvr_mask = 0xffffffff,
- .pvr_value = 0x00083214,
- .cpu_name = "750CXe",
- .cpu_features = CPU_FTRS_750,
- .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
- .mmu_features = MMU_FTR_HPTE_TABLE,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .num_pmcs = 4,
- .pmc_type = PPC_PMC_IBM,
- .cpu_setup = __setup_cpu_750cx,
- .machine_check = machine_check_generic,
- .platform = "ppc750",
- },
- { /* 750CL (and "Broadway") */
- .pvr_mask = 0xfffff0e0,
- .pvr_value = 0x00087000,
- .cpu_name = "750CL",
- .cpu_features = CPU_FTRS_750CL,
- .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
- .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .num_pmcs = 4,
- .pmc_type = PPC_PMC_IBM,
- .cpu_setup = __setup_cpu_750,
- .machine_check = machine_check_generic,
- .platform = "ppc750",
- .oprofile_cpu_type = "ppc/750",
- .oprofile_type = PPC_OPROFILE_G4,
- },
- { /* 745/755 */
- .pvr_mask = 0xfffff000,
- .pvr_value = 0x00083000,
- .cpu_name = "745/755",
- .cpu_features = CPU_FTRS_750,
- .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
- .mmu_features = MMU_FTR_HPTE_TABLE,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .num_pmcs = 4,
- .pmc_type = PPC_PMC_IBM,
- .cpu_setup = __setup_cpu_750,
- .machine_check = machine_check_generic,
- .platform = "ppc750",
- },
- { /* 750FX rev 1.x */
- .pvr_mask = 0xffffff00,
- .pvr_value = 0x70000100,
- .cpu_name = "750FX",
- .cpu_features = CPU_FTRS_750FX1,
- .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
- .mmu_features = MMU_FTR_HPTE_TABLE,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .num_pmcs = 4,
- .pmc_type = PPC_PMC_IBM,
- .cpu_setup = __setup_cpu_750,
- .machine_check = machine_check_generic,
- .platform = "ppc750",
- .oprofile_cpu_type = "ppc/750",
- .oprofile_type = PPC_OPROFILE_G4,
- },
- { /* 750FX rev 2.0 must disable HID0[DPM] */
- .pvr_mask = 0xffffffff,
- .pvr_value = 0x70000200,
- .cpu_name = "750FX",
- .cpu_features = CPU_FTRS_750FX2,
- .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
- .mmu_features = MMU_FTR_HPTE_TABLE,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .num_pmcs = 4,
- .pmc_type = PPC_PMC_IBM,
- .cpu_setup = __setup_cpu_750,
- .machine_check = machine_check_generic,
- .platform = "ppc750",
- .oprofile_cpu_type = "ppc/750",
- .oprofile_type = PPC_OPROFILE_G4,
- },
- { /* 750FX (All revs except 2.0) */
- .pvr_mask = 0xffff0000,
- .pvr_value = 0x70000000,
- .cpu_name = "750FX",
- .cpu_features = CPU_FTRS_750FX,
- .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
- .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .num_pmcs = 4,
- .pmc_type = PPC_PMC_IBM,
- .cpu_setup = __setup_cpu_750fx,
- .machine_check = machine_check_generic,
- .platform = "ppc750",
- .oprofile_cpu_type = "ppc/750",
- .oprofile_type = PPC_OPROFILE_G4,
- },
- { /* 750GX */
- .pvr_mask = 0xffff0000,
- .pvr_value = 0x70020000,
- .cpu_name = "750GX",
- .cpu_features = CPU_FTRS_750GX,
- .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
- .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .num_pmcs = 4,
- .pmc_type = PPC_PMC_IBM,
- .cpu_setup = __setup_cpu_750fx,
- .machine_check = machine_check_generic,
- .platform = "ppc750",
- .oprofile_cpu_type = "ppc/750",
- .oprofile_type = PPC_OPROFILE_G4,
- },
- { /* 740/750 (L2CR bit need fixup for 740) */
- .pvr_mask = 0xffff0000,
- .pvr_value = 0x00080000,
- .cpu_name = "740/750",
- .cpu_features = CPU_FTRS_740,
- .cpu_user_features = COMMON_USER | PPC_FEATURE_PPC_LE,
- .mmu_features = MMU_FTR_HPTE_TABLE,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .num_pmcs = 4,
- .pmc_type = PPC_PMC_IBM,
- .cpu_setup = __setup_cpu_750,
- .machine_check = machine_check_generic,
- .platform = "ppc750",
- },
- { /* 7400 rev 1.1 ? (no TAU) */
- .pvr_mask = 0xffffffff,
- .pvr_value = 0x000c1101,
- .cpu_name = "7400 (1.1)",
- .cpu_features = CPU_FTRS_7400_NOTAU,
- .cpu_user_features = COMMON_USER |
- PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
- .mmu_features = MMU_FTR_HPTE_TABLE,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .num_pmcs = 4,
- .pmc_type = PPC_PMC_G4,
- .cpu_setup = __setup_cpu_7400,
- .machine_check = machine_check_generic,
- .platform = "ppc7400",
- },
- { /* 7400 */
- .pvr_mask = 0xffff0000,
- .pvr_value = 0x000c0000,
- .cpu_name = "7400",
- .cpu_features = CPU_FTRS_7400,
- .cpu_user_features = COMMON_USER |
- PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
- .mmu_features = MMU_FTR_HPTE_TABLE,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .num_pmcs = 4,
- .pmc_type = PPC_PMC_G4,
- .cpu_setup = __setup_cpu_7400,
- .machine_check = machine_check_generic,
- .platform = "ppc7400",
- },
- { /* 7410 */
- .pvr_mask = 0xffff0000,
- .pvr_value = 0x800c0000,
- .cpu_name = "7410",
- .cpu_features = CPU_FTRS_7400,
- .cpu_user_features = COMMON_USER |
- PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
- .mmu_features = MMU_FTR_HPTE_TABLE,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .num_pmcs = 4,
- .pmc_type = PPC_PMC_G4,
- .cpu_setup = __setup_cpu_7410,
- .machine_check = machine_check_generic,
- .platform = "ppc7400",
- },
- { /* 7450 2.0 - no doze/nap */
- .pvr_mask = 0xffffffff,
- .pvr_value = 0x80000200,
- .cpu_name = "7450",
- .cpu_features = CPU_FTRS_7450_20,
- .cpu_user_features = COMMON_USER |
- PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
- .mmu_features = MMU_FTR_HPTE_TABLE,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .num_pmcs = 6,
- .pmc_type = PPC_PMC_G4,
- .cpu_setup = __setup_cpu_745x,
- .oprofile_cpu_type = "ppc/7450",
- .oprofile_type = PPC_OPROFILE_G4,
- .machine_check = machine_check_generic,
- .platform = "ppc7450",
- },
- { /* 7450 2.1 */
- .pvr_mask = 0xffffffff,
- .pvr_value = 0x80000201,
- .cpu_name = "7450",
- .cpu_features = CPU_FTRS_7450_21,
- .cpu_user_features = COMMON_USER |
- PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
- .mmu_features = MMU_FTR_HPTE_TABLE,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .num_pmcs = 6,
- .pmc_type = PPC_PMC_G4,
- .cpu_setup = __setup_cpu_745x,
- .oprofile_cpu_type = "ppc/7450",
- .oprofile_type = PPC_OPROFILE_G4,
- .machine_check = machine_check_generic,
- .platform = "ppc7450",
- },
- { /* 7450 2.3 and newer */
- .pvr_mask = 0xffff0000,
- .pvr_value = 0x80000000,
- .cpu_name = "7450",
- .cpu_features = CPU_FTRS_7450_23,
- .cpu_user_features = COMMON_USER |
- PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
- .mmu_features = MMU_FTR_HPTE_TABLE,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .num_pmcs = 6,
- .pmc_type = PPC_PMC_G4,
- .cpu_setup = __setup_cpu_745x,
- .oprofile_cpu_type = "ppc/7450",
- .oprofile_type = PPC_OPROFILE_G4,
- .machine_check = machine_check_generic,
- .platform = "ppc7450",
- },
- { /* 7455 rev 1.x */
- .pvr_mask = 0xffffff00,
- .pvr_value = 0x80010100,
- .cpu_name = "7455",
- .cpu_features = CPU_FTRS_7455_1,
- .cpu_user_features = COMMON_USER |
- PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
- .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .num_pmcs = 6,
- .pmc_type = PPC_PMC_G4,
- .cpu_setup = __setup_cpu_745x,
- .oprofile_cpu_type = "ppc/7450",
- .oprofile_type = PPC_OPROFILE_G4,
- .machine_check = machine_check_generic,
- .platform = "ppc7450",
- },
- { /* 7455 rev 2.0 */
- .pvr_mask = 0xffffffff,
- .pvr_value = 0x80010200,
- .cpu_name = "7455",
- .cpu_features = CPU_FTRS_7455_20,
- .cpu_user_features = COMMON_USER |
- PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
- .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .num_pmcs = 6,
- .pmc_type = PPC_PMC_G4,
- .cpu_setup = __setup_cpu_745x,
- .oprofile_cpu_type = "ppc/7450",
- .oprofile_type = PPC_OPROFILE_G4,
- .machine_check = machine_check_generic,
- .platform = "ppc7450",
- },
- { /* 7455 others */
- .pvr_mask = 0xffff0000,
- .pvr_value = 0x80010000,
- .cpu_name = "7455",
- .cpu_features = CPU_FTRS_7455,
- .cpu_user_features = COMMON_USER |
- PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
- .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .num_pmcs = 6,
- .pmc_type = PPC_PMC_G4,
- .cpu_setup = __setup_cpu_745x,
- .oprofile_cpu_type = "ppc/7450",
- .oprofile_type = PPC_OPROFILE_G4,
- .machine_check = machine_check_generic,
- .platform = "ppc7450",
- },
- { /* 7447/7457 Rev 1.0 */
- .pvr_mask = 0xffffffff,
- .pvr_value = 0x80020100,
- .cpu_name = "7447/7457",
- .cpu_features = CPU_FTRS_7447_10,
- .cpu_user_features = COMMON_USER |
- PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
- .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .num_pmcs = 6,
- .pmc_type = PPC_PMC_G4,
- .cpu_setup = __setup_cpu_745x,
- .oprofile_cpu_type = "ppc/7450",
- .oprofile_type = PPC_OPROFILE_G4,
- .machine_check = machine_check_generic,
- .platform = "ppc7450",
- },
- { /* 7447/7457 Rev 1.1 */
- .pvr_mask = 0xffffffff,
- .pvr_value = 0x80020101,
- .cpu_name = "7447/7457",
- .cpu_features = CPU_FTRS_7447_10,
- .cpu_user_features = COMMON_USER |
- PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
- .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .num_pmcs = 6,
- .pmc_type = PPC_PMC_G4,
- .cpu_setup = __setup_cpu_745x,
- .oprofile_cpu_type = "ppc/7450",
- .oprofile_type = PPC_OPROFILE_G4,
- .machine_check = machine_check_generic,
- .platform = "ppc7450",
- },
- { /* 7447/7457 Rev 1.2 and later */
- .pvr_mask = 0xffff0000,
- .pvr_value = 0x80020000,
- .cpu_name = "7447/7457",
- .cpu_features = CPU_FTRS_7447,
- .cpu_user_features = COMMON_USER | PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
- .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .num_pmcs = 6,
- .pmc_type = PPC_PMC_G4,
- .cpu_setup = __setup_cpu_745x,
- .oprofile_cpu_type = "ppc/7450",
- .oprofile_type = PPC_OPROFILE_G4,
- .machine_check = machine_check_generic,
- .platform = "ppc7450",
- },
- { /* 7447A */
- .pvr_mask = 0xffff0000,
- .pvr_value = 0x80030000,
- .cpu_name = "7447A",
- .cpu_features = CPU_FTRS_7447A,
- .cpu_user_features = COMMON_USER |
- PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
- .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .num_pmcs = 6,
- .pmc_type = PPC_PMC_G4,
- .cpu_setup = __setup_cpu_745x,
- .oprofile_cpu_type = "ppc/7450",
- .oprofile_type = PPC_OPROFILE_G4,
- .machine_check = machine_check_generic,
- .platform = "ppc7450",
- },
- { /* 7448 */
- .pvr_mask = 0xffff0000,
- .pvr_value = 0x80040000,
- .cpu_name = "7448",
- .cpu_features = CPU_FTRS_7448,
- .cpu_user_features = COMMON_USER |
- PPC_FEATURE_HAS_ALTIVEC_COMP | PPC_FEATURE_PPC_LE,
- .mmu_features = MMU_FTR_HPTE_TABLE | MMU_FTR_USE_HIGH_BATS,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .num_pmcs = 6,
- .pmc_type = PPC_PMC_G4,
- .cpu_setup = __setup_cpu_745x,
- .oprofile_cpu_type = "ppc/7450",
- .oprofile_type = PPC_OPROFILE_G4,
- .machine_check = machine_check_generic,
- .platform = "ppc7450",
- },
- { /* 82xx (8240, 8245, 8260 are all 603e cores) */
- .pvr_mask = 0x7fff0000,
- .pvr_value = 0x00810000,
- .cpu_name = "82xx",
- .cpu_features = CPU_FTRS_82XX,
- .cpu_user_features = COMMON_USER,
- .mmu_features = 0,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .cpu_setup = __setup_cpu_603,
- .machine_check = machine_check_generic,
- .platform = "ppc603",
- },
- { /* All G2_LE (603e core, plus some) have the same pvr */
- .pvr_mask = 0x7fff0000,
- .pvr_value = 0x00820000,
- .cpu_name = "G2_LE",
- .cpu_features = CPU_FTRS_G2_LE,
- .cpu_user_features = COMMON_USER,
- .mmu_features = MMU_FTR_USE_HIGH_BATS,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .cpu_setup = __setup_cpu_603,
- .machine_check = machine_check_generic,
- .platform = "ppc603",
- },
-#ifdef CONFIG_PPC_83xx
- { /* e300c1 (a 603e core, plus some) on 83xx */
- .pvr_mask = 0x7fff0000,
- .pvr_value = 0x00830000,
- .cpu_name = "e300c1",
- .cpu_features = CPU_FTRS_E300,
- .cpu_user_features = COMMON_USER,
- .mmu_features = MMU_FTR_USE_HIGH_BATS,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .cpu_setup = __setup_cpu_603,
- .machine_check = machine_check_83xx,
- .platform = "ppc603",
- },
- { /* e300c2 (an e300c1 core, plus some, minus FPU) on 83xx */
- .pvr_mask = 0x7fff0000,
- .pvr_value = 0x00840000,
- .cpu_name = "e300c2",
- .cpu_features = CPU_FTRS_E300C2,
- .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
- .mmu_features = MMU_FTR_USE_HIGH_BATS |
- MMU_FTR_NEED_DTLB_SW_LRU,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .cpu_setup = __setup_cpu_603,
- .machine_check = machine_check_83xx,
- .platform = "ppc603",
- },
- { /* e300c3 (e300c1, plus one IU, half cache size) on 83xx */
- .pvr_mask = 0x7fff0000,
- .pvr_value = 0x00850000,
- .cpu_name = "e300c3",
- .cpu_features = CPU_FTRS_E300,
- .cpu_user_features = COMMON_USER,
- .mmu_features = MMU_FTR_USE_HIGH_BATS |
- MMU_FTR_NEED_DTLB_SW_LRU,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .cpu_setup = __setup_cpu_603,
- .machine_check = machine_check_83xx,
- .num_pmcs = 4,
- .oprofile_cpu_type = "ppc/e300",
- .oprofile_type = PPC_OPROFILE_FSL_EMB,
- .platform = "ppc603",
- },
- { /* e300c4 (e300c1, plus one IU) */
- .pvr_mask = 0x7fff0000,
- .pvr_value = 0x00860000,
- .cpu_name = "e300c4",
- .cpu_features = CPU_FTRS_E300,
- .cpu_user_features = COMMON_USER,
- .mmu_features = MMU_FTR_USE_HIGH_BATS |
- MMU_FTR_NEED_DTLB_SW_LRU,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .cpu_setup = __setup_cpu_603,
- .machine_check = machine_check_83xx,
- .num_pmcs = 4,
- .oprofile_cpu_type = "ppc/e300",
- .oprofile_type = PPC_OPROFILE_FSL_EMB,
- .platform = "ppc603",
- },
-#endif
- { /* default match, we assume split I/D cache & TB (non-601)... */
- .pvr_mask = 0x00000000,
- .pvr_value = 0x00000000,
- .cpu_name = "(generic PPC)",
- .cpu_features = CPU_FTRS_CLASSIC32,
- .cpu_user_features = COMMON_USER,
- .mmu_features = MMU_FTR_HPTE_TABLE,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .machine_check = machine_check_generic,
- .platform = "ppc603",
- },
-#endif /* CONFIG_PPC_BOOK3S_6xx */
-#ifdef CONFIG_PPC_8xx
- { /* 8xx */
- .pvr_mask = 0xffff0000,
- .pvr_value = PVR_8xx,
- .cpu_name = "8xx",
- /* CPU_FTR_MAYBE_CAN_DOZE is possible,
- * if the 8xx code is there.... */
- .cpu_features = CPU_FTRS_8XX,
- .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
- .mmu_features = MMU_FTR_TYPE_8xx,
- .icache_bsize = 16,
- .dcache_bsize = 16,
- .machine_check = machine_check_8xx,
- .platform = "ppc823",
- },
-#endif /* CONFIG_PPC_8xx */
-#ifdef CONFIG_40x
- { /* 403GC */
- .pvr_mask = 0xffffff00,
- .pvr_value = 0x00200200,
- .cpu_name = "403GC",
- .cpu_features = CPU_FTRS_40X,
- .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
- .mmu_features = MMU_FTR_TYPE_40x,
- .icache_bsize = 16,
- .dcache_bsize = 16,
- .machine_check = machine_check_4xx,
- .platform = "ppc403",
- },
- { /* 403GCX */
- .pvr_mask = 0xffffff00,
- .pvr_value = 0x00201400,
- .cpu_name = "403GCX",
- .cpu_features = CPU_FTRS_40X,
- .cpu_user_features = PPC_FEATURE_32 |
- PPC_FEATURE_HAS_MMU | PPC_FEATURE_NO_TB,
- .mmu_features = MMU_FTR_TYPE_40x,
- .icache_bsize = 16,
- .dcache_bsize = 16,
- .machine_check = machine_check_4xx,
- .platform = "ppc403",
- },
- { /* 403G ?? */
- .pvr_mask = 0xffff0000,
- .pvr_value = 0x00200000,
- .cpu_name = "403G ??",
- .cpu_features = CPU_FTRS_40X,
- .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
- .mmu_features = MMU_FTR_TYPE_40x,
- .icache_bsize = 16,
- .dcache_bsize = 16,
- .machine_check = machine_check_4xx,
- .platform = "ppc403",
- },
- { /* 405GP */
- .pvr_mask = 0xffff0000,
- .pvr_value = 0x40110000,
- .cpu_name = "405GP",
- .cpu_features = CPU_FTRS_40X,
- .cpu_user_features = PPC_FEATURE_32 |
- PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
- .mmu_features = MMU_FTR_TYPE_40x,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .machine_check = machine_check_4xx,
- .platform = "ppc405",
- },
- { /* STB 03xxx */
- .pvr_mask = 0xffff0000,
- .pvr_value = 0x40130000,
- .cpu_name = "STB03xxx",
- .cpu_features = CPU_FTRS_40X,
- .cpu_user_features = PPC_FEATURE_32 |
- PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
- .mmu_features = MMU_FTR_TYPE_40x,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .machine_check = machine_check_4xx,
- .platform = "ppc405",
- },
- { /* STB 04xxx */
- .pvr_mask = 0xffff0000,
- .pvr_value = 0x41810000,
- .cpu_name = "STB04xxx",
- .cpu_features = CPU_FTRS_40X,
- .cpu_user_features = PPC_FEATURE_32 |
- PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
- .mmu_features = MMU_FTR_TYPE_40x,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .machine_check = machine_check_4xx,
- .platform = "ppc405",
- },
- { /* NP405L */
- .pvr_mask = 0xffff0000,
- .pvr_value = 0x41610000,
- .cpu_name = "NP405L",
- .cpu_features = CPU_FTRS_40X,
- .cpu_user_features = PPC_FEATURE_32 |
- PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
- .mmu_features = MMU_FTR_TYPE_40x,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .machine_check = machine_check_4xx,
- .platform = "ppc405",
- },
- { /* NP4GS3 */
- .pvr_mask = 0xffff0000,
- .pvr_value = 0x40B10000,
- .cpu_name = "NP4GS3",
- .cpu_features = CPU_FTRS_40X,
- .cpu_user_features = PPC_FEATURE_32 |
- PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
- .mmu_features = MMU_FTR_TYPE_40x,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .machine_check = machine_check_4xx,
- .platform = "ppc405",
- },
- { /* NP405H */
- .pvr_mask = 0xffff0000,
- .pvr_value = 0x41410000,
- .cpu_name = "NP405H",
- .cpu_features = CPU_FTRS_40X,
- .cpu_user_features = PPC_FEATURE_32 |
- PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
- .mmu_features = MMU_FTR_TYPE_40x,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .machine_check = machine_check_4xx,
- .platform = "ppc405",
- },
- { /* 405GPr */
- .pvr_mask = 0xffff0000,
- .pvr_value = 0x50910000,
- .cpu_name = "405GPr",
- .cpu_features = CPU_FTRS_40X,
- .cpu_user_features = PPC_FEATURE_32 |
- PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
- .mmu_features = MMU_FTR_TYPE_40x,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .machine_check = machine_check_4xx,
- .platform = "ppc405",
- },
- { /* STBx25xx */
- .pvr_mask = 0xffff0000,
- .pvr_value = 0x51510000,
- .cpu_name = "STBx25xx",
- .cpu_features = CPU_FTRS_40X,
- .cpu_user_features = PPC_FEATURE_32 |
- PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
- .mmu_features = MMU_FTR_TYPE_40x,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .machine_check = machine_check_4xx,
- .platform = "ppc405",
- },
- { /* 405LP */
- .pvr_mask = 0xffff0000,
- .pvr_value = 0x41F10000,
- .cpu_name = "405LP",
- .cpu_features = CPU_FTRS_40X,
- .cpu_user_features = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
- .mmu_features = MMU_FTR_TYPE_40x,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .machine_check = machine_check_4xx,
- .platform = "ppc405",
- },
- { /* Xilinx Virtex-II Pro */
- .pvr_mask = 0xfffff000,
- .pvr_value = 0x20010000,
- .cpu_name = "Virtex-II Pro",
- .cpu_features = CPU_FTRS_40X,
- .cpu_user_features = PPC_FEATURE_32 |
- PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
- .mmu_features = MMU_FTR_TYPE_40x,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .machine_check = machine_check_4xx,
- .platform = "ppc405",
- },
- { /* Xilinx Virtex-4 FX */
- .pvr_mask = 0xfffff000,
- .pvr_value = 0x20011000,
- .cpu_name = "Virtex-4 FX",
- .cpu_features = CPU_FTRS_40X,
- .cpu_user_features = PPC_FEATURE_32 |
- PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
- .mmu_features = MMU_FTR_TYPE_40x,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .machine_check = machine_check_4xx,
- .platform = "ppc405",
- },
- { /* 405EP */
- .pvr_mask = 0xffff0000,
- .pvr_value = 0x51210000,
- .cpu_name = "405EP",
- .cpu_features = CPU_FTRS_40X,
- .cpu_user_features = PPC_FEATURE_32 |
- PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
- .mmu_features = MMU_FTR_TYPE_40x,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .machine_check = machine_check_4xx,
- .platform = "ppc405",
- },
- { /* 405EX Rev. A/B with Security */
- .pvr_mask = 0xffff000f,
- .pvr_value = 0x12910007,
- .cpu_name = "405EX Rev. A/B",
- .cpu_features = CPU_FTRS_40X,
- .cpu_user_features = PPC_FEATURE_32 |
- PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
- .mmu_features = MMU_FTR_TYPE_40x,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .machine_check = machine_check_4xx,
- .platform = "ppc405",
- },
- { /* 405EX Rev. C without Security */
- .pvr_mask = 0xffff000f,
- .pvr_value = 0x1291000d,
- .cpu_name = "405EX Rev. C",
- .cpu_features = CPU_FTRS_40X,
- .cpu_user_features = PPC_FEATURE_32 |
- PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
- .mmu_features = MMU_FTR_TYPE_40x,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .machine_check = machine_check_4xx,
- .platform = "ppc405",
- },
- { /* 405EX Rev. C with Security */
- .pvr_mask = 0xffff000f,
- .pvr_value = 0x1291000f,
- .cpu_name = "405EX Rev. C",
- .cpu_features = CPU_FTRS_40X,
- .cpu_user_features = PPC_FEATURE_32 |
- PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
- .mmu_features = MMU_FTR_TYPE_40x,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .machine_check = machine_check_4xx,
- .platform = "ppc405",
- },
- { /* 405EX Rev. D without Security */
- .pvr_mask = 0xffff000f,
- .pvr_value = 0x12910003,
- .cpu_name = "405EX Rev. D",
- .cpu_features = CPU_FTRS_40X,
- .cpu_user_features = PPC_FEATURE_32 |
- PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
- .mmu_features = MMU_FTR_TYPE_40x,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .machine_check = machine_check_4xx,
- .platform = "ppc405",
- },
- { /* 405EX Rev. D with Security */
- .pvr_mask = 0xffff000f,
- .pvr_value = 0x12910005,
- .cpu_name = "405EX Rev. D",
- .cpu_features = CPU_FTRS_40X,
- .cpu_user_features = PPC_FEATURE_32 |
- PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
- .mmu_features = MMU_FTR_TYPE_40x,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .machine_check = machine_check_4xx,
- .platform = "ppc405",
- },
- { /* 405EXr Rev. A/B without Security */
- .pvr_mask = 0xffff000f,
- .pvr_value = 0x12910001,
- .cpu_name = "405EXr Rev. A/B",
- .cpu_features = CPU_FTRS_40X,
- .cpu_user_features = PPC_FEATURE_32 |
- PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
- .mmu_features = MMU_FTR_TYPE_40x,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .machine_check = machine_check_4xx,
- .platform = "ppc405",
- },
- { /* 405EXr Rev. C without Security */
- .pvr_mask = 0xffff000f,
- .pvr_value = 0x12910009,
- .cpu_name = "405EXr Rev. C",
- .cpu_features = CPU_FTRS_40X,
- .cpu_user_features = PPC_FEATURE_32 |
- PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
- .mmu_features = MMU_FTR_TYPE_40x,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .machine_check = machine_check_4xx,
- .platform = "ppc405",
- },
- { /* 405EXr Rev. C with Security */
- .pvr_mask = 0xffff000f,
- .pvr_value = 0x1291000b,
- .cpu_name = "405EXr Rev. C",
- .cpu_features = CPU_FTRS_40X,
- .cpu_user_features = PPC_FEATURE_32 |
- PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
- .mmu_features = MMU_FTR_TYPE_40x,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .machine_check = machine_check_4xx,
- .platform = "ppc405",
- },
- { /* 405EXr Rev. D without Security */
- .pvr_mask = 0xffff000f,
- .pvr_value = 0x12910000,
- .cpu_name = "405EXr Rev. D",
- .cpu_features = CPU_FTRS_40X,
- .cpu_user_features = PPC_FEATURE_32 |
- PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
- .mmu_features = MMU_FTR_TYPE_40x,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .machine_check = machine_check_4xx,
- .platform = "ppc405",
- },
- { /* 405EXr Rev. D with Security */
- .pvr_mask = 0xffff000f,
- .pvr_value = 0x12910002,
- .cpu_name = "405EXr Rev. D",
- .cpu_features = CPU_FTRS_40X,
- .cpu_user_features = PPC_FEATURE_32 |
- PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
- .mmu_features = MMU_FTR_TYPE_40x,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .machine_check = machine_check_4xx,
- .platform = "ppc405",
- },
- {
- /* 405EZ */
- .pvr_mask = 0xffff0000,
- .pvr_value = 0x41510000,
- .cpu_name = "405EZ",
- .cpu_features = CPU_FTRS_40X,
- .cpu_user_features = PPC_FEATURE_32 |
- PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
- .mmu_features = MMU_FTR_TYPE_40x,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .machine_check = machine_check_4xx,
- .platform = "ppc405",
- },
- { /* APM8018X */
- .pvr_mask = 0xffff0000,
- .pvr_value = 0x7ff11432,
- .cpu_name = "APM8018X",
- .cpu_features = CPU_FTRS_40X,
- .cpu_user_features = PPC_FEATURE_32 |
- PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
- .mmu_features = MMU_FTR_TYPE_40x,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .machine_check = machine_check_4xx,
- .platform = "ppc405",
- },
- { /* default match */
- .pvr_mask = 0x00000000,
- .pvr_value = 0x00000000,
- .cpu_name = "(generic 40x PPC)",
- .cpu_features = CPU_FTRS_40X,
- .cpu_user_features = PPC_FEATURE_32 |
- PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
- .mmu_features = MMU_FTR_TYPE_40x,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .machine_check = machine_check_4xx,
- .platform = "ppc405",
- }
-
-#endif /* CONFIG_40x */
-#ifdef CONFIG_44x
- {
- .pvr_mask = 0xf0000fff,
- .pvr_value = 0x40000850,
- .cpu_name = "440GR Rev. A",
- .cpu_features = CPU_FTRS_44X,
- .cpu_user_features = COMMON_USER_BOOKE,
- .mmu_features = MMU_FTR_TYPE_44x,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .machine_check = machine_check_4xx,
- .platform = "ppc440",
- },
- { /* Use logical PVR for 440EP (logical pvr = pvr | 0x8) */
- .pvr_mask = 0xf0000fff,
- .pvr_value = 0x40000858,
- .cpu_name = "440EP Rev. A",
- .cpu_features = CPU_FTRS_44X,
- .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU,
- .mmu_features = MMU_FTR_TYPE_44x,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .cpu_setup = __setup_cpu_440ep,
- .machine_check = machine_check_4xx,
- .platform = "ppc440",
- },
- {
- .pvr_mask = 0xf0000fff,
- .pvr_value = 0x400008d3,
- .cpu_name = "440GR Rev. B",
- .cpu_features = CPU_FTRS_44X,
- .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU,
- .mmu_features = MMU_FTR_TYPE_44x,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .machine_check = machine_check_4xx,
- .platform = "ppc440",
- },
- { /* Matches both physical and logical PVR for 440EP (logical pvr = pvr | 0x8) */
- .pvr_mask = 0xf0000ff7,
- .pvr_value = 0x400008d4,
- .cpu_name = "440EP Rev. C",
- .cpu_features = CPU_FTRS_44X,
- .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU,
- .mmu_features = MMU_FTR_TYPE_44x,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .cpu_setup = __setup_cpu_440ep,
- .machine_check = machine_check_4xx,
- .platform = "ppc440",
- },
- { /* Use logical PVR for 440EP (logical pvr = pvr | 0x8) */
- .pvr_mask = 0xf0000fff,
- .pvr_value = 0x400008db,
- .cpu_name = "440EP Rev. B",
- .cpu_features = CPU_FTRS_44X,
- .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU,
- .mmu_features = MMU_FTR_TYPE_44x,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .cpu_setup = __setup_cpu_440ep,
- .machine_check = machine_check_4xx,
- .platform = "ppc440",
- },
- { /* 440GRX */
- .pvr_mask = 0xf0000ffb,
- .pvr_value = 0x200008D0,
- .cpu_name = "440GRX",
- .cpu_features = CPU_FTRS_44X,
- .cpu_user_features = COMMON_USER_BOOKE,
- .mmu_features = MMU_FTR_TYPE_44x,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .cpu_setup = __setup_cpu_440grx,
- .machine_check = machine_check_440A,
- .platform = "ppc440",
- },
- { /* Use logical PVR for 440EPx (logical pvr = pvr | 0x8) */
- .pvr_mask = 0xf0000ffb,
- .pvr_value = 0x200008D8,
- .cpu_name = "440EPX",
- .cpu_features = CPU_FTRS_44X,
- .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU,
- .mmu_features = MMU_FTR_TYPE_44x,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .cpu_setup = __setup_cpu_440epx,
- .machine_check = machine_check_440A,
- .platform = "ppc440",
- },
- { /* 440GP Rev. B */
- .pvr_mask = 0xf0000fff,
- .pvr_value = 0x40000440,
- .cpu_name = "440GP Rev. B",
- .cpu_features = CPU_FTRS_44X,
- .cpu_user_features = COMMON_USER_BOOKE,
- .mmu_features = MMU_FTR_TYPE_44x,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .machine_check = machine_check_4xx,
- .platform = "ppc440gp",
- },
- { /* 440GP Rev. C */
- .pvr_mask = 0xf0000fff,
- .pvr_value = 0x40000481,
- .cpu_name = "440GP Rev. C",
- .cpu_features = CPU_FTRS_44X,
- .cpu_user_features = COMMON_USER_BOOKE,
- .mmu_features = MMU_FTR_TYPE_44x,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .machine_check = machine_check_4xx,
- .platform = "ppc440gp",
- },
- { /* 440GX Rev. A */
- .pvr_mask = 0xf0000fff,
- .pvr_value = 0x50000850,
- .cpu_name = "440GX Rev. A",
- .cpu_features = CPU_FTRS_44X,
- .cpu_user_features = COMMON_USER_BOOKE,
- .mmu_features = MMU_FTR_TYPE_44x,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .cpu_setup = __setup_cpu_440gx,
- .machine_check = machine_check_440A,
- .platform = "ppc440",
- },
- { /* 440GX Rev. B */
- .pvr_mask = 0xf0000fff,
- .pvr_value = 0x50000851,
- .cpu_name = "440GX Rev. B",
- .cpu_features = CPU_FTRS_44X,
- .cpu_user_features = COMMON_USER_BOOKE,
- .mmu_features = MMU_FTR_TYPE_44x,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .cpu_setup = __setup_cpu_440gx,
- .machine_check = machine_check_440A,
- .platform = "ppc440",
- },
- { /* 440GX Rev. C */
- .pvr_mask = 0xf0000fff,
- .pvr_value = 0x50000892,
- .cpu_name = "440GX Rev. C",
- .cpu_features = CPU_FTRS_44X,
- .cpu_user_features = COMMON_USER_BOOKE,
- .mmu_features = MMU_FTR_TYPE_44x,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .cpu_setup = __setup_cpu_440gx,
- .machine_check = machine_check_440A,
- .platform = "ppc440",
- },
- { /* 440GX Rev. F */
- .pvr_mask = 0xf0000fff,
- .pvr_value = 0x50000894,
- .cpu_name = "440GX Rev. F",
- .cpu_features = CPU_FTRS_44X,
- .cpu_user_features = COMMON_USER_BOOKE,
- .mmu_features = MMU_FTR_TYPE_44x,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .cpu_setup = __setup_cpu_440gx,
- .machine_check = machine_check_440A,
- .platform = "ppc440",
- },
- { /* 440SP Rev. A */
- .pvr_mask = 0xfff00fff,
- .pvr_value = 0x53200891,
- .cpu_name = "440SP Rev. A",
- .cpu_features = CPU_FTRS_44X,
- .cpu_user_features = COMMON_USER_BOOKE,
- .mmu_features = MMU_FTR_TYPE_44x,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .machine_check = machine_check_4xx,
- .platform = "ppc440",
- },
- { /* 440SPe Rev. A */
- .pvr_mask = 0xfff00fff,
- .pvr_value = 0x53400890,
- .cpu_name = "440SPe Rev. A",
- .cpu_features = CPU_FTRS_44X,
- .cpu_user_features = COMMON_USER_BOOKE,
- .mmu_features = MMU_FTR_TYPE_44x,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .cpu_setup = __setup_cpu_440spe,
- .machine_check = machine_check_440A,
- .platform = "ppc440",
- },
- { /* 440SPe Rev. B */
- .pvr_mask = 0xfff00fff,
- .pvr_value = 0x53400891,
- .cpu_name = "440SPe Rev. B",
- .cpu_features = CPU_FTRS_44X,
- .cpu_user_features = COMMON_USER_BOOKE,
- .mmu_features = MMU_FTR_TYPE_44x,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .cpu_setup = __setup_cpu_440spe,
- .machine_check = machine_check_440A,
- .platform = "ppc440",
- },
- { /* 440 in Xilinx Virtex-5 FXT */
- .pvr_mask = 0xfffffff0,
- .pvr_value = 0x7ff21910,
- .cpu_name = "440 in Virtex-5 FXT",
- .cpu_features = CPU_FTRS_44X,
- .cpu_user_features = COMMON_USER_BOOKE,
- .mmu_features = MMU_FTR_TYPE_44x,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .cpu_setup = __setup_cpu_440x5,
- .machine_check = machine_check_440A,
- .platform = "ppc440",
- },
- { /* 460EX */
- .pvr_mask = 0xffff0006,
- .pvr_value = 0x13020002,
- .cpu_name = "460EX",
- .cpu_features = CPU_FTRS_440x6,
- .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU,
- .mmu_features = MMU_FTR_TYPE_44x,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .cpu_setup = __setup_cpu_460ex,
- .machine_check = machine_check_440A,
- .platform = "ppc440",
- },
- { /* 460EX Rev B */
- .pvr_mask = 0xffff0007,
- .pvr_value = 0x13020004,
- .cpu_name = "460EX Rev. B",
- .cpu_features = CPU_FTRS_440x6,
- .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU,
- .mmu_features = MMU_FTR_TYPE_44x,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .cpu_setup = __setup_cpu_460ex,
- .machine_check = machine_check_440A,
- .platform = "ppc440",
- },
- { /* 460GT */
- .pvr_mask = 0xffff0006,
- .pvr_value = 0x13020000,
- .cpu_name = "460GT",
- .cpu_features = CPU_FTRS_440x6,
- .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU,
- .mmu_features = MMU_FTR_TYPE_44x,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .cpu_setup = __setup_cpu_460gt,
- .machine_check = machine_check_440A,
- .platform = "ppc440",
- },
- { /* 460GT Rev B */
- .pvr_mask = 0xffff0007,
- .pvr_value = 0x13020005,
- .cpu_name = "460GT Rev. B",
- .cpu_features = CPU_FTRS_440x6,
- .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU,
- .mmu_features = MMU_FTR_TYPE_44x,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .cpu_setup = __setup_cpu_460gt,
- .machine_check = machine_check_440A,
- .platform = "ppc440",
- },
- { /* 460SX */
- .pvr_mask = 0xffffff00,
- .pvr_value = 0x13541800,
- .cpu_name = "460SX",
- .cpu_features = CPU_FTRS_44X,
- .cpu_user_features = COMMON_USER_BOOKE,
- .mmu_features = MMU_FTR_TYPE_44x,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .cpu_setup = __setup_cpu_460sx,
- .machine_check = machine_check_440A,
- .platform = "ppc440",
- },
- { /* 464 in APM821xx */
- .pvr_mask = 0xfffffff0,
- .pvr_value = 0x12C41C80,
- .cpu_name = "APM821XX",
- .cpu_features = CPU_FTRS_44X,
- .cpu_user_features = COMMON_USER_BOOKE |
- PPC_FEATURE_HAS_FPU,
- .mmu_features = MMU_FTR_TYPE_44x,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .cpu_setup = __setup_cpu_apm821xx,
- .machine_check = machine_check_440A,
- .platform = "ppc440",
- },
-#ifdef CONFIG_PPC_47x
- { /* 476 DD2 core */
- .pvr_mask = 0xffffffff,
- .pvr_value = 0x11a52080,
- .cpu_name = "476",
- .cpu_features = CPU_FTRS_47X | CPU_FTR_476_DD2,
- .cpu_user_features = COMMON_USER_BOOKE |
- PPC_FEATURE_HAS_FPU,
- .mmu_features = MMU_FTR_TYPE_47x |
- MMU_FTR_USE_TLBIVAX_BCAST | MMU_FTR_LOCK_BCAST_INVAL,
- .icache_bsize = 32,
- .dcache_bsize = 128,
- .machine_check = machine_check_47x,
- .platform = "ppc470",
- },
- { /* 476fpe */
- .pvr_mask = 0xffff0000,
- .pvr_value = 0x7ff50000,
- .cpu_name = "476fpe",
- .cpu_features = CPU_FTRS_47X | CPU_FTR_476_DD2,
- .cpu_user_features = COMMON_USER_BOOKE |
- PPC_FEATURE_HAS_FPU,
- .mmu_features = MMU_FTR_TYPE_47x |
- MMU_FTR_USE_TLBIVAX_BCAST | MMU_FTR_LOCK_BCAST_INVAL,
- .icache_bsize = 32,
- .dcache_bsize = 128,
- .machine_check = machine_check_47x,
- .platform = "ppc470",
- },
- { /* 476 iss */
- .pvr_mask = 0xffff0000,
- .pvr_value = 0x00050000,
- .cpu_name = "476",
- .cpu_features = CPU_FTRS_47X,
- .cpu_user_features = COMMON_USER_BOOKE |
- PPC_FEATURE_HAS_FPU,
- .mmu_features = MMU_FTR_TYPE_47x |
- MMU_FTR_USE_TLBIVAX_BCAST | MMU_FTR_LOCK_BCAST_INVAL,
- .icache_bsize = 32,
- .dcache_bsize = 128,
- .machine_check = machine_check_47x,
- .platform = "ppc470",
- },
- { /* 476 others */
- .pvr_mask = 0xffff0000,
- .pvr_value = 0x11a50000,
- .cpu_name = "476",
- .cpu_features = CPU_FTRS_47X,
- .cpu_user_features = COMMON_USER_BOOKE |
- PPC_FEATURE_HAS_FPU,
- .mmu_features = MMU_FTR_TYPE_47x |
- MMU_FTR_USE_TLBIVAX_BCAST | MMU_FTR_LOCK_BCAST_INVAL,
- .icache_bsize = 32,
- .dcache_bsize = 128,
- .machine_check = machine_check_47x,
- .platform = "ppc470",
- },
-#endif /* CONFIG_PPC_47x */
- { /* default match */
- .pvr_mask = 0x00000000,
- .pvr_value = 0x00000000,
- .cpu_name = "(generic 44x PPC)",
- .cpu_features = CPU_FTRS_44X,
- .cpu_user_features = COMMON_USER_BOOKE,
- .mmu_features = MMU_FTR_TYPE_44x,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .machine_check = machine_check_4xx,
- .platform = "ppc440",
- }
-#endif /* CONFIG_44x */
-#ifdef CONFIG_E200
- { /* e200z5 */
- .pvr_mask = 0xfff00000,
- .pvr_value = 0x81000000,
- .cpu_name = "e200z5",
- /* xxx - galak: add CPU_FTR_MAYBE_CAN_DOZE */
- .cpu_features = CPU_FTRS_E200,
- .cpu_user_features = COMMON_USER_BOOKE |
- PPC_FEATURE_HAS_EFP_SINGLE |
- PPC_FEATURE_UNIFIED_CACHE,
- .mmu_features = MMU_FTR_TYPE_FSL_E,
- .dcache_bsize = 32,
- .machine_check = machine_check_e200,
- .platform = "ppc5554",
- },
- { /* e200z6 */
- .pvr_mask = 0xfff00000,
- .pvr_value = 0x81100000,
- .cpu_name = "e200z6",
- /* xxx - galak: add CPU_FTR_MAYBE_CAN_DOZE */
- .cpu_features = CPU_FTRS_E200,
- .cpu_user_features = COMMON_USER_BOOKE |
- PPC_FEATURE_HAS_SPE_COMP |
- PPC_FEATURE_HAS_EFP_SINGLE_COMP |
- PPC_FEATURE_UNIFIED_CACHE,
- .mmu_features = MMU_FTR_TYPE_FSL_E,
- .dcache_bsize = 32,
- .machine_check = machine_check_e200,
- .platform = "ppc5554",
- },
- { /* default match */
- .pvr_mask = 0x00000000,
- .pvr_value = 0x00000000,
- .cpu_name = "(generic E200 PPC)",
- .cpu_features = CPU_FTRS_E200,
- .cpu_user_features = COMMON_USER_BOOKE |
- PPC_FEATURE_HAS_EFP_SINGLE |
- PPC_FEATURE_UNIFIED_CACHE,
- .mmu_features = MMU_FTR_TYPE_FSL_E,
- .dcache_bsize = 32,
- .cpu_setup = __setup_cpu_e200,
- .machine_check = machine_check_e200,
- .platform = "ppc5554",
- }
-#endif /* CONFIG_E200 */
-#endif /* CONFIG_PPC32 */
-#ifdef CONFIG_E500
-#ifdef CONFIG_PPC32
-#ifndef CONFIG_PPC_E500MC
- { /* e500 */
- .pvr_mask = 0xffff0000,
- .pvr_value = 0x80200000,
- .cpu_name = "e500",
- .cpu_features = CPU_FTRS_E500,
- .cpu_user_features = COMMON_USER_BOOKE |
- PPC_FEATURE_HAS_SPE_COMP |
- PPC_FEATURE_HAS_EFP_SINGLE_COMP,
- .cpu_user_features2 = PPC_FEATURE2_ISEL,
- .mmu_features = MMU_FTR_TYPE_FSL_E,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .num_pmcs = 4,
- .oprofile_cpu_type = "ppc/e500",
- .oprofile_type = PPC_OPROFILE_FSL_EMB,
- .cpu_setup = __setup_cpu_e500v1,
- .machine_check = machine_check_e500,
- .platform = "ppc8540",
- },
- { /* e500v2 */
- .pvr_mask = 0xffff0000,
- .pvr_value = 0x80210000,
- .cpu_name = "e500v2",
- .cpu_features = CPU_FTRS_E500_2,
- .cpu_user_features = COMMON_USER_BOOKE |
- PPC_FEATURE_HAS_SPE_COMP |
- PPC_FEATURE_HAS_EFP_SINGLE_COMP |
- PPC_FEATURE_HAS_EFP_DOUBLE_COMP,
- .cpu_user_features2 = PPC_FEATURE2_ISEL,
- .mmu_features = MMU_FTR_TYPE_FSL_E | MMU_FTR_BIG_PHYS,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .num_pmcs = 4,
- .oprofile_cpu_type = "ppc/e500",
- .oprofile_type = PPC_OPROFILE_FSL_EMB,
- .cpu_setup = __setup_cpu_e500v2,
- .machine_check = machine_check_e500,
- .platform = "ppc8548",
- .cpu_down_flush = cpu_down_flush_e500v2,
- },
-#else
- { /* e500mc */
- .pvr_mask = 0xffff0000,
- .pvr_value = 0x80230000,
- .cpu_name = "e500mc",
- .cpu_features = CPU_FTRS_E500MC,
- .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU,
- .cpu_user_features2 = PPC_FEATURE2_ISEL,
- .mmu_features = MMU_FTR_TYPE_FSL_E | MMU_FTR_BIG_PHYS |
- MMU_FTR_USE_TLBILX,
- .icache_bsize = 64,
- .dcache_bsize = 64,
- .num_pmcs = 4,
- .oprofile_cpu_type = "ppc/e500mc",
- .oprofile_type = PPC_OPROFILE_FSL_EMB,
- .cpu_setup = __setup_cpu_e500mc,
- .machine_check = machine_check_e500mc,
- .platform = "ppce500mc",
- .cpu_down_flush = cpu_down_flush_e500mc,
- },
-#endif /* CONFIG_PPC_E500MC */
-#endif /* CONFIG_PPC32 */
-#ifdef CONFIG_PPC_E500MC
- { /* e5500 */
- .pvr_mask = 0xffff0000,
- .pvr_value = 0x80240000,
- .cpu_name = "e5500",
- .cpu_features = CPU_FTRS_E5500,
- .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU,
- .cpu_user_features2 = PPC_FEATURE2_ISEL,
- .mmu_features = MMU_FTR_TYPE_FSL_E | MMU_FTR_BIG_PHYS |
- MMU_FTR_USE_TLBILX,
- .icache_bsize = 64,
- .dcache_bsize = 64,
- .num_pmcs = 4,
- .oprofile_cpu_type = "ppc/e500mc",
- .oprofile_type = PPC_OPROFILE_FSL_EMB,
- .cpu_setup = __setup_cpu_e5500,
-#ifndef CONFIG_PPC32
- .cpu_restore = __restore_cpu_e5500,
-#endif
- .machine_check = machine_check_e500mc,
- .platform = "ppce5500",
- .cpu_down_flush = cpu_down_flush_e5500,
- },
- { /* e6500 */
- .pvr_mask = 0xffff0000,
- .pvr_value = 0x80400000,
- .cpu_name = "e6500",
- .cpu_features = CPU_FTRS_E6500,
- .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU |
- PPC_FEATURE_HAS_ALTIVEC_COMP,
- .cpu_user_features2 = PPC_FEATURE2_ISEL,
- .mmu_features = MMU_FTR_TYPE_FSL_E | MMU_FTR_BIG_PHYS |
- MMU_FTR_USE_TLBILX,
- .icache_bsize = 64,
- .dcache_bsize = 64,
- .num_pmcs = 6,
- .oprofile_cpu_type = "ppc/e6500",
- .oprofile_type = PPC_OPROFILE_FSL_EMB,
- .cpu_setup = __setup_cpu_e6500,
-#ifndef CONFIG_PPC32
- .cpu_restore = __restore_cpu_e6500,
-#endif
- .machine_check = machine_check_e500mc,
- .platform = "ppce6500",
- .cpu_down_flush = cpu_down_flush_e6500,
- },
-#endif /* CONFIG_PPC_E500MC */
-#ifdef CONFIG_PPC32
- { /* default match */
- .pvr_mask = 0x00000000,
- .pvr_value = 0x00000000,
- .cpu_name = "(generic E500 PPC)",
- .cpu_features = CPU_FTRS_E500,
- .cpu_user_features = COMMON_USER_BOOKE |
- PPC_FEATURE_HAS_SPE_COMP |
- PPC_FEATURE_HAS_EFP_SINGLE_COMP,
- .mmu_features = MMU_FTR_TYPE_FSL_E,
- .icache_bsize = 32,
- .dcache_bsize = 32,
- .machine_check = machine_check_e500,
- .platform = "powerpc",
- }
-#endif /* CONFIG_PPC32 */
-#endif /* CONFIG_E500 */
-};
+#include "cpu_specs.h"
void __init set_cur_cpu_spec(struct cpu_spec *s)
{
@@ -2177,30 +67,12 @@ static struct cpu_spec * __init setup_cpu_spec(unsigned long offset,
if (old.num_pmcs && !s->num_pmcs) {
t->num_pmcs = old.num_pmcs;
t->pmc_type = old.pmc_type;
- t->oprofile_type = old.oprofile_type;
- t->oprofile_mmcra_sihv = old.oprofile_mmcra_sihv;
- t->oprofile_mmcra_sipr = old.oprofile_mmcra_sipr;
- t->oprofile_mmcra_clear = old.oprofile_mmcra_clear;
/*
- * If we have passed through this logic once before and
- * have pulled the default case because the real PVR was
- * not found inside cpu_specs[], then we are possibly
- * running in compatibility mode. In that case, let the
- * oprofiler know which set of compatibility counters to
- * pull from by making sure the oprofile_cpu_type string
- * is set to that of compatibility mode. If the
- * oprofile_cpu_type already has a value, then we are
- * possibly overriding a real PVR with a logical one,
- * and, in that case, keep the current value for
- * oprofile_cpu_type. Futhermore, let's ensure that the
+ * Let's ensure that the
* fix for the PMAO bug is enabled on compatibility mode.
*/
- if (old.oprofile_cpu_type != NULL) {
- t->oprofile_cpu_type = old.oprofile_cpu_type;
- t->oprofile_type = old.oprofile_type;
- t->cpu_features |= old.cpu_features & CPU_FTR_PMAO_BUG;
- }
+ t->cpu_features |= old.cpu_features & CPU_FTR_PMAO_BUG;
}
*PTRRELOC(&cur_cpu_spec) = &the_cpu_spec;
@@ -2232,6 +104,8 @@ struct cpu_spec * __init identify_cpu(unsigned long offset, unsigned int pvr)
struct cpu_spec *s = cpu_specs;
int i;
+ BUILD_BUG_ON(!ARRAY_SIZE(cpu_specs));
+
s = PTRRELOC(s);
for (i = 0; i < ARRAY_SIZE(cpu_specs); i++,s++) {
@@ -2288,7 +162,7 @@ void __init cpu_feature_keys_init(void)
struct static_key_true mmu_feature_keys[NUM_MMU_FTR_KEYS] = {
[0 ... NUM_MMU_FTR_KEYS - 1] = STATIC_KEY_TRUE_INIT
};
-EXPORT_SYMBOL_GPL(mmu_feature_keys);
+EXPORT_SYMBOL(mmu_feature_keys);
void __init mmu_feature_keys_init(void)
{
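
The cpu_specs[] table deleted above now comes from cpu_specs.h, but the lookup shown in the identify_cpu() hunk is unchanged: the PVR read from hardware is masked and compared against each entry in order, with the all-zero-mask entry acting as the default match. A minimal, self-contained sketch of that matching scheme — struct cpu_spec_sketch, identify() and the sample entries here are invented for illustration and are not the kernel's definitions:

/* Illustrative sketch of PVR-based CPU matching, not the kernel's cpu_spec. */
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

struct cpu_spec_sketch {
	uint32_t pvr_mask;	/* bits of the PVR that are significant */
	uint32_t pvr_value;	/* expected value of those bits */
	const char *cpu_name;
};

static const struct cpu_spec_sketch specs[] = {
	{ 0xffff0000, 0x40110000, "405GP" },
	{ 0xfff00fff, 0x53400890, "440SPe Rev. A" },
	{ 0x00000000, 0x00000000, "(generic PPC)" },	/* default match goes last */
};

static const struct cpu_spec_sketch *identify(uint32_t pvr)
{
	size_t i;

	for (i = 0; i < sizeof(specs) / sizeof(specs[0]); i++)
		if ((pvr & specs[i].pvr_mask) == specs[i].pvr_value)
			return &specs[i];
	return NULL;	/* unreachable here: the all-zero mask matches anything */
}

int main(void)
{
	const struct cpu_spec_sketch *s = identify(0x40110145);	/* hypothetical 405GP PVR */

	printf("matched: %s\n", s ? s->cpu_name : "none");
	return 0;
}
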
diff --git a/arch/powerpc/kernel/crash_dump.c b/arch/powerpc/kernel/crash_dump.c
index 05745ddbd229..9a3b85bfc83f 100644
--- a/arch/powerpc/kernel/crash_dump.c
+++ b/arch/powerpc/kernel/crash_dump.c
@@ -12,12 +12,13 @@
#include <linux/crash_dump.h>
#include <linux/io.h>
#include <linux/memblock.h>
+#include <linux/of.h>
#include <asm/code-patching.h>
#include <asm/kdump.h>
-#include <asm/prom.h>
#include <asm/firmware.h>
-#include <linux/uaccess.h>
+#include <linux/uio.h>
#include <asm/rtas.h>
+#include <asm/inst.h>
#ifdef DEBUG
#include <asm/udbg.h>
@@ -34,7 +35,7 @@ void __init reserve_kdump_trampoline(void)
static void __init create_trampoline(unsigned long addr)
{
- unsigned int *p = (unsigned int *)addr;
+ u32 *p = (u32 *)addr;
/* The maximum range of a single instruction branch, is the current
* instruction's address + (32 MB - 4) bytes. For the trampoline we
@@ -44,8 +45,8 @@ static void __init create_trampoline(unsigned long addr)
* branch to "addr" we jump to ("addr" + 32 MB). Although it requires
* two instructions it doesn't require any registers.
*/
- patch_instruction(p, PPC_INST_NOP);
- patch_branch(++p, addr + PHYSICAL_START, 0);
+ patch_instruction(p, ppc_inst(PPC_RAW_NOP()));
+ patch_branch(p + 1, addr + PHYSICAL_START, 0);
}
void __init setup_kdump_trampoline(void)
@@ -67,33 +68,8 @@ void __init setup_kdump_trampoline(void)
}
#endif /* CONFIG_NONSTATIC_KERNEL */
-static size_t copy_oldmem_vaddr(void *vaddr, char *buf, size_t csize,
- unsigned long offset, int userbuf)
-{
- if (userbuf) {
- if (copy_to_user((char __user *)buf, (vaddr + offset), csize))
- return -EFAULT;
- } else
- memcpy(buf, (vaddr + offset), csize);
-
- return csize;
-}
-
-/**
- * copy_oldmem_page - copy one page from "oldmem"
- * @pfn: page frame number to be copied
- * @buf: target memory address for the copy; this can be in kernel address
- * space or user address space (see @userbuf)
- * @csize: number of bytes to copy
- * @offset: offset in bytes into the page (based on pfn) to begin the copy
- * @userbuf: if set, @buf is in user address space, use copy_to_user(),
- * otherwise @buf is in kernel address space, use memcpy().
- *
- * Copy a page from "oldmem". For this page, there is no pte mapped
- * in the current kernel. We stitch up a pte, similar to kmap_atomic.
- */
-ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
- size_t csize, unsigned long offset, int userbuf)
+ssize_t copy_oldmem_page(struct iov_iter *iter, unsigned long pfn,
+ size_t csize, unsigned long offset)
{
void *vaddr;
phys_addr_t paddr;
@@ -106,10 +82,10 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
if (memblock_is_region_memory(paddr, csize)) {
vaddr = __va(paddr);
- csize = copy_oldmem_vaddr(vaddr, buf, csize, offset, userbuf);
+ csize = copy_to_iter(vaddr + offset, csize, iter);
} else {
vaddr = ioremap_cache(paddr, PAGE_SIZE);
- csize = copy_oldmem_vaddr(vaddr, buf, csize, offset, userbuf);
+ csize = copy_to_iter(vaddr + offset, csize, iter);
iounmap(vaddr);
}
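
The crash_dump.c hunks above switch copy_oldmem_page() from a raw buffer plus userbuf flag to an iov_iter, so a single copy_to_iter() path serves both kernel and user destinations. A rough sketch of how a caller might drive the new signature over a kernel buffer; read_oldmem_chunk, the kvec setup and the READ direction constant are assumptions made here for illustration, not taken from this patch:

/* Hypothetical caller of the iov_iter-based copy_oldmem_page(). */
#include <linux/uio.h>
#include <linux/crash_dump.h>

static ssize_t read_oldmem_chunk(unsigned long pfn, void *dst, size_t len,
				 unsigned long offset)
{
	struct kvec kvec = { .iov_base = dst, .iov_len = len };
	struct iov_iter iter;

	/* The iterator describes the destination of the copy. */
	iov_iter_kvec(&iter, READ, &kvec, 1, len);

	return copy_oldmem_page(&iter, pfn, len, offset);
}
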
diff --git a/arch/powerpc/kernel/dawr.c b/arch/powerpc/kernel/dawr.c
index cc14aa6c4a1b..909a05cd2809 100644
--- a/arch/powerpc/kernel/dawr.c
+++ b/arch/powerpc/kernel/dawr.c
@@ -9,14 +9,14 @@
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
-#include <asm/debugfs.h>
#include <asm/machdep.h>
#include <asm/hvcall.h>
+#include <asm/firmware.h>
bool dawr_force_enable;
EXPORT_SYMBOL_GPL(dawr_force_enable);
-int set_dawr(struct arch_hw_breakpoint *brk)
+int set_dawr(int nr, struct arch_hw_breakpoint *brk)
{
unsigned long dawr, dawrx, mrd;
@@ -28,7 +28,7 @@ int set_dawr(struct arch_hw_breakpoint *brk)
dawrx |= (brk->type & (HW_BRK_TYPE_PRIV_ALL)) >> 3;
/*
* DAWR length is stored in field MDR bits 48:53. Matches range in
- * doublewords (64 bits) baised by -1 eg. 0b000000=1DW and
+ * doublewords (64 bits) biased by -1 eg. 0b000000=1DW and
* 0b111111=64DW.
* brk->hw_len is in bytes.
* This aligns up to double word size, shifts and does the bias.
@@ -37,17 +37,26 @@ int set_dawr(struct arch_hw_breakpoint *brk)
dawrx |= (mrd & 0x3f) << (63 - 53);
if (ppc_md.set_dawr)
- return ppc_md.set_dawr(dawr, dawrx);
-
- mtspr(SPRN_DAWR, dawr);
- mtspr(SPRN_DAWRX, dawrx);
+ return ppc_md.set_dawr(nr, dawr, dawrx);
+
+ if (nr == 0) {
+ mtspr(SPRN_DAWR0, dawr);
+ mtspr(SPRN_DAWRX0, dawrx);
+ } else {
+ mtspr(SPRN_DAWR1, dawr);
+ mtspr(SPRN_DAWRX1, dawrx);
+ }
return 0;
}
-static void set_dawr_cb(void *info)
+static void disable_dawrs_cb(void *info)
{
- set_dawr(info);
+ struct arch_hw_breakpoint null_brk = {0};
+ int i;
+
+ for (i = 0; i < nr_wp_slots(); i++)
+ set_dawr(i, &null_brk);
}
static ssize_t dawr_write_file_bool(struct file *file,
@@ -60,7 +69,7 @@ static ssize_t dawr_write_file_bool(struct file *file,
/* Send error to user if they hypervisor won't allow us to write DAWR */
if (!dawr_force_enable &&
firmware_has_feature(FW_FEATURE_LPAR) &&
- set_dawr(&null_brk) != H_SUCCESS)
+ set_dawr(0, &null_brk) != H_SUCCESS)
return -ENODEV;
rc = debugfs_write_file_bool(file, user_buf, count, ppos);
@@ -69,7 +78,7 @@ static ssize_t dawr_write_file_bool(struct file *file,
/* If we are clearing, make sure all CPUs have the DAWR cleared */
if (!dawr_force_enable)
- smp_call_function(set_dawr_cb, &null_brk, 0);
+ smp_call_function(disable_dawrs_cb, NULL, 0);
return rc;
}
@@ -92,7 +101,7 @@ static int __init dawr_force_setup(void)
if (PVR_VER(mfspr(SPRN_PVR)) == PVR_POWER9) {
/* Turn DAWR off by default, but allow admin to turn it on */
debugfs_create_file_unsafe("dawr_enable_dangerous", 0600,
- powerpc_debugfs_root,
+ arch_debugfs_dir,
&dawr_force_enable,
&dawr_enable_fops);
}
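
The set_dawr() hunk above encodes the watchpoint length into DAWRX exactly as its comment describes: the byte length is rounded up to doublewords, biased by -1, and shifted into MDR bits 48:53. A standalone worked sketch of that encoding — dawrx_len_field() is a hypothetical helper, and the field placement follows the comment in the patch:

/* Standalone sketch of the DAWRX length encoding described above. */
#include <stdint.h>
#include <stdio.h>

static uint64_t dawrx_len_field(unsigned long hw_len_bytes)
{
	/* Round up to doublewords (8 bytes), then bias by -1:
	 * 1 DW -> 0b000000, 64 DW -> 0b111111.
	 */
	uint64_t mrd = ((hw_len_bytes + 7) >> 3) - 1;

	/* MDR occupies IBM bits 48:53, i.e. shift left by (63 - 53). */
	return (mrd & 0x3f) << (63 - 53);
}

int main(void)
{
	printf("len 8   -> 0x%llx\n", (unsigned long long)dawrx_len_field(8));	/* 0x0 */
	printf("len 512 -> 0x%llx\n", (unsigned long long)dawrx_len_field(512));	/* 0xfc00 */
	return 0;
}
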
diff --git a/arch/powerpc/kernel/dbell.c b/arch/powerpc/kernel/dbell.c
index f17ff1200eaa..f55c6fb34a3a 100644
--- a/arch/powerpc/kernel/dbell.c
+++ b/arch/powerpc/kernel/dbell.c
@@ -12,77 +12,23 @@
#include <linux/hardirq.h>
#include <asm/dbell.h>
+#include <asm/interrupt.h>
#include <asm/irq_regs.h>
#include <asm/kvm_ppc.h>
#include <asm/trace.h>
#ifdef CONFIG_SMP
-/*
- * Doorbells must only be used if CPU_FTR_DBELL is available.
- * msgsnd is used in HV, and msgsndp is used in !HV.
- *
- * These should be used by platform code that is aware of restrictions.
- * Other arch code should use ->cause_ipi.
- *
- * doorbell_global_ipi() sends a dbell to any target CPU.
- * Must be used only by architectures that address msgsnd target
- * by PIR/get_hard_smp_processor_id.
- */
-void doorbell_global_ipi(int cpu)
-{
- u32 tag = get_hard_smp_processor_id(cpu);
-
- kvmppc_set_host_ipi(cpu);
- /* Order previous accesses vs. msgsnd, which is treated as a store */
- ppc_msgsnd_sync();
- ppc_msgsnd(PPC_DBELL_MSGTYPE, 0, tag);
-}
-
-/*
- * doorbell_core_ipi() sends a dbell to a target CPU in the same core.
- * Must be used only by architectures that address msgsnd target
- * by TIR/cpu_thread_in_core.
- */
-void doorbell_core_ipi(int cpu)
-{
- u32 tag = cpu_thread_in_core(cpu);
-
- kvmppc_set_host_ipi(cpu);
- /* Order previous accesses vs. msgsnd, which is treated as a store */
- ppc_msgsnd_sync();
- ppc_msgsnd(PPC_DBELL_MSGTYPE, 0, tag);
-}
-
-/*
- * Attempt to cause a core doorbell if destination is on the same core.
- * Returns 1 on success, 0 on failure.
- */
-int doorbell_try_core_ipi(int cpu)
-{
- int this_cpu = get_cpu();
- int ret = 0;
-
- if (cpumask_test_cpu(cpu, cpu_sibling_mask(this_cpu))) {
- doorbell_core_ipi(cpu);
- ret = 1;
- }
-
- put_cpu();
-
- return ret;
-}
-
-void doorbell_exception(struct pt_regs *regs)
+DEFINE_INTERRUPT_HANDLER_ASYNC(doorbell_exception)
{
struct pt_regs *old_regs = set_irq_regs(regs);
- irq_enter();
trace_doorbell_entry(regs);
ppc_msgsync();
- may_hard_irq_enable();
+ if (should_hard_irq_enable())
+ do_hard_irq_enable();
kvmppc_clear_host_ipi(smp_processor_id());
__this_cpu_inc(irq_stat.doorbell_irqs);
@@ -90,13 +36,12 @@ void doorbell_exception(struct pt_regs *regs)
smp_ipi_demux_relaxed(); /* already performed the barrier */
trace_doorbell_exit(regs);
- irq_exit();
+
set_irq_regs(old_regs);
}
#else /* CONFIG_SMP */
-void doorbell_exception(struct pt_regs *regs)
+DEFINE_INTERRUPT_HANDLER_ASYNC(doorbell_exception)
{
printk(KERN_WARNING "Received doorbell on non-smp system\n");
}
#endif /* CONFIG_SMP */
-
diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c
index e486d1d78de2..038ce8d9061d 100644
--- a/arch/powerpc/kernel/dma-iommu.c
+++ b/arch/powerpc/kernel/dma-iommu.c
@@ -10,26 +10,66 @@
#include <linux/pci.h>
#include <asm/iommu.h>
-/*
- * Generic iommu implementation
- */
+#ifdef CONFIG_ARCH_HAS_DMA_MAP_DIRECT
+#define can_map_direct(dev, addr) \
+ ((dev)->bus_dma_limit >= phys_to_dma((dev), (addr)))
-/*
- * The coherent mask may be smaller than the real mask, check if we can
- * really use a direct window.
- */
-static inline bool dma_iommu_alloc_bypass(struct device *dev)
+bool arch_dma_map_page_direct(struct device *dev, phys_addr_t addr)
+{
+ if (likely(!dev->bus_dma_limit))
+ return false;
+
+ return can_map_direct(dev, addr);
+}
+
+#define is_direct_handle(dev, h) ((h) >= (dev)->archdata.dma_offset)
+
+bool arch_dma_unmap_page_direct(struct device *dev, dma_addr_t dma_handle)
{
- return dev->archdata.iommu_bypass && !iommu_fixed_is_weak &&
- dma_direct_supported(dev, dev->coherent_dma_mask);
+ if (likely(!dev->bus_dma_limit))
+ return false;
+
+ return is_direct_handle(dev, dma_handle);
}
-static inline bool dma_iommu_map_bypass(struct device *dev,
- unsigned long attrs)
+bool arch_dma_map_sg_direct(struct device *dev, struct scatterlist *sg,
+ int nents)
{
- return dev->archdata.iommu_bypass &&
- (!iommu_fixed_is_weak || (attrs & DMA_ATTR_WEAK_ORDERING));
+ struct scatterlist *s;
+ int i;
+
+ if (likely(!dev->bus_dma_limit))
+ return false;
+
+ for_each_sg(sg, s, nents, i) {
+ if (!can_map_direct(dev, sg_phys(s) + s->offset + s->length))
+ return false;
+ }
+
+ return true;
+}
+
+bool arch_dma_unmap_sg_direct(struct device *dev, struct scatterlist *sg,
+ int nents)
+{
+ struct scatterlist *s;
+ int i;
+
+ if (likely(!dev->bus_dma_limit))
+ return false;
+
+ for_each_sg(sg, s, nents, i) {
+ if (!is_direct_handle(dev, s->dma_address + s->length))
+ return false;
+ }
+
+ return true;
}
+#endif /* CONFIG_ARCH_HAS_DMA_MAP_DIRECT */
+
+/*
+ * Generic iommu implementation
+ */
/* Allocates a contiguous real buffer and creates mappings over it.
* Returns the virtual address of the buffer and sets dma_handle
@@ -39,8 +79,6 @@ static void *dma_iommu_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag,
unsigned long attrs)
{
- if (dma_iommu_alloc_bypass(dev))
- return dma_direct_alloc(dev, size, dma_handle, flag, attrs);
return iommu_alloc_coherent(dev, get_iommu_table_base(dev), size,
dma_handle, dev->coherent_dma_mask, flag,
dev_to_node(dev));
@@ -50,11 +88,7 @@ static void dma_iommu_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle,
unsigned long attrs)
{
- if (dma_iommu_alloc_bypass(dev))
- dma_direct_free(dev, size, vaddr, dma_handle, attrs);
- else
- iommu_free_coherent(get_iommu_table_base(dev), size, vaddr,
- dma_handle);
+ iommu_free_coherent(get_iommu_table_base(dev), size, vaddr, dma_handle);
}
/* Creates TCEs for a user provided buffer. The user buffer must be
@@ -67,9 +101,6 @@ static dma_addr_t dma_iommu_map_page(struct device *dev, struct page *page,
enum dma_data_direction direction,
unsigned long attrs)
{
- if (dma_iommu_map_bypass(dev, attrs))
- return dma_direct_map_page(dev, page, offset, size, direction,
- attrs);
return iommu_map_page(dev, get_iommu_table_base(dev), page, offset,
size, dma_get_mask(dev), direction, attrs);
}
@@ -79,11 +110,8 @@ static void dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle,
size_t size, enum dma_data_direction direction,
unsigned long attrs)
{
- if (!dma_iommu_map_bypass(dev, attrs))
- iommu_unmap_page(get_iommu_table_base(dev), dma_handle, size,
- direction, attrs);
- else
- dma_direct_unmap_page(dev, dma_handle, size, direction, attrs);
+ iommu_unmap_page(get_iommu_table_base(dev), dma_handle, size, direction,
+ attrs);
}
@@ -91,8 +119,6 @@ static int dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
int nelems, enum dma_data_direction direction,
unsigned long attrs)
{
- if (dma_iommu_map_bypass(dev, attrs))
- return dma_direct_map_sg(dev, sglist, nelems, direction, attrs);
return ppc_iommu_map_sg(dev, get_iommu_table_base(dev), sglist, nelems,
dma_get_mask(dev), direction, attrs);
}
@@ -101,11 +127,8 @@ static void dma_iommu_unmap_sg(struct device *dev, struct scatterlist *sglist,
int nelems, enum dma_data_direction direction,
unsigned long attrs)
{
- if (!dma_iommu_map_bypass(dev, attrs))
- ppc_iommu_unmap_sg(get_iommu_table_base(dev), sglist, nelems,
+ ppc_iommu_unmap_sg(get_iommu_table_base(dev), sglist, nelems,
direction, attrs);
- else
- dma_direct_unmap_sg(dev, sglist, nelems, direction, attrs);
}
static bool dma_iommu_bypass_supported(struct device *dev, u64 mask)
@@ -113,8 +136,9 @@ static bool dma_iommu_bypass_supported(struct device *dev, u64 mask)
struct pci_dev *pdev = to_pci_dev(dev);
struct pci_controller *phb = pci_bus_to_host(pdev->bus);
- return phb->controller_ops.iommu_bypass_supported &&
- phb->controller_ops.iommu_bypass_supported(pdev, mask);
+ if (iommu_fixed_is_weak || !phb->controller_ops.iommu_bypass_supported)
+ return false;
+ return phb->controller_ops.iommu_bypass_supported(pdev, mask);
}
/* We support DMA to/from any memory page via the iommu */
@@ -123,8 +147,18 @@ int dma_iommu_dma_supported(struct device *dev, u64 mask)
struct iommu_table *tbl = get_iommu_table_base(dev);
if (dev_is_pci(dev) && dma_iommu_bypass_supported(dev, mask)) {
- dev->archdata.iommu_bypass = true;
- dev_dbg(dev, "iommu: 64-bit OK, using fixed ops\n");
+ /*
+ * dma_iommu_bypass_supported() sets dma_max when there is
+ * 1:1 mapping but it is somehow limited.
+ * ibm,pmemory is one example.
+ */
+ dev->dma_ops_bypass = dev->bus_dma_limit == 0;
+ if (!dev->dma_ops_bypass)
+ dev_warn(dev,
+ "iommu: 64-bit OK but direct DMA is limited by %llx\n",
+ dev->bus_dma_limit);
+ else
+ dev_dbg(dev, "iommu: 64-bit OK, using fixed ops\n");
return 1;
}
@@ -141,7 +175,7 @@ int dma_iommu_dma_supported(struct device *dev, u64 mask)
}
dev_dbg(dev, "iommu: not 64-bit, using default ops\n");
- dev->archdata.iommu_bypass = false;
+ dev->dma_ops_bypass = false;
return 1;
}
@@ -150,50 +184,25 @@ u64 dma_iommu_get_required_mask(struct device *dev)
struct iommu_table *tbl = get_iommu_table_base(dev);
u64 mask;
- if (!tbl)
- return 0;
-
if (dev_is_pci(dev)) {
u64 bypass_mask = dma_direct_get_required_mask(dev);
- if (dma_iommu_bypass_supported(dev, bypass_mask))
+ if (dma_iommu_dma_supported(dev, bypass_mask)) {
+ dev_info(dev, "%s: returning bypass mask 0x%llx\n", __func__, bypass_mask);
return bypass_mask;
+ }
}
- mask = 1ULL < (fls_long(tbl->it_offset + tbl->it_size) - 1);
+ if (!tbl)
+ return 0;
+
+ mask = 1ULL << (fls_long(tbl->it_offset + tbl->it_size) +
+ tbl->it_page_shift - 1);
mask += mask - 1;
return mask;
}
-static void dma_iommu_sync_for_cpu(struct device *dev, dma_addr_t addr,
- size_t size, enum dma_data_direction dir)
-{
- if (dma_iommu_alloc_bypass(dev))
- dma_direct_sync_single_for_cpu(dev, addr, size, dir);
-}
-
-static void dma_iommu_sync_for_device(struct device *dev, dma_addr_t addr,
- size_t sz, enum dma_data_direction dir)
-{
- if (dma_iommu_alloc_bypass(dev))
- dma_direct_sync_single_for_device(dev, addr, sz, dir);
-}
-
-extern void dma_iommu_sync_sg_for_cpu(struct device *dev,
- struct scatterlist *sgl, int nents, enum dma_data_direction dir)
-{
- if (dma_iommu_alloc_bypass(dev))
- dma_direct_sync_sg_for_cpu(dev, sgl, nents, dir);
-}
-
-extern void dma_iommu_sync_sg_for_device(struct device *dev,
- struct scatterlist *sgl, int nents, enum dma_data_direction dir)
-{
- if (dma_iommu_alloc_bypass(dev))
- dma_direct_sync_sg_for_device(dev, sgl, nents, dir);
-}
-
const struct dma_map_ops dma_iommu_ops = {
.alloc = dma_iommu_alloc_coherent,
.free = dma_iommu_free_coherent,
@@ -203,10 +212,8 @@ const struct dma_map_ops dma_iommu_ops = {
.map_page = dma_iommu_map_page,
.unmap_page = dma_iommu_unmap_page,
.get_required_mask = dma_iommu_get_required_mask,
- .sync_single_for_cpu = dma_iommu_sync_for_cpu,
- .sync_single_for_device = dma_iommu_sync_for_device,
- .sync_sg_for_cpu = dma_iommu_sync_sg_for_cpu,
- .sync_sg_for_device = dma_iommu_sync_sg_for_device,
.mmap = dma_common_mmap,
.get_sgtable = dma_common_get_sgtable,
+ .alloc_pages = dma_common_alloc_pages,
+ .free_pages = dma_common_free_pages,
};
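
dma_iommu_get_required_mask() above now derives the mask from the IOMMU window size in bytes rather than in TCE entries (the hunk also fixes the stray "1ULL <" into a proper shift). A small worked sketch of that arithmetic with made-up table geometry; fls_long_sketch and the numbers are illustrative only:

/* Sketch of the DMA required-mask computation, with hypothetical geometry. */
#include <stdint.h>
#include <stdio.h>

static inline int fls_long_sketch(unsigned long x)
{
	return x ? 64 - __builtin_clzl(x) : 0;	/* like fls_long() on a 64-bit long */
}

int main(void)
{
	unsigned long it_offset = 0;		/* first TCE entry */
	unsigned long it_size = 1UL << 19;	/* number of TCE entries (made up) */
	unsigned long it_page_shift = 12;	/* 4K IOMMU pages */
	uint64_t mask;

	/* Window top is roughly (it_offset + it_size) << it_page_shift bytes. */
	mask = 1ULL << (fls_long_sketch(it_offset + it_size) + it_page_shift - 1);
	mask += mask - 1;

	printf("required mask = 0x%llx\n", (unsigned long long)mask);	/* 0xffffffff here */
	return 0;
}
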
diff --git a/arch/powerpc/kernel/dma-swiotlb.c b/arch/powerpc/kernel/dma-swiotlb.c
index fc7816126a40..ba256c37bcc0 100644
--- a/arch/powerpc/kernel/dma-swiotlb.c
+++ b/arch/powerpc/kernel/dma-swiotlb.c
@@ -10,6 +10,7 @@
#include <asm/swiotlb.h>
unsigned int ppc_swiotlb_enable;
+unsigned int ppc_swiotlb_flags;
void __init swiotlb_detect_4g(void)
{
diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c
index 182b4047c1ef..c3fb9fdf5bd7 100644
--- a/arch/powerpc/kernel/dt_cpu_ftrs.c
+++ b/arch/powerpc/kernel/dt_cpu_ftrs.c
@@ -10,6 +10,7 @@
#include <linux/jump_label.h>
#include <linux/libfdt.h>
#include <linux/memblock.h>
+#include <linux/of_fdt.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/string.h>
@@ -17,15 +18,14 @@
#include <asm/cputable.h>
#include <asm/dt_cpu_ftrs.h>
+#include <asm/mce.h>
#include <asm/mmu.h>
-#include <asm/oprofile_impl.h>
-#include <asm/prom.h>
#include <asm/setup.h>
/* Device-tree visible constants follow */
-#define ISA_V2_07B 2070
#define ISA_V3_0B 3000
+#define ISA_V3_1 3100
#define USABLE_PR (1U << 0)
#define USABLE_OS (1U << 1)
@@ -64,44 +64,25 @@ struct dt_cpu_feature {
* Set up the base CPU
*/
-extern long __machine_check_early_realmode_p8(struct pt_regs *regs);
-extern long __machine_check_early_realmode_p9(struct pt_regs *regs);
-
static int hv_mode;
static struct {
u64 lpcr;
- u64 lpcr_clear;
u64 hfscr;
u64 fscr;
+ u64 pcr;
} system_registers;
static void (*init_pmu_registers)(void);
static void __restore_cpu_cpufeatures(void)
{
- u64 lpcr;
-
- /*
- * LPCR is restored by the power on engine already. It can be changed
- * after early init e.g., by radix enable, and we have no unified API
- * for saving and restoring such SPRs.
- *
- * This ->restore hook should really be removed from idle and register
- * restore moved directly into the idle restore code, because this code
- * doesn't know how idle is implemented or what it needs restored here.
- *
- * The best we can do to accommodate secondary boot and idle restore
- * for now is "or" LPCR with existing.
- */
- lpcr = mfspr(SPRN_LPCR);
- lpcr |= system_registers.lpcr;
- lpcr &= ~system_registers.lpcr_clear;
- mtspr(SPRN_LPCR, lpcr);
+ mtspr(SPRN_LPCR, system_registers.lpcr);
if (hv_mode) {
mtspr(SPRN_LPID, 0);
+ mtspr(SPRN_AMOR, ~0);
mtspr(SPRN_HFSCR, system_registers.hfscr);
- mtspr(SPRN_PCR, PCR_MASK);
+ mtspr(SPRN_PCR, system_registers.pcr);
}
mtspr(SPRN_FSCR, system_registers.fscr);
@@ -121,8 +102,6 @@ static struct cpu_spec __initdata base_cpu_spec = {
.dcache_bsize = 32, /* cache info init. */
.num_pmcs = 0,
.pmc_type = PPC_PMC_DEFAULT,
- .oprofile_cpu_type = NULL,
- .oprofile_type = PPC_OPROFILE_INVALID,
.cpu_setup = NULL,
.cpu_restore = __restore_cpu_cpufeatures,
.machine_check_early = NULL,
@@ -139,7 +118,6 @@ static void __init cpufeatures_setup_cpu(void)
/* Initialize the base environment -- clear FSCR/HFSCR. */
hv_mode = !!(mfmsr() & MSR_HV);
if (hv_mode) {
- /* CPU_FTR_HVMODE is used early in PACA setup */
cur_cpu_spec->cpu_features |= CPU_FTR_HVMODE;
mtspr(SPRN_HFSCR, 0);
}
@@ -238,6 +216,7 @@ static int __init feat_enable_hv(struct dt_cpu_feature *f)
}
mtspr(SPRN_LPID, 0);
+ mtspr(SPRN_AMOR, ~0);
lpcr = mfspr(SPRN_LPCR);
lpcr &= ~LPCR_LPES0; /* HV external interrupts */
@@ -275,13 +254,6 @@ static int __init feat_enable_idle_nap(struct dt_cpu_feature *f)
return 1;
}
-static int __init feat_enable_align_dsisr(struct dt_cpu_feature *f)
-{
- cur_cpu_spec->cpu_features &= ~CPU_FTR_NODSISRALIGN;
-
- return 1;
-}
-
static int __init feat_enable_idle_stop(struct dt_cpu_feature *f)
{
u64 lpcr;
@@ -300,6 +272,9 @@ static int __init feat_enable_mmu_hash(struct dt_cpu_feature *f)
{
u64 lpcr;
+ if (!IS_ENABLED(CONFIG_PPC_64S_HASH_MMU))
+ return 0;
+
lpcr = mfspr(SPRN_LPCR);
lpcr &= ~LPCR_ISL;
@@ -319,7 +294,9 @@ static int __init feat_enable_mmu_hash_v3(struct dt_cpu_feature *f)
{
u64 lpcr;
- system_registers.lpcr_clear |= (LPCR_ISL | LPCR_UPRT | LPCR_HR);
+ if (!IS_ENABLED(CONFIG_PPC_64S_HASH_MMU))
+ return 0;
+
lpcr = mfspr(SPRN_LPCR);
lpcr &= ~(LPCR_ISL | LPCR_UPRT | LPCR_HR);
mtspr(SPRN_LPCR, lpcr);
@@ -333,20 +310,29 @@ static int __init feat_enable_mmu_hash_v3(struct dt_cpu_feature *f)
static int __init feat_enable_mmu_radix(struct dt_cpu_feature *f)
{
-#ifdef CONFIG_PPC_RADIX_MMU
+ if (!IS_ENABLED(CONFIG_PPC_RADIX_MMU))
+ return 0;
+
+ cur_cpu_spec->mmu_features |= MMU_FTR_KERNEL_RO;
cur_cpu_spec->mmu_features |= MMU_FTR_TYPE_RADIX;
- cur_cpu_spec->mmu_features |= MMU_FTRS_HASH_BASE;
+ cur_cpu_spec->mmu_features |= MMU_FTR_GTSE;
cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_MMU;
return 1;
-#endif
- return 0;
}
static int __init feat_enable_dscr(struct dt_cpu_feature *f)
{
u64 lpcr;
+ /*
+ * Linux relies on FSCR[DSCR] being clear, so that we can take the
+ * facility unavailable interrupt and track the task's usage of DSCR.
+ * See facility_unavailable_exception().
+ * Clear the bit here so that feat_enable() doesn't set it.
+ */
+ f->fscr_bit_nr = -1;
+
feat_enable(f);
lpcr = mfspr(SPRN_LPCR);
@@ -357,7 +343,7 @@ static int __init feat_enable_dscr(struct dt_cpu_feature *f)
return 1;
}
-static void hfscr_pmu_enable(void)
+static void __init hfscr_pmu_enable(void)
{
u64 hfscr = mfspr(SPRN_HFSCR);
hfscr |= PPC_BIT(60);
@@ -372,7 +358,7 @@ static void init_pmu_power8(void)
}
mtspr(SPRN_MMCRA, 0);
- mtspr(SPRN_MMCR0, 0);
+ mtspr(SPRN_MMCR0, MMCR0_FC);
mtspr(SPRN_MMCR1, 0);
mtspr(SPRN_MMCR2, 0);
mtspr(SPRN_MMCRS, 0);
@@ -400,7 +386,6 @@ static int __init feat_enable_pmu_power8(struct dt_cpu_feature *f)
cur_cpu_spec->num_pmcs = 6;
cur_cpu_spec->pmc_type = PPC_PMC_IBM;
- cur_cpu_spec->oprofile_cpu_type = "ppc64/power8";
return 1;
}
@@ -411,7 +396,7 @@ static void init_pmu_power9(void)
mtspr(SPRN_MMCRC, 0);
mtspr(SPRN_MMCRA, 0);
- mtspr(SPRN_MMCR0, 0);
+ mtspr(SPRN_MMCR0, MMCR0_FC);
mtspr(SPRN_MMCR1, 0);
mtspr(SPRN_MMCR2, 0);
}
@@ -436,7 +421,39 @@ static int __init feat_enable_pmu_power9(struct dt_cpu_feature *f)
cur_cpu_spec->num_pmcs = 6;
cur_cpu_spec->pmc_type = PPC_PMC_IBM;
- cur_cpu_spec->oprofile_cpu_type = "ppc64/power9";
+
+ return 1;
+}
+
+static void init_pmu_power10(void)
+{
+ init_pmu_power9();
+
+ mtspr(SPRN_MMCR3, 0);
+ mtspr(SPRN_MMCRA, MMCRA_BHRB_DISABLE);
+ mtspr(SPRN_MMCR0, MMCR0_FC | MMCR0_PMCCEXT);
+}
+
+static int __init feat_enable_pmu_power10(struct dt_cpu_feature *f)
+{
+ hfscr_pmu_enable();
+
+ init_pmu_power10();
+ init_pmu_registers = init_pmu_power10;
+
+ cur_cpu_spec->cpu_features |= CPU_FTR_MMCRA;
+ cur_cpu_spec->cpu_user_features |= PPC_FEATURE_PSERIES_PERFMON_COMPAT;
+
+ cur_cpu_spec->num_pmcs = 6;
+ cur_cpu_spec->pmc_type = PPC_PMC_IBM;
+
+ return 1;
+}
+
+static int __init feat_enable_mce_power10(struct dt_cpu_feature *f)
+{
+ cur_cpu_spec->platform = "power10";
+ cur_cpu_spec->machine_check_early = __machine_check_early_realmode_p10;
return 1;
}
@@ -553,6 +570,18 @@ static int __init feat_enable_large_ci(struct dt_cpu_feature *f)
return 1;
}
+static int __init feat_enable_mma(struct dt_cpu_feature *f)
+{
+ u64 pcr;
+
+ feat_enable(f);
+ pcr = mfspr(SPRN_PCR);
+ pcr &= ~PCR_MMA_DIS;
+ mtspr(SPRN_PCR, pcr);
+
+ return 1;
+}
+
struct dt_cpu_feature_match {
const char *name;
int (*enable)(struct dt_cpu_feature *f);
@@ -566,6 +595,7 @@ static struct dt_cpu_feature_match __initdata
{"little-endian", feat_enable_le, CPU_FTR_REAL_LE},
{"smt", feat_enable_smt, 0},
{"interrupt-facilities", feat_enable, 0},
+ {"system-call-vectored", feat_enable, 0},
{"timer-facilities", feat_enable, 0},
{"timer-facilities-v3", feat_enable, 0},
{"debug-facilities", feat_enable, 0},
@@ -588,7 +618,7 @@ static struct dt_cpu_feature_match __initdata
{"tm-suspend-hypervisor-assist", feat_enable, CPU_FTR_P9_TM_HV_ASSIST},
{"tm-suspend-xer-so-bug", feat_enable, CPU_FTR_P9_TM_XER_SO_BUG},
{"idle-nap", feat_enable_idle_nap, 0},
- {"alignment-interrupt-dsisr", feat_enable_align_dsisr, 0},
+ /* alignment-interrupt-dsisr ignored */
{"idle-stop", feat_enable_idle_stop, 0},
{"machine-check-power8", feat_enable_mce_power8, 0},
{"performance-monitor-power8", feat_enable_pmu_power8, 0},
@@ -617,7 +647,9 @@ static struct dt_cpu_feature_match __initdata
{"group-start-register", feat_enable, 0},
{"pc-relative-addressing", feat_enable, 0},
{"machine-check-power9", feat_enable_mce_power9, 0},
+ {"machine-check-power10", feat_enable_mce_power10, 0},
{"performance-monitor-power9", feat_enable_pmu_power9, 0},
+ {"performance-monitor-power10", feat_enable_pmu_power10, 0},
{"event-based-branch-v3", feat_enable, 0},
{"random-number-generator", feat_enable, 0},
{"system-call-vectored", feat_disable, 0},
@@ -626,6 +658,9 @@ static struct dt_cpu_feature_match __initdata
{"vector-binary128", feat_enable, 0},
{"vector-binary16", feat_enable, 0},
{"wait-v3", feat_enable, 0},
+ {"prefix-instructions", feat_enable, 0},
+ {"matrix-multiply-assist", feat_enable_mma, 0},
+ {"debug-facilities-v31", feat_enable, CPU_FTR_DAWR1},
};
static bool __initdata using_dt_cpu_ftrs;
@@ -651,10 +686,15 @@ static void __init cpufeatures_setup_start(u32 isa)
{
pr_info("setup for ISA %d\n", isa);
- if (isa >= 3000) {
+ if (isa >= ISA_V3_0B) {
cur_cpu_spec->cpu_features |= CPU_FTR_ARCH_300;
cur_cpu_spec->cpu_user_features2 |= PPC_FEATURE2_ARCH_3_00;
}
+
+ if (isa >= ISA_V3_1) {
+ cur_cpu_spec->cpu_features |= CPU_FTR_ARCH_31;
+ cur_cpu_spec->cpu_user_features2 |= PPC_FEATURE2_ARCH_3_1;
+ }
}
static bool __init cpufeatures_process_feature(struct dt_cpu_feature *f)
@@ -730,30 +770,30 @@ static __init void cpufeatures_cpu_quirks(void)
if ((version & 0xffffefff) == 0x004e0200) {
/* DD2.0 has no feature flag */
cur_cpu_spec->cpu_features |= CPU_FTR_P9_RADIX_PREFETCH_BUG;
+ cur_cpu_spec->cpu_features &= ~(CPU_FTR_DAWR);
} else if ((version & 0xffffefff) == 0x004e0201) {
cur_cpu_spec->cpu_features |= CPU_FTR_POWER9_DD2_1;
cur_cpu_spec->cpu_features |= CPU_FTR_P9_RADIX_PREFETCH_BUG;
+ cur_cpu_spec->cpu_features &= ~(CPU_FTR_DAWR);
} else if ((version & 0xffffefff) == 0x004e0202) {
cur_cpu_spec->cpu_features |= CPU_FTR_P9_TM_HV_ASSIST;
cur_cpu_spec->cpu_features |= CPU_FTR_P9_TM_XER_SO_BUG;
cur_cpu_spec->cpu_features |= CPU_FTR_POWER9_DD2_1;
+ cur_cpu_spec->cpu_features &= ~(CPU_FTR_DAWR);
+ } else if ((version & 0xffffefff) == 0x004e0203) {
+ cur_cpu_spec->cpu_features |= CPU_FTR_P9_TM_HV_ASSIST;
+ cur_cpu_spec->cpu_features |= CPU_FTR_P9_TM_XER_SO_BUG;
+ cur_cpu_spec->cpu_features |= CPU_FTR_POWER9_DD2_1;
} else if ((version & 0xffff0000) == 0x004e0000) {
/* DD2.1 and up have DD2_1 */
cur_cpu_spec->cpu_features |= CPU_FTR_POWER9_DD2_1;
}
if ((version & 0xffff0000) == 0x004e0000) {
- cur_cpu_spec->cpu_features &= ~(CPU_FTR_DAWR);
cur_cpu_spec->cpu_features |= CPU_FTR_P9_TIDR;
}
update_tlbie_feature_flag(version);
- /*
- * PKEY was not in the initial base or feature node
- * specification, but it should become optional in the next
- * cpu feature version sequence.
- */
- cur_cpu_spec->cpu_features |= CPU_FTR_PKEY;
}
static void __init cpufeatures_setup_finished(void)
@@ -771,6 +811,7 @@ static void __init cpufeatures_setup_finished(void)
system_registers.lpcr = mfspr(SPRN_LPCR);
system_registers.hfscr = mfspr(SPRN_HFSCR);
system_registers.fscr = mfspr(SPRN_FSCR);
+ system_registers.pcr = mfspr(SPRN_PCR);
pr_info("final cpu/mmu features = 0x%016lx 0x%08x\n",
cur_cpu_spec->cpu_features, cur_cpu_spec->mmu_features);
@@ -1058,14 +1099,14 @@ static int __init dt_cpu_ftrs_scan_callback(unsigned long node, const char
prop = of_get_flat_dt_prop(node, "display-name", NULL);
if (prop && strlen((char *)prop) != 0) {
- strlcpy(dt_cpu_name, (char *)prop, sizeof(dt_cpu_name));
+ strscpy(dt_cpu_name, (char *)prop, sizeof(dt_cpu_name));
cur_cpu_spec->cpu_name = dt_cpu_name;
}
cpufeatures_setup_finished();
- memblock_free(__pa(dt_cpu_features),
- sizeof(struct dt_cpu_feature)*nr_dt_cpu_features);
+ memblock_free(dt_cpu_features,
+ sizeof(struct dt_cpu_feature) * nr_dt_cpu_features);
return 0;
}
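
The dt_cpu_ftrs.c additions above grow the __initdata match table with POWER10-era entries (machine-check-power10, performance-monitor-power10, matrix-multiply-assist, debug-facilities-v31, prefix-instructions). Conceptually, each feature node name from the device tree is looked up in that table and its enable callback runs, falling back to "unknown feature" handling otherwise. A minimal sketch of that dispatch with invented names (ftr_sketch, process_feature, enable_mma, enable_dawr1), not the kernel's internal helpers:

/* Illustrative dispatch over a name -> enable-callback table. */
#include <stdio.h>
#include <string.h>

struct ftr_sketch {
	const char *name;
	int (*enable)(void);
};

static int enable_mma(void)   { puts("enable MMA (clear PCR_MMA_DIS)"); return 1; }
static int enable_dawr1(void) { puts("enable second DAWR"); return 1; }

static const struct ftr_sketch matches[] = {
	{ "matrix-multiply-assist", enable_mma },
	{ "debug-facilities-v31",   enable_dawr1 },
};

static int process_feature(const char *dt_name)
{
	size_t i;

	for (i = 0; i < sizeof(matches) / sizeof(matches[0]); i++)
		if (!strcmp(dt_name, matches[i].name))
			return matches[i].enable();

	printf("unknown feature %s, not advertising it\n", dt_name);
	return 0;
}

int main(void)
{
	process_feature("matrix-multiply-assist");
	process_feature("some-future-feature");
	return 0;
}
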
diff --git a/arch/powerpc/kernel/early_32.c b/arch/powerpc/kernel/early_32.c
index ef2ad4945904..03f1135ef64f 100644
--- a/arch/powerpc/kernel/early_32.c
+++ b/arch/powerpc/kernel/early_32.c
@@ -8,7 +8,6 @@
#include <linux/kernel.h>
#include <asm/setup.h>
#include <asm/sections.h>
-#include <asm/asm-prototypes.h>
/*
* We're called here very early in the boot.
diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c
index 17cb3e9b5697..ab316e155ea9 100644
--- a/arch/powerpc/kernel/eeh.c
+++ b/arch/powerpc/kernel/eeh.c
@@ -21,9 +21,9 @@
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/of.h>
+#include <linux/debugfs.h>
#include <linux/atomic.h>
-#include <asm/debugfs.h>
#include <asm/eeh.h>
#include <asm/eeh_event.h>
#include <asm/io.h>
@@ -167,39 +167,33 @@ void eeh_show_enabled(void)
*/
static size_t eeh_dump_dev_log(struct eeh_dev *edev, char *buf, size_t len)
{
- struct pci_dn *pdn = eeh_dev_to_pdn(edev);
u32 cfg;
int cap, i;
int n = 0, l = 0;
char buffer[128];
- if (!pdn) {
- pr_warn("EEH: Note: No error log for absent device.\n");
- return 0;
- }
-
n += scnprintf(buf+n, len-n, "%04x:%02x:%02x.%01x\n",
- pdn->phb->global_number, pdn->busno,
- PCI_SLOT(pdn->devfn), PCI_FUNC(pdn->devfn));
+ edev->pe->phb->global_number, edev->bdfn >> 8,
+ PCI_SLOT(edev->bdfn), PCI_FUNC(edev->bdfn));
pr_warn("EEH: of node=%04x:%02x:%02x.%01x\n",
- pdn->phb->global_number, pdn->busno,
- PCI_SLOT(pdn->devfn), PCI_FUNC(pdn->devfn));
+ edev->pe->phb->global_number, edev->bdfn >> 8,
+ PCI_SLOT(edev->bdfn), PCI_FUNC(edev->bdfn));
- eeh_ops->read_config(pdn, PCI_VENDOR_ID, 4, &cfg);
+ eeh_ops->read_config(edev, PCI_VENDOR_ID, 4, &cfg);
n += scnprintf(buf+n, len-n, "dev/vend:%08x\n", cfg);
pr_warn("EEH: PCI device/vendor: %08x\n", cfg);
- eeh_ops->read_config(pdn, PCI_COMMAND, 4, &cfg);
+ eeh_ops->read_config(edev, PCI_COMMAND, 4, &cfg);
n += scnprintf(buf+n, len-n, "cmd/stat:%x\n", cfg);
pr_warn("EEH: PCI cmd/status register: %08x\n", cfg);
/* Gather bridge-specific registers */
if (edev->mode & EEH_DEV_BRIDGE) {
- eeh_ops->read_config(pdn, PCI_SEC_STATUS, 2, &cfg);
+ eeh_ops->read_config(edev, PCI_SEC_STATUS, 2, &cfg);
n += scnprintf(buf+n, len-n, "sec stat:%x\n", cfg);
pr_warn("EEH: Bridge secondary status: %04x\n", cfg);
- eeh_ops->read_config(pdn, PCI_BRIDGE_CONTROL, 2, &cfg);
+ eeh_ops->read_config(edev, PCI_BRIDGE_CONTROL, 2, &cfg);
n += scnprintf(buf+n, len-n, "brdg ctl:%x\n", cfg);
pr_warn("EEH: Bridge control: %04x\n", cfg);
}
@@ -207,11 +201,11 @@ static size_t eeh_dump_dev_log(struct eeh_dev *edev, char *buf, size_t len)
/* Dump out the PCI-X command and status regs */
cap = edev->pcix_cap;
if (cap) {
- eeh_ops->read_config(pdn, cap, 4, &cfg);
+ eeh_ops->read_config(edev, cap, 4, &cfg);
n += scnprintf(buf+n, len-n, "pcix-cmd:%x\n", cfg);
pr_warn("EEH: PCI-X cmd: %08x\n", cfg);
- eeh_ops->read_config(pdn, cap+4, 4, &cfg);
+ eeh_ops->read_config(edev, cap+4, 4, &cfg);
n += scnprintf(buf+n, len-n, "pcix-stat:%x\n", cfg);
pr_warn("EEH: PCI-X status: %08x\n", cfg);
}
@@ -223,7 +217,7 @@ static size_t eeh_dump_dev_log(struct eeh_dev *edev, char *buf, size_t len)
pr_warn("EEH: PCI-E capabilities and status follow:\n");
for (i=0; i<=8; i++) {
- eeh_ops->read_config(pdn, cap+4*i, 4, &cfg);
+ eeh_ops->read_config(edev, cap+4*i, 4, &cfg);
n += scnprintf(buf+n, len-n, "%02x:%x\n", 4*i, cfg);
if ((i % 4) == 0) {
@@ -250,7 +244,7 @@ static size_t eeh_dump_dev_log(struct eeh_dev *edev, char *buf, size_t len)
pr_warn("EEH: PCI-E AER capability register set follows:\n");
for (i=0; i<=13; i++) {
- eeh_ops->read_config(pdn, cap+4*i, 4, &cfg);
+ eeh_ops->read_config(edev, cap+4*i, 4, &cfg);
n += scnprintf(buf+n, len-n, "%02x:%x\n", 4*i, cfg);
if ((i % 4) == 0) {
@@ -352,31 +346,7 @@ void eeh_slot_error_detail(struct eeh_pe *pe, int severity)
*/
static inline unsigned long eeh_token_to_phys(unsigned long token)
{
- pte_t *ptep;
- unsigned long pa;
- int hugepage_shift;
-
- /*
- * We won't find hugepages here(this is iomem). Hence we are not
- * worried about _PAGE_SPLITTING/collapse. Also we will not hit
- * page table free, because of init_mm.
- */
- ptep = find_init_mm_pte(token, &hugepage_shift);
- if (!ptep)
- return token;
-
- pa = pte_pfn(*ptep);
-
- /* On radix we can do hugepage mappings for io, so handle that */
- if (hugepage_shift) {
- pa <<= hugepage_shift;
- pa |= token & ((1ul << hugepage_shift) - 1);
- } else {
- pa <<= PAGE_SHIFT;
- pa |= token & (PAGE_SIZE - 1);
- }
-
- return pa;
+ return ppc_find_vmap_phys(token);
}
/*
@@ -429,6 +399,14 @@ out:
return ret;
}
+static inline const char *eeh_driver_name(struct pci_dev *pdev)
+{
+ if (pdev)
+ return dev_driver_string(&pdev->dev);
+
+ return "<null>";
+}
+
/**
* eeh_dev_check_failure - Check if all 1's data is due to EEH slot freeze
* @edev: eeh device
@@ -472,11 +450,6 @@ int eeh_dev_check_failure(struct eeh_dev *edev)
return 0;
}
- if (!pe->addr && !pe->config_addr) {
- eeh_stats.no_cfg_addr++;
- return 0;
- }
-
/*
* On PowerNV platform, we might already have fenced PHB
* there and we need take care of that firstly.
@@ -624,6 +597,7 @@ EXPORT_SYMBOL(eeh_check_failure);
/**
* eeh_pci_enable - Enable MMIO or DMA transfers for this slot
* @pe: EEH PE
+ * @function: EEH option
*
* This routine should be called to reenable frozen MMIO or DMA
* so that it would work correctly again. It's useful while doing
@@ -726,7 +700,6 @@ static void eeh_disable_and_save_dev_state(struct eeh_dev *edev,
static void eeh_restore_dev_state(struct eeh_dev *edev, void *userdata)
{
- struct pci_dn *pdn = eeh_dev_to_pdn(edev);
struct pci_dev *pdev = eeh_dev_to_pci_dev(edev);
struct pci_dev *dev = userdata;
@@ -734,73 +707,14 @@ static void eeh_restore_dev_state(struct eeh_dev *edev, void *userdata)
return;
/* Apply customization from firmware */
- if (pdn && eeh_ops->restore_config)
- eeh_ops->restore_config(pdn);
+ if (eeh_ops->restore_config)
+ eeh_ops->restore_config(edev);
/* The caller should restore state for the specified device */
if (pdev != dev)
pci_restore_state(pdev);
}
-int eeh_restore_vf_config(struct pci_dn *pdn)
-{
- struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
- u32 devctl, cmd, cap2, aer_capctl;
- int old_mps;
-
- if (edev->pcie_cap) {
- /* Restore MPS */
- old_mps = (ffs(pdn->mps) - 8) << 5;
- eeh_ops->read_config(pdn, edev->pcie_cap + PCI_EXP_DEVCTL,
- 2, &devctl);
- devctl &= ~PCI_EXP_DEVCTL_PAYLOAD;
- devctl |= old_mps;
- eeh_ops->write_config(pdn, edev->pcie_cap + PCI_EXP_DEVCTL,
- 2, devctl);
-
- /* Disable Completion Timeout if possible */
- eeh_ops->read_config(pdn, edev->pcie_cap + PCI_EXP_DEVCAP2,
- 4, &cap2);
- if (cap2 & PCI_EXP_DEVCAP2_COMP_TMOUT_DIS) {
- eeh_ops->read_config(pdn,
- edev->pcie_cap + PCI_EXP_DEVCTL2,
- 4, &cap2);
- cap2 |= PCI_EXP_DEVCTL2_COMP_TMOUT_DIS;
- eeh_ops->write_config(pdn,
- edev->pcie_cap + PCI_EXP_DEVCTL2,
- 4, cap2);
- }
- }
-
- /* Enable SERR and parity checking */
- eeh_ops->read_config(pdn, PCI_COMMAND, 2, &cmd);
- cmd |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
- eeh_ops->write_config(pdn, PCI_COMMAND, 2, cmd);
-
- /* Enable report various errors */
- if (edev->pcie_cap) {
- eeh_ops->read_config(pdn, edev->pcie_cap + PCI_EXP_DEVCTL,
- 2, &devctl);
- devctl &= ~PCI_EXP_DEVCTL_CERE;
- devctl |= (PCI_EXP_DEVCTL_NFERE |
- PCI_EXP_DEVCTL_FERE |
- PCI_EXP_DEVCTL_URRE);
- eeh_ops->write_config(pdn, edev->pcie_cap + PCI_EXP_DEVCTL,
- 2, devctl);
- }
-
- /* Enable ECRC generation and check */
- if (edev->pcie_cap && edev->aer_cap) {
- eeh_ops->read_config(pdn, edev->aer_cap + PCI_ERR_CAP,
- 4, &aer_capctl);
- aer_capctl |= (PCI_ERR_CAP_ECRC_GENE | PCI_ERR_CAP_ECRC_CHKE);
- eeh_ops->write_config(pdn, edev->aer_cap + PCI_ERR_CAP,
- 4, aer_capctl);
- }
-
- return 0;
-}
-
/**
* pcibios_set_pcie_reset_state - Set PCI-E reset state
* @dev: pci device struct
@@ -850,14 +764,14 @@ int pcibios_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state stat
default:
eeh_pe_state_clear(pe, EEH_PE_ISOLATED | EEH_PE_CFG_BLOCKED, true);
return -EINVAL;
- };
+ }
return 0;
}
/**
- * eeh_set_pe_freset - Check the required reset for the indicated device
- * @data: EEH device
+ * eeh_set_dev_freset - Check the required reset for the indicated device
+ * @edev: EEH device
* @flag: return value
*
* Each device might have its preferred reset type: fundamental or
@@ -896,6 +810,7 @@ static void eeh_pe_refreeze_passed(struct eeh_pe *root)
/**
* eeh_pe_reset_full - Complete a full reset process on the indicated PE
* @pe: EEH PE
+ * @include_passed: include passed-through devices?
*
* This function executes a full reset procedure on a PE, including setting
* the appropriate flags, performing a fundamental or hot reset, and then
@@ -977,15 +892,13 @@ int eeh_pe_reset_full(struct eeh_pe *pe, bool include_passed)
*/
void eeh_save_bars(struct eeh_dev *edev)
{
- struct pci_dn *pdn;
int i;
- pdn = eeh_dev_to_pdn(edev);
- if (!pdn)
+ if (!edev)
return;
for (i = 0; i < 16; i++)
- eeh_ops->read_config(pdn, i * 4, 4, &edev->config_space[i]);
+ eeh_ops->read_config(edev, i * 4, 4, &edev->config_space[i]);
/*
* For PCI bridges including root port, we need enable bus
@@ -997,56 +910,6 @@ void eeh_save_bars(struct eeh_dev *edev)
edev->config_space[1] |= PCI_COMMAND_MASTER;
}
-/**
- * eeh_ops_register - Register platform dependent EEH operations
- * @ops: platform dependent EEH operations
- *
- * Register the platform dependent EEH operation callback
- * functions. The platform should call this function before
- * any other EEH operations.
- */
-int __init eeh_ops_register(struct eeh_ops *ops)
-{
- if (!ops->name) {
- pr_warn("%s: Invalid EEH ops name for %p\n",
- __func__, ops);
- return -EINVAL;
- }
-
- if (eeh_ops && eeh_ops != ops) {
- pr_warn("%s: EEH ops of platform %s already existing (%s)\n",
- __func__, eeh_ops->name, ops->name);
- return -EEXIST;
- }
-
- eeh_ops = ops;
-
- return 0;
-}
-
-/**
- * eeh_ops_unregister - Unreigster platform dependent EEH operations
- * @name: name of EEH platform operations
- *
- * Unregister the platform dependent EEH operation callback
- * functions.
- */
-int __exit eeh_ops_unregister(const char *name)
-{
- if (!name || !strlen(name)) {
- pr_warn("%s: Invalid EEH ops name\n",
- __func__);
- return -EINVAL;
- }
-
- if (eeh_ops && !strcmp(eeh_ops->name, name)) {
- eeh_ops = NULL;
- return 0;
- }
-
- return -EEXIST;
-}
-
static int eeh_reboot_notifier(struct notifier_block *nb,
unsigned long action, void *unused)
{
@@ -1058,45 +921,66 @@ static struct notifier_block eeh_reboot_nb = {
.notifier_call = eeh_reboot_notifier,
};
+static int eeh_device_notifier(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct device *dev = data;
+
+ switch (action) {
+ /*
+ * Note: It's not possible to perform EEH device addition (i.e.
+ * {pseries,pnv}_pcibios_bus_add_device()) here because it depends on
+ * the device's resources, which have not yet been set up.
+ */
+ case BUS_NOTIFY_DEL_DEVICE:
+ eeh_remove_device(to_pci_dev(dev));
+ break;
+ default:
+ break;
+ }
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block eeh_device_nb = {
+ .notifier_call = eeh_device_notifier,
+};
+
/**
- * eeh_init - EEH initialization
+ * eeh_init - System wide EEH initialization
+ * @ops: struct to trace EEH operation callback functions
*
- * Initialize EEH by trying to enable it for all of the adapters in the system.
- * As a side effect we can determine here if eeh is supported at all.
- * Note that we leave EEH on so failed config cycles won't cause a machine
- * check. If a user turns off EEH for a particular adapter they are really
- * telling Linux to ignore errors. Some hardware (e.g. POWER5) won't
- * grant access to a slot if EEH isn't enabled, and so we always enable
- * EEH for all slots/all devices.
- *
- * The eeh-force-off option disables EEH checking globally, for all slots.
- * Even if force-off is set, the EEH hardware is still enabled, so that
- * newer systems can boot.
+ * It's the platform's job to call this from an arch_initcall().
*/
-static int eeh_init(void)
+int eeh_init(struct eeh_ops *ops)
{
struct pci_controller *hose, *tmp;
int ret = 0;
+ /* the platform should only initialise EEH once */
+ if (WARN_ON(eeh_ops))
+ return -EEXIST;
+ if (WARN_ON(!ops))
+ return -ENOENT;
+ eeh_ops = ops;
+
/* Register reboot notifier */
ret = register_reboot_notifier(&eeh_reboot_nb);
if (ret) {
- pr_warn("%s: Failed to register notifier (%d)\n",
+ pr_warn("%s: Failed to register reboot notifier (%d)\n",
__func__, ret);
return ret;
}
- /* call platform initialization function */
- if (!eeh_ops) {
- pr_warn("%s: Platform EEH operation not found\n",
- __func__);
- return -EEXIST;
- } else if ((ret = eeh_ops->init()))
+ ret = bus_register_notifier(&pci_bus_type, &eeh_device_nb);
+ if (ret) {
+ pr_warn("%s: Failed to register bus notifier (%d)\n",
+ __func__, ret);
return ret;
+ }
/* Initialize PHB PEs */
list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
- eeh_dev_phb_init_dynamic(hose);
+ eeh_phb_pe_create(hose);
eeh_addr_cache_init();
@@ -1104,91 +988,45 @@ static int eeh_init(void)
return eeh_event_init();
}
-core_initcall_sync(eeh_init);
-
-/**
- * eeh_add_device_early - Enable EEH for the indicated device node
- * @pdn: PCI device node for which to set up EEH
- *
- * This routine must be used to perform EEH initialization for PCI
- * devices that were added after system boot (e.g. hotplug, dlpar).
- * This routine must be called before any i/o is performed to the
- * adapter (inluding any config-space i/o).
- * Whether this actually enables EEH or not for this device depends
- * on the CEC architecture, type of the device, on earlier boot
- * command-line arguments & etc.
- */
-void eeh_add_device_early(struct pci_dn *pdn)
-{
- struct pci_controller *phb = pdn ? pdn->phb : NULL;
- struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
-
- if (!edev)
- return;
-
- if (!eeh_has_flag(EEH_PROBE_MODE_DEVTREE))
- return;
-
- /* USB Bus children of PCI devices will not have BUID's */
- if (NULL == phb ||
- (eeh_has_flag(EEH_PROBE_MODE_DEVTREE) && 0 == phb->buid))
- return;
-
- eeh_ops->probe(pdn, NULL);
-}
-
/**
- * eeh_add_device_tree_early - Enable EEH for the indicated device
- * @pdn: PCI device node
- *
- * This routine must be used to perform EEH initialization for the
- * indicated PCI device that was added after system boot (e.g.
- * hotplug, dlpar).
- */
-void eeh_add_device_tree_early(struct pci_dn *pdn)
-{
- struct pci_dn *n;
-
- if (!pdn)
- return;
-
- list_for_each_entry(n, &pdn->child_list, list)
- eeh_add_device_tree_early(n);
- eeh_add_device_early(pdn);
-}
-EXPORT_SYMBOL_GPL(eeh_add_device_tree_early);
-
-/**
- * eeh_add_device_late - Perform EEH initialization for the indicated pci device
+ * eeh_probe_device() - Perform EEH initialization for the indicated pci device
* @dev: pci device for which to set up EEH
*
* This routine must be used to complete EEH initialization for PCI
* devices that were added after system boot (e.g. hotplug, dlpar).
*/
-void eeh_add_device_late(struct pci_dev *dev)
+void eeh_probe_device(struct pci_dev *dev)
{
- struct pci_dn *pdn;
struct eeh_dev *edev;
- if (!dev)
+ pr_debug("EEH: Adding device %s\n", pci_name(dev));
+
+ /*
+ * pci_dev_to_eeh_dev() can only work if eeh_probe_dev() was
+ * already called for this device.
+ */
+ if (WARN_ON_ONCE(pci_dev_to_eeh_dev(dev))) {
+ pci_dbg(dev, "Already bound to an eeh_dev!\n");
return;
+ }
- pdn = pci_get_pdn_by_devfn(dev->bus, dev->devfn);
- edev = pdn_to_eeh_dev(pdn);
- eeh_edev_dbg(edev, "Adding device\n");
- if (edev->pdev == dev) {
- eeh_edev_dbg(edev, "Device already referenced!\n");
+ edev = eeh_ops->probe(dev);
+ if (!edev) {
+ pr_debug("EEH: Adding device failed\n");
return;
}
/*
- * The EEH cache might not be removed correctly because of
- * unbalanced kref to the device during unplug time, which
- * relies on pcibios_release_device(). So we have to remove
- * that here explicitly.
+ * FIXME: We rely on pcibios_release_device() to remove the
+ * existing EEH state. The release function is only called if
+ * the pci_dev's refcount drops to zero so if something is
+ * keeping a ref to a device (e.g. a filesystem) we need to
+ * remove the old EEH state.
+ *
+ * FIXME: HEY MA, LOOK AT ME, NO LOCKING!
*/
- if (edev->pdev) {
- eeh_rmv_from_parent_pe(edev);
+ if (edev->pdev && edev->pdev != dev) {
+ eeh_pe_tree_remove(edev);
eeh_addr_cache_rmv_dev(edev->pdev);
eeh_sysfs_remove_device(edev->pdev);
@@ -1198,69 +1036,16 @@ void eeh_add_device_late(struct pci_dev *dev)
* into error handler afterwards.
*/
edev->mode |= EEH_DEV_NO_HANDLER;
-
- edev->pdev = NULL;
- dev->dev.archdata.edev = NULL;
}
- if (eeh_has_flag(EEH_PROBE_MODE_DEV))
- eeh_ops->probe(pdn, NULL);
-
+ /* bind the pdev and the edev together */
edev->pdev = dev;
dev->dev.archdata.edev = edev;
-
eeh_addr_cache_insert_dev(dev);
+ eeh_sysfs_add_device(dev);
}
/**
- * eeh_add_device_tree_late - Perform EEH initialization for the indicated PCI bus
- * @bus: PCI bus
- *
- * This routine must be used to perform EEH initialization for PCI
- * devices which are attached to the indicated PCI bus. The PCI bus
- * is added after system boot through hotplug or dlpar.
- */
-void eeh_add_device_tree_late(struct pci_bus *bus)
-{
- struct pci_dev *dev;
-
- if (eeh_has_flag(EEH_FORCE_DISABLED))
- return;
- list_for_each_entry(dev, &bus->devices, bus_list) {
- eeh_add_device_late(dev);
- if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
- struct pci_bus *subbus = dev->subordinate;
- if (subbus)
- eeh_add_device_tree_late(subbus);
- }
- }
-}
-EXPORT_SYMBOL_GPL(eeh_add_device_tree_late);
-
-/**
- * eeh_add_sysfs_files - Add EEH sysfs files for the indicated PCI bus
- * @bus: PCI bus
- *
- * This routine must be used to add EEH sysfs files for PCI
- * devices which are attached to the indicated PCI bus. The PCI bus
- * is added after system boot through hotplug or dlpar.
- */
-void eeh_add_sysfs_files(struct pci_bus *bus)
-{
- struct pci_dev *dev;
-
- list_for_each_entry(dev, &bus->devices, bus_list) {
- eeh_sysfs_add_device(dev);
- if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
- struct pci_bus *subbus = dev->subordinate;
- if (subbus)
- eeh_add_sysfs_files(subbus);
- }
- }
-}
-EXPORT_SYMBOL_GPL(eeh_add_sysfs_files);
-
-/**
* eeh_remove_device - Undo EEH setup for the indicated pci device
* @dev: pci device to be removed
*
@@ -1320,7 +1105,7 @@ void eeh_remove_device(struct pci_dev *dev)
edev->in_error = false;
dev->dev.archdata.edev = NULL;
if (!(edev->pe->state & EEH_PE_KEEP))
- eeh_rmv_from_parent_pe(edev);
+ eeh_pe_tree_remove(edev);
else
edev->mode |= EEH_DEV_DISCONNECTED;
}
@@ -1544,7 +1329,7 @@ int eeh_pe_set_option(struct eeh_pe *pe, int option)
/*
* EEH functionality could possibly be disabled, just
- * return error for the case. And the EEH functinality
+ * return error for the case. And the EEH functionality
* isn't expected to be disabled on one specific PE.
*/
switch (option) {
@@ -1668,6 +1453,7 @@ static int eeh_pe_reenable_devices(struct eeh_pe *pe, bool include_passed)
* eeh_pe_reset - Issue PE reset according to specified type
* @pe: EEH PE
* @option: reset type
+ * @include_passed: include passed-through devices?
*
* The routine is called to reset the specified PE with the
* indicated type, either fundamental reset or hot reset.
@@ -1739,12 +1525,12 @@ EXPORT_SYMBOL_GPL(eeh_pe_configure);
* eeh_pe_inject_err - Injecting the specified PCI error to the indicated PE
* @pe: the indicated PE
* @type: error type
- * @function: error function
+ * @func: error function
* @addr: address
* @mask: address mask
*
* The routine is called to inject the specified PCI error, which
- * is determined by @type and @function, to the indicated PE for
+ * is determined by @type and @func, to the indicated PE for
* testing purpose.
*/
int eeh_pe_inject_err(struct eeh_pe *pe, int type, int func,
@@ -1770,6 +1556,7 @@ int eeh_pe_inject_err(struct eeh_pe *pe, int type, int func,
}
EXPORT_SYMBOL_GPL(eeh_pe_inject_err);
+#ifdef CONFIG_PROC_FS
static int proc_eeh_show(struct seq_file *m, void *v)
{
if (!eeh_enabled()) {
@@ -1796,8 +1583,38 @@ static int proc_eeh_show(struct seq_file *m, void *v)
return 0;
}
+#endif /* CONFIG_PROC_FS */
#ifdef CONFIG_DEBUG_FS
+
+
+static struct pci_dev *eeh_debug_lookup_pdev(struct file *filp,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ uint32_t domain, bus, dev, fn;
+ struct pci_dev *pdev;
+ char buf[20];
+ int ret;
+
+ memset(buf, 0, sizeof(buf));
+ ret = simple_write_to_buffer(buf, sizeof(buf)-1, ppos, user_buf, count);
+ if (!ret)
+ return ERR_PTR(-EFAULT);
+
+ ret = sscanf(buf, "%x:%x:%x.%x", &domain, &bus, &dev, &fn);
+ if (ret != 4) {
+ pr_err("%s: expected 4 args, got %d\n", __func__, ret);
+ return ERR_PTR(-EINVAL);
+ }
+
+ pdev = pci_get_domain_bus_and_slot(domain, bus, (dev << 3) | fn);
+ if (!pdev)
+ return ERR_PTR(-ENODEV);
+
+ return pdev;
+}
+
static int eeh_enable_dbgfs_set(void *data, u64 val)
{
if (val)
@@ -1854,7 +1671,7 @@ static ssize_t eeh_force_recover_write(struct file *filp,
return -ENODEV;
/* Retrieve PE */
- pe = eeh_pe_get(hose, pe_no, 0);
+ pe = eeh_pe_get(hose, pe_no);
if (!pe)
return -ENODEV;
@@ -1890,26 +1707,13 @@ static ssize_t eeh_dev_check_write(struct file *filp,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
- uint32_t domain, bus, dev, fn;
struct pci_dev *pdev;
struct eeh_dev *edev;
- char buf[20];
int ret;
- memset(buf, 0, sizeof(buf));
- ret = simple_write_to_buffer(buf, sizeof(buf)-1, ppos, user_buf, count);
- if (!ret)
- return -EFAULT;
-
- ret = sscanf(buf, "%x:%x:%x.%x", &domain, &bus, &dev, &fn);
- if (ret != 4) {
- pr_err("%s: expected 4 args, got %d\n", __func__, ret);
- return -EINVAL;
- }
-
- pdev = pci_get_domain_bus_and_slot(domain, bus, (dev << 3) | fn);
- if (!pdev)
- return -ENODEV;
+ pdev = eeh_debug_lookup_pdev(filp, user_buf, count, ppos);
+ if (IS_ERR(pdev))
+ return PTR_ERR(pdev);
edev = pci_dev_to_eeh_dev(pdev);
if (!edev) {
@@ -1919,8 +1723,8 @@ static ssize_t eeh_dev_check_write(struct file *filp,
}
ret = eeh_dev_check_failure(edev);
- pci_info(pdev, "eeh_dev_check_failure(%04x:%02x:%02x.%01x) = %d\n",
- domain, bus, dev, fn, ret);
+ pci_info(pdev, "eeh_dev_check_failure(%s) = %d\n",
+ pci_name(pdev), ret);
pci_dev_put(pdev);
@@ -2000,7 +1804,7 @@ static int eeh_debugfs_break_device(struct pci_dev *pdev)
* PE freeze. Using the in_8() accessor skips the eeh detection hook
* so the freeze hook so the EEH Detection machinery won't be
* triggered here. This is to match the usual behaviour of EEH
- * where the HW will asyncronously freeze a PE and it's up to
+ * where the HW will asynchronously freeze a PE and it's up to
* the kernel to notice and deal with it.
*
* 3. Turn Memory space back on. This is more important for VFs
@@ -2031,25 +1835,12 @@ static ssize_t eeh_dev_break_write(struct file *filp,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
- uint32_t domain, bus, dev, fn;
struct pci_dev *pdev;
- char buf[20];
int ret;
- memset(buf, 0, sizeof(buf));
- ret = simple_write_to_buffer(buf, sizeof(buf)-1, ppos, user_buf, count);
- if (!ret)
- return -EFAULT;
-
- ret = sscanf(buf, "%x:%x:%x.%x", &domain, &bus, &dev, &fn);
- if (ret != 4) {
- pr_err("%s: expected 4 args, got %d\n", __func__, ret);
- return -EINVAL;
- }
-
- pdev = pci_get_domain_bus_and_slot(domain, bus, (dev << 3) | fn);
- if (!pdev)
- return -ENODEV;
+ pdev = eeh_debug_lookup_pdev(filp, user_buf, count, ppos);
+ if (IS_ERR(pdev))
+ return PTR_ERR(pdev);
ret = eeh_debugfs_break_device(pdev);
pci_dev_put(pdev);
@@ -2067,6 +1858,53 @@ static const struct file_operations eeh_dev_break_fops = {
.read = eeh_debugfs_dev_usage,
};
+static ssize_t eeh_dev_can_recover(struct file *filp,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct pci_driver *drv;
+ struct pci_dev *pdev;
+ size_t ret;
+
+ pdev = eeh_debug_lookup_pdev(filp, user_buf, count, ppos);
+ if (IS_ERR(pdev))
+ return PTR_ERR(pdev);
+
+ /*
+ * In order for error recovery to work the driver needs to implement
+ * .error_detected(), so it can quiesce IO to the device, and
+ * .slot_reset() so it can re-initialise the device after a reset.
+ *
+ * Ideally they'd implement .resume() too, but some drivers which
+ * we need to support (notably IPR) don't so I guess we can tolerate
+ * that.
+ *
+ * .mmio_enabled() is mostly there as a work-around for devices which
+ * take forever to re-init after a hot reset. Implementing that is
+ * strictly optional.
+ */
+ drv = pci_dev_driver(pdev);
+ if (drv &&
+ drv->err_handler &&
+ drv->err_handler->error_detected &&
+ drv->err_handler->slot_reset) {
+ ret = count;
+ } else {
+ ret = -EOPNOTSUPP;
+ }
+
+ pci_dev_put(pdev);
+
+ return ret;
+}
+
+static const struct file_operations eeh_dev_can_recover_fops = {
+ .open = simple_open,
+ .llseek = no_llseek,
+ .write = eeh_dev_can_recover,
+ .read = eeh_debugfs_dev_usage,
+};
+
#endif
static int __init eeh_init_proc(void)
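The new eeh_dev_can_recover debugfs hook above only checks that the bound driver supplies error_detected() and slot_reset(). For reference, a minimal sketch of the driver-side handler table those checks look for; the foo_* callbacks are hypothetical, only struct pci_error_handlers and its fields are the generic PCI API:

#include <linux/pci.h>

/* Hypothetical driver callbacks, shown only to illustrate which hooks
 * eeh_dev_can_recover() requires. */
static pci_ers_result_t foo_error_detected(struct pci_dev *pdev,
					   pci_channel_state_t state)
{
	/* quiesce I/O to the device */
	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t foo_slot_reset(struct pci_dev *pdev)
{
	/* re-initialise the device after the PE reset */
	return PCI_ERS_RESULT_RECOVERED;
}

static void foo_resume(struct pci_dev *pdev)
{
	/* restart normal operation */
}

static const struct pci_error_handlers foo_err_handler = {
	.error_detected	= foo_error_detected,
	.slot_reset	= foo_slot_reset,
	.resume		= foo_resume,	/* tolerated as missing for some drivers, per the comment above */
};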
@@ -2075,22 +1913,25 @@ static int __init eeh_init_proc(void)
proc_create_single("powerpc/eeh", 0, NULL, proc_eeh_show);
#ifdef CONFIG_DEBUG_FS
debugfs_create_file_unsafe("eeh_enable", 0600,
- powerpc_debugfs_root, NULL,
+ arch_debugfs_dir, NULL,
&eeh_enable_dbgfs_ops);
debugfs_create_u32("eeh_max_freezes", 0600,
- powerpc_debugfs_root, &eeh_max_freezes);
+ arch_debugfs_dir, &eeh_max_freezes);
debugfs_create_bool("eeh_disable_recovery", 0600,
- powerpc_debugfs_root,
+ arch_debugfs_dir,
&eeh_debugfs_no_recover);
debugfs_create_file_unsafe("eeh_dev_check", 0600,
- powerpc_debugfs_root, NULL,
+ arch_debugfs_dir, NULL,
&eeh_dev_check_fops);
debugfs_create_file_unsafe("eeh_dev_break", 0600,
- powerpc_debugfs_root, NULL,
+ arch_debugfs_dir, NULL,
&eeh_dev_break_fops);
debugfs_create_file_unsafe("eeh_force_recover", 0600,
- powerpc_debugfs_root, NULL,
+ arch_debugfs_dir, NULL,
&eeh_force_recover_fops);
+ debugfs_create_file_unsafe("eeh_dev_can_recover", 0600,
+ arch_debugfs_dir, NULL,
+ &eeh_dev_can_recover_fops);
eeh_cache_debugfs_init();
#endif
}
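Throughout the eeh.c hunks above, pci_dn-based addressing gives way to the eeh_dev's cached bdfn, with the bus number in bits 15:8 and the devfn (slot/function) in bits 7:0, as seen in the edev->bdfn >> 8 and PCI_SLOT()/PCI_FUNC() usage. A small self-contained sketch of that encoding; the BDFN_* macros are illustrative, not kernel macros:

#include <stdint.h>
#include <stdio.h>

/* bdfn layout used by edev->bdfn: bus in bits 15:8, devfn in bits 7:0. */
#define BDFN(bus, dev, fn)  ((uint16_t)(((bus) << 8) | ((dev) << 3) | (fn)))
#define BDFN_BUS(bdfn)      (((bdfn) >> 8) & 0xff)
#define BDFN_SLOT(bdfn)     (((bdfn) >> 3) & 0x1f)	/* like PCI_SLOT() on the devfn byte */
#define BDFN_FUNC(bdfn)     ((bdfn) & 0x7)		/* like PCI_FUNC() on the devfn byte */

int main(void)
{
	uint16_t bdfn = BDFN(0x21, 0x00, 0x1);

	/* prints "21:00.1" */
	printf("%02x:%02x.%x\n", BDFN_BUS(bdfn), BDFN_SLOT(bdfn), BDFN_FUNC(bdfn));
	return 0;
}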
diff --git a/arch/powerpc/kernel/eeh_cache.c b/arch/powerpc/kernel/eeh_cache.c
index 6b50bf15d8c1..2f9dbf8ad2ee 100644
--- a/arch/powerpc/kernel/eeh_cache.c
+++ b/arch/powerpc/kernel/eeh_cache.c
@@ -12,8 +12,8 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>
+#include <linux/debugfs.h>
#include <asm/pci-bridge.h>
-#include <asm/debugfs.h>
#include <asm/ppc-pci.h>
@@ -264,8 +264,9 @@ static int eeh_addr_cache_show(struct seq_file *s, void *v)
{
struct pci_io_addr_range *piar;
struct rb_node *n;
+ unsigned long flags;
- spin_lock(&pci_io_addr_cache_root.piar_lock);
+ spin_lock_irqsave(&pci_io_addr_cache_root.piar_lock, flags);
for (n = rb_first(&pci_io_addr_cache_root.rb_root); n; n = rb_next(n)) {
piar = rb_entry(n, struct pci_io_addr_range, rb_node);
@@ -273,15 +274,15 @@ static int eeh_addr_cache_show(struct seq_file *s, void *v)
(piar->flags & IORESOURCE_IO) ? "i/o" : "mem",
&piar->addr_lo, &piar->addr_hi, pci_name(piar->pcidev));
}
- spin_unlock(&pci_io_addr_cache_root.piar_lock);
+ spin_unlock_irqrestore(&pci_io_addr_cache_root.piar_lock, flags);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(eeh_addr_cache);
-void eeh_cache_debugfs_init(void)
+void __init eeh_cache_debugfs_init(void)
{
debugfs_create_file_unsafe("eeh_address_cache", 0400,
- powerpc_debugfs_root, NULL,
+ arch_debugfs_dir, NULL,
&eeh_addr_cache_fops);
}
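The eeh_cache.c hunk above switches the address-cache walk from spin_lock() to spin_lock_irqsave(), presumably because the same lock can also be taken from a context where local interrupts must stay off while it is held. The general pattern, as a sketch (example_lock and example_walk are made-up names):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);

static void example_walk(void)
{
	unsigned long flags;

	/* Disable local IRQs and remember their previous state ... */
	spin_lock_irqsave(&example_lock, flags);
	/* ... touch data that an interrupt path may also take the lock for ... */
	spin_unlock_irqrestore(&example_lock, flags);
	/* ... then restore the saved IRQ state. */
}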
diff --git a/arch/powerpc/kernel/eeh_dev.c b/arch/powerpc/kernel/eeh_dev.c
deleted file mode 100644
index 7370185c7a05..000000000000
--- a/arch/powerpc/kernel/eeh_dev.c
+++ /dev/null
@@ -1,67 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * The file intends to implement dynamic creation of EEH device, which will
- * be bound with OF node and PCI device simutaneously. The EEH devices would
- * be foundamental information for EEH core components to work proerly. Besides,
- * We have to support multiple situations where dynamic creation of EEH device
- * is required:
- *
- * 1) Before PCI emunation starts, we need create EEH devices according to the
- * PCI sensitive OF nodes.
- * 2) When PCI emunation is done, we need do the binding between PCI device and
- * the associated EEH device.
- * 3) DR (Dynamic Reconfiguration) would create PCI sensitive OF node. EEH device
- * will be created while PCI sensitive OF node is detected from DR.
- * 4) PCI hotplug needs redoing the binding between PCI device and EEH device. If
- * PHB is newly inserted, we also need create EEH devices accordingly.
- *
- * Copyright Benjamin Herrenschmidt & Gavin Shan, IBM Corporation 2012.
- */
-
-#include <linux/export.h>
-#include <linux/gfp.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/pci.h>
-#include <linux/string.h>
-
-#include <asm/pci-bridge.h>
-#include <asm/ppc-pci.h>
-
-/**
- * eeh_dev_init - Create EEH device according to OF node
- * @pdn: PCI device node
- *
- * It will create EEH device according to the given OF node. The function
- * might be called by PCI emunation, DR, PHB hotplug.
- */
-struct eeh_dev *eeh_dev_init(struct pci_dn *pdn)
-{
- struct eeh_dev *edev;
-
- /* Allocate EEH device */
- edev = kzalloc(sizeof(*edev), GFP_KERNEL);
- if (!edev)
- return NULL;
-
- /* Associate EEH device with OF node */
- pdn->edev = edev;
- edev->pdn = pdn;
- edev->bdfn = (pdn->busno << 8) | pdn->devfn;
- edev->controller = pdn->phb;
-
- return edev;
-}
-
-/**
- * eeh_dev_phb_init_dynamic - Create EEH devices for devices included in PHB
- * @phb: PHB
- *
- * Scan the PHB OF node and its child association, then create the
- * EEH devices accordingly
- */
-void eeh_dev_phb_init_dynamic(struct pci_controller *phb)
-{
- /* EEH PE for PHB */
- eeh_phb_pe_create(phb);
-}
diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
index 7b048cee767c..f279295179bd 100644
--- a/arch/powerpc/kernel/eeh_driver.c
+++ b/arch/powerpc/kernel/eeh_driver.c
@@ -16,7 +16,6 @@
#include <asm/eeh_event.h>
#include <asm/ppc-pci.h>
#include <asm/pci-bridge.h>
-#include <asm/prom.h>
#include <asm/rtas.h>
struct eeh_rmv_data {
@@ -104,13 +103,13 @@ static bool eeh_edev_actionable(struct eeh_dev *edev)
*/
static inline struct pci_driver *eeh_pcid_get(struct pci_dev *pdev)
{
- if (!pdev || !pdev->driver)
+ if (!pdev || !pdev->dev.driver)
return NULL;
- if (!try_module_get(pdev->driver->driver.owner))
+ if (!try_module_get(pdev->dev.driver->owner))
return NULL;
- return pdev->driver;
+ return to_pci_driver(pdev->dev.driver);
}
/**
@@ -122,10 +121,10 @@ static inline struct pci_driver *eeh_pcid_get(struct pci_dev *pdev)
*/
static inline void eeh_pcid_put(struct pci_dev *pdev)
{
- if (!pdev || !pdev->driver)
+ if (!pdev || !pdev->dev.driver)
return;
- module_put(pdev->driver->driver.owner);
+ module_put(pdev->dev.driver->owner);
}
/**
@@ -214,7 +213,7 @@ static void eeh_dev_save_state(struct eeh_dev *edev, void *userdata)
pci_save_state(pdev);
}
-static void eeh_set_channel_state(struct eeh_pe *root, enum pci_channel_state s)
+static void eeh_set_channel_state(struct eeh_pe *root, pci_channel_state_t s)
{
struct eeh_pe *pe;
struct eeh_dev *edev, *tmp;
@@ -425,8 +424,8 @@ static enum pci_ers_result eeh_report_resume(struct eeh_dev *edev,
pci_uevent_ers(edev->pdev, PCI_ERS_RESULT_RECOVERED);
#ifdef CONFIG_PCI_IOV
- if (eeh_ops->notify_resume && eeh_dev_to_pdn(edev))
- eeh_ops->notify_resume(eeh_dev_to_pdn(edev));
+ if (eeh_ops->notify_resume)
+ eeh_ops->notify_resume(edev);
#endif
return PCI_ERS_RESULT_NONE;
}
@@ -477,7 +476,7 @@ static void *eeh_add_virt_device(struct eeh_dev *edev)
}
#ifdef CONFIG_PCI_IOV
- pci_iov_add_virtfn(edev->physfn, eeh_dev_to_pdn(edev)->vf_index);
+ pci_iov_add_virtfn(edev->physfn, edev->vf_index);
#endif
return NULL;
}
@@ -521,9 +520,7 @@ static void eeh_rmv_device(struct eeh_dev *edev, void *userdata)
if (edev->physfn) {
#ifdef CONFIG_PCI_IOV
- struct pci_dn *pdn = eeh_dev_to_pdn(edev);
-
- pci_iov_remove_virtfn(edev->physfn, pdn->vf_index);
+ pci_iov_remove_virtfn(edev->physfn, edev->vf_index);
edev->pdev = NULL;
#endif
if (rmv_data)
@@ -544,7 +541,7 @@ static void *eeh_pe_detach_dev(struct eeh_pe *pe, void *userdata)
continue;
edev->mode &= ~(EEH_DEV_DISCONNECTED | EEH_DEV_IRQ_DISABLED);
- eeh_rmv_from_parent_pe(edev);
+ eeh_pe_tree_remove(edev);
}
return NULL;
@@ -753,7 +750,7 @@ static void eeh_pe_cleanup(struct eeh_pe *pe)
* @pdev: pci_dev to check
*
* This function may return a false positive if we can't determine the slot's
- * presence state. This might happen for for PCIe slots if the PE containing
+ * presence state. This might happen for PCIe slots if the PE containing
* the upstream bridge is also frozen, or the bridge is part of the same PE
* as the device.
*
@@ -907,18 +904,19 @@ void eeh_handle_normal_event(struct eeh_pe *pe)
}
#endif /* CONFIG_STACKTRACE */
+ eeh_for_each_pe(pe, tmp_pe)
+ eeh_pe_for_each_dev(tmp_pe, edev, tmp)
+ edev->mode &= ~EEH_DEV_NO_HANDLER;
+
eeh_pe_update_time_stamp(pe);
pe->freeze_count++;
if (pe->freeze_count > eeh_max_freezes) {
pr_err("EEH: PHB#%x-PE#%x has failed %d times in the last hour and has been permanently disabled.\n",
pe->phb->global_number, pe->addr,
pe->freeze_count);
- result = PCI_ERS_RESULT_DISCONNECT;
- }
- eeh_for_each_pe(pe, tmp_pe)
- eeh_pe_for_each_dev(tmp_pe, edev, tmp)
- edev->mode &= ~EEH_DEV_NO_HANDLER;
+ goto recover_failed;
+ }
/* Walk the various device drivers attached to this slot through
* a reset sequence, giving each an opportunity to do what it needs
@@ -930,39 +928,38 @@ void eeh_handle_normal_event(struct eeh_pe *pe)
* the error. Override the result if necessary to have partially
* hotplug for this case.
*/
- if (result != PCI_ERS_RESULT_DISCONNECT) {
- pr_warn("EEH: This PCI device has failed %d times in the last hour and will be permanently disabled after %d failures.\n",
- pe->freeze_count, eeh_max_freezes);
- pr_info("EEH: Notify device drivers to shutdown\n");
- eeh_set_channel_state(pe, pci_channel_io_frozen);
- eeh_set_irq_state(pe, false);
- eeh_pe_report("error_detected(IO frozen)", pe,
- eeh_report_error, &result);
- if ((pe->type & EEH_PE_PHB) &&
- result != PCI_ERS_RESULT_NONE &&
- result != PCI_ERS_RESULT_NEED_RESET)
- result = PCI_ERS_RESULT_NEED_RESET;
- }
+ pr_warn("EEH: This PCI device has failed %d times in the last hour and will be permanently disabled after %d failures.\n",
+ pe->freeze_count, eeh_max_freezes);
+ pr_info("EEH: Notify device drivers to shutdown\n");
+ eeh_set_channel_state(pe, pci_channel_io_frozen);
+ eeh_set_irq_state(pe, false);
+ eeh_pe_report("error_detected(IO frozen)", pe,
+ eeh_report_error, &result);
+ if (result == PCI_ERS_RESULT_DISCONNECT)
+ goto recover_failed;
+
+ /*
+	 * Errors logged on a PHB are always fences which need a full
+ * PHB reset to clear so force that to happen.
+ */
+ if ((pe->type & EEH_PE_PHB) && result != PCI_ERS_RESULT_NONE)
+ result = PCI_ERS_RESULT_NEED_RESET;
/* Get the current PCI slot state. This can take a long time,
* sometimes over 300 seconds for certain systems.
*/
- if (result != PCI_ERS_RESULT_DISCONNECT) {
- rc = eeh_wait_state(pe, MAX_WAIT_FOR_RECOVERY*1000);
- if (rc < 0 || rc == EEH_STATE_NOT_SUPPORT) {
- pr_warn("EEH: Permanent failure\n");
- result = PCI_ERS_RESULT_DISCONNECT;
- }
+ rc = eeh_wait_state(pe, MAX_WAIT_FOR_RECOVERY * 1000);
+ if (rc < 0 || rc == EEH_STATE_NOT_SUPPORT) {
+ pr_warn("EEH: Permanent failure\n");
+ goto recover_failed;
}
/* Since rtas may enable MMIO when posting the error log,
* don't post the error log until after all dev drivers
* have been informed.
*/
- if (result != PCI_ERS_RESULT_DISCONNECT) {
- pr_info("EEH: Collect temporary log\n");
- eeh_slot_error_detail(pe, EEH_LOG_TEMP);
- }
+ pr_info("EEH: Collect temporary log\n");
+ eeh_slot_error_detail(pe, EEH_LOG_TEMP);
/* If all device drivers were EEH-unaware, then shut
* down all of the device drivers, and hope they
@@ -972,9 +969,8 @@ void eeh_handle_normal_event(struct eeh_pe *pe)
pr_info("EEH: Reset with hotplug activity\n");
rc = eeh_reset_device(pe, bus, NULL, false);
if (rc) {
- pr_warn("%s: Unable to reset, err=%d\n",
- __func__, rc);
- result = PCI_ERS_RESULT_DISCONNECT;
+ pr_warn("%s: Unable to reset, err=%d\n", __func__, rc);
+ goto recover_failed;
}
}
@@ -982,10 +978,10 @@ void eeh_handle_normal_event(struct eeh_pe *pe)
if (result == PCI_ERS_RESULT_CAN_RECOVER) {
pr_info("EEH: Enable I/O for affected devices\n");
rc = eeh_pci_enable(pe, EEH_OPT_THAW_MMIO);
+ if (rc < 0)
+ goto recover_failed;
- if (rc < 0) {
- result = PCI_ERS_RESULT_DISCONNECT;
- } else if (rc) {
+ if (rc) {
result = PCI_ERS_RESULT_NEED_RESET;
} else {
pr_info("EEH: Notify device drivers to resume I/O\n");
@@ -993,15 +989,13 @@ void eeh_handle_normal_event(struct eeh_pe *pe)
eeh_report_mmio_enabled, &result);
}
}
-
- /* If all devices reported they can proceed, then re-enable DMA */
if (result == PCI_ERS_RESULT_CAN_RECOVER) {
pr_info("EEH: Enabled DMA for affected devices\n");
rc = eeh_pci_enable(pe, EEH_OPT_THAW_DMA);
+ if (rc < 0)
+ goto recover_failed;
- if (rc < 0) {
- result = PCI_ERS_RESULT_DISCONNECT;
- } else if (rc) {
+ if (rc) {
result = PCI_ERS_RESULT_NEED_RESET;
} else {
/*
@@ -1019,16 +1013,15 @@ void eeh_handle_normal_event(struct eeh_pe *pe)
pr_info("EEH: Reset without hotplug activity\n");
rc = eeh_reset_device(pe, bus, &rmv_data, true);
if (rc) {
- pr_warn("%s: Cannot reset, err=%d\n",
- __func__, rc);
- result = PCI_ERS_RESULT_DISCONNECT;
- } else {
- result = PCI_ERS_RESULT_NONE;
- eeh_set_channel_state(pe, pci_channel_io_normal);
- eeh_set_irq_state(pe, true);
- eeh_pe_report("slot_reset", pe, eeh_report_reset,
- &result);
+ pr_warn("%s: Cannot reset, err=%d\n", __func__, rc);
+ goto recover_failed;
}
+
+ result = PCI_ERS_RESULT_NONE;
+ eeh_set_channel_state(pe, pci_channel_io_normal);
+ eeh_set_irq_state(pe, true);
+ eeh_pe_report("slot_reset", pe, eeh_report_reset,
+ &result);
}
if ((result == PCI_ERS_RESULT_RECOVERED) ||
@@ -1056,45 +1049,47 @@ void eeh_handle_normal_event(struct eeh_pe *pe)
}
pr_info("EEH: Recovery successful.\n");
- } else {
- /*
- * About 90% of all real-life EEH failures in the field
- * are due to poorly seated PCI cards. Only 10% or so are
- * due to actual, failed cards.
- */
- pr_err("EEH: Unable to recover from failure from PHB#%x-PE#%x.\n"
- "Please try reseating or replacing it\n",
- pe->phb->global_number, pe->addr);
+ goto out;
+ }
- eeh_slot_error_detail(pe, EEH_LOG_PERM);
+recover_failed:
+ /*
+ * About 90% of all real-life EEH failures in the field
+ * are due to poorly seated PCI cards. Only 10% or so are
+ * due to actual, failed cards.
+ */
+ pr_err("EEH: Unable to recover from failure from PHB#%x-PE#%x.\n"
+ "Please try reseating or replacing it\n",
+ pe->phb->global_number, pe->addr);
- /* Notify all devices that they're about to go down. */
- eeh_set_channel_state(pe, pci_channel_io_perm_failure);
- eeh_set_irq_state(pe, false);
- eeh_pe_report("error_detected(permanent failure)", pe,
- eeh_report_failure, NULL);
+ eeh_slot_error_detail(pe, EEH_LOG_PERM);
- /* Mark the PE to be removed permanently */
- eeh_pe_state_mark(pe, EEH_PE_REMOVED);
+ /* Notify all devices that they're about to go down. */
+ eeh_set_channel_state(pe, pci_channel_io_perm_failure);
+ eeh_set_irq_state(pe, false);
+ eeh_pe_report("error_detected(permanent failure)", pe,
+ eeh_report_failure, NULL);
- /*
- * Shut down the device drivers for good. We mark
- * all removed devices correctly to avoid access
- * the their PCI config any more.
- */
- if (pe->type & EEH_PE_VF) {
- eeh_pe_dev_traverse(pe, eeh_rmv_device, NULL);
- eeh_pe_dev_mode_mark(pe, EEH_DEV_REMOVED);
- } else {
- eeh_pe_state_clear(pe, EEH_PE_PRI_BUS, true);
- eeh_pe_dev_mode_mark(pe, EEH_DEV_REMOVED);
+ /* Mark the PE to be removed permanently */
+ eeh_pe_state_mark(pe, EEH_PE_REMOVED);
- pci_lock_rescan_remove();
- pci_hp_remove_devices(bus);
- pci_unlock_rescan_remove();
- /* The passed PE should no longer be used */
- return;
- }
+ /*
+ * Shut down the device drivers for good. We mark
+	 * all removed devices correctly to avoid accessing
+	 * their PCI config any more.
+ */
+ if (pe->type & EEH_PE_VF) {
+ eeh_pe_dev_traverse(pe, eeh_rmv_device, NULL);
+ eeh_pe_dev_mode_mark(pe, EEH_DEV_REMOVED);
+ } else {
+ eeh_pe_state_clear(pe, EEH_PE_PRI_BUS, true);
+ eeh_pe_dev_mode_mark(pe, EEH_DEV_REMOVED);
+
+ pci_lock_rescan_remove();
+ pci_hp_remove_devices(bus);
+ pci_unlock_rescan_remove();
+ /* The passed PE should no longer be used */
+ return;
}
out:
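The eeh_driver.c restructuring above folds the repeated "result != PCI_ERS_RESULT_DISCONNECT" checks into a single recover_failed: label, the usual kernel idiom for bailing out of a multi-step sequence. A stripped-down sketch of the resulting control flow; the step_* helpers are placeholders, not functions from this patch:

#include <linux/printk.h>

/* Hypothetical step functions standing in for the real recovery steps. */
static int step_freeze_notify(void)  { return 0; }
static int step_reset_and_thaw(void) { return 0; }

static void recovery_sketch(void)
{
	if (step_freeze_notify())
		goto recover_failed;
	if (step_reset_and_thaw())
		goto recover_failed;

	pr_info("EEH: Recovery successful.\n");
	return;

recover_failed:
	/* one common place to log, mark the PE removed and unplug devices */
	pr_err("EEH: Unable to recover from failure\n");
}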
diff --git a/arch/powerpc/kernel/eeh_event.c b/arch/powerpc/kernel/eeh_event.c
index a7a8dc182efb..c23a454af08a 100644
--- a/arch/powerpc/kernel/eeh_event.c
+++ b/arch/powerpc/kernel/eeh_event.c
@@ -143,7 +143,7 @@ int __eeh_send_failure_event(struct eeh_pe *pe)
int eeh_send_failure_event(struct eeh_pe *pe)
{
/*
- * If we've manually supressed recovery events via debugfs
+ * If we've manually suppressed recovery events via debugfs
* then just drop it on the floor.
*/
if (eeh_debugfs_no_recover) {
diff --git a/arch/powerpc/kernel/eeh_pe.c b/arch/powerpc/kernel/eeh_pe.c
index 177852e39a25..d2873d17d2b1 100644
--- a/arch/powerpc/kernel/eeh_pe.c
+++ b/arch/powerpc/kernel/eeh_pe.c
@@ -13,6 +13,7 @@
#include <linux/export.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
+#include <linux/of.h>
#include <linux/pci.h>
#include <linux/string.h>
@@ -251,43 +252,21 @@ void eeh_pe_dev_traverse(struct eeh_pe *root,
/**
* __eeh_pe_get - Check the PE address
- * @data: EEH PE
- * @flag: EEH device
*
* For one particular PE, it can be identified by PE address
* or tranditional BDF address. BDF address is composed of
* Bus/Device/Function number. The extra data referred by flag
* indicates which type of address should be used.
*/
-struct eeh_pe_get_flag {
- int pe_no;
- int config_addr;
-};
-
static void *__eeh_pe_get(struct eeh_pe *pe, void *flag)
{
- struct eeh_pe_get_flag *tmp = (struct eeh_pe_get_flag *) flag;
+ int *target_pe = flag;
- /* Unexpected PHB PE */
+ /* PHB PEs are special and should be ignored */
if (pe->type & EEH_PE_PHB)
return NULL;
- /*
- * We prefer PE address. For most cases, we should
- * have non-zero PE address
- */
- if (eeh_has_flag(EEH_VALID_PE_ZERO)) {
- if (tmp->pe_no == pe->addr)
- return pe;
- } else {
- if (tmp->pe_no &&
- (tmp->pe_no == pe->addr))
- return pe;
- }
-
- /* Try BDF address */
- if (tmp->config_addr &&
- (tmp->config_addr == pe->config_addr))
+ if (*target_pe == pe->addr)
return pe;
return NULL;
@@ -297,7 +276,6 @@ static void *__eeh_pe_get(struct eeh_pe *pe, void *flag)
* eeh_pe_get - Search PE based on the given address
* @phb: PCI controller
* @pe_no: PE number
- * @config_addr: Config address
*
* Search the corresponding PE based on the specified address which
* is included in the eeh device. The function is used to check if
@@ -306,75 +284,30 @@ static void *__eeh_pe_get(struct eeh_pe *pe, void *flag)
* which is composed of PCI bus/device/function number, or unified
* PE address.
*/
-struct eeh_pe *eeh_pe_get(struct pci_controller *phb,
- int pe_no, int config_addr)
+struct eeh_pe *eeh_pe_get(struct pci_controller *phb, int pe_no)
{
struct eeh_pe *root = eeh_phb_pe_get(phb);
- struct eeh_pe_get_flag tmp = { pe_no, config_addr };
- struct eeh_pe *pe;
-
- pe = eeh_pe_traverse(root, __eeh_pe_get, &tmp);
- return pe;
+ return eeh_pe_traverse(root, __eeh_pe_get, &pe_no);
}
/**
- * eeh_pe_get_parent - Retrieve the parent PE
+ * eeh_pe_tree_insert - Add EEH device to parent PE
* @edev: EEH device
+ * @new_pe_parent: PE to create additional PEs under
*
- * The whole PEs existing in the system are organized as hierarchy
- * tree. The function is used to retrieve the parent PE according
- * to the parent EEH device.
- */
-static struct eeh_pe *eeh_pe_get_parent(struct eeh_dev *edev)
-{
- struct eeh_dev *parent;
- struct pci_dn *pdn = eeh_dev_to_pdn(edev);
-
- /*
- * It might have the case for the indirect parent
- * EEH device already having associated PE, but
- * the direct parent EEH device doesn't have yet.
- */
- if (edev->physfn)
- pdn = pci_get_pdn(edev->physfn);
- else
- pdn = pdn ? pdn->parent : NULL;
- while (pdn) {
- /* We're poking out of PCI territory */
- parent = pdn_to_eeh_dev(pdn);
- if (!parent)
- return NULL;
-
- if (parent->pe)
- return parent->pe;
-
- pdn = pdn->parent;
- }
-
- return NULL;
-}
-
-/**
- * eeh_add_to_parent_pe - Add EEH device to parent PE
- * @edev: EEH device
+ * Add EEH device to the PE in edev->pe_config_addr. If a PE already
+ * exists with that address then @edev is added to that PE. Otherwise
+ * a new PE is created and inserted into the PE tree as a child of
+ * @new_pe_parent.
*
- * Add EEH device to the parent PE. If the parent PE already
- * exists, the PE type will be changed to EEH_PE_BUS. Otherwise,
- * we have to create new PE to hold the EEH device and the new
- * PE will be linked to its parent PE as well.
+ * If @new_pe_parent is NULL then the new PE will be inserted
+ * directly under the PHB.
*/
-int eeh_add_to_parent_pe(struct eeh_dev *edev)
+int eeh_pe_tree_insert(struct eeh_dev *edev, struct eeh_pe *new_pe_parent)
{
+ struct pci_controller *hose = edev->controller;
struct eeh_pe *pe, *parent;
- struct pci_dn *pdn = eeh_dev_to_pdn(edev);
- int config_addr = (pdn->busno << 8) | (pdn->devfn);
-
- /* Check if the PE number is valid */
- if (!eeh_has_flag(EEH_VALID_PE_ZERO) && !edev->pe_config_addr) {
- eeh_edev_err(edev, "PE#0 is invalid for this PHB!\n");
- return -EINVAL;
- }
/*
* Search the PE has been existing or not according
@@ -382,7 +315,7 @@ int eeh_add_to_parent_pe(struct eeh_dev *edev)
* PE should be composed of PCI bus and its subordinate
* components.
*/
- pe = eeh_pe_get(pdn->phb, edev->pe_config_addr, config_addr);
+ pe = eeh_pe_get(hose, edev->pe_config_addr);
if (pe) {
if (pe->type & EEH_PE_INVALID) {
list_add_tail(&edev->entry, &pe->edevs);
@@ -399,8 +332,7 @@ int eeh_add_to_parent_pe(struct eeh_dev *edev)
parent = parent->parent;
}
- eeh_edev_dbg(edev,
- "Added to device PE (parent: PE#%x)\n",
+ eeh_edev_dbg(edev, "Added to existing PE (parent: PE#%x)\n",
pe->parent->addr);
} else {
/* Mark the PE as type of PCI bus */
@@ -416,15 +348,15 @@ int eeh_add_to_parent_pe(struct eeh_dev *edev)
/* Create a new EEH PE */
if (edev->physfn)
- pe = eeh_pe_alloc(pdn->phb, EEH_PE_VF);
+ pe = eeh_pe_alloc(hose, EEH_PE_VF);
else
- pe = eeh_pe_alloc(pdn->phb, EEH_PE_DEVICE);
+ pe = eeh_pe_alloc(hose, EEH_PE_DEVICE);
if (!pe) {
pr_err("%s: out of memory!\n", __func__);
return -ENOMEM;
}
- pe->addr = edev->pe_config_addr;
- pe->config_addr = config_addr;
+
+ pe->addr = edev->pe_config_addr;
/*
* Put the new EEH PE into hierarchy tree. If the parent
@@ -432,34 +364,35 @@ int eeh_add_to_parent_pe(struct eeh_dev *edev)
* to PHB directly. Otherwise, we have to associate the
* PE with its parent.
*/
- parent = eeh_pe_get_parent(edev);
- if (!parent) {
- parent = eeh_phb_pe_get(pdn->phb);
- if (!parent) {
+ if (!new_pe_parent) {
+ new_pe_parent = eeh_phb_pe_get(hose);
+ if (!new_pe_parent) {
pr_err("%s: No PHB PE is found (PHB Domain=%d)\n",
- __func__, pdn->phb->global_number);
+ __func__, hose->global_number);
edev->pe = NULL;
kfree(pe);
return -EEXIST;
}
}
- pe->parent = parent;
+
+ /* link new PE into the tree */
+ pe->parent = new_pe_parent;
+ list_add_tail(&pe->child, &new_pe_parent->child_list);
/*
* Put the newly created PE into the child list and
* link the EEH device accordingly.
*/
- list_add_tail(&pe->child, &parent->child_list);
list_add_tail(&edev->entry, &pe->edevs);
edev->pe = pe;
- eeh_edev_dbg(edev, "Added to device PE (parent: PE#%x)\n",
- pe->parent->addr);
+ eeh_edev_dbg(edev, "Added to new (parent: PE#%x)\n",
+ new_pe_parent->addr);
return 0;
}
/**
- * eeh_rmv_from_parent_pe - Remove one EEH device from the associated PE
+ * eeh_pe_tree_remove - Remove one EEH device from the associated PE
* @edev: EEH device
*
* The PE hierarchy tree might be changed when doing PCI hotplug.
@@ -467,7 +400,7 @@ int eeh_add_to_parent_pe(struct eeh_dev *edev)
* during EEH recovery. So we have to call the function remove the
* corresponding PE accordingly if necessary.
*/
-int eeh_rmv_from_parent_pe(struct eeh_dev *edev)
+int eeh_pe_tree_remove(struct eeh_dev *edev)
{
struct eeh_pe *pe, *parent, *child;
bool keep, recover;
@@ -698,7 +631,6 @@ void eeh_pe_state_clear(struct eeh_pe *root, int state, bool include_passed)
*/
static void eeh_bridge_check_link(struct eeh_dev *edev)
{
- struct pci_dn *pdn = eeh_dev_to_pdn(edev);
int cap;
uint32_t val;
int timeout = 0;
@@ -714,32 +646,32 @@ static void eeh_bridge_check_link(struct eeh_dev *edev)
/* Check slot status */
cap = edev->pcie_cap;
- eeh_ops->read_config(pdn, cap + PCI_EXP_SLTSTA, 2, &val);
+ eeh_ops->read_config(edev, cap + PCI_EXP_SLTSTA, 2, &val);
if (!(val & PCI_EXP_SLTSTA_PDS)) {
eeh_edev_dbg(edev, "No card in the slot (0x%04x) !\n", val);
return;
}
/* Check power status if we have the capability */
- eeh_ops->read_config(pdn, cap + PCI_EXP_SLTCAP, 2, &val);
+ eeh_ops->read_config(edev, cap + PCI_EXP_SLTCAP, 2, &val);
if (val & PCI_EXP_SLTCAP_PCP) {
- eeh_ops->read_config(pdn, cap + PCI_EXP_SLTCTL, 2, &val);
+ eeh_ops->read_config(edev, cap + PCI_EXP_SLTCTL, 2, &val);
if (val & PCI_EXP_SLTCTL_PCC) {
eeh_edev_dbg(edev, "In power-off state, power it on ...\n");
val &= ~(PCI_EXP_SLTCTL_PCC | PCI_EXP_SLTCTL_PIC);
val |= (0x0100 & PCI_EXP_SLTCTL_PIC);
- eeh_ops->write_config(pdn, cap + PCI_EXP_SLTCTL, 2, val);
+ eeh_ops->write_config(edev, cap + PCI_EXP_SLTCTL, 2, val);
msleep(2 * 1000);
}
}
/* Enable link */
- eeh_ops->read_config(pdn, cap + PCI_EXP_LNKCTL, 2, &val);
+ eeh_ops->read_config(edev, cap + PCI_EXP_LNKCTL, 2, &val);
val &= ~PCI_EXP_LNKCTL_LD;
- eeh_ops->write_config(pdn, cap + PCI_EXP_LNKCTL, 2, val);
+ eeh_ops->write_config(edev, cap + PCI_EXP_LNKCTL, 2, val);
/* Check link */
- eeh_ops->read_config(pdn, cap + PCI_EXP_LNKCAP, 4, &val);
+ eeh_ops->read_config(edev, cap + PCI_EXP_LNKCAP, 4, &val);
if (!(val & PCI_EXP_LNKCAP_DLLLARC)) {
eeh_edev_dbg(edev, "No link reporting capability (0x%08x) \n", val);
msleep(1000);
@@ -752,7 +684,7 @@ static void eeh_bridge_check_link(struct eeh_dev *edev)
msleep(20);
timeout += 20;
- eeh_ops->read_config(pdn, cap + PCI_EXP_LNKSTA, 2, &val);
+ eeh_ops->read_config(edev, cap + PCI_EXP_LNKSTA, 2, &val);
if (val & PCI_EXP_LNKSTA_DLLLA)
break;
}
@@ -769,7 +701,6 @@ static void eeh_bridge_check_link(struct eeh_dev *edev)
static void eeh_restore_bridge_bars(struct eeh_dev *edev)
{
- struct pci_dn *pdn = eeh_dev_to_pdn(edev);
int i;
/*
@@ -777,20 +708,20 @@ static void eeh_restore_bridge_bars(struct eeh_dev *edev)
* Bus numbers and windows: 0x18 - 0x30
*/
for (i = 4; i < 13; i++)
- eeh_ops->write_config(pdn, i*4, 4, edev->config_space[i]);
+ eeh_ops->write_config(edev, i*4, 4, edev->config_space[i]);
/* Rom: 0x38 */
- eeh_ops->write_config(pdn, 14*4, 4, edev->config_space[14]);
+ eeh_ops->write_config(edev, 14*4, 4, edev->config_space[14]);
/* Cache line & Latency timer: 0xC 0xD */
- eeh_ops->write_config(pdn, PCI_CACHE_LINE_SIZE, 1,
+ eeh_ops->write_config(edev, PCI_CACHE_LINE_SIZE, 1,
SAVED_BYTE(PCI_CACHE_LINE_SIZE));
- eeh_ops->write_config(pdn, PCI_LATENCY_TIMER, 1,
- SAVED_BYTE(PCI_LATENCY_TIMER));
+ eeh_ops->write_config(edev, PCI_LATENCY_TIMER, 1,
+ SAVED_BYTE(PCI_LATENCY_TIMER));
/* Max latency, min grant, interrupt ping and line: 0x3C */
- eeh_ops->write_config(pdn, 15*4, 4, edev->config_space[15]);
+ eeh_ops->write_config(edev, 15*4, 4, edev->config_space[15]);
/* PCI Command: 0x4 */
- eeh_ops->write_config(pdn, PCI_COMMAND, 4, edev->config_space[1] |
+ eeh_ops->write_config(edev, PCI_COMMAND, 4, edev->config_space[1] |
PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
/* Check the PCIe link is ready */
@@ -799,28 +730,27 @@ static void eeh_restore_bridge_bars(struct eeh_dev *edev)
static void eeh_restore_device_bars(struct eeh_dev *edev)
{
- struct pci_dn *pdn = eeh_dev_to_pdn(edev);
int i;
u32 cmd;
for (i = 4; i < 10; i++)
- eeh_ops->write_config(pdn, i*4, 4, edev->config_space[i]);
+ eeh_ops->write_config(edev, i*4, 4, edev->config_space[i]);
/* 12 == Expansion ROM Address */
- eeh_ops->write_config(pdn, 12*4, 4, edev->config_space[12]);
+ eeh_ops->write_config(edev, 12*4, 4, edev->config_space[12]);
- eeh_ops->write_config(pdn, PCI_CACHE_LINE_SIZE, 1,
+ eeh_ops->write_config(edev, PCI_CACHE_LINE_SIZE, 1,
SAVED_BYTE(PCI_CACHE_LINE_SIZE));
- eeh_ops->write_config(pdn, PCI_LATENCY_TIMER, 1,
+ eeh_ops->write_config(edev, PCI_LATENCY_TIMER, 1,
SAVED_BYTE(PCI_LATENCY_TIMER));
/* max latency, min grant, interrupt pin and line */
- eeh_ops->write_config(pdn, 15*4, 4, edev->config_space[15]);
+ eeh_ops->write_config(edev, 15*4, 4, edev->config_space[15]);
/*
* Restore PERR & SERR bits, some devices require it,
* don't touch the other command bits
*/
- eeh_ops->read_config(pdn, PCI_COMMAND, 4, &cmd);
+ eeh_ops->read_config(edev, PCI_COMMAND, 4, &cmd);
if (edev->config_space[1] & PCI_COMMAND_PARITY)
cmd |= PCI_COMMAND_PARITY;
else
@@ -829,7 +759,7 @@ static void eeh_restore_device_bars(struct eeh_dev *edev)
cmd |= PCI_COMMAND_SERR;
else
cmd &= ~PCI_COMMAND_SERR;
- eeh_ops->write_config(pdn, PCI_COMMAND, 4, cmd);
+ eeh_ops->write_config(edev, PCI_COMMAND, 4, cmd);
}
/**
@@ -843,16 +773,14 @@ static void eeh_restore_device_bars(struct eeh_dev *edev)
*/
static void eeh_restore_one_device_bars(struct eeh_dev *edev, void *flag)
{
- struct pci_dn *pdn = eeh_dev_to_pdn(edev);
-
/* Do special restore for bridges */
if (edev->mode & EEH_DEV_BRIDGE)
eeh_restore_bridge_bars(edev);
else
eeh_restore_device_bars(edev);
- if (eeh_ops->restore_config && pdn)
- eeh_ops->restore_config(pdn);
+ if (eeh_ops->restore_config)
+ eeh_ops->restore_config(edev);
}
/**
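The simplified __eeh_pe_get() above finds a PE purely by its address, driven through eeh_pe_traverse() and a match callback. The same visitor-callback lookup, reduced to a self-contained sketch with a made-up node type:

#include <stddef.h>

struct node {
	int addr;
	struct node *child;
	struct node *sibling;
};

typedef void *(*visit_fn)(struct node *n, void *arg);

/* Depth-first walk; stop and return as soon as the callback reports a match. */
static void *traverse(struct node *n, visit_fn fn, void *arg)
{
	void *ret;

	for (; n; n = n->sibling) {
		ret = fn(n, arg);
		if (ret)
			return ret;
		ret = traverse(n->child, fn, arg);
		if (ret)
			return ret;
	}
	return NULL;
}

/* Analogue of __eeh_pe_get(): match on the address passed via @arg. */
static void *match_addr(struct node *n, void *arg)
{
	int *target = arg;

	return (n->addr == *target) ? n : NULL;
}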
diff --git a/arch/powerpc/kernel/eeh_sysfs.c b/arch/powerpc/kernel/eeh_sysfs.c
index 4fb0f1e1017a..706e1eb95efe 100644
--- a/arch/powerpc/kernel/eeh_sysfs.c
+++ b/arch/powerpc/kernel/eeh_sysfs.c
@@ -6,6 +6,7 @@
*
* Send comments and feedback to Linas Vepstas <linas@austin.ibm.com>
*/
+#include <linux/of.h>
#include <linux/pci.h>
#include <linux/stat.h>
#include <asm/ppc-pci.h>
@@ -99,7 +100,7 @@ static ssize_t eeh_notify_resume_store(struct device *dev,
if (!edev || !edev->pe || !eeh_ops->notify_resume)
return -ENODEV;
- if (eeh_ops->notify_resume(pci_get_pdn(pdev)))
+ if (eeh_ops->notify_resume(edev))
return -EIO;
return count;
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index 16af0d8d90a8..3fc7c9886bb7 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -28,256 +28,37 @@
#include <asm/unistd.h>
#include <asm/ptrace.h>
#include <asm/export.h>
-#include <asm/asm-405.h>
#include <asm/feature-fixups.h>
#include <asm/barrier.h>
#include <asm/kup.h>
#include <asm/bug.h>
+#include <asm/interrupt.h>
#include "head_32.h"
/*
+ * powerpc relies on return from interrupt/syscall being context synchronising
+ * (which rfi is) to support ARCH_HAS_MEMBARRIER_SYNC_CORE without additional
+ * synchronisation instructions.
+ */
+
+/*
* Align to 4k in order to ensure that all functions modyfing srr0/srr1
* fit into one page in order to not encounter a TLB miss between the
* modification of srr0/srr1 and the associated rfi.
*/
.align 12
-#ifdef CONFIG_BOOKE
- .globl mcheck_transfer_to_handler
-mcheck_transfer_to_handler:
- mfspr r0,SPRN_DSRR0
- stw r0,_DSRR0(r11)
- mfspr r0,SPRN_DSRR1
- stw r0,_DSRR1(r11)
- /* fall through */
-
- .globl debug_transfer_to_handler
-debug_transfer_to_handler:
- mfspr r0,SPRN_CSRR0
- stw r0,_CSRR0(r11)
- mfspr r0,SPRN_CSRR1
- stw r0,_CSRR1(r11)
- /* fall through */
-
- .globl crit_transfer_to_handler
-crit_transfer_to_handler:
-#ifdef CONFIG_PPC_BOOK3E_MMU
- mfspr r0,SPRN_MAS0
- stw r0,MAS0(r11)
- mfspr r0,SPRN_MAS1
- stw r0,MAS1(r11)
- mfspr r0,SPRN_MAS2
- stw r0,MAS2(r11)
- mfspr r0,SPRN_MAS3
- stw r0,MAS3(r11)
- mfspr r0,SPRN_MAS6
- stw r0,MAS6(r11)
-#ifdef CONFIG_PHYS_64BIT
- mfspr r0,SPRN_MAS7
- stw r0,MAS7(r11)
-#endif /* CONFIG_PHYS_64BIT */
-#endif /* CONFIG_PPC_BOOK3E_MMU */
-#ifdef CONFIG_44x
- mfspr r0,SPRN_MMUCR
- stw r0,MMUCR(r11)
-#endif
- mfspr r0,SPRN_SRR0
- stw r0,_SRR0(r11)
- mfspr r0,SPRN_SRR1
- stw r0,_SRR1(r11)
-
- /* set the stack limit to the current stack */
- mfspr r8,SPRN_SPRG_THREAD
- lwz r0,KSP_LIMIT(r8)
- stw r0,SAVED_KSP_LIMIT(r11)
- rlwinm r0,r1,0,0,(31 - THREAD_SHIFT)
- stw r0,KSP_LIMIT(r8)
- /* fall through */
-#endif
-
-#ifdef CONFIG_40x
- .globl crit_transfer_to_handler
-crit_transfer_to_handler:
- lwz r0,crit_r10@l(0)
- stw r0,GPR10(r11)
- lwz r0,crit_r11@l(0)
- stw r0,GPR11(r11)
- mfspr r0,SPRN_SRR0
- stw r0,crit_srr0@l(0)
- mfspr r0,SPRN_SRR1
- stw r0,crit_srr1@l(0)
-
- /* set the stack limit to the current stack */
- mfspr r8,SPRN_SPRG_THREAD
- lwz r0,KSP_LIMIT(r8)
- stw r0,saved_ksp_limit@l(0)
- rlwinm r0,r1,0,0,(31 - THREAD_SHIFT)
- stw r0,KSP_LIMIT(r8)
- /* fall through */
-#endif
-
-/*
- * This code finishes saving the registers to the exception frame
- * and jumps to the appropriate handler for the exception, turning
- * on address translation.
- * Note that we rely on the caller having set cr0.eq iff the exception
- * occurred in kernel mode (i.e. MSR:PR = 0).
- */
- .globl transfer_to_handler_full
-transfer_to_handler_full:
- SAVE_NVGPRS(r11)
- /* fall through */
-
- .globl transfer_to_handler
-transfer_to_handler:
- stw r2,GPR2(r11)
- stw r12,_NIP(r11)
- stw r9,_MSR(r11)
- andi. r2,r9,MSR_PR
- mfctr r12
- mfspr r2,SPRN_XER
- stw r12,_CTR(r11)
- stw r2,_XER(r11)
- mfspr r12,SPRN_SPRG_THREAD
- tovirt_vmstack r12, r12
- beq 2f /* if from user, fix up THREAD.regs */
- addi r2, r12, -THREAD
- addi r11,r1,STACK_FRAME_OVERHEAD
- stw r11,PT_REGS(r12)
-#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
- /* Check to see if the dbcr0 register is set up to debug. Use the
- internal debug mode bit to do this. */
- lwz r12,THREAD_DBCR0(r12)
- andis. r12,r12,DBCR0_IDM@h
-#endif
- ACCOUNT_CPU_USER_ENTRY(r2, r11, r12)
-#ifdef CONFIG_PPC_BOOK3S_32
- kuep_lock r11, r12
-#endif
-#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
- beq+ 3f
- /* From user and task is ptraced - load up global dbcr0 */
- li r12,-1 /* clear all pending debug events */
- mtspr SPRN_DBSR,r12
- lis r11,global_dbcr0@ha
- tophys(r11,r11)
- addi r11,r11,global_dbcr0@l
-#ifdef CONFIG_SMP
- lwz r9,TASK_CPU(r2)
- slwi r9,r9,3
- add r11,r11,r9
-#endif
- lwz r12,0(r11)
- mtspr SPRN_DBCR0,r12
- lwz r12,4(r11)
- addi r12,r12,-1
- stw r12,4(r11)
-#endif
-
- b 3f
-
-2: /* if from kernel, check interrupted DOZE/NAP mode and
- * check for stack overflow
- */
- kuap_save_and_lock r11, r12, r9, r2, r6
- addi r2, r12, -THREAD
-#ifndef CONFIG_VMAP_STACK
- lwz r9,KSP_LIMIT(r12)
- cmplw r1,r9 /* if r1 <= ksp_limit */
- ble- stack_ovf /* then the kernel stack overflowed */
-#endif
-5:
-#if defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_E500)
+#if defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_PPC_E500)
+ .globl prepare_transfer_to_handler
+prepare_transfer_to_handler:
+ /* if from kernel, check interrupted DOZE/NAP mode */
lwz r12,TI_LOCAL_FLAGS(r2)
mtcrf 0x01,r12
bt- 31-TLF_NAPPING,4f
bt- 31-TLF_SLEEPING,7f
-#endif /* CONFIG_PPC_BOOK3S_32 || CONFIG_E500 */
- .globl transfer_to_handler_cont
-transfer_to_handler_cont:
-3:
- mflr r9
- tovirt_novmstack r2, r2 /* set r2 to current */
- tovirt_vmstack r9, r9
- lwz r11,0(r9) /* virtual address of handler */
- lwz r9,4(r9) /* where to go when done */
-#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
- mtspr SPRN_NRI, r0
-#endif
-#ifdef CONFIG_TRACE_IRQFLAGS
- /*
- * When tracing IRQ state (lockdep) we enable the MMU before we call
- * the IRQ tracing functions as they might access vmalloc space or
- * perform IOs for console output.
- *
- * To speed up the syscall path where interrupts stay on, let's check
- * first if we are changing the MSR value at all.
- */
- tophys_novmstack r12, r1
- lwz r12,_MSR(r12)
- andi. r12,r12,MSR_EE
- bne 1f
-
- /* MSR isn't changing, just transition directly */
-#endif
- mtspr SPRN_SRR0,r11
- mtspr SPRN_SRR1,r10
- mtlr r9
- SYNC
- RFI /* jump to handler, enable MMU */
-
-#ifdef CONFIG_TRACE_IRQFLAGS
-1: /* MSR is changing, re-enable MMU so we can notify lockdep. We need to
- * keep interrupts disabled at this point otherwise we might risk
- * taking an interrupt before we tell lockdep they are enabled.
- */
- lis r12,reenable_mmu@h
- ori r12,r12,reenable_mmu@l
- LOAD_REG_IMMEDIATE(r0, MSR_KERNEL)
- mtspr SPRN_SRR0,r12
- mtspr SPRN_SRR1,r0
- SYNC
- RFI
-
-reenable_mmu:
- /*
- * We save a bunch of GPRs,
- * r3 can be different from GPR3(r1) at this point, r9 and r11
- * contains the old MSR and handler address respectively,
- * r4 & r5 can contain page fault arguments that need to be passed
- * along as well. r12, CCR, CTR, XER etc... are left clobbered as
- * they aren't useful past this point (aren't syscall arguments),
- * the rest is restored from the exception frame.
- */
-
- stwu r1,-32(r1)
- stw r9,8(r1)
- stw r11,12(r1)
- stw r3,16(r1)
- stw r4,20(r1)
- stw r5,24(r1)
-
- /* If we are disabling interrupts (normal case), simply log it with
- * lockdep
- */
-1: bl trace_hardirqs_off
-2: lwz r5,24(r1)
- lwz r4,20(r1)
- lwz r3,16(r1)
- lwz r11,12(r1)
- lwz r9,8(r1)
- addi r1,r1,32
- lwz r0,GPR0(r1)
- lwz r6,GPR6(r1)
- lwz r7,GPR7(r1)
- lwz r8,GPR8(r1)
- mtctr r11
- mtlr r9
- bctr /* jump to handler */
-#endif /* CONFIG_TRACE_IRQFLAGS */
+ blr
-#if defined (CONFIG_PPC_BOOK3S_32) || defined(CONFIG_E500)
4: rlwinm r12,r12,0,~_TLF_NAPPING
stw r12,TI_LOCAL_FLAGS(r2)
b power_save_ppc32_restore
@@ -287,186 +68,107 @@ reenable_mmu:
lwz r9,_MSR(r11) /* if sleeping, clear MSR.EE */
rlwinm r9,r9,0,~MSR_EE
lwz r12,_LINK(r11) /* and return to address in LR */
- kuap_restore r11, r2, r3, r4, r5
- lwz r2, GPR2(r11)
+ REST_GPR(2, r11)
b fast_exception_return
-#endif
-
-#ifndef CONFIG_VMAP_STACK
-/*
- * On kernel stack overflow, load up an initial stack pointer
- * and call StackOverflow(regs), which should not return.
- */
-stack_ovf:
- /* sometimes we use a statically-allocated stack, which is OK. */
- lis r12,_end@h
- ori r12,r12,_end@l
- cmplw r1,r12
- ble 5b /* r1 <= &_end is OK */
- SAVE_NVGPRS(r11)
- addi r3,r1,STACK_FRAME_OVERHEAD
- lis r1,init_thread_union@ha
- addi r1,r1,init_thread_union@l
- addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
- lis r9,StackOverflow@ha
- addi r9,r9,StackOverflow@l
- LOAD_REG_IMMEDIATE(r10,MSR_KERNEL)
-#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
- mtspr SPRN_NRI, r0
-#endif
- mtspr SPRN_SRR0,r9
- mtspr SPRN_SRR1,r10
- SYNC
- RFI
-#endif
-
-#ifdef CONFIG_TRACE_IRQFLAGS
-trace_syscall_entry_irq_off:
- /*
- * Syscall shouldn't happen while interrupts are disabled,
- * so let's do a warning here.
- */
-0: trap
- EMIT_BUG_ENTRY 0b,__FILE__,__LINE__, BUGFLAG_WARNING
- bl trace_hardirqs_on
+_ASM_NOKPROBE_SYMBOL(prepare_transfer_to_handler)
+#endif /* CONFIG_PPC_BOOK3S_32 || CONFIG_PPC_E500 */
+
+#if defined(CONFIG_PPC_KUEP) && defined(CONFIG_PPC_BOOK3S_32)
+ .globl __kuep_lock
+__kuep_lock:
+ lwz r9, THREAD+THSR0(r2)
+ update_user_segments_by_4 r9, r10, r11, r12
+ blr
- /* Now enable for real */
- LOAD_REG_IMMEDIATE(r10, MSR_KERNEL | MSR_EE)
- mtmsr r10
+__kuep_unlock:
+ lwz r9, THREAD+THSR0(r2)
+ rlwinm r9,r9,0,~SR_NX
+ update_user_segments_by_4 r9, r10, r11, r12
+ blr
- REST_GPR(0, r1)
- REST_4GPRS(3, r1)
- REST_2GPRS(7, r1)
- b DoSyscall
-#endif /* CONFIG_TRACE_IRQFLAGS */
+.macro kuep_lock
+ bl __kuep_lock
+.endm
+.macro kuep_unlock
+ bl __kuep_unlock
+.endm
+#else
+.macro kuep_lock
+.endm
+.macro kuep_unlock
+.endm
+#endif
.globl transfer_to_syscall
transfer_to_syscall:
-#ifdef CONFIG_TRACE_IRQFLAGS
- andi. r12,r9,MSR_EE
- beq- trace_syscall_entry_irq_off
-#endif /* CONFIG_TRACE_IRQFLAGS */
-
-/*
- * Handle a system call.
- */
- .stabs "arch/powerpc/kernel/",N_SO,0,0,0f
- .stabs "entry_32.S",N_SO,0,0,0f
-0:
-
-_GLOBAL(DoSyscall)
- stw r3,ORIG_GPR3(r1)
- li r12,0
- stw r12,RESULT(r1)
-#ifdef CONFIG_TRACE_IRQFLAGS
- /* Make sure interrupts are enabled */
- mfmsr r11
- andi. r12,r11,MSR_EE
- /* We came in with interrupts disabled, we WARN and mark them enabled
- * for lockdep now */
-0: tweqi r12, 0
- EMIT_BUG_ENTRY 0b,__FILE__,__LINE__, BUGFLAG_WARNING
-#endif /* CONFIG_TRACE_IRQFLAGS */
- lwz r11,TI_FLAGS(r2)
- andi. r11,r11,_TIF_SYSCALL_DOTRACE
- bne- syscall_dotrace
-syscall_dotrace_cont:
- cmplwi 0,r0,NR_syscalls
- lis r10,sys_call_table@h
- ori r10,r10,sys_call_table@l
- slwi r0,r0,2
- bge- 66f
+ stw r3, ORIG_GPR3(r1)
+ stw r11, GPR1(r1)
+ stw r11, 0(r1)
+ mflr r12
+ stw r12, _LINK(r1)
+#ifdef CONFIG_BOOKE_OR_40x
+ rlwinm r9,r9,0,14,12 /* clear MSR_WE (necessary?) */
+#endif
+ lis r12,STACK_FRAME_REGS_MARKER@ha /* exception frame marker */
+ SAVE_GPR(2, r1)
+ addi r12,r12,STACK_FRAME_REGS_MARKER@l
+ stw r9,_MSR(r1)
+ li r2, INTERRUPT_SYSCALL
+ stw r12,8(r1)
+ stw r2,_TRAP(r1)
+ SAVE_GPR(0, r1)
+ SAVE_GPRS(3, 8, r1)
+ addi r2,r10,-THREAD
+ SAVE_NVGPRS(r1)
+ kuep_lock
- barrier_nospec_asm
- /*
- * Prevent the load of the handler below (based on the user-passed
- * system call number) being speculatively executed until the test
- * against NR_syscalls and branch to .66f above has
- * committed.
- */
+ /* Calling convention has r3 = regs, r4 = orig r0 */
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ mr r4,r0
+ bl system_call_exception
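(For reference, and assuming the interrupt.c prototype of the same era: per the calling-convention comment above, the C handler reached here takes the register frame and the original r0, roughly
	long system_call_exception(struct pt_regs *regs, unsigned long r0);
and its return value becomes the first argument to syscall_exit_prepare() below.)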
- lwzx r10,r10,r0 /* Fetch system call handler [ptr] */
- mtlr r10
- addi r9,r1,STACK_FRAME_OVERHEAD
- PPC440EP_ERR42
- blrl /* Call handler */
- .globl ret_from_syscall
ret_from_syscall:
-#ifdef CONFIG_DEBUG_RSEQ
- /* Check whether the syscall is issued inside a restartable sequence */
- stw r3,GPR3(r1)
- addi r3,r1,STACK_FRAME_OVERHEAD
- bl rseq_syscall
- lwz r3,GPR3(r1)
-#endif
- mr r6,r3
- /* disable interrupts so current_thread_info()->flags can't change */
- LOAD_REG_IMMEDIATE(r10,MSR_KERNEL) /* doesn't include MSR_EE */
- /* Note: We don't bother telling lockdep about it */
- SYNC
- mtmsr r10
- lwz r9,TI_FLAGS(r2)
- li r8,-MAX_ERRNO
- andi. r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
- bne- syscall_exit_work
- cmplw 0,r3,r8
- blt+ syscall_exit_cont
- lwz r11,_CCR(r1) /* Load CR */
- neg r3,r3
- oris r11,r11,0x1000 /* Set SO bit in CR */
- stw r11,_CCR(r1)
-syscall_exit_cont:
- lwz r8,_MSR(r1)
-#ifdef CONFIG_TRACE_IRQFLAGS
- /* If we are going to return from the syscall with interrupts
- * off, we trace that here. It shouldn't normally happen.
- */
- andi. r10,r8,MSR_EE
- bne+ 1f
- stw r3,GPR3(r1)
- bl trace_hardirqs_off
- lwz r3,GPR3(r1)
-1:
-#endif /* CONFIG_TRACE_IRQFLAGS */
-#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
- /* If the process has its own DBCR0 value, load it up. The internal
- debug mode bit tells us that dbcr0 should be loaded. */
- lwz r0,THREAD+THREAD_DBCR0(r2)
- andis. r10,r0,DBCR0_IDM@h
- bnel- load_dbcr0
-#endif
-#ifdef CONFIG_44x
-BEGIN_MMU_FTR_SECTION
+ addi r4,r1,STACK_FRAME_OVERHEAD
+ li r5,0
+ bl syscall_exit_prepare
+#ifdef CONFIG_PPC_47x
lis r4,icache_44x_need_flush@ha
lwz r5,icache_44x_need_flush@l(r4)
cmplwi cr0,r5,0
bne- 2f
-1:
-END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_47x)
-#endif /* CONFIG_44x */
-BEGIN_FTR_SECTION
- lwarx r7,0,r1
-END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
- stwcx. r0,0,r1 /* to clear the reservation */
- ACCOUNT_CPU_USER_EXIT(r2, r5, r7)
-#ifdef CONFIG_PPC_BOOK3S_32
- kuep_unlock r5, r7
-#endif
- kuap_check r2, r4
+#endif /* CONFIG_PPC_47x */
+ kuep_unlock
lwz r4,_LINK(r1)
lwz r5,_CCR(r1)
mtlr r4
- mtcr r5
lwz r7,_NIP(r1)
- lwz r2,GPR2(r1)
- lwz r1,GPR1(r1)
-#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
- mtspr SPRN_NRI, r0
-#endif
+ lwz r8,_MSR(r1)
+ cmpwi r3,0
+ REST_GPR(3, r1)
+syscall_exit_finish:
mtspr SPRN_SRR0,r7
mtspr SPRN_SRR1,r8
- SYNC
- RFI
+
+ bne 3f
+ mtcr r5
+
+1: REST_GPR(2, r1)
+ REST_GPR(1, r1)
+ rfi
+#ifdef CONFIG_40x
+ b . /* Prevent prefetch past rfi */
+#endif
+
+3: mtcr r5
+ lwz r4,_CTR(r1)
+ lwz r5,_XER(r1)
+ REST_NVGPRS(r1)
+ mtctr r4
+ mtxer r5
+ REST_GPR(0, r1)
+ REST_GPRS(3, 12, r1)
+ b 1b
+
#ifdef CONFIG_44x
2: li r7,0
iccci r0,r0
@@ -474,9 +176,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
b 1b
#endif /* CONFIG_44x */
-66: li r3,-ENOSYS
- b ret_from_syscall
-
.globl ret_from_fork
ret_from_fork:
REST_NVGPRS(r1)
@@ -488,175 +187,13 @@ ret_from_fork:
ret_from_kernel_thread:
REST_NVGPRS(r1)
bl schedule_tail
- mtlr r14
+ mtctr r14
mr r3,r15
PPC440EP_ERR42
- blrl
+ bctrl
li r3,0
b ret_from_syscall
-/* Traced system call support */
-syscall_dotrace:
- SAVE_NVGPRS(r1)
- li r0,0xc00
- stw r0,_TRAP(r1)
- addi r3,r1,STACK_FRAME_OVERHEAD
- bl do_syscall_trace_enter
- /*
- * Restore argument registers possibly just changed.
- * We use the return value of do_syscall_trace_enter
- * for call number to look up in the table (r0).
- */
- mr r0,r3
- lwz r3,GPR3(r1)
- lwz r4,GPR4(r1)
- lwz r5,GPR5(r1)
- lwz r6,GPR6(r1)
- lwz r7,GPR7(r1)
- lwz r8,GPR8(r1)
- REST_NVGPRS(r1)
-
- cmplwi r0,NR_syscalls
- /* Return code is already in r3 thanks to do_syscall_trace_enter() */
- bge- ret_from_syscall
- b syscall_dotrace_cont
-
-syscall_exit_work:
- andi. r0,r9,_TIF_RESTOREALL
- beq+ 0f
- REST_NVGPRS(r1)
- b 2f
-0: cmplw 0,r3,r8
- blt+ 1f
- andi. r0,r9,_TIF_NOERROR
- bne- 1f
- lwz r11,_CCR(r1) /* Load CR */
- neg r3,r3
- oris r11,r11,0x1000 /* Set SO bit in CR */
- stw r11,_CCR(r1)
-
-1: stw r6,RESULT(r1) /* Save result */
- stw r3,GPR3(r1) /* Update return value */
-2: andi. r0,r9,(_TIF_PERSYSCALL_MASK)
- beq 4f
-
- /* Clear per-syscall TIF flags if any are set. */
-
- li r11,_TIF_PERSYSCALL_MASK
- addi r12,r2,TI_FLAGS
-3: lwarx r8,0,r12
- andc r8,r8,r11
-#ifdef CONFIG_IBM405_ERR77
- dcbt 0,r12
-#endif
- stwcx. r8,0,r12
- bne- 3b
-
-4: /* Anything which requires enabling interrupts? */
- andi. r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP)
- beq ret_from_except
-
- /* Re-enable interrupts. There is no need to trace that with
- * lockdep as we are supposed to have IRQs on at this point
- */
- ori r10,r10,MSR_EE
- SYNC
- mtmsr r10
-
- /* Save NVGPRS if they're not saved already */
- lwz r4,_TRAP(r1)
- andi. r4,r4,1
- beq 5f
- SAVE_NVGPRS(r1)
- li r4,0xc00
- stw r4,_TRAP(r1)
-5:
- addi r3,r1,STACK_FRAME_OVERHEAD
- bl do_syscall_trace_leave
- b ret_from_except_full
-
-/*
- * The fork/clone functions need to copy the full register set into
- * the child process. Therefore we need to save all the nonvolatile
- * registers (r13 - r31) before calling the C code.
- */
- .globl ppc_fork
-ppc_fork:
- SAVE_NVGPRS(r1)
- lwz r0,_TRAP(r1)
- rlwinm r0,r0,0,0,30 /* clear LSB to indicate full */
- stw r0,_TRAP(r1) /* register set saved */
- b sys_fork
-
- .globl ppc_vfork
-ppc_vfork:
- SAVE_NVGPRS(r1)
- lwz r0,_TRAP(r1)
- rlwinm r0,r0,0,0,30 /* clear LSB to indicate full */
- stw r0,_TRAP(r1) /* register set saved */
- b sys_vfork
-
- .globl ppc_clone
-ppc_clone:
- SAVE_NVGPRS(r1)
- lwz r0,_TRAP(r1)
- rlwinm r0,r0,0,0,30 /* clear LSB to indicate full */
- stw r0,_TRAP(r1) /* register set saved */
- b sys_clone
-
- .globl ppc_clone3
-ppc_clone3:
- SAVE_NVGPRS(r1)
- lwz r0,_TRAP(r1)
- rlwinm r0,r0,0,0,30 /* clear LSB to indicate full */
- stw r0,_TRAP(r1) /* register set saved */
- b sys_clone3
-
- .globl ppc_swapcontext
-ppc_swapcontext:
- SAVE_NVGPRS(r1)
- lwz r0,_TRAP(r1)
- rlwinm r0,r0,0,0,30 /* clear LSB to indicate full */
- stw r0,_TRAP(r1) /* register set saved */
- b sys_swapcontext
-
-/*
- * Top-level page fault handling.
- * This is in assembler because if do_page_fault tells us that
- * it is a bad kernel page fault, we want to save the non-volatile
- * registers before calling bad_page_fault.
- */
- .globl handle_page_fault
-handle_page_fault:
- addi r3,r1,STACK_FRAME_OVERHEAD
-#ifdef CONFIG_PPC_BOOK3S_32
- andis. r0,r5,DSISR_DABRMATCH@h
- bne- handle_dabr_fault
-#endif
- bl do_page_fault
- cmpwi r3,0
- beq+ ret_from_except
- SAVE_NVGPRS(r1)
- lwz r0,_TRAP(r1)
- clrrwi r0,r0,1
- stw r0,_TRAP(r1)
- mr r5,r3
- addi r3,r1,STACK_FRAME_OVERHEAD
- lwz r4,_DAR(r1)
- bl bad_page_fault
- b ret_from_except_full
-
-#ifdef CONFIG_PPC_BOOK3S_32
- /* We have a data breakpoint exception - handle it */
-handle_dabr_fault:
- SAVE_NVGPRS(r1)
- lwz r0,_TRAP(r1)
- clrrwi r0,r0,1
- stw r0,_TRAP(r1)
- bl do_break
- b ret_from_except_full
-#endif
-
/*
* This routine switches between two different tasks. The process
* state of one is saved on its kernel stack. Then the state
@@ -684,33 +221,10 @@ _GLOBAL(_switch)
/* r3-r12 are caller saved -- Cort */
SAVE_NVGPRS(r1)
stw r0,_NIP(r1) /* Return to switch caller */
- mfmsr r11
- li r0,MSR_FP /* Disable floating-point */
-#ifdef CONFIG_ALTIVEC
-BEGIN_FTR_SECTION
- oris r0,r0,MSR_VEC@h /* Disable altivec */
- mfspr r12,SPRN_VRSAVE /* save vrsave register value */
- stw r12,THREAD+THREAD_VRSAVE(r2)
-END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
-#endif /* CONFIG_ALTIVEC */
-#ifdef CONFIG_SPE
-BEGIN_FTR_SECTION
- oris r0,r0,MSR_SPE@h /* Disable SPE */
- mfspr r12,SPRN_SPEFSCR /* save spefscr register value */
- stw r12,THREAD+THREAD_SPEFSCR(r2)
-END_FTR_SECTION_IFSET(CPU_FTR_SPE)
-#endif /* CONFIG_SPE */
- and. r0,r0,r11 /* FP or altivec or SPE enabled? */
- beq+ 1f
- andc r11,r11,r0
- mtmsr r11
- isync
-1: stw r11,_MSR(r1)
mfcr r10
stw r10,_CCR(r1)
stw r1,KSP(r3) /* Set old stack pointer */
- kuap_check r2, r4
#ifdef CONFIG_SMP
/* We need a sync somewhere here to make sure that if the
* previous task gets rescheduled on another CPU, it sees all
@@ -727,19 +241,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_SPE)
mr r3,r2
addi r2,r4,-THREAD /* Update current */
-#ifdef CONFIG_ALTIVEC
-BEGIN_FTR_SECTION
- lwz r0,THREAD+THREAD_VRSAVE(r2)
- mtspr SPRN_VRSAVE,r0 /* if G4, restore VRSAVE reg */
-END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
-#endif /* CONFIG_ALTIVEC */
-#ifdef CONFIG_SPE
-BEGIN_FTR_SECTION
- lwz r0,THREAD+THREAD_SPEFSCR(r2)
- mtspr SPRN_SPEFSCR,r0 /* restore SPEFSCR reg */
-END_FTR_SECTION_IFSET(CPU_FTR_SPE)
-#endif /* CONFIG_SPE */
-
lwz r0,_CCR(r1)
mtcrf 0xFF,r0
/* r3-r12 are destroyed -- Cort */
@@ -754,16 +255,15 @@ END_FTR_SECTION_IFSET(CPU_FTR_SPE)
fast_exception_return:
#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
andi. r10,r9,MSR_RI /* check for recoverable interrupt */
- beq 1f /* if not, we've got problems */
+ beq 3f /* if not, we've got problems */
#endif
-2: REST_4GPRS(3, r11)
- lwz r10,_CCR(r11)
- REST_GPR(1, r11)
+2: lwz r10,_CCR(r11)
+ REST_GPRS(1, 6, r11)
mtcr r10
lwz r10,_LINK(r11)
mtlr r10
- /* Clear the exception_marker on the stack to avoid confusing stacktrace */
+ /* Clear the exception marker on the stack to avoid confusing stacktrace */
li r10, 0
stw r10, 8(r11)
REST_GPR(10, r11)
@@ -774,279 +274,152 @@ fast_exception_return:
mtspr SPRN_SRR0,r12
REST_GPR(9, r11)
REST_GPR(12, r11)
- lwz r11,GPR11(r11)
- SYNC
- RFI
-
-#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
-/* check if the exception happened in a restartable section */
-1: lis r3,exc_exit_restart_end@ha
- addi r3,r3,exc_exit_restart_end@l
- cmplw r12,r3
-#ifdef CONFIG_PPC_BOOK3S_601
- bge 2b
-#else
- bge 3f
-#endif
- lis r4,exc_exit_restart@ha
- addi r4,r4,exc_exit_restart@l
- cmplw r12,r4
-#ifdef CONFIG_PPC_BOOK3S_601
- blt 2b
-#else
- blt 3f
+ REST_GPR(11, r11)
+ rfi
+#ifdef CONFIG_40x
+ b . /* Prevent prefetch past rfi */
#endif
- lis r3,fee_restarts@ha
- tophys(r3,r3)
- lwz r5,fee_restarts@l(r3)
- addi r5,r5,1
- stw r5,fee_restarts@l(r3)
- mr r12,r4 /* restart at exc_exit_restart */
- b 2b
-
- .section .bss
- .align 2
-fee_restarts:
- .space 4
- .previous
+_ASM_NOKPROBE_SYMBOL(fast_exception_return)
/* aargh, a nonrecoverable interrupt, panic */
/* aargh, we don't know which trap this is */
-/* but the 601 doesn't implement the RI bit, so assume it's OK */
3:
li r10,-1
stw r10,_TRAP(r11)
- addi r3,r1,STACK_FRAME_OVERHEAD
- lis r10,MSR_KERNEL@h
- ori r10,r10,MSR_KERNEL@l
- bl transfer_to_handler_full
- .long unrecoverable_exception
- .long ret_from_except
-#endif
-
- .globl ret_from_except_full
-ret_from_except_full:
- REST_NVGPRS(r1)
- /* fall through */
-
- .globl ret_from_except
-ret_from_except:
- /* Hard-disable interrupts so that current_thread_info()->flags
- * can't change between when we test it and when we return
- * from the interrupt. */
- /* Note: We don't bother telling lockdep about it */
- LOAD_REG_IMMEDIATE(r10,MSR_KERNEL)
- SYNC /* Some chip revs have problems here... */
- mtmsr r10 /* disable interrupts */
-
- lwz r3,_MSR(r1) /* Returning to user mode? */
- andi. r0,r3,MSR_PR
- beq resume_kernel
-
-user_exc_return: /* r10 contains MSR_KERNEL here */
- /* Check current_thread_info()->flags */
- lwz r9,TI_FLAGS(r2)
- andi. r0,r9,_TIF_USER_WORK_MASK
- bne do_work
+ prepare_transfer_to_handler
+ bl unrecoverable_exception
+ trap /* should not get here */
-restore_user:
-#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
- /* Check whether this process has its own DBCR0 value. The internal
- debug mode bit tells us that dbcr0 should be loaded. */
- lwz r0,THREAD+THREAD_DBCR0(r2)
- andis. r10,r0,DBCR0_IDM@h
- bnel- load_dbcr0
-#endif
- ACCOUNT_CPU_USER_EXIT(r2, r10, r11)
-#ifdef CONFIG_PPC_BOOK3S_32
- kuep_unlock r10, r11
-#endif
+ .globl interrupt_return
+interrupt_return:
+ lwz r4,_MSR(r1)
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ andi. r0,r4,MSR_PR
+ beq .Lkernel_interrupt_return
+ bl interrupt_exit_user_prepare
+ cmpwi r3,0
+ kuep_unlock
+ bne- .Lrestore_nvgprs
- b restore
+.Lfast_user_interrupt_return:
+ lwz r11,_NIP(r1)
+ lwz r12,_MSR(r1)
+ mtspr SPRN_SRR0,r11
+ mtspr SPRN_SRR1,r12
-/* N.B. the only way to get here is from the beq following ret_from_except. */
-resume_kernel:
- /* check current_thread_info, _TIF_EMULATE_STACK_STORE */
- lwz r8,TI_FLAGS(r2)
- andis. r0,r8,_TIF_EMULATE_STACK_STORE@h
- beq+ 1f
+BEGIN_FTR_SECTION
+ stwcx. r0,0,r1 /* to clear the reservation */
+FTR_SECTION_ELSE
+ lwarx r0,0,r1
+ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
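(Note, mirroring the rationale in the 64-bit restore path removed later in this diff: on CPUs that check the reservation address, CPU_FTR_STCX_CHECKS_ADDRESS, a plain lwarx to the stack is enough to displace any stale lwarx/stwcx. reservation and is cheaper; otherwise an actual stwcx. is needed to clear it.)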
- addi r8,r1,INT_FRAME_SIZE /* Get the kprobed function entry */
+ lwz r3,_CCR(r1)
+ lwz r4,_LINK(r1)
+ lwz r5,_CTR(r1)
+ lwz r6,_XER(r1)
+ li r0,0
- lwz r3,GPR1(r1)
- subi r3,r3,INT_FRAME_SIZE /* dst: Allocate a trampoline exception frame */
- mr r4,r1 /* src: current exception frame */
- mr r1,r3 /* Reroute the trampoline frame to r1 */
+ /*
+ * Leaving a stale exception marker on the stack can confuse
+ * the reliable stack unwinder later on. Clear it.
+ */
+ stw r0,8(r1)
+ REST_GPRS(7, 12, r1)
- /* Copy from the original to the trampoline. */
- li r5,INT_FRAME_SIZE/4 /* size: INT_FRAME_SIZE */
- li r6,0 /* start offset: 0 */
+ mtcr r3
+ mtlr r4
mtctr r5
-2: lwzx r0,r6,r4
- stwx r0,r6,r3
- addi r6,r6,4
- bdnz 2b
-
- /* Do real store operation to complete stwu */
- lwz r5,GPR1(r1)
- stw r8,0(r5)
-
- /* Clear _TIF_EMULATE_STACK_STORE flag */
- lis r11,_TIF_EMULATE_STACK_STORE@h
- addi r5,r2,TI_FLAGS
-0: lwarx r8,0,r5
- andc r8,r8,r11
-#ifdef CONFIG_IBM405_ERR77
- dcbt 0,r5
-#endif
- stwcx. r8,0,r5
- bne- 0b
-1:
+ mtspr SPRN_XER,r6
-#ifdef CONFIG_PREEMPTION
- /* check current_thread_info->preempt_count */
- lwz r0,TI_PREEMPT(r2)
- cmpwi 0,r0,0 /* if non-zero, just restore regs and return */
- bne restore_kuap
- andi. r8,r8,_TIF_NEED_RESCHED
- beq+ restore_kuap
- lwz r3,_MSR(r1)
- andi. r0,r3,MSR_EE /* interrupts off? */
- beq restore_kuap /* don't schedule if so */
-#ifdef CONFIG_TRACE_IRQFLAGS
- /* Lockdep thinks irqs are enabled, we need to call
- * preempt_schedule_irq with IRQs off, so we inform lockdep
- * now that we -did- turn them off already
- */
- bl trace_hardirqs_off
-#endif
- bl preempt_schedule_irq
-#ifdef CONFIG_TRACE_IRQFLAGS
- /* And now, to properly rebalance the above, we tell lockdep they
- * are being turned back on, which will happen when we return
- */
- bl trace_hardirqs_on
+ REST_GPRS(2, 6, r1)
+ REST_GPR(0, r1)
+ REST_GPR(1, r1)
+ rfi
+#ifdef CONFIG_40x
+ b . /* Prevent prefetch past rfi */
#endif
-#endif /* CONFIG_PREEMPTION */
-restore_kuap:
- kuap_restore r1, r2, r9, r10, r0
-
- /* interrupts are hard-disabled at this point */
-restore:
-#ifdef CONFIG_44x
-BEGIN_MMU_FTR_SECTION
- b 1f
-END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_47x)
- lis r4,icache_44x_need_flush@ha
- lwz r5,icache_44x_need_flush@l(r4)
- cmplwi cr0,r5,0
- beq+ 1f
- li r6,0
- iccci r0,r0
- stw r6,icache_44x_need_flush@l(r4)
-1:
-#endif /* CONFIG_44x */
- lwz r9,_MSR(r1)
-#ifdef CONFIG_TRACE_IRQFLAGS
- /* Lockdep doesn't know about the fact that IRQs are temporarily turned
- * off in this assembly code while peeking at TI_FLAGS() and such. However
- * we need to inform it if the exception turned interrupts off, and we
- * are about to trun them back on.
- */
- andi. r10,r9,MSR_EE
- beq 1f
- stwu r1,-32(r1)
- mflr r0
- stw r0,4(r1)
- bl trace_hardirqs_on
- addi r1, r1, 32
- lwz r9,_MSR(r1)
-1:
-#endif /* CONFIG_TRACE_IRQFLAGS */
+.Lrestore_nvgprs:
+ REST_NVGPRS(r1)
+ b .Lfast_user_interrupt_return
- lwz r0,GPR0(r1)
- lwz r2,GPR2(r1)
- REST_4GPRS(3, r1)
- REST_2GPRS(7, r1)
+.Lkernel_interrupt_return:
+ bl interrupt_exit_kernel_prepare
- lwz r10,_XER(r1)
- lwz r11,_CTR(r1)
- mtspr SPRN_XER,r10
- mtctr r11
+.Lfast_kernel_interrupt_return:
+ cmpwi cr1,r3,0
+ lwz r11,_NIP(r1)
+ lwz r12,_MSR(r1)
+ mtspr SPRN_SRR0,r11
+ mtspr SPRN_SRR1,r12
- PPC405_ERR77(0,r1)
BEGIN_FTR_SECTION
- lwarx r11,0,r1
-END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
- stwcx. r0,0,r1 /* to clear the reservation */
+ stwcx. r0,0,r1 /* to clear the reservation */
+FTR_SECTION_ELSE
+ lwarx r0,0,r1
+ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
+
+ lwz r3,_LINK(r1)
+ lwz r4,_CTR(r1)
+ lwz r5,_XER(r1)
+ lwz r6,_CCR(r1)
+ li r0,0
-#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
- andi. r10,r9,MSR_RI /* check if this exception occurred */
- beql nonrecoverable /* at a bad place (MSR:RI = 0) */
+ REST_GPRS(7, 12, r1)
- lwz r10,_CCR(r1)
- lwz r11,_LINK(r1)
- mtcrf 0xFF,r10
- mtlr r11
+ mtlr r3
+ mtctr r4
+ mtspr SPRN_XER,r5
- /* Clear the exception_marker on the stack to avoid confusing stacktrace */
- li r10, 0
- stw r10, 8(r1)
/*
- * Once we put values in SRR0 and SRR1, we are in a state
- * where exceptions are not recoverable, since taking an
- * exception will trash SRR0 and SRR1. Therefore we clear the
- * MSR:RI bit to indicate this. If we do take an exception,
- * we can't return to the point of the exception but we
- * can restart the exception exit path at the label
- * exc_exit_restart below. -- paulus
+ * Leaving a stale exception marker on the stack can confuse
+ * the reliable stack unwinder later on. Clear it.
*/
- LOAD_REG_IMMEDIATE(r10,MSR_KERNEL & ~MSR_RI)
- SYNC
- mtmsr r10 /* clear the RI bit */
- .globl exc_exit_restart
-exc_exit_restart:
- lwz r12,_NIP(r1)
- mtspr SPRN_SRR0,r12
- mtspr SPRN_SRR1,r9
- REST_4GPRS(9, r1)
- lwz r1,GPR1(r1)
- .globl exc_exit_restart_end
-exc_exit_restart_end:
- SYNC
- RFI
+ stw r0,8(r1)
-#else /* !(CONFIG_4xx || CONFIG_BOOKE) */
- /*
- * This is a bit different on 4xx/Book-E because it doesn't have
- * the RI bit in the MSR.
- * The TLB miss handler checks if we have interrupted
- * the exception exit path and restarts it if so
- * (well maybe one day it will... :).
+ REST_GPRS(2, 5, r1)
+
+ bne- cr1,1f /* emulate stack store */
+ mtcr r6
+ REST_GPR(6, r1)
+ REST_GPR(0, r1)
+ REST_GPR(1, r1)
+ rfi
+#ifdef CONFIG_40x
+ b . /* Prevent prefetch past rfi */
+#endif
+
+1: /*
+ * Emulate stack store with update. New r1 value was already calculated
+ * and updated in our interrupt regs by emulate_loadstore, but we can't
+ * store the previous value of r1 to the stack before re-loading our
+ * registers from it, otherwise they could be clobbered. Use
+ * SPRG Scratch0 as temporary storage to hold the store
+ * data, as interrupts are disabled here so it won't be clobbered.
*/
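(Illustration, not part of this patch: the interrupted instruction was a stack-frame push, stwu r1,-N(r1). emulate_loadstore() already performed the update half, writing the new r1 into the saved registers; the store half, writing the old r1 at the new r1, is what the code below performs last, after the other registers have been reloaded from the interrupt frame that store could otherwise clobber. A rough C sketch, with the helper name chosen for illustration only:)

#include <asm/ptrace.h>

/* Hypothetical helper: finish an emulated "stwu r1,-N(r1)".
 * regs->gpr[1] already holds the decremented (new) r1; the store
 * component places the previous r1 value at that new address. */
static void finish_emulated_stwu(struct pt_regs *regs, unsigned long old_r1)
{
	unsigned long new_r1 = regs->gpr[1];	/* update half, already done */

	*(unsigned long *)new_r1 = old_r1;	/* store half, done last */
}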
- lwz r11,_LINK(r1)
- mtlr r11
- lwz r10,_CCR(r1)
- mtcrf 0xff,r10
- /* Clear the exception_marker on the stack to avoid confusing stacktrace */
- li r10, 0
- stw r10, 8(r1)
- REST_2GPRS(9, r1)
- .globl exc_exit_restart
-exc_exit_restart:
- lwz r11,_NIP(r1)
- lwz r12,_MSR(r1)
-exc_exit_start:
- mtspr SPRN_SRR0,r11
- mtspr SPRN_SRR1,r12
- REST_2GPRS(11, r1)
- lwz r1,GPR1(r1)
- .globl exc_exit_restart_end
-exc_exit_restart_end:
- PPC405_ERR77_SYNC
+ mtcr r6
+#ifdef CONFIG_BOOKE
+ mtspr SPRN_SPRG_WSCRATCH0, r9
+#else
+ mtspr SPRN_SPRG_SCRATCH0, r9
+#endif
+ addi r9,r1,INT_FRAME_SIZE /* get original r1 */
+ REST_GPR(6, r1)
+ REST_GPR(0, r1)
+ REST_GPR(1, r1)
+ stw r9,0(r1) /* perform store component of stwu */
+#ifdef CONFIG_BOOKE
+ mfspr r9, SPRN_SPRG_RSCRATCH0
+#else
+ mfspr r9, SPRN_SPRG_SCRATCH0
+#endif
rfi
- b . /* prevent prefetch past rfi */
+#ifdef CONFIG_40x
+ b . /* Prevent prefetch past rfi */
+#endif
+_ASM_NOKPROBE_SYMBOL(interrupt_return)
+
+#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
/*
* Returning from a critical interrupt in user mode doesn't need
@@ -1077,17 +450,13 @@ exc_exit_restart_end:
REST_NVGPRS(r1); \
lwz r3,_MSR(r1); \
andi. r3,r3,MSR_PR; \
- LOAD_REG_IMMEDIATE(r10,MSR_KERNEL); \
- bne user_exc_return; \
- lwz r0,GPR0(r1); \
- lwz r2,GPR2(r1); \
- REST_4GPRS(3, r1); \
- REST_2GPRS(7, r1); \
+ bne interrupt_return; \
+ REST_GPR(0, r1); \
+ REST_GPRS(2, 8, r1); \
lwz r10,_XER(r1); \
lwz r11,_CTR(r1); \
mtspr SPRN_XER,r10; \
mtctr r11; \
- PPC405_ERR77(0,r1); \
stwcx. r0,0,r1; /* to clear the reservation */ \
lwz r11,_LINK(r1); \
mtlr r11; \
@@ -1102,12 +471,8 @@ exc_exit_restart_end:
lwz r12,_MSR(r1); \
mtspr exc_lvl_srr0,r11; \
mtspr exc_lvl_srr1,r12; \
- lwz r9,GPR9(r1); \
- lwz r12,GPR12(r1); \
- lwz r10,GPR10(r1); \
- lwz r11,GPR11(r1); \
- lwz r1,GPR1(r1); \
- PPC405_ERR77_SYNC; \
+ REST_GPRS(9, 12, r1); \
+ REST_GPR(1, r1); \
exc_lvl_rfi; \
b .; /* prevent prefetch past exc_lvl_rfi */
@@ -1117,7 +482,7 @@ exc_exit_restart_end:
mtspr SPRN_##exc_lvl_srr0,r9; \
mtspr SPRN_##exc_lvl_srr1,r10;
-#if defined(CONFIG_PPC_BOOK3E_MMU)
+#if defined(CONFIG_PPC_E500)
#ifdef CONFIG_PHYS_64BIT
#define RESTORE_MAS7 \
lwz r11,MAS7(r1); \
@@ -1148,11 +513,6 @@ exc_exit_restart_end:
#ifdef CONFIG_40x
.globl ret_from_crit_exc
ret_from_crit_exc:
- mfspr r9,SPRN_SPRG_THREAD
- lis r10,saved_ksp_limit@ha;
- lwz r10,saved_ksp_limit@l(r10);
- tovirt(r9,r9);
- stw r10,KSP_LIMIT(r9)
lis r9,crit_srr0@ha;
lwz r9,crit_srr0@l(r9);
lis r10,crit_srr1@ha;
@@ -1160,219 +520,32 @@ ret_from_crit_exc:
mtspr SPRN_SRR0,r9;
mtspr SPRN_SRR1,r10;
RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
+_ASM_NOKPROBE_SYMBOL(ret_from_crit_exc)
#endif /* CONFIG_40x */
#ifdef CONFIG_BOOKE
.globl ret_from_crit_exc
ret_from_crit_exc:
- mfspr r9,SPRN_SPRG_THREAD
- lwz r10,SAVED_KSP_LIMIT(r1)
- stw r10,KSP_LIMIT(r9)
RESTORE_xSRR(SRR0,SRR1);
RESTORE_MMU_REGS;
RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
+_ASM_NOKPROBE_SYMBOL(ret_from_crit_exc)
.globl ret_from_debug_exc
ret_from_debug_exc:
- mfspr r9,SPRN_SPRG_THREAD
- lwz r10,SAVED_KSP_LIMIT(r1)
- stw r10,KSP_LIMIT(r9)
RESTORE_xSRR(SRR0,SRR1);
RESTORE_xSRR(CSRR0,CSRR1);
RESTORE_MMU_REGS;
RET_FROM_EXC_LEVEL(SPRN_DSRR0, SPRN_DSRR1, PPC_RFDI)
+_ASM_NOKPROBE_SYMBOL(ret_from_debug_exc)
.globl ret_from_mcheck_exc
ret_from_mcheck_exc:
- mfspr r9,SPRN_SPRG_THREAD
- lwz r10,SAVED_KSP_LIMIT(r1)
- stw r10,KSP_LIMIT(r9)
RESTORE_xSRR(SRR0,SRR1);
RESTORE_xSRR(CSRR0,CSRR1);
RESTORE_xSRR(DSRR0,DSRR1);
RESTORE_MMU_REGS;
RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, PPC_RFMCI)
+_ASM_NOKPROBE_SYMBOL(ret_from_mcheck_exc)
#endif /* CONFIG_BOOKE */
-
-/*
- * Load the DBCR0 value for a task that is being ptraced,
- * having first saved away the global DBCR0. Note that r0
- * has the dbcr0 value to set upon entry to this.
- */
-load_dbcr0:
- mfmsr r10 /* first disable debug exceptions */
- rlwinm r10,r10,0,~MSR_DE
- mtmsr r10
- isync
- mfspr r10,SPRN_DBCR0
- lis r11,global_dbcr0@ha
- addi r11,r11,global_dbcr0@l
-#ifdef CONFIG_SMP
- lwz r9,TASK_CPU(r2)
- slwi r9,r9,3
- add r11,r11,r9
-#endif
- stw r10,0(r11)
- mtspr SPRN_DBCR0,r0
- lwz r10,4(r11)
- addi r10,r10,1
- stw r10,4(r11)
- li r11,-1
- mtspr SPRN_DBSR,r11 /* clear all pending debug events */
- blr
-
- .section .bss
- .align 4
- .global global_dbcr0
-global_dbcr0:
- .space 8*NR_CPUS
- .previous
#endif /* !(CONFIG_4xx || CONFIG_BOOKE) */
-
-do_work: /* r10 contains MSR_KERNEL here */
- andi. r0,r9,_TIF_NEED_RESCHED
- beq do_user_signal
-
-do_resched: /* r10 contains MSR_KERNEL here */
-#ifdef CONFIG_TRACE_IRQFLAGS
- bl trace_hardirqs_on
- mfmsr r10
-#endif
- ori r10,r10,MSR_EE
- SYNC
- mtmsr r10 /* hard-enable interrupts */
- bl schedule
-recheck:
- /* Note: And we don't tell it we are disabling them again
- * neither. Those disable/enable cycles used to peek at
- * TI_FLAGS aren't advertised.
- */
- LOAD_REG_IMMEDIATE(r10,MSR_KERNEL)
- SYNC
- mtmsr r10 /* disable interrupts */
- lwz r9,TI_FLAGS(r2)
- andi. r0,r9,_TIF_NEED_RESCHED
- bne- do_resched
- andi. r0,r9,_TIF_USER_WORK_MASK
- beq restore_user
-do_user_signal: /* r10 contains MSR_KERNEL here */
- ori r10,r10,MSR_EE
- SYNC
- mtmsr r10 /* hard-enable interrupts */
- /* save r13-r31 in the exception frame, if not already done */
- lwz r3,_TRAP(r1)
- andi. r0,r3,1
- beq 2f
- SAVE_NVGPRS(r1)
- rlwinm r3,r3,0,0,30
- stw r3,_TRAP(r1)
-2: addi r3,r1,STACK_FRAME_OVERHEAD
- mr r4,r9
- bl do_notify_resume
- REST_NVGPRS(r1)
- b recheck
-
-/*
- * We come here when we are at the end of handling an exception
- * that occurred at a place where taking an exception will lose
- * state information, such as the contents of SRR0 and SRR1.
- */
-nonrecoverable:
- lis r10,exc_exit_restart_end@ha
- addi r10,r10,exc_exit_restart_end@l
- cmplw r12,r10
-#ifdef CONFIG_PPC_BOOK3S_601
- bgelr
-#else
- bge 3f
-#endif
- lis r11,exc_exit_restart@ha
- addi r11,r11,exc_exit_restart@l
- cmplw r12,r11
-#ifdef CONFIG_PPC_BOOK3S_601
- bltlr
-#else
- blt 3f
-#endif
- lis r10,ee_restarts@ha
- lwz r12,ee_restarts@l(r10)
- addi r12,r12,1
- stw r12,ee_restarts@l(r10)
- mr r12,r11 /* restart at exc_exit_restart */
- blr
-3: /* OK, we can't recover, kill this process */
- /* but the 601 doesn't implement the RI bit, so assume it's OK */
- lwz r3,_TRAP(r1)
- andi. r0,r3,1
- beq 5f
- SAVE_NVGPRS(r1)
- rlwinm r3,r3,0,0,30
- stw r3,_TRAP(r1)
-5: mfspr r2,SPRN_SPRG_THREAD
- addi r2,r2,-THREAD
- tovirt(r2,r2) /* set back r2 to current */
-4: addi r3,r1,STACK_FRAME_OVERHEAD
- bl unrecoverable_exception
- /* shouldn't return */
- b 4b
-
- .section .bss
- .align 2
-ee_restarts:
- .space 4
- .previous
-
-/*
- * PROM code for specific machines follows. Put it
- * here so it's easy to add arch-specific sections later.
- * -- Cort
- */
-#ifdef CONFIG_PPC_RTAS
-/*
- * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
- * called with the MMU off.
- */
-_GLOBAL(enter_rtas)
- stwu r1,-INT_FRAME_SIZE(r1)
- mflr r0
- stw r0,INT_FRAME_SIZE+4(r1)
- LOAD_REG_ADDR(r4, rtas)
- lis r6,1f@ha /* physical return address for rtas */
- addi r6,r6,1f@l
- tophys(r6,r6)
- tophys_novmstack r7, r1
- lwz r8,RTASENTRY(r4)
- lwz r4,RTASBASE(r4)
- mfmsr r9
- stw r9,8(r1)
- LOAD_REG_IMMEDIATE(r0,MSR_KERNEL)
- SYNC /* disable interrupts so SRR0/1 */
- mtmsr r0 /* don't get trashed */
- li r9,MSR_KERNEL & ~(MSR_IR|MSR_DR)
- mtlr r6
- stw r7, THREAD + RTAS_SP(r2)
- mtspr SPRN_SRR0,r8
- mtspr SPRN_SRR1,r9
- RFI
-1: tophys_novmstack r9, r1
-#ifdef CONFIG_VMAP_STACK
- li r0, MSR_KERNEL & ~MSR_IR /* can take DTLB miss */
- mtmsr r0
- isync
-#endif
- lwz r8,INT_FRAME_SIZE+4(r9) /* get return address */
- lwz r9,8(r9) /* original msr value */
- addi r1,r1,INT_FRAME_SIZE
- li r0,0
- tophys_novmstack r7, r2
- stw r0, THREAD + RTAS_SP(r7)
- mtspr SPRN_SRR0,r8
- mtspr SPRN_SRR1,r9
- RFI /* return to caller */
-
- .globl machine_check_in_rtas
-machine_check_in_rtas:
- twi 31,0,0
- /* XXX load up BATs and panic */
-
-#endif /* CONFIG_PPC_RTAS */
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 6ba675b0cf7d..3e2e37e6ecab 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -16,6 +16,7 @@
#include <linux/errno.h>
#include <linux/err.h>
+#include <asm/cache.h>
#include <asm/unistd.h>
#include <asm/processor.h>
#include <asm/page.h>
@@ -31,7 +32,6 @@
#include <asm/irqflags.h>
#include <asm/hw_irq.h>
#include <asm/context_tracking.h>
-#include <asm/tm.h>
#include <asm/ppc-opcode.h>
#include <asm/barrier.h>
#include <asm/export.h>
@@ -47,483 +47,17 @@
/*
* System calls.
*/
- .section ".toc","aw"
-SYS_CALL_TABLE:
- .tc sys_call_table[TC],sys_call_table
-
-COMPAT_SYS_CALL_TABLE:
- .tc compat_sys_call_table[TC],compat_sys_call_table
-
-/* This value is used to mark exception frames on the stack. */
-exception_marker:
- .tc ID_EXC_MARKER[TC],STACK_FRAME_REGS_MARKER
-
.section ".text"
- .align 7
-
- .globl system_call_common
-system_call_common:
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-BEGIN_FTR_SECTION
- extrdi. r10, r12, 1, (63-MSR_TS_T_LG) /* transaction active? */
- bne .Ltabort_syscall
-END_FTR_SECTION_IFSET(CPU_FTR_TM)
-#endif
- mr r10,r1
- ld r1,PACAKSAVE(r13)
- std r10,0(r1)
- std r11,_NIP(r1)
- std r12,_MSR(r1)
- std r0,GPR0(r1)
- std r10,GPR1(r1)
-#ifdef CONFIG_PPC_FSL_BOOK3E
-START_BTB_FLUSH_SECTION
- BTB_FLUSH(r10)
-END_BTB_FLUSH_SECTION
-#endif
- ACCOUNT_CPU_USER_ENTRY(r13, r10, r11)
- std r2,GPR2(r1)
- std r3,GPR3(r1)
- mfcr r2
- std r4,GPR4(r1)
- std r5,GPR5(r1)
- std r6,GPR6(r1)
- std r7,GPR7(r1)
- std r8,GPR8(r1)
- li r11,0
- std r11,GPR9(r1)
- std r11,GPR10(r1)
- std r11,GPR11(r1)
- std r11,GPR12(r1)
- std r11,_XER(r1)
- std r11,_CTR(r1)
- std r9,GPR13(r1)
- mflr r10
- /*
- * This clears CR0.SO (bit 28), which is the error indication on
- * return from this system call.
- */
- rldimi r2,r11,28,(63-28)
- li r11,0xc01
- std r10,_LINK(r1)
- std r11,_TRAP(r1)
- std r3,ORIG_GPR3(r1)
- std r2,_CCR(r1)
- ld r2,PACATOC(r13)
- addi r9,r1,STACK_FRAME_OVERHEAD
- ld r11,exception_marker@toc(r2)
- std r11,-16(r9) /* "regshere" marker */
-
- kuap_check_amr r10, r11
-
-#if defined(CONFIG_VIRT_CPU_ACCOUNTING_NATIVE) && defined(CONFIG_PPC_SPLPAR)
-BEGIN_FW_FTR_SECTION
- /* see if there are any DTL entries to process */
- ld r10,PACALPPACAPTR(r13) /* get ptr to VPA */
- ld r11,PACA_DTL_RIDX(r13) /* get log read index */
- addi r10,r10,LPPACA_DTLIDX
- LDX_BE r10,0,r10 /* get log write index */
- cmpd r11,r10
- beq+ 33f
- bl accumulate_stolen_time
- REST_GPR(0,r1)
- REST_4GPRS(3,r1)
- REST_2GPRS(7,r1)
- addi r9,r1,STACK_FRAME_OVERHEAD
-33:
-END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
-#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE && CONFIG_PPC_SPLPAR */
-
- /*
- * A syscall should always be called with interrupts enabled
- * so we just unconditionally hard-enable here. When some kind
- * of irq tracing is used, we additionally check that condition
- * is correct
- */
-#if defined(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG) && defined(CONFIG_BUG)
- lbz r10,PACAIRQSOFTMASK(r13)
-1: tdnei r10,IRQS_ENABLED
- EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
-#endif
-
-#ifdef CONFIG_PPC_BOOK3E
- wrteei 1
-#else
- li r11,MSR_RI
- ori r11,r11,MSR_EE
- mtmsrd r11,1
-#endif /* CONFIG_PPC_BOOK3E */
-
-system_call: /* label this so stack traces look sane */
- /* We do need to set SOFTE in the stack frame or the return
- * from interrupt will be painful
- */
- li r10,IRQS_ENABLED
- std r10,SOFTE(r1)
-
- ld r11, PACA_THREAD_INFO(r13)
- ld r10,TI_FLAGS(r11)
- andi. r11,r10,_TIF_SYSCALL_DOTRACE
- bne .Lsyscall_dotrace /* does not return */
- cmpldi 0,r0,NR_syscalls
- bge- .Lsyscall_enosys
-
-.Lsyscall:
-/*
- * Need to vector to 32 Bit or default sys_call_table here,
- * based on caller's run-mode / personality.
- */
- ld r11,SYS_CALL_TABLE@toc(2)
- andis. r10,r10,_TIF_32BIT@h
- beq 15f
- ld r11,COMPAT_SYS_CALL_TABLE@toc(2)
- clrldi r3,r3,32
- clrldi r4,r4,32
- clrldi r5,r5,32
- clrldi r6,r6,32
- clrldi r7,r7,32
- clrldi r8,r8,32
-15:
- slwi r0,r0,3
-
- barrier_nospec_asm
- /*
- * Prevent the load of the handler below (based on the user-passed
- * system call number) being speculatively executed until the test
- * against NR_syscalls and branch to .Lsyscall_enosys above has
- * committed.
- */
-
- ldx r12,r11,r0 /* Fetch system call handler [ptr] */
- mtctr r12
- bctrl /* Call handler */
-
- /* syscall_exit can exit to kernel mode, via ret_from_kernel_thread */
-.Lsyscall_exit:
- std r3,RESULT(r1)
-
-#ifdef CONFIG_DEBUG_RSEQ
- /* Check whether the syscall is issued inside a restartable sequence */
- addi r3,r1,STACK_FRAME_OVERHEAD
- bl rseq_syscall
- ld r3,RESULT(r1)
-#endif
-
- ld r12, PACA_THREAD_INFO(r13)
-
- ld r8,_MSR(r1)
-
-/*
- * This is a few instructions into the actual syscall exit path (which actually
- * starts at .Lsyscall_exit) to cater to kprobe blacklisting and to reduce the
- * number of visible symbols for profiling purposes.
- *
- * We can probe from system_call until this point as MSR_RI is set. But once it
- * is cleared below, we won't be able to take a trap.
- *
- * This is blacklisted from kprobes further below with _ASM_NOKPROBE_SYMBOL().
- */
-system_call_exit:
- /*
- * Disable interrupts so current_thread_info()->flags can't change,
- * and so that we don't get interrupted after loading SRR0/1.
- *
- * Leave MSR_RI enabled for now, because with THREAD_INFO_IN_TASK we
- * could fault on the load of the TI_FLAGS below.
- */
-#ifdef CONFIG_PPC_BOOK3E
- wrteei 0
-#else
- li r11,MSR_RI
- mtmsrd r11,1
-#endif /* CONFIG_PPC_BOOK3E */
-
- ld r9,TI_FLAGS(r12)
- li r11,-MAX_ERRNO
- andi. r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
- bne- .Lsyscall_exit_work
-
- andi. r0,r8,MSR_FP
- beq 2f
-#ifdef CONFIG_ALTIVEC
- andis. r0,r8,MSR_VEC@h
- bne 3f
-#endif
-2: addi r3,r1,STACK_FRAME_OVERHEAD
- bl restore_math
- ld r8,_MSR(r1)
- ld r3,RESULT(r1)
- li r11,-MAX_ERRNO
-
-3: cmpld r3,r11
- ld r5,_CCR(r1)
- bge- .Lsyscall_error
-.Lsyscall_error_cont:
- ld r7,_NIP(r1)
-BEGIN_FTR_SECTION
- stdcx. r0,0,r1 /* to clear the reservation */
-END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
- andi. r6,r8,MSR_PR
- ld r4,_LINK(r1)
-
- kuap_check_amr r10, r11
-
-#ifdef CONFIG_PPC_BOOK3S
- /*
- * Clear MSR_RI, MSR_EE is already and remains disabled. We could do
- * this later, but testing shows that doing it here causes less slow
- * down than doing it closer to the rfid.
- */
- li r11,0
- mtmsrd r11,1
-#endif
-
- beq- 1f
- ACCOUNT_CPU_USER_EXIT(r13, r11, r12)
-
-BEGIN_FTR_SECTION
- HMT_MEDIUM_LOW
-END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
-
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
- std r8, PACATMSCRATCH(r13)
-#endif
-
- /*
- * We don't need to restore AMR on the way back to userspace for KUAP.
- * The value of AMR only matters while we're in the kernel.
- */
- ld r13,GPR13(r1) /* only restore r13 if returning to usermode */
- ld r2,GPR2(r1)
- ld r1,GPR1(r1)
- mtlr r4
- mtcr r5
- mtspr SPRN_SRR0,r7
- mtspr SPRN_SRR1,r8
- RFI_TO_USER
- b . /* prevent speculative execution */
-
-1: /* exit to kernel */
- kuap_restore_amr r2
-
- ld r2,GPR2(r1)
- ld r1,GPR1(r1)
- mtlr r4
- mtcr r5
- mtspr SPRN_SRR0,r7
- mtspr SPRN_SRR1,r8
- RFI_TO_KERNEL
- b . /* prevent speculative execution */
-
-.Lsyscall_error:
- oris r5,r5,0x1000 /* Set SO bit in CR */
- neg r3,r3
- std r5,_CCR(r1)
- b .Lsyscall_error_cont
-
-/* Traced system call support */
-.Lsyscall_dotrace:
- bl save_nvgprs
- addi r3,r1,STACK_FRAME_OVERHEAD
- bl do_syscall_trace_enter
-
- /*
- * We use the return value of do_syscall_trace_enter() as the syscall
- * number. If the syscall was rejected for any reason do_syscall_trace_enter()
- * returns an invalid syscall number and the test below against
- * NR_syscalls will fail.
- */
- mr r0,r3
-
- /* Restore argument registers just clobbered and/or possibly changed. */
- ld r3,GPR3(r1)
- ld r4,GPR4(r1)
- ld r5,GPR5(r1)
- ld r6,GPR6(r1)
- ld r7,GPR7(r1)
- ld r8,GPR8(r1)
-
- /* Repopulate r9 and r10 for the syscall path */
- addi r9,r1,STACK_FRAME_OVERHEAD
- ld r10, PACA_THREAD_INFO(r13)
- ld r10,TI_FLAGS(r10)
-
- cmpldi r0,NR_syscalls
- blt+ .Lsyscall
-
- /* Return code is already in r3 thanks to do_syscall_trace_enter() */
- b .Lsyscall_exit
-
-
-.Lsyscall_enosys:
- li r3,-ENOSYS
- b .Lsyscall_exit
-
-.Lsyscall_exit_work:
- /* If TIF_RESTOREALL is set, don't scribble on either r3 or ccr.
- If TIF_NOERROR is set, just save r3 as it is. */
-
- andi. r0,r9,_TIF_RESTOREALL
- beq+ 0f
- REST_NVGPRS(r1)
- b 2f
-0: cmpld r3,r11 /* r11 is -MAX_ERRNO */
- blt+ 1f
- andi. r0,r9,_TIF_NOERROR
- bne- 1f
- ld r5,_CCR(r1)
- neg r3,r3
- oris r5,r5,0x1000 /* Set SO bit in CR */
- std r5,_CCR(r1)
-1: std r3,GPR3(r1)
-2: andi. r0,r9,(_TIF_PERSYSCALL_MASK)
- beq 4f
-
- /* Clear per-syscall TIF flags if any are set. */
-
- li r11,_TIF_PERSYSCALL_MASK
- addi r12,r12,TI_FLAGS
-3: ldarx r10,0,r12
- andc r10,r10,r11
- stdcx. r10,0,r12
- bne- 3b
- subi r12,r12,TI_FLAGS
-
-4: /* Anything else left to do? */
-BEGIN_FTR_SECTION
- lis r3,DEFAULT_PPR@highest /* Set default PPR */
- sldi r3,r3,32 /* bits 11-13 are used for ppr */
- std r3,_PPR(r1)
-END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
-
- andi. r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP)
- beq ret_from_except_lite
-
- /* Re-enable interrupts */
-#ifdef CONFIG_PPC_BOOK3E
- wrteei 1
-#else
- li r10,MSR_RI
- ori r10,r10,MSR_EE
- mtmsrd r10,1
-#endif /* CONFIG_PPC_BOOK3E */
-
- bl save_nvgprs
- addi r3,r1,STACK_FRAME_OVERHEAD
- bl do_syscall_trace_leave
- b ret_from_except
-
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-.Ltabort_syscall:
- /* Firstly we need to enable TM in the kernel */
- mfmsr r10
- li r9, 1
- rldimi r10, r9, MSR_TM_LG, 63-MSR_TM_LG
- mtmsrd r10, 0
-
- /* tabort, this dooms the transaction, nothing else */
- li r9, (TM_CAUSE_SYSCALL|TM_CAUSE_PERSISTENT)
- TABORT(R9)
-
- /*
- * Return directly to userspace. We have corrupted user register state,
- * but userspace will never see that register state. Execution will
- * resume after the tbegin of the aborted transaction with the
- * checkpointed register state.
- */
- li r9, MSR_RI
- andc r10, r10, r9
- mtmsrd r10, 1
- mtspr SPRN_SRR0, r11
- mtspr SPRN_SRR1, r12
- RFI_TO_USER
- b . /* prevent speculative execution */
-#endif
-_ASM_NOKPROBE_SYMBOL(system_call_common);
-_ASM_NOKPROBE_SYMBOL(system_call_exit);
-
-/* Save non-volatile GPRs, if not already saved. */
-_GLOBAL(save_nvgprs)
- ld r11,_TRAP(r1)
- andi. r0,r11,1
- beqlr-
- SAVE_NVGPRS(r1)
- clrrdi r0,r11,1
- std r0,_TRAP(r1)
- blr
-_ASM_NOKPROBE_SYMBOL(save_nvgprs);
-
-
-/*
- * The sigsuspend and rt_sigsuspend system calls can call do_signal
- * and thus put the process into the stopped state where we might
- * want to examine its user state with ptrace. Therefore we need
- * to save all the nonvolatile registers (r14 - r31) before calling
- * the C code. Similarly, fork, vfork and clone need the full
- * register state on the stack so that it can be copied to the child.
- */
-
-_GLOBAL(ppc_fork)
- bl save_nvgprs
- bl sys_fork
- b .Lsyscall_exit
-
-_GLOBAL(ppc_vfork)
- bl save_nvgprs
- bl sys_vfork
- b .Lsyscall_exit
-
-_GLOBAL(ppc_clone)
- bl save_nvgprs
- bl sys_clone
- b .Lsyscall_exit
-
-_GLOBAL(ppc_clone3)
- bl save_nvgprs
- bl sys_clone3
- b .Lsyscall_exit
-
-_GLOBAL(ppc32_swapcontext)
- bl save_nvgprs
- bl compat_sys_swapcontext
- b .Lsyscall_exit
-
-_GLOBAL(ppc64_swapcontext)
- bl save_nvgprs
- bl sys_swapcontext
- b .Lsyscall_exit
-
-_GLOBAL(ppc_switch_endian)
- bl save_nvgprs
- bl sys_switch_endian
- b .Lsyscall_exit
-
-_GLOBAL(ret_from_fork)
- bl schedule_tail
- REST_NVGPRS(r1)
- li r3,0
- b .Lsyscall_exit
-
-_GLOBAL(ret_from_kernel_thread)
- bl schedule_tail
- REST_NVGPRS(r1)
- mtlr r14
- mr r3,r15
-#ifdef PPC64_ELF_ABI_v2
- mr r12,r14
-#endif
- blrl
- li r3,0
- b .Lsyscall_exit
#ifdef CONFIG_PPC_BOOK3S_64
#define FLUSH_COUNT_CACHE \
1: nop; \
- patch_site 1b, patch__call_flush_count_cache
-
-
-#define BCCTR_FLUSH .long 0x4c400420
+ patch_site 1b, patch__call_flush_branch_caches1; \
+1: nop; \
+ patch_site 1b, patch__call_flush_branch_caches2; \
+1: nop; \
+ patch_site 1b, patch__call_flush_branch_caches3
.macro nops number
.rept \number
@@ -532,8 +66,8 @@ _GLOBAL(ret_from_kernel_thread)
.endm
.balign 32
-.global flush_count_cache
-flush_count_cache:
+.global flush_branch_caches
+flush_branch_caches:
/* Save LR into r9 */
mflr r9
@@ -555,7 +89,7 @@ flush_count_cache:
li r9,0x7fff
mtctr r9
- BCCTR_FLUSH
+ PPC_BCCTR_FLUSH
2: nop
patch_site 2b patch__flush_count_cache_return
@@ -564,7 +98,7 @@ flush_count_cache:
.rept 278
.balign 32
- BCCTR_FLUSH
+ PPC_BCCTR_FLUSH
nops 7
.endr
@@ -578,7 +112,7 @@ flush_count_cache:
* state of one is saved on its kernel stack. Then the state
* of the other is restored from its kernel stack. The memory
* management hardware is updated to the second process's state.
- * Finally, we can return to the second process, via ret_from_except.
+ * Finally, we can return to the second process, via interrupt_return.
* On entry, r3 points to the THREAD for the current task, r4
* points to the THREAD for the new task.
*
@@ -605,7 +139,7 @@ _GLOBAL(_switch)
kuap_check_amr r9, r10
- FLUSH_COUNT_CACHE
+ FLUSH_COUNT_CACHE /* Clobbers r9, ctr */
/*
* On SMP kernels, care must be taken because a task may be
@@ -618,7 +152,7 @@ _GLOBAL(_switch)
* kernel/sched/core.c).
*
* Uncacheable stores in the case of involuntary preemption must
- * be taken care of. The smp_mb__before_spin_lock() in __schedule()
+ * be taken care of. The smp_mb__after_spinlock() in __schedule()
* is implemented as hwsync on powerpc, which orders MMIO too. So
* long as there is an hwsync in the context switch path, it will
* be executed on the source CPU after the task has performed
@@ -646,7 +180,7 @@ _GLOBAL(_switch)
#endif
ld r8,KSP(r4) /* new stack pointer */
-#ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_PPC_64S_HASH_MMU
BEGIN_MMU_FTR_SECTION
b 2f
END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
@@ -698,7 +232,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
slbmte r7,r0
isync
2:
-#endif /* CONFIG_PPC_BOOK3S_64 */
+#endif /* CONFIG_PPC_64S_HASH_MMU */
clrrdi r7, r8, THREAD_SHIFT /* base of new stack */
/* Note: this uses SWITCH_FRAME_SIZE rather than INT_FRAME_SIZE
@@ -730,559 +264,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
addi r1,r1,SWITCH_FRAME_SIZE
blr
- .align 7
-_GLOBAL(ret_from_except)
- ld r11,_TRAP(r1)
- andi. r0,r11,1
- bne ret_from_except_lite
- REST_NVGPRS(r1)
-
-_GLOBAL(ret_from_except_lite)
- /*
- * Disable interrupts so that current_thread_info()->flags
- * can't change between when we test it and when we return
- * from the interrupt.
- */
-#ifdef CONFIG_PPC_BOOK3E
- wrteei 0
-#else
- li r10,MSR_RI
- mtmsrd r10,1 /* Update machine state */
-#endif /* CONFIG_PPC_BOOK3E */
-
- ld r9, PACA_THREAD_INFO(r13)
- ld r3,_MSR(r1)
-#ifdef CONFIG_PPC_BOOK3E
- ld r10,PACACURRENT(r13)
-#endif /* CONFIG_PPC_BOOK3E */
- ld r4,TI_FLAGS(r9)
- andi. r3,r3,MSR_PR
- beq resume_kernel
-#ifdef CONFIG_PPC_BOOK3E
- lwz r3,(THREAD+THREAD_DBCR0)(r10)
-#endif /* CONFIG_PPC_BOOK3E */
-
- /* Check current_thread_info()->flags */
- andi. r0,r4,_TIF_USER_WORK_MASK
- bne 1f
-#ifdef CONFIG_PPC_BOOK3E
- /*
- * Check to see if the dbcr0 register is set up to debug.
- * Use the internal debug mode bit to do this.
- */
- andis. r0,r3,DBCR0_IDM@h
- beq restore
- mfmsr r0
- rlwinm r0,r0,0,~MSR_DE /* Clear MSR.DE */
- mtmsr r0
- mtspr SPRN_DBCR0,r3
- li r10, -1
- mtspr SPRN_DBSR,r10
- b restore
-#else
- addi r3,r1,STACK_FRAME_OVERHEAD
- bl restore_math
- b restore
-#endif
-1: andi. r0,r4,_TIF_NEED_RESCHED
- beq 2f
- bl restore_interrupts
- SCHEDULE_USER
- b ret_from_except_lite
-2:
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
- andi. r0,r4,_TIF_USER_WORK_MASK & ~_TIF_RESTORE_TM
- bne 3f /* only restore TM if nothing else to do */
- addi r3,r1,STACK_FRAME_OVERHEAD
- bl restore_tm_state
- b restore
-3:
-#endif
- bl save_nvgprs
- /*
- * Use a non volatile GPR to save and restore our thread_info flags
- * across the call to restore_interrupts.
- */
- mr r30,r4
- bl restore_interrupts
- mr r4,r30
- addi r3,r1,STACK_FRAME_OVERHEAD
- bl do_notify_resume
- b ret_from_except
-
-resume_kernel:
- /* check current_thread_info, _TIF_EMULATE_STACK_STORE */
- andis. r8,r4,_TIF_EMULATE_STACK_STORE@h
- beq+ 1f
-
- addi r8,r1,INT_FRAME_SIZE /* Get the kprobed function entry */
-
- ld r3,GPR1(r1)
- subi r3,r3,INT_FRAME_SIZE /* dst: Allocate a trampoline exception frame */
- mr r4,r1 /* src: current exception frame */
- mr r1,r3 /* Reroute the trampoline frame to r1 */
-
- /* Copy from the original to the trampoline. */
- li r5,INT_FRAME_SIZE/8 /* size: INT_FRAME_SIZE */
- li r6,0 /* start offset: 0 */
- mtctr r5
-2: ldx r0,r6,r4
- stdx r0,r6,r3
- addi r6,r6,8
- bdnz 2b
-
- /* Do real store operation to complete stdu */
- ld r5,GPR1(r1)
- std r8,0(r5)
-
- /* Clear _TIF_EMULATE_STACK_STORE flag */
- lis r11,_TIF_EMULATE_STACK_STORE@h
- addi r5,r9,TI_FLAGS
-0: ldarx r4,0,r5
- andc r4,r4,r11
- stdcx. r4,0,r5
- bne- 0b
-1:
-
-#ifdef CONFIG_PREEMPTION
- /* Check if we need to preempt */
- andi. r0,r4,_TIF_NEED_RESCHED
- beq+ restore
- /* Check that preempt_count() == 0 and interrupts are enabled */
- lwz r8,TI_PREEMPT(r9)
- cmpwi cr0,r8,0
- bne restore
- ld r0,SOFTE(r1)
- andi. r0,r0,IRQS_DISABLED
- bne restore
-
- /*
- * Here we are preempting the current task. We want to make
- * sure we are soft-disabled first and reconcile irq state.
- */
- RECONCILE_IRQ_STATE(r3,r4)
- bl preempt_schedule_irq
-
- /*
- * arch_local_irq_restore() from preempt_schedule_irq above may
- * enable hard interrupt but we really should disable interrupts
- * when we return from the interrupt, and so that we don't get
- * interrupted after loading SRR0/1.
- */
-#ifdef CONFIG_PPC_BOOK3E
- wrteei 0
-#else
- li r10,MSR_RI
- mtmsrd r10,1 /* Update machine state */
-#endif /* CONFIG_PPC_BOOK3E */
-#endif /* CONFIG_PREEMPTION */
-
- .globl fast_exc_return_irq
-fast_exc_return_irq:
-restore:
- /*
- * This is the main kernel exit path. First we check if we
- * are about to re-enable interrupts
- */
- ld r5,SOFTE(r1)
- lbz r6,PACAIRQSOFTMASK(r13)
- andi. r5,r5,IRQS_DISABLED
- bne .Lrestore_irq_off
-
- /* We are enabling, were we already enabled ? Yes, just return */
- andi. r6,r6,IRQS_DISABLED
- beq cr0,.Ldo_restore
-
- /*
- * We are about to soft-enable interrupts (we are hard disabled
- * at this point). We check if there's anything that needs to
- * be replayed first.
- */
- lbz r0,PACAIRQHAPPENED(r13)
- cmpwi cr0,r0,0
- bne- .Lrestore_check_irq_replay
-
- /*
- * Get here when nothing happened while soft-disabled, just
- * soft-enable and move-on. We will hard-enable as a side
- * effect of rfi
- */
-.Lrestore_no_replay:
- TRACE_ENABLE_INTS
- li r0,IRQS_ENABLED
- stb r0,PACAIRQSOFTMASK(r13);
-
- /*
- * Final return path. BookE is handled in a different file
- */
-.Ldo_restore:
-#ifdef CONFIG_PPC_BOOK3E
- b exception_return_book3e
-#else
- /*
- * Clear the reservation. If we know the CPU tracks the address of
- * the reservation then we can potentially save some cycles and use
- * a larx. On POWER6 and POWER7 this is significantly faster.
- */
-BEGIN_FTR_SECTION
- stdcx. r0,0,r1 /* to clear the reservation */
-FTR_SECTION_ELSE
- ldarx r4,0,r1
-ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
-
- /*
- * Some code path such as load_up_fpu or altivec return directly
- * here. They run entirely hard disabled and do not alter the
- * interrupt state. They also don't use lwarx/stwcx. and thus
- * are known not to leave dangling reservations.
- */
- .globl fast_exception_return
-fast_exception_return:
- ld r3,_MSR(r1)
- ld r4,_CTR(r1)
- ld r0,_LINK(r1)
- mtctr r4
- mtlr r0
- ld r4,_XER(r1)
- mtspr SPRN_XER,r4
-
- kuap_check_amr r5, r6
-
- REST_8GPRS(5, r1)
-
- andi. r0,r3,MSR_RI
- beq- .Lunrecov_restore
-
- /*
- * Clear RI before restoring r13. If we are returning to
- * userspace and we take an exception after restoring r13,
- * we end up corrupting the userspace r13 value.
- */
- li r4,0
- mtmsrd r4,1
-
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
- /* TM debug */
- std r3, PACATMSCRATCH(r13) /* Stash returned-to MSR */
-#endif
- /*
- * r13 is our per cpu area, only restore it if we are returning to
- * userspace the value stored in the stack frame may belong to
- * another CPU.
- */
- andi. r0,r3,MSR_PR
- beq 1f
-BEGIN_FTR_SECTION
- /* Restore PPR */
- ld r2,_PPR(r1)
- mtspr SPRN_PPR,r2
-END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
- ACCOUNT_CPU_USER_EXIT(r13, r2, r4)
- REST_GPR(13, r1)
-
- /*
- * We don't need to restore AMR on the way back to userspace for KUAP.
- * The value of AMR only matters while we're in the kernel.
- */
- mtspr SPRN_SRR1,r3
-
- ld r2,_CCR(r1)
- mtcrf 0xFF,r2
- ld r2,_NIP(r1)
- mtspr SPRN_SRR0,r2
-
- ld r0,GPR0(r1)
- ld r2,GPR2(r1)
- ld r3,GPR3(r1)
- ld r4,GPR4(r1)
- ld r1,GPR1(r1)
- RFI_TO_USER
- b . /* prevent speculative execution */
-
-1: mtspr SPRN_SRR1,r3
-
- ld r2,_CCR(r1)
- mtcrf 0xFF,r2
- ld r2,_NIP(r1)
- mtspr SPRN_SRR0,r2
-
- /*
- * Leaving a stale exception_marker on the stack can confuse
- * the reliable stack unwinder later on. Clear it.
- */
- li r2,0
- std r2,STACK_FRAME_OVERHEAD-16(r1)
-
- ld r0,GPR0(r1)
- ld r2,GPR2(r1)
- ld r3,GPR3(r1)
-
- kuap_restore_amr r4
-
- ld r4,GPR4(r1)
- ld r1,GPR1(r1)
- RFI_TO_KERNEL
- b . /* prevent speculative execution */
-
-#endif /* CONFIG_PPC_BOOK3E */
-
- /*
- * We are returning to a context with interrupts soft disabled.
- *
- * However, we may also about to hard enable, so we need to
- * make sure that in this case, we also clear PACA_IRQ_HARD_DIS
- * or that bit can get out of sync and bad things will happen
- */
-.Lrestore_irq_off:
- ld r3,_MSR(r1)
- lbz r7,PACAIRQHAPPENED(r13)
- andi. r0,r3,MSR_EE
- beq 1f
- rlwinm r7,r7,0,~PACA_IRQ_HARD_DIS
- stb r7,PACAIRQHAPPENED(r13)
-1:
-#if defined(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG) && defined(CONFIG_BUG)
- /* The interrupt should not have soft enabled. */
- lbz r7,PACAIRQSOFTMASK(r13)
-1: tdeqi r7,IRQS_ENABLED
- EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
-#endif
- b .Ldo_restore
-
- /*
- * Something did happen, check if a re-emit is needed
- * (this also clears paca->irq_happened)
- */
-.Lrestore_check_irq_replay:
- /* XXX: We could implement a fast path here where we check
- * for irq_happened being just 0x01, in which case we can
- * clear it and return. That means that we would potentially
- * miss a decrementer having wrapped all the way around.
- *
- * Still, this might be useful for things like hash_page
- */
- bl __check_irq_replay
- cmpwi cr0,r3,0
- beq .Lrestore_no_replay
-
- /*
- * We need to re-emit an interrupt. We do so by re-using our
- * existing exception frame. We first change the trap value,
- * but we need to ensure we preserve the low nibble of it
- */
- ld r4,_TRAP(r1)
- clrldi r4,r4,60
- or r4,r4,r3
- std r4,_TRAP(r1)
-
- /*
- * PACA_IRQ_HARD_DIS won't always be set here, so set it now
- * to reconcile the IRQ state. Tracing is already accounted for.
- */
- lbz r4,PACAIRQHAPPENED(r13)
- ori r4,r4,PACA_IRQ_HARD_DIS
- stb r4,PACAIRQHAPPENED(r13)
-
- /*
- * Then find the right handler and call it. Interrupts are
- * still soft-disabled and we keep them that way.
- */
- cmpwi cr0,r3,0x500
- bne 1f
- addi r3,r1,STACK_FRAME_OVERHEAD;
- bl do_IRQ
- b ret_from_except
-1: cmpwi cr0,r3,0xf00
- bne 1f
- addi r3,r1,STACK_FRAME_OVERHEAD;
- bl performance_monitor_exception
- b ret_from_except
-1: cmpwi cr0,r3,0xe60
- bne 1f
- addi r3,r1,STACK_FRAME_OVERHEAD;
- bl handle_hmi_exception
- b ret_from_except
-1: cmpwi cr0,r3,0x900
- bne 1f
- addi r3,r1,STACK_FRAME_OVERHEAD;
- bl timer_interrupt
- b ret_from_except
-#ifdef CONFIG_PPC_DOORBELL
-1:
-#ifdef CONFIG_PPC_BOOK3E
- cmpwi cr0,r3,0x280
-#else
- cmpwi cr0,r3,0xa00
-#endif /* CONFIG_PPC_BOOK3E */
- bne 1f
- addi r3,r1,STACK_FRAME_OVERHEAD;
- bl doorbell_exception
-#endif /* CONFIG_PPC_DOORBELL */
-1: b ret_from_except /* What else to do here ? */
-
-.Lunrecov_restore:
- addi r3,r1,STACK_FRAME_OVERHEAD
- bl unrecoverable_exception
- b .Lunrecov_restore
-
-_ASM_NOKPROBE_SYMBOL(ret_from_except);
-_ASM_NOKPROBE_SYMBOL(ret_from_except_lite);
-_ASM_NOKPROBE_SYMBOL(resume_kernel);
-_ASM_NOKPROBE_SYMBOL(fast_exc_return_irq);
-_ASM_NOKPROBE_SYMBOL(restore);
-_ASM_NOKPROBE_SYMBOL(fast_exception_return);
-
-
-#ifdef CONFIG_PPC_RTAS
-/*
- * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
- * called with the MMU off.
- *
- * In addition, we need to be in 32b mode, at least for now.
- *
- * Note: r3 is an input parameter to rtas, so don't trash it...
- */
-_GLOBAL(enter_rtas)
- mflr r0
- std r0,16(r1)
- stdu r1,-SWITCH_FRAME_SIZE(r1) /* Save SP and create stack space. */
-
- /* Because RTAS is running in 32b mode, it clobbers the high order half
- * of all registers that it saves. We therefore save those registers
- * RTAS might touch to the stack. (r0, r3-r13 are caller saved)
- */
- SAVE_GPR(2, r1) /* Save the TOC */
- SAVE_GPR(13, r1) /* Save paca */
- SAVE_NVGPRS(r1) /* Save the non-volatiles */
-
- mfcr r4
- std r4,_CCR(r1)
- mfctr r5
- std r5,_CTR(r1)
- mfspr r6,SPRN_XER
- std r6,_XER(r1)
- mfdar r7
- std r7,_DAR(r1)
- mfdsisr r8
- std r8,_DSISR(r1)
-
- /* Temporary workaround to clear CR until RTAS can be modified to
- * ignore all bits.
- */
- li r0,0
- mtcr r0
-
-#ifdef CONFIG_BUG
- /* There is no way it is acceptable to get here with interrupts enabled,
- * check it with the asm equivalent of WARN_ON
- */
- lbz r0,PACAIRQSOFTMASK(r13)
-1: tdeqi r0,IRQS_ENABLED
- EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
-#endif
-
- /* Hard-disable interrupts */
- mfmsr r6
- rldicl r7,r6,48,1
- rotldi r7,r7,16
- mtmsrd r7,1
-
- /* Unfortunately, the stack pointer and the MSR are also clobbered,
- * so they are saved in the PACA which allows us to restore
- * our original state after RTAS returns.
- */
- std r1,PACAR1(r13)
- std r6,PACASAVEDMSR(r13)
-
- /* Setup our real return addr */
- LOAD_REG_ADDR(r4,rtas_return_loc)
- clrldi r4,r4,2 /* convert to realmode address */
- mtlr r4
-
- li r0,0
- ori r0,r0,MSR_EE|MSR_SE|MSR_BE|MSR_RI
- andc r0,r6,r0
-
- li r9,1
- rldicr r9,r9,MSR_SF_LG,(63-MSR_SF_LG)
- ori r9,r9,MSR_IR|MSR_DR|MSR_FE0|MSR_FE1|MSR_FP|MSR_RI|MSR_LE
- andc r6,r0,r9
-
-__enter_rtas:
- sync /* disable interrupts so SRR0/1 */
- mtmsrd r0 /* don't get trashed */
-
- LOAD_REG_ADDR(r4, rtas)
- ld r5,RTASENTRY(r4) /* get the rtas->entry value */
- ld r4,RTASBASE(r4) /* get the rtas->base value */
-
- mtspr SPRN_SRR0,r5
- mtspr SPRN_SRR1,r6
- RFI_TO_KERNEL
- b . /* prevent speculative execution */
-
-rtas_return_loc:
- FIXUP_ENDIAN
-
- /*
- * Clear RI and set SF before anything.
- */
- mfmsr r6
- li r0,MSR_RI
- andc r6,r6,r0
- sldi r0,r0,(MSR_SF_LG - MSR_RI_LG)
- or r6,r6,r0
- sync
- mtmsrd r6
-
- /* relocation is off at this point */
- GET_PACA(r4)
- clrldi r4,r4,2 /* convert to realmode address */
-
- bcl 20,31,$+4
-0: mflr r3
- ld r3,(1f-0b)(r3) /* get &rtas_restore_regs */
-
- ld r1,PACAR1(r4) /* Restore our SP */
- ld r4,PACASAVEDMSR(r4) /* Restore our MSR */
-
- mtspr SPRN_SRR0,r3
- mtspr SPRN_SRR1,r4
- RFI_TO_KERNEL
- b . /* prevent speculative execution */
-_ASM_NOKPROBE_SYMBOL(__enter_rtas)
-_ASM_NOKPROBE_SYMBOL(rtas_return_loc)
-
- .align 3
-1: .8byte rtas_restore_regs
-
-rtas_restore_regs:
- /* relocation is on at this point */
- REST_GPR(2, r1) /* Restore the TOC */
- REST_GPR(13, r1) /* Restore paca */
- REST_NVGPRS(r1) /* Restore the non-volatiles */
-
- GET_PACA(r13)
-
- ld r4,_CCR(r1)
- mtcr r4
- ld r5,_CTR(r1)
- mtctr r5
- ld r6,_XER(r1)
- mtspr SPRN_XER,r6
- ld r7,_DAR(r1)
- mtdar r7
- ld r8,_DSISR(r1)
- mtdsisr r8
-
- addi r1,r1,SWITCH_FRAME_SIZE /* Unstack our frame */
- ld r0,16(r1) /* get return address */
-
- mtlr r0
- blr /* return to caller */
-
-#endif /* CONFIG_PPC_RTAS */
-
_GLOBAL(enter_prom)
mflr r0
std r0,16(r1)
@@ -1311,16 +292,16 @@ _GLOBAL(enter_prom)
/* Prepare a 32-bit mode big endian MSR
*/
-#ifdef CONFIG_PPC_BOOK3E
+#ifdef CONFIG_PPC_BOOK3E_64
rlwinm r11,r11,0,1,31
mtsrr1 r11
rfi
-#else /* CONFIG_PPC_BOOK3E */
- LOAD_REG_IMMEDIATE(r12, MSR_SF | MSR_ISF | MSR_LE)
+#else /* CONFIG_PPC_BOOK3E_64 */
+ LOAD_REG_IMMEDIATE(r12, MSR_SF | MSR_LE)
andc r11,r11,r12
mtsrr1 r11
RFI_TO_KERNEL
-#endif /* CONFIG_PPC_BOOK3E */
+#endif /* CONFIG_PPC_BOOK3E_64 */
1: /* Return from OF */
FIXUP_ENDIAN
diff --git a/arch/powerpc/kernel/epapr_paravirt.c b/arch/powerpc/kernel/epapr_paravirt.c
index 9d32158ce36f..d4b8aff20815 100644
--- a/arch/powerpc/kernel/epapr_paravirt.c
+++ b/arch/powerpc/kernel/epapr_paravirt.c
@@ -11,6 +11,7 @@
#include <asm/cacheflush.h>
#include <asm/code-patching.h>
#include <asm/machdep.h>
+#include <asm/inst.h>
#if !defined(CONFIG_64BIT) || defined(CONFIG_PPC_BOOK3E_64)
extern void epapr_ev_idle(void);
@@ -36,7 +37,7 @@ static int __init early_init_dt_scan_epapr(unsigned long node,
return -1;
for (i = 0; i < (len / 4); i++) {
- u32 inst = be32_to_cpu(insts[i]);
+ ppc_inst_t inst = ppc_inst(be32_to_cpu(insts[i]));
patch_instruction(epapr_hypercall_start + i, inst);
#if !defined(CONFIG_64BIT) || defined(CONFIG_PPC_BOOK3E_64)
patch_instruction(epapr_ev_idle_start + i, inst);
diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
index e4076e3c072d..2f68fb2ee4fc 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -24,6 +24,11 @@
#include <asm/kvm_asm.h>
#include <asm/kvm_booke_hv_asm.h>
#include <asm/feature-fixups.h>
+#include <asm/context_tracking.h>
+
+/* 64e interrupt returns always use SRR registers */
+#define fast_interrupt_return fast_interrupt_return_srr
+#define interrupt_return interrupt_return_srr
/* XXX This will ultimately add space for a special exception save
* structure used to save things like SRR0/SRR1, SPRGs, MAS, etc...
@@ -62,9 +67,6 @@
ld reg, (SPECIAL_EXC_##name * 8 + SPECIAL_EXC_FRAME_OFFS)(r1)
special_reg_save:
- lbz r9,PACAIRQHAPPENED(r13)
- RECONCILE_IRQ_STATE(r3,r4)
-
/*
* We only need (or have stack space) to save this stuff if
* we interrupted the kernel.
@@ -118,15 +120,11 @@ BEGIN_FTR_SECTION
mtspr SPRN_MAS5,r10
mtspr SPRN_MAS8,r10
END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
- SPECIAL_EXC_STORE(r9,IRQHAPPENED)
-
mfspr r10,SPRN_DEAR
SPECIAL_EXC_STORE(r10,DEAR)
mfspr r10,SPRN_ESR
SPECIAL_EXC_STORE(r10,ESR)
- lbz r10,PACAIRQSOFTMASK(r13)
- SPECIAL_EXC_STORE(r10,SOFTE)
ld r10,_NIP(r1)
SPECIAL_EXC_STORE(r10,CSRR0)
ld r10,_MSR(r1)
@@ -138,7 +136,8 @@ ret_from_level_except:
ld r3,_MSR(r1)
andi. r3,r3,MSR_PR
beq 1f
- b ret_from_except
+ REST_NVGPRS(r1)
+ b interrupt_return
1:
LOAD_REG_ADDR(r11,extlb_level_exc)
@@ -192,27 +191,6 @@ BEGIN_FTR_SECTION
mtspr SPRN_MAS8,r10
END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
- lbz r6,PACAIRQSOFTMASK(r13)
- ld r5,SOFTE(r1)
-
- /* Interrupts had better not already be enabled... */
- tweqi r6,IRQS_ENABLED
-
- andi. r6,r5,IRQS_DISABLED
- bne 1f
-
- TRACE_ENABLE_INTS
- stb r5,PACAIRQSOFTMASK(r13)
-1:
- /*
- * Restore PACAIRQHAPPENED rather than setting it based on
- * the return MSR[EE], since we could have interrupted
- * __check_irq_replay() or other inconsistent transitory
- * states that must remain that way.
- */
- SPECIAL_EXC_LOAD(r10,IRQHAPPENED)
- stb r10,PACAIRQHAPPENED(r13)
-
SPECIAL_EXC_LOAD(r10,DEAR)
mtspr SPRN_DEAR,r10
SPECIAL_EXC_LOAD(r10,ESR)
@@ -220,8 +198,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
stdcx. r0,0,r1 /* to clear the reservation */
- REST_4GPRS(2, r1)
- REST_4GPRS(6, r1)
+ REST_GPRS(2, 9, r1)
ld r10,_CTR(r1)
ld r11,_XER(r1)
@@ -239,17 +216,15 @@ END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
mtlr r10
mtcr r11
- ld r10,GPR10(r1)
- ld r11,GPR11(r1)
- ld r12,GPR12(r1)
+ REST_GPRS(10, 12, r1)
mtspr \scratch,r0
std r10,\paca_ex+EX_R10(r13);
std r11,\paca_ex+EX_R11(r13);
ld r10,_NIP(r1)
ld r11,_MSR(r1)
- ld r0,GPR0(r1)
- ld r1,GPR1(r1)
+ REST_GPR(0, r1)
+ REST_GPR(1, r1)
mtspr \srr0,r10
mtspr \srr1,r11
ld r10,\paca_ex+EX_R10(r13)
@@ -314,7 +289,6 @@ ret_from_mc_except:
#define SPRN_MC_SRR0 SPRN_MCSRR0
#define SPRN_MC_SRR1 SPRN_MCSRR1
-#ifdef CONFIG_PPC_FSL_BOOK3E
#define GEN_BTB_FLUSH \
START_BTB_FLUSH_SECTION \
beq 1f; \
@@ -330,13 +304,6 @@ ret_from_mc_except:
#define DBG_BTB_FLUSH CRIT_BTB_FLUSH
#define MC_BTB_FLUSH CRIT_BTB_FLUSH
#define GDBELL_BTB_FLUSH GEN_BTB_FLUSH
-#else
-#define GEN_BTB_FLUSH
-#define CRIT_BTB_FLUSH
-#define DBG_BTB_FLUSH
-#define MC_BTB_FLUSH
-#define GDBELL_BTB_FLUSH
-#endif
#define NORMAL_EXCEPTION_PROLOG(n, intnum, addition) \
EXCEPTION_PROLOG(n, intnum, GEN, addition##_GEN(n))
@@ -366,6 +333,12 @@ ret_from_mc_except:
andi. r10,r10,IRQS_DISABLED; /* yes -> go out of line */ \
bne masked_interrupt_book3e_##n
+/*
+ * Additional regs must be re-loaded from paca before EXCEPTION_COMMON* is
+ * called, because that does SAVE_NVGPRS which must see the original register
+ * values, otherwise the scratch values might be restored when exiting the
+ * interrupt.
+ */
#define PROLOG_ADDITION_2REGS_GEN(n) \
std r14,PACA_EXGEN+EX_R14(r13); \
std r15,PACA_EXGEN+EX_R15(r13)
@@ -389,42 +362,39 @@ ret_from_mc_except:
/* Core exception code for all exceptions except TLB misses. */
#define EXCEPTION_COMMON_LVL(n, scratch, excf) \
exc_##n##_common: \
- std r0,GPR0(r1); /* save r0 in stackframe */ \
- std r2,GPR2(r1); /* save r2 in stackframe */ \
- SAVE_4GPRS(3, r1); /* save r3 - r6 in stackframe */ \
- SAVE_2GPRS(7, r1); /* save r7, r8 in stackframe */ \
- std r9,GPR9(r1); /* save r9 in stackframe */ \
+ SAVE_GPR(0, r1); /* save r0 in stackframe */ \
+ SAVE_GPRS(2, 9, r1); /* save r2 - r9 in stackframe */ \
std r10,_NIP(r1); /* save SRR0 to stackframe */ \
std r11,_MSR(r1); /* save SRR1 to stackframe */ \
beq 2f; /* if from kernel mode */ \
- ACCOUNT_CPU_USER_ENTRY(r13,r10,r11);/* accounting (uses cr0+eq) */ \
2: ld r3,excf+EX_R10(r13); /* get back r10 */ \
ld r4,excf+EX_R11(r13); /* get back r11 */ \
mfspr r5,scratch; /* get back r13 */ \
- std r12,GPR12(r1); /* save r12 in stackframe */ \
- ld r2,PACATOC(r13); /* get kernel TOC into r2 */ \
+ SAVE_GPR(12, r1); /* save r12 in stackframe */ \
+ LOAD_PACA_TOC(); /* get kernel TOC into r2 */ \
mflr r6; /* save LR in stackframe */ \
mfctr r7; /* save CTR in stackframe */ \
mfspr r8,SPRN_XER; /* save XER in stackframe */ \
ld r9,excf+EX_R1(r13); /* load orig r1 back from PACA */ \
lwz r10,excf+EX_CR(r13); /* load orig CR back from PACA */ \
lbz r11,PACAIRQSOFTMASK(r13); /* get current IRQ softe */ \
- ld r12,exception_marker@toc(r2); \
- li r0,0; \
+ LOAD_REG_IMMEDIATE(r12, STACK_FRAME_REGS_MARKER); \
+ ZEROIZE_GPR(0); \
std r3,GPR10(r1); /* save r10 to stackframe */ \
std r4,GPR11(r1); /* save r11 to stackframe */ \
std r5,GPR13(r1); /* save it to stackframe */ \
std r6,_LINK(r1); \
std r7,_CTR(r1); \
std r8,_XER(r1); \
- li r3,(n)+1; /* indicate partial regs in trap */ \
+ li r3,(n); /* regs.trap vector */ \
std r9,0(r1); /* store stack frame back link */ \
std r10,_CCR(r1); /* store orig CR in stackframe */ \
std r9,GPR1(r1); /* store stack frame back link */ \
std r11,SOFTE(r1); /* and save it to stackframe */ \
std r12,STACK_FRAME_OVERHEAD-16(r1); /* mark the frame */ \
std r3,_TRAP(r1); /* set trap number */ \
- std r0,RESULT(r1); /* clear regs->result */
+ std r0,RESULT(r1); /* clear regs->result */ \
+ SAVE_NVGPRS(r1);
#define EXCEPTION_COMMON(n) \
EXCEPTION_COMMON_LVL(n, SPRN_SPRG_GEN_SCRATCH, PACA_EXGEN)
@@ -435,28 +405,6 @@ exc_##n##_common: \
#define EXCEPTION_COMMON_DBG(n) \
EXCEPTION_COMMON_LVL(n, SPRN_SPRG_DBG_SCRATCH, PACA_EXDBG)
-/*
- * This is meant for exceptions that don't immediately hard-enable. We
- * set a bit in paca->irq_happened to ensure that a subsequent call to
- * arch_local_irq_restore() will properly hard-enable and avoid the
- * fast-path, and then reconcile irq state.
- */
-#define INTS_DISABLE RECONCILE_IRQ_STATE(r3,r4)
-
-/*
- * This is called by exceptions that don't use INTS_DISABLE (that did not
- * touch irq indicators in the PACA). This will restore MSR:EE to it's
- * previous value
- *
- * XXX In the long run, we may want to open-code it in order to separate the
- * load from the wrtee, thus limiting the latency caused by the dependency
- * but at this point, I'll favor code clarity until we have a near to final
- * implementation
- */
-#define INTS_RESTORE_HARD \
- ld r11,_MSR(r1); \
- wrtee r11;
-
/* XXX FIXME: Restore r14/r15 when necessary */
#define BAD_STACK_TRAMPOLINE(n) \
exc_##n##_bad_stack: \
@@ -505,18 +453,11 @@ exc_##n##_bad_stack: \
START_EXCEPTION(label); \
NORMAL_EXCEPTION_PROLOG(trapnum, intnum, PROLOG_ADDITION_MASKABLE)\
EXCEPTION_COMMON(trapnum) \
- INTS_DISABLE; \
ack(r8); \
CHECK_NAPPING(); \
addi r3,r1,STACK_FRAME_OVERHEAD; \
bl hdlr; \
- b ret_from_except_lite;
-
-/* This value is used to mark exception frames on the stack. */
- .section ".toc","aw"
-exception_marker:
- .tc ID_EXC_MARKER[TC],STACK_FRAME_REGS_MARKER
-
+ b interrupt_return
/*
* And here we have the exception vectors !
@@ -561,11 +502,10 @@ __end_interrupts:
CRIT_EXCEPTION_PROLOG(0x100, BOOKE_INTERRUPT_CRITICAL,
PROLOG_ADDITION_NONE)
EXCEPTION_COMMON_CRIT(0x100)
- bl save_nvgprs
bl special_reg_save
CHECK_NAPPING();
addi r3,r1,STACK_FRAME_OVERHEAD
- bl unknown_exception
+ bl unknown_nmi_exception
b ret_from_crit_except
/* Machine Check Interrupt */
@@ -573,7 +513,6 @@ __end_interrupts:
MC_EXCEPTION_PROLOG(0x000, BOOKE_INTERRUPT_MACHINE_CHECK,
PROLOG_ADDITION_NONE)
EXCEPTION_COMMON_MC(0x000)
- bl save_nvgprs
bl special_reg_save
CHECK_NAPPING();
addi r3,r1,STACK_FRAME_OVERHEAD
@@ -586,8 +525,11 @@ __end_interrupts:
PROLOG_ADDITION_2REGS)
mfspr r14,SPRN_DEAR
mfspr r15,SPRN_ESR
+ std r14,_DEAR(r1)
+ std r15,_ESR(r1)
+ ld r14,PACA_EXGEN+EX_R14(r13)
+ ld r15,PACA_EXGEN+EX_R15(r13)
EXCEPTION_COMMON(0x300)
- INTS_DISABLE
b storage_fault_common
/* Instruction Storage Interrupt */
@@ -596,8 +538,11 @@ __end_interrupts:
PROLOG_ADDITION_2REGS)
li r15,0
mr r14,r10
+ std r14,_DEAR(r1)
+ std r15,_ESR(r1)
+ ld r14,PACA_EXGEN+EX_R14(r13)
+ ld r15,PACA_EXGEN+EX_R15(r13)
EXCEPTION_COMMON(0x400)
- INTS_DISABLE
b storage_fault_common
/* External Input Interrupt */
@@ -610,6 +555,10 @@ __end_interrupts:
PROLOG_ADDITION_2REGS)
mfspr r14,SPRN_DEAR
mfspr r15,SPRN_ESR
+ std r14,_DEAR(r1)
+ std r15,_ESR(r1)
+ ld r14,PACA_EXGEN+EX_R14(r13)
+ ld r15,PACA_EXGEN+EX_R15(r13)
EXCEPTION_COMMON(0x600)
b alignment_more /* no room, go out of line */
@@ -618,14 +567,13 @@ __end_interrupts:
NORMAL_EXCEPTION_PROLOG(0x700, BOOKE_INTERRUPT_PROGRAM,
PROLOG_ADDITION_1REG)
mfspr r14,SPRN_ESR
+ std r14,_ESR(r1)
+ ld r14,PACA_EXGEN+EX_R14(r13)
EXCEPTION_COMMON(0x700)
- INTS_DISABLE
- std r14,_DSISR(r1)
addi r3,r1,STACK_FRAME_OVERHEAD
- ld r14,PACA_EXGEN+EX_R14(r13)
- bl save_nvgprs
bl program_check_exception
- b ret_from_except
+ REST_NVGPRS(r1)
+ b interrupt_return
/* Floating Point Unavailable Interrupt */
START_EXCEPTION(fp_unavailable);
@@ -637,12 +585,10 @@ __end_interrupts:
andi. r0,r12,MSR_PR;
beq- 1f
bl load_up_fpu
- b fast_exception_return
-1: INTS_DISABLE
- bl save_nvgprs
- addi r3,r1,STACK_FRAME_OVERHEAD
+ b fast_interrupt_return
+1: addi r3,r1,STACK_FRAME_OVERHEAD
bl kernel_fp_unavailable_exception
- b ret_from_except
+ b interrupt_return
/* Altivec Unavailable Interrupt */
START_EXCEPTION(altivec_unavailable);
@@ -656,15 +602,13 @@ BEGIN_FTR_SECTION
andi. r0,r12,MSR_PR;
beq- 1f
bl load_up_altivec
- b fast_exception_return
+ b fast_interrupt_return
1:
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
- INTS_DISABLE
- bl save_nvgprs
addi r3,r1,STACK_FRAME_OVERHEAD
bl altivec_unavailable_exception
- b ret_from_except
+ b interrupt_return
/* AltiVec Assist */
START_EXCEPTION(altivec_assist);
@@ -672,17 +616,16 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
BOOKE_INTERRUPT_ALTIVEC_ASSIST,
PROLOG_ADDITION_NONE)
EXCEPTION_COMMON(0x220)
- INTS_DISABLE
- bl save_nvgprs
addi r3,r1,STACK_FRAME_OVERHEAD
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
bl altivec_assist_exception
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
+ REST_NVGPRS(r1)
#else
bl unknown_exception
#endif
- b ret_from_except
+ b interrupt_return
/* Decrementer Interrupt */
@@ -698,14 +641,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
CRIT_EXCEPTION_PROLOG(0x9f0, BOOKE_INTERRUPT_WATCHDOG,
PROLOG_ADDITION_NONE)
EXCEPTION_COMMON_CRIT(0x9f0)
- bl save_nvgprs
bl special_reg_save
CHECK_NAPPING();
addi r3,r1,STACK_FRAME_OVERHEAD
#ifdef CONFIG_BOOKE_WDT
bl WatchdogException
#else
- bl unknown_exception
+ bl unknown_nmi_exception
#endif
b ret_from_crit_except
@@ -722,11 +664,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
NORMAL_EXCEPTION_PROLOG(0xf20, BOOKE_INTERRUPT_AP_UNAVAIL,
PROLOG_ADDITION_NONE)
EXCEPTION_COMMON(0xf20)
- INTS_DISABLE
- bl save_nvgprs
addi r3,r1,STACK_FRAME_OVERHEAD
bl unknown_exception
- b ret_from_except
+ b interrupt_return
/* Debug exception as a critical interrupt */
START_EXCEPTION(debug_crit);
@@ -747,9 +687,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
beq+ 1f
#ifdef CONFIG_RELOCATABLE
- ld r15,PACATOC(r13)
- ld r14,interrupt_base_book3e@got(r15)
- ld r15,__end_interrupts@got(r15)
+ __LOAD_PACA_TOC(r15)
+ LOAD_REG_ADDR_ALTTOC(r14, r15, interrupt_base_book3e)
+ LOAD_REG_ADDR_ALTTOC(r15, r15, __end_interrupts)
cmpld cr0,r10,r14
cmpld cr1,r10,r15
#else
@@ -787,15 +727,14 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
* normal exception
*/
mfspr r14,SPRN_DBSR
- EXCEPTION_COMMON_CRIT(0xd00)
std r14,_DSISR(r1)
- addi r3,r1,STACK_FRAME_OVERHEAD
- mr r4,r14
ld r14,PACA_EXCRIT+EX_R14(r13)
ld r15,PACA_EXCRIT+EX_R15(r13)
- bl save_nvgprs
+ EXCEPTION_COMMON_CRIT(0xd00)
+ addi r3,r1,STACK_FRAME_OVERHEAD
bl DebugException
- b ret_from_except
+ REST_NVGPRS(r1)
+ b interrupt_return
kernel_dbg_exc:
b . /* NYI */
@@ -819,9 +758,9 @@ kernel_dbg_exc:
beq+ 1f
#ifdef CONFIG_RELOCATABLE
- ld r15,PACATOC(r13)
- ld r14,interrupt_base_book3e@got(r15)
- ld r15,__end_interrupts@got(r15)
+ __LOAD_PACA_TOC(r15)
+ LOAD_REG_ADDR_ALTTOC(r14, r15, interrupt_base_book3e)
+ LOAD_REG_ADDR_ALTTOC(r15, r15, __end_interrupts)
cmpld cr0,r10,r14
cmpld cr1,r10,r15
#else
@@ -859,26 +798,30 @@ kernel_dbg_exc:
* normal exception
*/
mfspr r14,SPRN_DBSR
- EXCEPTION_COMMON_DBG(0xd08)
- INTS_DISABLE
std r14,_DSISR(r1)
- addi r3,r1,STACK_FRAME_OVERHEAD
- mr r4,r14
ld r14,PACA_EXDBG+EX_R14(r13)
ld r15,PACA_EXDBG+EX_R15(r13)
- bl save_nvgprs
+ EXCEPTION_COMMON_DBG(0xd08)
+ addi r3,r1,STACK_FRAME_OVERHEAD
bl DebugException
- b ret_from_except
+ REST_NVGPRS(r1)
+ b interrupt_return
START_EXCEPTION(perfmon);
NORMAL_EXCEPTION_PROLOG(0x260, BOOKE_INTERRUPT_PERFORMANCE_MONITOR,
PROLOG_ADDITION_NONE)
EXCEPTION_COMMON(0x260)
- INTS_DISABLE
CHECK_NAPPING()
addi r3,r1,STACK_FRAME_OVERHEAD
+ /*
+ * XXX: Returning from performance_monitor_exception taken as a
+ * soft-NMI (Linux irqs disabled) may be risky to use interrupt_return
+ * and could cause bugs in return or elsewhere. That case should just
+ * restore registers and return. There is a workaround for one known
+ * problem in interrupt_exit_kernel_prepare().
+ */
bl performance_monitor_exception
- b ret_from_except_lite
+ b interrupt_return
/* Doorbell interrupt */
MASKABLE_EXCEPTION(0x280, BOOKE_INTERRUPT_DOORBELL,
@@ -889,11 +832,10 @@ kernel_dbg_exc:
CRIT_EXCEPTION_PROLOG(0x2a0, BOOKE_INTERRUPT_DOORBELL_CRITICAL,
PROLOG_ADDITION_NONE)
EXCEPTION_COMMON_CRIT(0x2a0)
- bl save_nvgprs
bl special_reg_save
CHECK_NAPPING();
addi r3,r1,STACK_FRAME_OVERHEAD
- bl unknown_exception
+ bl unknown_nmi_exception
b ret_from_crit_except
/*
@@ -905,21 +847,18 @@ kernel_dbg_exc:
PROLOG_ADDITION_NONE)
EXCEPTION_COMMON(0x2c0)
addi r3,r1,STACK_FRAME_OVERHEAD
- bl save_nvgprs
- INTS_RESTORE_HARD
bl unknown_exception
- b ret_from_except
+ b interrupt_return
/* Guest Doorbell critical Interrupt */
START_EXCEPTION(guest_doorbell_crit);
CRIT_EXCEPTION_PROLOG(0x2e0, BOOKE_INTERRUPT_GUEST_DBELL_CRIT,
PROLOG_ADDITION_NONE)
EXCEPTION_COMMON_CRIT(0x2e0)
- bl save_nvgprs
bl special_reg_save
CHECK_NAPPING();
addi r3,r1,STACK_FRAME_OVERHEAD
- bl unknown_exception
+ bl unknown_nmi_exception
b ret_from_crit_except
/* Hypervisor call */
@@ -928,10 +867,8 @@ kernel_dbg_exc:
PROLOG_ADDITION_NONE)
EXCEPTION_COMMON(0x310)
addi r3,r1,STACK_FRAME_OVERHEAD
- bl save_nvgprs
- INTS_RESTORE_HARD
bl unknown_exception
- b ret_from_except
+ b interrupt_return
/* Embedded Hypervisor privileged */
START_EXCEPTION(ehpriv);
@@ -939,10 +876,8 @@ kernel_dbg_exc:
PROLOG_ADDITION_NONE)
EXCEPTION_COMMON(0x320)
addi r3,r1,STACK_FRAME_OVERHEAD
- bl save_nvgprs
- INTS_RESTORE_HARD
bl unknown_exception
- b ret_from_except
+ b interrupt_return
/* LRAT Error interrupt */
START_EXCEPTION(lrat_error);
@@ -950,10 +885,36 @@ kernel_dbg_exc:
PROLOG_ADDITION_NONE)
EXCEPTION_COMMON(0x340)
addi r3,r1,STACK_FRAME_OVERHEAD
- bl save_nvgprs
- INTS_RESTORE_HARD
bl unknown_exception
- b ret_from_except
+ b interrupt_return
+
+.macro SEARCH_RESTART_TABLE
+#ifdef CONFIG_RELOCATABLE
+ __LOAD_PACA_TOC(r11)
+ LOAD_REG_ADDR_ALTTOC(r14, r11, __start___restart_table)
+ LOAD_REG_ADDR_ALTTOC(r15, r11, __stop___restart_table)
+#else
+ LOAD_REG_IMMEDIATE_SYM(r14, r11, __start___restart_table)
+ LOAD_REG_IMMEDIATE_SYM(r15, r11, __stop___restart_table)
+#endif
+300:
+ cmpd r14,r15
+ beq 302f
+ ld r11,0(r14)
+ cmpld r10,r11
+ blt 301f
+ ld r11,8(r14)
+ cmpld r10,r11
+ bge 301f
+ ld r11,16(r14)
+ b 303f
+301:
+ addi r14,r14,24
+ b 300b
+302:
+ li r11,0
+303:
+.endm
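The SEARCH_RESTART_TABLE macro above walks the __restart_table section: each 24-byte entry describes an address range plus a fixup address, and the macro leaves the fixup (or 0) in r11 for the interrupted address held in r10. A minimal C sketch of the equivalent lookup, purely illustrative and not part of the patch — the struct and function names are invented for clarity, only the section symbols come from the macro:

	/*
	 * Illustrative C equivalent of SEARCH_RESTART_TABLE. Each entry is
	 * three 64-bit words: start, end, fixup. Given the interrupted NIP,
	 * return the fixup address, or 0 if no entry covers it, mirroring
	 * what the macro leaves in r11.
	 */
	struct restart_entry {
		unsigned long start;	/* first byte of the protected region */
		unsigned long end;	/* first byte past the region */
		unsigned long fixup;	/* address to restart from */
	};

	extern struct restart_entry __start___restart_table[];
	extern struct restart_entry __stop___restart_table[];

	static unsigned long search_restart_table(unsigned long nip)
	{
		struct restart_entry *e;

		for (e = __start___restart_table; e < __stop___restart_table; e++) {
			if (nip >= e->start && nip < e->end)
				return e->fixup;
		}
		return 0;
	}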
/*
* An interrupt came in while soft-disabled; We mark paca->irq_happened
@@ -963,6 +924,9 @@ kernel_dbg_exc:
*/
.macro masked_interrupt_book3e paca_irq full_mask
+ std r14,PACA_EXGEN+EX_R14(r13)
+ std r15,PACA_EXGEN+EX_R15(r13)
+
lbz r10,PACAIRQHAPPENED(r13)
.if \full_mask == 1
ori r10,r10,\paca_irq | PACA_IRQ_HARD_DIS
@@ -972,22 +936,29 @@ kernel_dbg_exc:
stb r10,PACAIRQHAPPENED(r13)
.if \full_mask == 1
- rldicl r10,r11,48,1 /* clear MSR_EE */
- rotldi r11,r10,16
+ xori r11,r11,MSR_EE /* clear MSR_EE */
mtspr SPRN_SRR1,r11
.endif
+ mfspr r10,SPRN_SRR0
+ SEARCH_RESTART_TABLE
+ cmpdi r11,0
+ beq 1f
+ mtspr SPRN_SRR0,r11 /* return to restart address */
+1:
+
lwz r11,PACA_EXGEN+EX_CR(r13)
mtcr r11
ld r10,PACA_EXGEN+EX_R10(r13)
ld r11,PACA_EXGEN+EX_R11(r13)
+ ld r14,PACA_EXGEN+EX_R14(r13)
+ ld r15,PACA_EXGEN+EX_R15(r13)
mfspr r13,SPRN_SPRG_GEN_SCRATCH
rfi
b .
.endm
masked_interrupt_book3e_0x500:
- // XXX When adding support for EPR, use PACA_IRQ_EE_EDGE
masked_interrupt_book3e PACA_IRQ_EE 1
masked_interrupt_book3e_0x900:
@@ -1003,126 +974,24 @@ masked_interrupt_book3e_0x2c0:
masked_interrupt_book3e PACA_IRQ_DBELL 0
/*
- * Called from arch_local_irq_enable when an interrupt needs
- * to be resent. r3 contains either 0x500,0x900,0x260 or 0x280
- * to indicate the kind of interrupt. MSR:EE is already off.
- * We generate a stackframe like if a real interrupt had happened.
- *
- * Note: While MSR:EE is off, we need to make sure that _MSR
- * in the generated frame has EE set to 1 or the exception
- * handler will not properly re-enable them.
- */
-_GLOBAL(__replay_interrupt)
- /* We are going to jump to the exception common code which
- * will retrieve various register values from the PACA which
- * we don't give a damn about.
- */
- mflr r10
- mfmsr r11
- mfcr r4
- mtspr SPRN_SPRG_GEN_SCRATCH,r13;
- std r1,PACA_EXGEN+EX_R1(r13);
- stw r4,PACA_EXGEN+EX_CR(r13);
- ori r11,r11,MSR_EE
- subi r1,r1,INT_FRAME_SIZE;
- cmpwi cr0,r3,0x500
- beq exc_0x500_common
- cmpwi cr0,r3,0x900
- beq exc_0x900_common
- cmpwi cr0,r3,0x280
- beq exc_0x280_common
- blr
-
-
-/*
* This is called from 0x300 and 0x400 handlers after the prologs with
* r14 and r15 containing the fault address and error code, with the
* original values stashed away in the PACA
*/
storage_fault_common:
- std r14,_DAR(r1)
- std r15,_DSISR(r1)
addi r3,r1,STACK_FRAME_OVERHEAD
- mr r4,r14
- mr r5,r15
- ld r14,PACA_EXGEN+EX_R14(r13)
- ld r15,PACA_EXGEN+EX_R15(r13)
bl do_page_fault
- cmpdi r3,0
- bne- 1f
- b ret_from_except_lite
-1: bl save_nvgprs
- mr r5,r3
- addi r3,r1,STACK_FRAME_OVERHEAD
- ld r4,_DAR(r1)
- bl bad_page_fault
- b ret_from_except
+ b interrupt_return
/*
* Alignment exception doesn't fit entirely in the 0x100 bytes so it
* continues here.
*/
alignment_more:
- std r14,_DAR(r1)
- std r15,_DSISR(r1)
addi r3,r1,STACK_FRAME_OVERHEAD
- ld r14,PACA_EXGEN+EX_R14(r13)
- ld r15,PACA_EXGEN+EX_R15(r13)
- bl save_nvgprs
- INTS_RESTORE_HARD
bl alignment_exception
- b ret_from_except
-
-/*
- * We branch here from entry_64.S for the last stage of the exception
- * return code path. MSR:EE is expected to be off at that point
- */
-_GLOBAL(exception_return_book3e)
- b 1f
-
-/* This is the return from load_up_fpu fast path which could do with
- * less GPR restores in fact, but for now we have a single return path
- */
- .globl fast_exception_return
-fast_exception_return:
- wrteei 0
-1: mr r0,r13
- ld r10,_MSR(r1)
- REST_4GPRS(2, r1)
- andi. r6,r10,MSR_PR
- REST_2GPRS(6, r1)
- beq 1f
- ACCOUNT_CPU_USER_EXIT(r13, r10, r11)
- ld r0,GPR13(r1)
-
-1: stdcx. r0,0,r1 /* to clear the reservation */
-
- ld r8,_CCR(r1)
- ld r9,_LINK(r1)
- ld r10,_CTR(r1)
- ld r11,_XER(r1)
- mtcr r8
- mtlr r9
- mtctr r10
- mtxer r11
- REST_2GPRS(8, r1)
- ld r10,GPR10(r1)
- ld r11,GPR11(r1)
- ld r12,GPR12(r1)
- mtspr SPRN_SPRG_GEN_SCRATCH,r0
-
- std r10,PACA_EXGEN+EX_R10(r13);
- std r11,PACA_EXGEN+EX_R11(r13);
- ld r10,_NIP(r1)
- ld r11,_MSR(r1)
- ld r0,GPR0(r1)
- ld r1,GPR1(r1)
- mtspr SPRN_SRR0,r10
- mtspr SPRN_SRR1,r11
- ld r10,PACA_EXGEN+EX_R10(r13)
- ld r11,PACA_EXGEN+EX_R11(r13)
- mfspr r13,SPRN_SPRG_GEN_SCRATCH
- rfi
+ REST_NVGPRS(r1)
+ b interrupt_return
/*
* Trampolines used when spotting a bad kernel stack pointer in
@@ -1175,19 +1044,16 @@ bad_stack_book3e:
std r11,_CCR(r1)
mfspr r10,SPRN_DEAR
mfspr r11,SPRN_ESR
- std r10,_DAR(r1)
- std r11,_DSISR(r1)
- std r0,GPR0(r1); /* save r0 in stackframe */ \
- std r2,GPR2(r1); /* save r2 in stackframe */ \
- SAVE_4GPRS(3, r1); /* save r3 - r6 in stackframe */ \
- SAVE_2GPRS(7, r1); /* save r7, r8 in stackframe */ \
- std r9,GPR9(r1); /* save r9 in stackframe */ \
+ std r10,_DEAR(r1)
+ std r11,_ESR(r1)
+ SAVE_GPR(0, r1); /* save r0 in stackframe */ \
+ SAVE_GPRS(2, 9, r1); /* save r2 - r9 in stackframe */ \
ld r3,PACA_EXGEN+EX_R10(r13);/* get back r10 */ \
ld r4,PACA_EXGEN+EX_R11(r13);/* get back r11 */ \
mfspr r5,SPRN_SPRG_GEN_SCRATCH;/* get back r13 XXX can be wrong */ \
std r3,GPR10(r1); /* save r10 to stackframe */ \
std r4,GPR11(r1); /* save r11 to stackframe */ \
- std r12,GPR12(r1); /* save r12 in stackframe */ \
+ SAVE_GPR(12, r1); /* save r12 in stackframe */ \
std r5,GPR13(r1); /* save it to stackframe */ \
mflr r10
mfctr r11
@@ -1195,15 +1061,14 @@ bad_stack_book3e:
std r10,_LINK(r1)
std r11,_CTR(r1)
std r12,_XER(r1)
- SAVE_10GPRS(14,r1)
- SAVE_8GPRS(24,r1)
+ SAVE_NVGPRS(r1)
lhz r12,PACA_TRAP_SAVE(r13)
std r12,_TRAP(r1)
addi r11,r1,INT_FRAME_SIZE
std r11,0(r1)
- li r12,0
+ ZEROIZE_GPR(12)
std r12,0(r11)
- ld r2,PACATOC(r13)
+ LOAD_PACA_TOC()
1: addi r3,r1,STACK_FRAME_OVERHEAD
bl kernel_bad_stack
b 1b
@@ -1245,7 +1110,7 @@ found_iprot:
* r3 = MAS0_TLBSEL (for the iprot array)
* r4 = SPRN_TLBnCFG
*/
- bl invstr /* Find our address */
+ bcl 20,31,$+4 /* Find our address */
invstr: mflr r6 /* Make it accessible */
mfmsr r7
rlwinm r5,r7,27,31,31 /* extract MSR[IS] */
@@ -1314,7 +1179,7 @@ skpinv: addi r6,r6,1 /* Increment */
mfmsr r6
xori r6,r6,MSR_IS
mtspr SPRN_SRR1,r6
- bl 1f /* Find our address */
+ bcl 20,31,$+4 /* Find our address */
1: mflr r6
addi r6,r6,(2f - 1b)
mtspr SPRN_SRR0,r6
@@ -1374,7 +1239,7 @@ skpinv: addi r6,r6,1 /* Increment */
* r4 = MAS0 w/TLBSEL & ESEL for the temp mapping
*/
/* Now we branch the new virtual address mapped by this entry */
- bl 1f /* Find our address */
+ bcl 20,31,$+4 /* Find our address */
1: mflr r6
addi r6,r6,(2f - 1b)
tovirt(r6,r6)
@@ -1443,7 +1308,12 @@ a2_tlbinit_code_start:
a2_tlbinit_after_linear_map:
/* Now we branch the new virtual address mapped by this entry */
+#ifdef CONFIG_RELOCATABLE
+ __LOAD_PACA_TOC(r5)
+ LOAD_REG_ADDR_ALTTOC(r3, r5, 1f)
+#else
LOAD_REG_IMMEDIATE_SYM(r3, r5, 1f)
+#endif
mtctr r3
bctr
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index ffc15f4f079d..651c36b056bd 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -21,28 +21,6 @@
#include <asm/feature-fixups.h>
#include <asm/kup.h>
-/* PACA save area offsets (exgen, exmc, etc) */
-#define EX_R9 0
-#define EX_R10 8
-#define EX_R11 16
-#define EX_R12 24
-#define EX_R13 32
-#define EX_DAR 40
-#define EX_DSISR 48
-#define EX_CCR 52
-#define EX_CFAR 56
-#define EX_PPR 64
-#if defined(CONFIG_RELOCATABLE)
-#define EX_CTR 72
-.if EX_SIZE != 10
- .error "EX_SIZE is wrong"
-.endif
-#else
-.if EX_SIZE != 9
- .error "EX_SIZE is wrong"
-.endif
-#endif
-
/*
* Following are fixed section helper macros.
*
@@ -50,7 +28,6 @@
* EXC_VIRT_BEGIN/END - virt (AIL), unrelocated exception vectors
* TRAMP_REAL_BEGIN - real, unrelocated helpers (virt may call these)
* TRAMP_VIRT_BEGIN - virt, unreloc helpers (in practice, real can use)
- * TRAMP_KVM_BEGIN - KVM handlers, these are put into real, unrelocated
* EXC_COMMON - After switching to virtual, relocated mode.
*/
@@ -71,7 +48,7 @@
.balign IFETCH_ALIGN_BYTES; \
.global name; \
_ASM_NOKPROBE_SYMBOL(name); \
- DEFINE_FIXED_SYMBOL(name); \
+ DEFINE_FIXED_SYMBOL(name, text); \
name:
#define TRAMP_REAL_BEGIN(name) \
@@ -80,13 +57,6 @@ name:
#define TRAMP_VIRT_BEGIN(name) \
FIXED_SECTION_ENTRY_BEGIN(virt_trampolines, name)
-#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
-#define TRAMP_KVM_BEGIN(name) \
- TRAMP_VIRT_BEGIN(name)
-#else
-#define TRAMP_KVM_BEGIN(name)
-#endif
-
#define EXC_REAL_NONE(start, size) \
FIXED_SECTION_ENTRY_BEGIN_LOCATION(real_vectors, exc_real_##start##_##unused, start, size); \
FIXED_SECTION_ENTRY_END_LOCATION(real_vectors, exc_real_##start##_##unused, start, size)
@@ -106,282 +76,157 @@ name:
ld reg,PACAKBASE(r13); /* get high part of &label */ \
ori reg,reg,FIXED_SYMBOL_ABS_ADDR(label)
-#define __LOAD_HANDLER(reg, label) \
+#define __LOAD_HANDLER(reg, label, section) \
ld reg,PACAKBASE(r13); \
- ori reg,reg,(ABS_ADDR(label))@l
+ ori reg,reg,(ABS_ADDR(label, section))@l
/*
* Branches from unrelocated code (e.g., interrupts) to labels outside
* head-y require >64K offsets.
*/
-#define __LOAD_FAR_HANDLER(reg, label) \
+#define __LOAD_FAR_HANDLER(reg, label, section) \
ld reg,PACAKBASE(r13); \
- ori reg,reg,(ABS_ADDR(label))@l; \
- addis reg,reg,(ABS_ADDR(label))@h
-
-/* Exception register prefixes */
-#define EXC_HV_OR_STD 2 /* depends on HVMODE */
-#define EXC_HV 1
-#define EXC_STD 0
-
-#if defined(CONFIG_RELOCATABLE)
-/*
- * If we support interrupts with relocation on AND we're a relocatable kernel,
- * we need to use CTR to get to the 2nd level handler. So, save/restore it
- * when required.
- */
-#define SAVE_CTR(reg, area) mfctr reg ; std reg,area+EX_CTR(r13)
-#define GET_CTR(reg, area) ld reg,area+EX_CTR(r13)
-#define RESTORE_CTR(reg, area) ld reg,area+EX_CTR(r13) ; mtctr reg
-#else
-/* ...else CTR is unused and in register. */
-#define SAVE_CTR(reg, area)
-#define GET_CTR(reg, area) mfctr reg
-#define RESTORE_CTR(reg, area)
-#endif
+ ori reg,reg,(ABS_ADDR(label, section))@l; \
+ addis reg,reg,(ABS_ADDR(label, section))@h
/*
- * PPR save/restore macros used in exceptions-64s.S
- * Used for P7 or later processors
+ * Interrupt code generation macros
*/
-#define SAVE_PPR(area, ra) \
-BEGIN_FTR_SECTION_NESTED(940) \
- ld ra,area+EX_PPR(r13); /* Read PPR from paca */ \
- std ra,_PPR(r1); \
-END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,940)
-
-#define RESTORE_PPR_PACA(area, ra) \
-BEGIN_FTR_SECTION_NESTED(941) \
- ld ra,area+EX_PPR(r13); \
- mtspr SPRN_PPR,ra; \
-END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,941)
-
-/*
- * Get an SPR into a register if the CPU has the given feature
- */
-#define OPT_GET_SPR(ra, spr, ftr) \
-BEGIN_FTR_SECTION_NESTED(943) \
- mfspr ra,spr; \
-END_FTR_SECTION_NESTED(ftr,ftr,943)
-
-/*
- * Set an SPR from a register if the CPU has the given feature
- */
-#define OPT_SET_SPR(ra, spr, ftr) \
-BEGIN_FTR_SECTION_NESTED(943) \
- mtspr spr,ra; \
-END_FTR_SECTION_NESTED(ftr,ftr,943)
-
-/*
- * Save a register to the PACA if the CPU has the given feature
- */
-#define OPT_SAVE_REG_TO_PACA(offset, ra, ftr) \
-BEGIN_FTR_SECTION_NESTED(943) \
- std ra,offset(r13); \
-END_FTR_SECTION_NESTED(ftr,ftr,943)
+#define IVEC .L_IVEC_\name\() /* Interrupt vector address */
+#define IHSRR .L_IHSRR_\name\() /* Sets SRR or HSRR registers */
+#define IHSRR_IF_HVMODE .L_IHSRR_IF_HVMODE_\name\() /* HSRR if HV else SRR */
+#define IAREA .L_IAREA_\name\() /* PACA save area */
+#define IVIRT .L_IVIRT_\name\() /* Has virt mode entry point */
+#define IISIDE .L_IISIDE_\name\() /* Uses SRR0/1 not DAR/DSISR */
+#define ICFAR .L_ICFAR_\name\() /* Uses CFAR */
+#define ICFAR_IF_HVMODE .L_ICFAR_IF_HVMODE_\name\() /* Uses CFAR if HV */
+#define IDAR .L_IDAR_\name\() /* Uses DAR (or SRR0) */
+#define IDSISR .L_IDSISR_\name\() /* Uses DSISR (or SRR1) */
+#define IBRANCH_TO_COMMON .L_IBRANCH_TO_COMMON_\name\() /* ENTRY branch to common */
+#define IREALMODE_COMMON .L_IREALMODE_COMMON_\name\() /* Common runs in realmode */
+#define IMASK .L_IMASK_\name\() /* IRQ soft-mask bit */
+#define IKVM_REAL .L_IKVM_REAL_\name\() /* Real entry tests KVM */
+#define __IKVM_REAL(name) .L_IKVM_REAL_ ## name
+#define IKVM_VIRT .L_IKVM_VIRT_\name\() /* Virt entry tests KVM */
+#define ISTACK .L_ISTACK_\name\() /* Set regular kernel stack */
+#define __ISTACK(name) .L_ISTACK_ ## name
+#define IKUAP .L_IKUAP_\name\() /* Do KUAP lock */
+
+#define INT_DEFINE_BEGIN(n) \
+.macro int_define_ ## n name
+
+#define INT_DEFINE_END(n) \
+.endm ; \
+int_define_ ## n n ; \
+do_define_int n
+
+.macro do_define_int name
+ .ifndef IVEC
+ .error "IVEC not defined"
+ .endif
+ .ifndef IHSRR
+ IHSRR=0
+ .endif
+ .ifndef IHSRR_IF_HVMODE
+ IHSRR_IF_HVMODE=0
+ .endif
+ .ifndef IAREA
+ IAREA=PACA_EXGEN
+ .endif
+ .ifndef IVIRT
+ IVIRT=1
+ .endif
+ .ifndef IISIDE
+ IISIDE=0
+ .endif
+ .ifndef ICFAR
+ ICFAR=1
+ .endif
+ .ifndef ICFAR_IF_HVMODE
+ ICFAR_IF_HVMODE=0
+ .endif
+ .ifndef IDAR
+ IDAR=0
+ .endif
+ .ifndef IDSISR
+ IDSISR=0
+ .endif
+ .ifndef IBRANCH_TO_COMMON
+ IBRANCH_TO_COMMON=1
+ .endif
+ .ifndef IREALMODE_COMMON
+ IREALMODE_COMMON=0
+ .else
+ .if ! IBRANCH_TO_COMMON
+ .error "IREALMODE_COMMON=1 but IBRANCH_TO_COMMON=0"
+ .endif
+ .endif
+ .ifndef IMASK
+ IMASK=0
+ .endif
+ .ifndef IKVM_REAL
+ IKVM_REAL=0
+ .endif
+ .ifndef IKVM_VIRT
+ IKVM_VIRT=0
+ .endif
+ .ifndef ISTACK
+ ISTACK=1
+ .endif
+ .ifndef IKUAP
+ IKUAP=1
+ .endif
+.endm
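Each IVEC/IHSRR/IDAR/... symbol is a per-interrupt parameter; do_define_int above only supplies defaults for whatever a definition leaves unset. Purely as an illustrative sketch of how these macros are meant to be used — the particular vector and flags below are an example, not taken from this hunk — an interrupt definition would look roughly like:

	/* Example only: a data-access style interrupt at vector 0x300 that
	 * records DAR/DSISR and tests for KVM in its real-mode entry. */
	INT_DEFINE_BEGIN(data_access)
		IVEC=0x300
		IDAR=1
		IDSISR=1
		IKVM_REAL=1
	INT_DEFINE_END(data_access)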
/*
- * Branch to label using its 0xC000 address. This results in instruction
- * address suitable for MSR[IR]=0 or 1, which allows relocation to be turned
- * on using mtmsr rather than rfid.
+ * All interrupts which set HSRR registers, as well as SRESET and MCE and
+ * syscall when invoked with "sc 1" switch to MSR[HV]=1 (HVMODE) to be taken,
+ * so they all generally need to test whether they were taken in guest context.
+ *
+ * Note: SRESET and MCE may also be sent to the guest by the hypervisor, and be
+ * taken with MSR[HV]=0.
+ *
+ * Interrupts which set SRR registers (with the above exceptions) do not
+ * elevate to MSR[HV]=1 mode, though most can be taken when running with
+ * MSR[HV]=1 (e.g., bare metal kernel and userspace). So these interrupts do
+ * not need to test whether a guest is running because they get delivered to
+ * the guest directly, including nested HV KVM guests.
*
- * This could set the 0xc bits for !RELOCATABLE as an immediate, rather than
- * load KBASE for a slight optimisation.
+ * The exception is PR KVM, where the guest runs with MSR[PR]=1 and the host
+ * runs with MSR[HV]=0, so the host takes all interrupts on behalf of the
+ * guest. PR KVM runs with LPCR[AIL]=0 which causes interrupts to always be
+ * delivered to the real-mode entry point, therefore such interrupts only test
+ * KVM in their real mode handlers, and only when PR KVM is possible.
+ *
+ * Interrupts that are taken in MSR[HV]=0 and escalate to MSR[HV]=1 are always
+ * delivered in real-mode when the MMU is in hash mode because the MMU
+ * registers are not set appropriately to translate host addresses. In nested
+ * radix mode these can be delivered in virt-mode as the host translations are
+ * used implicitly (see: effective LPID, effective PID).
*/
-#define BRANCH_TO_C000(reg, label) \
- __LOAD_FAR_HANDLER(reg, label); \
- mtctr reg; \
- bctr
-
-.macro INT_KVM_HANDLER name, vec, hsrr, area, skip
- TRAMP_KVM_BEGIN(\name\()_kvm)
- KVM_HANDLER \vec, \hsrr, \area, \skip
-.endm
-#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
-#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
/*
- * If hv is possible, interrupts come into to the hv version
- * of the kvmppc_interrupt code, which then jumps to the PR handler,
- * kvmppc_interrupt_pr, if the guest is a PR guest.
+ * If an interrupt is taken while a guest is running, it is immediately routed
+ * to KVM to handle.
*/
-#define kvmppc_interrupt kvmppc_interrupt_hv
-#else
-#define kvmppc_interrupt kvmppc_interrupt_pr
-#endif
-.macro KVMTEST name, hsrr, n
+.macro KVMTEST name handler
+#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
lbz r10,HSTATE_IN_GUEST(r13)
cmpwi r10,0
- bne \name\()_kvm
-.endm
-
-.macro KVM_HANDLER vec, hsrr, area, skip
- .if \skip
- cmpwi r10,KVM_GUEST_MODE_SKIP
- beq 89f
- .else
-BEGIN_FTR_SECTION_NESTED(947)
- ld r10,\area+EX_CFAR(r13)
- std r10,HSTATE_CFAR(r13)
-END_FTR_SECTION_NESTED(CPU_FTR_CFAR,CPU_FTR_CFAR,947)
- .endif
-
-BEGIN_FTR_SECTION_NESTED(948)
- ld r10,\area+EX_PPR(r13)
- std r10,HSTATE_PPR(r13)
-END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,948)
- ld r10,\area+EX_R10(r13)
- std r12,HSTATE_SCRATCH0(r13)
- sldi r12,r9,32
/* HSRR variants have the 0x2 bit added to their trap number */
- .if \hsrr == EXC_HV_OR_STD
+ .if IHSRR_IF_HVMODE
BEGIN_FTR_SECTION
- ori r12,r12,(\vec + 0x2)
+ li r10,(IVEC + 0x2)
FTR_SECTION_ELSE
- ori r12,r12,(\vec)
+ li r10,(IVEC)
ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
- .elseif \hsrr
- ori r12,r12,(\vec + 0x2)
+ .elseif IHSRR
+ li r10,(IVEC + 0x2)
.else
- ori r12,r12,(\vec)
+ li r10,(IVEC)
.endif
-
-#ifdef CONFIG_RELOCATABLE
- /*
- * KVM requires __LOAD_FAR_HANDLER beause kvmppc_interrupt lives
- * outside the head section. CONFIG_RELOCATABLE KVM expects CTR
- * to be saved in HSTATE_SCRATCH1.
- */
- mfctr r9
- std r9,HSTATE_SCRATCH1(r13)
- __LOAD_FAR_HANDLER(r9, kvmppc_interrupt)
- mtctr r9
- ld r9,\area+EX_R9(r13)
- bctr
-#else
- ld r9,\area+EX_R9(r13)
- b kvmppc_interrupt
-#endif
-
-
- .if \skip
-89: mtocrf 0x80,r9
- ld r9,\area+EX_R9(r13)
- ld r10,\area+EX_R10(r13)
- .if \hsrr == EXC_HV_OR_STD
- BEGIN_FTR_SECTION
- b kvmppc_skip_Hinterrupt
- FTR_SECTION_ELSE
- b kvmppc_skip_interrupt
- ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
- .elseif \hsrr
- b kvmppc_skip_Hinterrupt
- .else
- b kvmppc_skip_interrupt
- .endif
- .endif
-.endm
-
-#else
-.macro KVMTEST name, hsrr, n
-.endm
-.macro KVM_HANDLER name, vec, hsrr, area, skip
-.endm
-#endif
-
-.macro INT_SAVE_SRR_AND_JUMP label, hsrr, set_ri
- ld r10,PACAKMSR(r13) /* get MSR value for kernel */
- .if ! \set_ri
- xori r10,r10,MSR_RI /* Clear MSR_RI */
- .endif
- .if \hsrr == EXC_HV_OR_STD
- BEGIN_FTR_SECTION
- mfspr r11,SPRN_HSRR0 /* save HSRR0 */
- mfspr r12,SPRN_HSRR1 /* and HSRR1 */
- mtspr SPRN_HSRR1,r10
- FTR_SECTION_ELSE
- mfspr r11,SPRN_SRR0 /* save SRR0 */
- mfspr r12,SPRN_SRR1 /* and SRR1 */
- mtspr SPRN_SRR1,r10
- ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
- .elseif \hsrr
- mfspr r11,SPRN_HSRR0 /* save HSRR0 */
- mfspr r12,SPRN_HSRR1 /* and HSRR1 */
- mtspr SPRN_HSRR1,r10
- .else
- mfspr r11,SPRN_SRR0 /* save SRR0 */
- mfspr r12,SPRN_SRR1 /* and SRR1 */
- mtspr SPRN_SRR1,r10
- .endif
- LOAD_HANDLER(r10, \label\())
- .if \hsrr == EXC_HV_OR_STD
- BEGIN_FTR_SECTION
- mtspr SPRN_HSRR0,r10
- HRFI_TO_KERNEL
- FTR_SECTION_ELSE
- mtspr SPRN_SRR0,r10
- RFI_TO_KERNEL
- ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
- .elseif \hsrr
- mtspr SPRN_HSRR0,r10
- HRFI_TO_KERNEL
- .else
- mtspr SPRN_SRR0,r10
- RFI_TO_KERNEL
- .endif
- b . /* prevent speculative execution */
-.endm
-
-/* INT_SAVE_SRR_AND_JUMP works for real or virt, this is faster but virt only */
-.macro INT_VIRT_SAVE_SRR_AND_JUMP label, hsrr
-#ifdef CONFIG_RELOCATABLE
- .if \hsrr == EXC_HV_OR_STD
- BEGIN_FTR_SECTION
- mfspr r11,SPRN_HSRR0 /* save HSRR0 */
- FTR_SECTION_ELSE
- mfspr r11,SPRN_SRR0 /* save SRR0 */
- ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
- .elseif \hsrr
- mfspr r11,SPRN_HSRR0 /* save HSRR0 */
- .else
- mfspr r11,SPRN_SRR0 /* save SRR0 */
- .endif
- LOAD_HANDLER(r12, \label\())
- mtctr r12
- .if \hsrr == EXC_HV_OR_STD
- BEGIN_FTR_SECTION
- mfspr r12,SPRN_HSRR1 /* and HSRR1 */
- FTR_SECTION_ELSE
- mfspr r12,SPRN_SRR1 /* and HSRR1 */
- ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
- .elseif \hsrr
- mfspr r12,SPRN_HSRR1 /* and HSRR1 */
- .else
- mfspr r12,SPRN_SRR1 /* and HSRR1 */
- .endif
- li r10,MSR_RI
- mtmsrd r10,1 /* Set RI (EE=0) */
- bctr
-#else
- .if \hsrr == EXC_HV_OR_STD
- BEGIN_FTR_SECTION
- mfspr r11,SPRN_HSRR0 /* save HSRR0 */
- mfspr r12,SPRN_HSRR1 /* and HSRR1 */
- FTR_SECTION_ELSE
- mfspr r11,SPRN_SRR0 /* save SRR0 */
- mfspr r12,SPRN_SRR1 /* and SRR1 */
- ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
- .elseif \hsrr
- mfspr r11,SPRN_HSRR0 /* save HSRR0 */
- mfspr r12,SPRN_HSRR1 /* and HSRR1 */
- .else
- mfspr r11,SPRN_SRR0 /* save SRR0 */
- mfspr r12,SPRN_SRR1 /* and SRR1 */
- .endif
- li r10,MSR_RI
- mtmsrd r10,1 /* Set RI (EE=0) */
- b \label
+ bne \handler
#endif
.endm
@@ -405,14 +250,53 @@ END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,948)
* - Fall through and continue executing in real, unrelocated mode.
* This is done if early=2.
*/
-.macro INT_HANDLER name, vec, ool=0, early=0, virt=0, hsrr=0, area=PACA_EXGEN, ri=1, dar=0, dsisr=0, bitmask=0, kvm=0
+
+.macro GEN_BRANCH_TO_COMMON name, virt
+ .if IREALMODE_COMMON
+ LOAD_HANDLER(r10, \name\()_common)
+ mtctr r10
+ bctr
+ .else
+ .if \virt
+#ifndef CONFIG_RELOCATABLE
+ b \name\()_common_virt
+#else
+ LOAD_HANDLER(r10, \name\()_common_virt)
+ mtctr r10
+ bctr
+#endif
+ .else
+ LOAD_HANDLER(r10, \name\()_common_real)
+ mtctr r10
+ bctr
+ .endif
+ .endif
+.endm
+
+.macro GEN_INT_ENTRY name, virt, ool=0
SET_SCRATCH0(r13) /* save r13 */
GET_PACA(r13)
- std r9,\area\()+EX_R9(r13) /* save r9 */
- OPT_GET_SPR(r9, SPRN_PPR, CPU_FTR_HAS_PPR)
+ std r9,IAREA+EX_R9(r13) /* save r9 */
+BEGIN_FTR_SECTION
+ mfspr r9,SPRN_PPR
+END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
HMT_MEDIUM
- std r10,\area\()+EX_R10(r13) /* save r10 - r12 */
- OPT_GET_SPR(r10, SPRN_CFAR, CPU_FTR_CFAR)
+ std r10,IAREA+EX_R10(r13) /* save r10 */
+ .if ICFAR
+BEGIN_FTR_SECTION
+ mfspr r10,SPRN_CFAR
+END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
+ .elseif ICFAR_IF_HVMODE
+BEGIN_FTR_SECTION
+ BEGIN_FTR_SECTION_NESTED(69)
+ mfspr r10,SPRN_CFAR
+ END_FTR_SECTION_NESTED(CPU_FTR_CFAR, CPU_FTR_CFAR, 69)
+FTR_SECTION_ELSE
+ BEGIN_FTR_SECTION_NESTED(69)
+ li r10,0
+ END_FTR_SECTION_NESTED(CPU_FTR_CFAR, CPU_FTR_CFAR, 69)
+ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
+ .endif
.if \ool
.if !\virt
b tramp_real_\name
@@ -425,47 +309,20 @@ END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,948)
.endif
.endif
- OPT_SAVE_REG_TO_PACA(\area\()+EX_PPR, r9, CPU_FTR_HAS_PPR)
- OPT_SAVE_REG_TO_PACA(\area\()+EX_CFAR, r10, CPU_FTR_CFAR)
+BEGIN_FTR_SECTION
+ std r9,IAREA+EX_PPR(r13)
+END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
+ .if ICFAR || ICFAR_IF_HVMODE
+BEGIN_FTR_SECTION
+ std r10,IAREA+EX_CFAR(r13)
+END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
+ .endif
INTERRUPT_TO_KERNEL
- SAVE_CTR(r10, \area\())
+ mfctr r10
+ std r10,IAREA+EX_CTR(r13)
mfcr r9
- .if \kvm
- KVMTEST \name \hsrr \vec
- .endif
- .if \bitmask
- lbz r10,PACAIRQSOFTMASK(r13)
- andi. r10,r10,\bitmask
- /* Associate vector numbers with bits in paca->irq_happened */
- .if \vec == 0x500 || \vec == 0xea0
- li r10,PACA_IRQ_EE
- .elseif \vec == 0x900
- li r10,PACA_IRQ_DEC
- .elseif \vec == 0xa00 || \vec == 0xe80
- li r10,PACA_IRQ_DBELL
- .elseif \vec == 0xe60
- li r10,PACA_IRQ_HMI
- .elseif \vec == 0xf00
- li r10,PACA_IRQ_PMI
- .else
- .abort "Bad maskable vector"
- .endif
-
- .if \hsrr == EXC_HV_OR_STD
- BEGIN_FTR_SECTION
- bne masked_Hinterrupt
- FTR_SECTION_ELSE
- bne masked_interrupt
- ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
- .elseif \hsrr
- bne masked_Hinterrupt
- .else
- bne masked_interrupt
- .endif
- .endif
-
- std r11,\area\()+EX_R11(r13)
- std r12,\area\()+EX_R12(r13)
+ std r11,IAREA+EX_R11(r13) /* save r11 - r12 */
+ std r12,IAREA+EX_R12(r13)
/*
* DAR/DSISR, SCRATCH0 must be read before setting MSR[RI],
@@ -473,51 +330,165 @@ END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,948)
* not recoverable if they are live.
*/
GET_SCRATCH0(r10)
- std r10,\area\()+EX_R13(r13)
- .if \dar
- .if \hsrr
+ std r10,IAREA+EX_R13(r13)
+ .if IDAR && !IISIDE
+ .if IHSRR
mfspr r10,SPRN_HDAR
.else
mfspr r10,SPRN_DAR
.endif
- std r10,\area\()+EX_DAR(r13)
+ std r10,IAREA+EX_DAR(r13)
.endif
- .if \dsisr
- .if \hsrr
+ .if IDSISR && !IISIDE
+ .if IHSRR
mfspr r10,SPRN_HDSISR
.else
mfspr r10,SPRN_DSISR
.endif
- stw r10,\area\()+EX_DSISR(r13)
+ stw r10,IAREA+EX_DSISR(r13)
.endif
- .if \early == 2
- /* nothing more */
- .elseif \early
- mfctr r10 /* save ctr, even for !RELOCATABLE */
- BRANCH_TO_C000(r11, \name\()_early_common)
- .elseif !\virt
- INT_SAVE_SRR_AND_JUMP \name\()_common, \hsrr, \ri
+ .if IHSRR_IF_HVMODE
+ BEGIN_FTR_SECTION
+ mfspr r11,SPRN_HSRR0 /* save HSRR0 */
+ mfspr r12,SPRN_HSRR1 /* and HSRR1 */
+ FTR_SECTION_ELSE
+ mfspr r11,SPRN_SRR0 /* save SRR0 */
+ mfspr r12,SPRN_SRR1 /* and SRR1 */
+ ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
+ .elseif IHSRR
+ mfspr r11,SPRN_HSRR0 /* save HSRR0 */
+ mfspr r12,SPRN_HSRR1 /* and HSRR1 */
.else
- INT_VIRT_SAVE_SRR_AND_JUMP \name\()_common, \hsrr
+ mfspr r11,SPRN_SRR0 /* save SRR0 */
+ mfspr r12,SPRN_SRR1 /* and SRR1 */
.endif
+
+ .if IBRANCH_TO_COMMON
+ GEN_BRANCH_TO_COMMON \name \virt
+ .endif
+
.if \ool
.popsection
.endif
.endm
/*
- * On entry r13 points to the paca, r9-r13 are saved in the paca,
- * r9 contains the saved CR, r11 and r12 contain the saved SRR0 and
- * SRR1, and relocation is on.
+ * __GEN_COMMON_ENTRY is required to receive the branch from interrupt
+ * entry, except in the case of the real-mode handlers which require
+ * __GEN_REALMODE_COMMON_ENTRY.
*
- * If stack=0, then the stack is already set in r1, and r1 is saved in r10.
- * PPR save and CPU accounting is not done for the !stack case (XXX why not?)
+ * This switches to virtual mode and sets MSR[RI].
*/
-.macro INT_COMMON vec, area, stack, kuap, reconcile, dar, dsisr
- .if \stack
+.macro __GEN_COMMON_ENTRY name
+DEFINE_FIXED_SYMBOL(\name\()_common_real, text)
+\name\()_common_real:
+ .if IKVM_REAL
+ KVMTEST \name kvm_interrupt
+ .endif
+
+ ld r10,PACAKMSR(r13) /* get MSR value for kernel */
+ /* MSR[RI] is clear iff using SRR regs */
+ .if IHSRR_IF_HVMODE
+ BEGIN_FTR_SECTION
+ xori r10,r10,MSR_RI
+ END_FTR_SECTION_IFCLR(CPU_FTR_HVMODE)
+ .elseif ! IHSRR
+ xori r10,r10,MSR_RI
+ .endif
+ mtmsrd r10
+
+ .if IVIRT
+ .if IKVM_VIRT
+ b 1f /* skip the virt test coming from real */
+ .endif
+
+ .balign IFETCH_ALIGN_BYTES
+DEFINE_FIXED_SYMBOL(\name\()_common_virt, text)
+\name\()_common_virt:
+ .if IKVM_VIRT
+ KVMTEST \name kvm_interrupt
+1:
+ .endif
+ .endif /* IVIRT */
+.endm
+
+/*
+ * Don't switch to virt mode. Used for early MCE and HMI handlers that
+ * want to run in real mode.
+ */
+.macro __GEN_REALMODE_COMMON_ENTRY name
+DEFINE_FIXED_SYMBOL(\name\()_common_real, text)
+\name\()_common_real:
+ .if IKVM_REAL
+ KVMTEST \name kvm_interrupt
+ .endif
+.endm
+
+.macro __GEN_COMMON_BODY name
+ .if IMASK
+ .if ! ISTACK
+ .error "No support for masked interrupt to use custom stack"
+ .endif
+
+ /* If coming from user, skip soft-mask tests. */
+ andi. r10,r12,MSR_PR
+ bne 3f
+
+ /*
+ * Kernel code running below __end_soft_masked may be
+ * implicitly soft-masked if it is within the regions
+ * in the soft mask table.
+ */
+ LOAD_HANDLER(r10, __end_soft_masked)
+ cmpld r11,r10
+ bge+ 1f
+
+ /* SEARCH_SOFT_MASK_TABLE clobbers r9,r10,r12 */
+ mtctr r12
+ stw r9,PACA_EXGEN+EX_CCR(r13)
+ SEARCH_SOFT_MASK_TABLE
+ cmpdi r12,0
+ mfctr r12 /* Restore r12 to SRR1 */
+ lwz r9,PACA_EXGEN+EX_CCR(r13)
+ beq 1f /* Not in soft-mask table */
+ li r10,IMASK
+ b 2f /* In soft-mask table, always mask */
+
+ /* Test the soft mask state against our interrupt's bit */
+1: lbz r10,PACAIRQSOFTMASK(r13)
+2: andi. r10,r10,IMASK
+ /* Associate vector numbers with bits in paca->irq_happened */
+ .if IVEC == 0x500 || IVEC == 0xea0
+ li r10,PACA_IRQ_EE
+ .elseif IVEC == 0x900
+ li r10,PACA_IRQ_DEC
+ .elseif IVEC == 0xa00 || IVEC == 0xe80
+ li r10,PACA_IRQ_DBELL
+ .elseif IVEC == 0xe60
+ li r10,PACA_IRQ_HMI
+ .elseif IVEC == 0xf00
+ li r10,PACA_IRQ_PMI
+ .else
+ .abort "Bad maskable vector"
+ .endif
+
+ .if IHSRR_IF_HVMODE
+ BEGIN_FTR_SECTION
+ bne masked_Hinterrupt
+ FTR_SECTION_ELSE
+ bne masked_interrupt
+ ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
+ .elseif IHSRR
+ bne masked_Hinterrupt
+ .else
+ bne masked_interrupt
+ .endif
+ .endif
+
+ .if ISTACK
andi. r10,r12,MSR_PR /* See if coming from user */
- mr r10,r1 /* Save r1 */
+3: mr r10,r1 /* Save r1 */
subi r1,r1,INT_FRAME_SIZE /* alloc frame on kernel stack */
beq- 100f
ld r1,PACAKSAVE(r13) /* kernel stack to use */
@@ -532,95 +503,184 @@ END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,948)
std r0,GPR0(r1) /* save r0 in stackframe */
std r10,GPR1(r1) /* save r1 in stackframe */
- .if \stack
- .if \kuap
+ /* Mark our [H]SRRs valid for return */
+ li r10,1
+ .if IHSRR_IF_HVMODE
+ BEGIN_FTR_SECTION
+ stb r10,PACAHSRR_VALID(r13)
+ FTR_SECTION_ELSE
+ stb r10,PACASRR_VALID(r13)
+ ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
+ .elseif IHSRR
+ stb r10,PACAHSRR_VALID(r13)
+ .else
+ stb r10,PACASRR_VALID(r13)
+ .endif
+
+ .if ISTACK
+ .if IKUAP
kuap_save_amr_and_lock r9, r10, cr1, cr0
.endif
beq 101f /* if from kernel mode */
- ACCOUNT_CPU_USER_ENTRY(r13, r9, r10)
- SAVE_PPR(\area, r9)
+BEGIN_FTR_SECTION
+ ld r9,IAREA+EX_PPR(r13) /* Read PPR from paca */
+ std r9,_PPR(r1)
+END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
101:
.else
- .if \kuap
+ .if IKUAP
kuap_save_amr_and_lock r9, r10, cr1
.endif
.endif
/* Save original regs values from save area to stack frame. */
- ld r9,\area+EX_R9(r13) /* move r9, r10 to stackframe */
- ld r10,\area+EX_R10(r13)
+ ld r9,IAREA+EX_R9(r13) /* move r9, r10 to stackframe */
+ ld r10,IAREA+EX_R10(r13)
std r9,GPR9(r1)
std r10,GPR10(r1)
- ld r9,\area+EX_R11(r13) /* move r11 - r13 to stackframe */
- ld r10,\area+EX_R12(r13)
- ld r11,\area+EX_R13(r13)
+ ld r9,IAREA+EX_R11(r13) /* move r11 - r13 to stackframe */
+ ld r10,IAREA+EX_R12(r13)
+ ld r11,IAREA+EX_R13(r13)
std r9,GPR11(r1)
std r10,GPR12(r1)
std r11,GPR13(r1)
- .if \dar
- .if \dar == 2
+
+ SAVE_NVGPRS(r1)
+
+ .if IDAR
+ .if IISIDE
ld r10,_NIP(r1)
.else
- ld r10,\area+EX_DAR(r13)
+ ld r10,IAREA+EX_DAR(r13)
.endif
std r10,_DAR(r1)
.endif
- .if \dsisr
- .if \dsisr == 2
+
+ .if IDSISR
+ .if IISIDE
ld r10,_MSR(r1)
lis r11,DSISR_SRR1_MATCH_64S@h
and r10,r10,r11
.else
- lwz r10,\area+EX_DSISR(r13)
+ lwz r10,IAREA+EX_DSISR(r13)
.endif
std r10,_DSISR(r1)
.endif
-BEGIN_FTR_SECTION_NESTED(66)
- ld r10,\area+EX_CFAR(r13)
+
+BEGIN_FTR_SECTION
+ .if ICFAR || ICFAR_IF_HVMODE
+ ld r10,IAREA+EX_CFAR(r13)
+ .else
+ li r10,0
+ .endif
std r10,ORIG_GPR3(r1)
-END_FTR_SECTION_NESTED(CPU_FTR_CFAR, CPU_FTR_CFAR, 66)
- GET_CTR(r10, \area)
+END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
+ ld r10,IAREA+EX_CTR(r13)
std r10,_CTR(r1)
std r2,GPR2(r1) /* save r2 in stackframe */
- SAVE_4GPRS(3, r1) /* save r3 - r6 in stackframe */
- SAVE_2GPRS(7, r1) /* save r7, r8 in stackframe */
+ SAVE_GPRS(3, 8, r1) /* save r3 - r8 in stackframe */
mflr r9 /* Get LR, later save to stack */
- ld r2,PACATOC(r13) /* get kernel TOC into r2 */
+ LOAD_PACA_TOC() /* get kernel TOC into r2 */
std r9,_LINK(r1)
lbz r10,PACAIRQSOFTMASK(r13)
mfspr r11,SPRN_XER /* save XER in stackframe */
std r10,SOFTE(r1)
std r11,_XER(r1)
- li r9,(\vec)+1
+ li r9,IVEC
std r9,_TRAP(r1) /* set trap number */
li r10,0
- ld r11,exception_marker@toc(r2)
+ LOAD_REG_IMMEDIATE(r11, STACK_FRAME_REGS_MARKER)
std r10,RESULT(r1) /* clear regs->result */
std r11,STACK_FRAME_OVERHEAD-16(r1) /* mark the frame */
+.endm
- .if \stack
- ACCOUNT_STOLEN_TIME
- .endif
+/*
+ * On entry r13 points to the paca, r9-r13 are saved in the paca,
+ * r9 contains the saved CR, r11 and r12 contain the saved SRR0 and
+ * SRR1, and relocation is on.
+ *
+ * If stack=0, then the stack is already set in r1, and r1 is saved in r10.
+ * PPR save and CPU accounting is not done for the !stack case (XXX why not?)
+ */
+.macro GEN_COMMON name
+ __GEN_COMMON_ENTRY \name
+ __GEN_COMMON_BODY \name
+.endm
- .if \reconcile
- RECONCILE_IRQ_STATE(r10, r11)
- .endif
+.macro SEARCH_RESTART_TABLE
+#ifdef CONFIG_RELOCATABLE
+ mr r12,r2
+ LOAD_PACA_TOC()
+ LOAD_REG_ADDR(r9, __start___restart_table)
+ LOAD_REG_ADDR(r10, __stop___restart_table)
+ mr r2,r12
+#else
+ LOAD_REG_IMMEDIATE_SYM(r9, r12, __start___restart_table)
+ LOAD_REG_IMMEDIATE_SYM(r10, r12, __stop___restart_table)
+#endif
+300:
+ cmpd r9,r10
+ beq 302f
+ ld r12,0(r9)
+ cmpld r11,r12
+ blt 301f
+ ld r12,8(r9)
+ cmpld r11,r12
+ bge 301f
+ ld r12,16(r9)
+ b 303f
+301:
+ addi r9,r9,24
+ b 300b
+302:
+ li r12,0
+303:
+.endm
+
+.macro SEARCH_SOFT_MASK_TABLE
+#ifdef CONFIG_RELOCATABLE
+ mr r12,r2
+ LOAD_PACA_TOC()
+ LOAD_REG_ADDR(r9, __start___soft_mask_table)
+ LOAD_REG_ADDR(r10, __stop___soft_mask_table)
+ mr r2,r12
+#else
+ LOAD_REG_IMMEDIATE_SYM(r9, r12, __start___soft_mask_table)
+ LOAD_REG_IMMEDIATE_SYM(r10, r12, __stop___soft_mask_table)
+#endif
+300:
+ cmpd r9,r10
+ beq 302f
+ ld r12,0(r9)
+ cmpld r11,r12
+ blt 301f
+ ld r12,8(r9)
+ cmpld r11,r12
+ bge 301f
+ li r12,1
+ b 303f
+301:
+ addi r9,r9,16
+ b 300b
+302:
+ li r12,0
+303:
.endm
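SEARCH_SOFT_MASK_TABLE follows the same pattern as the restart-table walk earlier, but each entry is 16 bytes (start and end only) and the result left in r12 is a plain yes/no: is the interrupted address inside an implicitly soft-masked region? A hedged C sketch, not part of the patch (names other than the section symbols are invented):

	struct soft_mask_entry {
		unsigned long start;
		unsigned long end;
	};

	extern struct soft_mask_entry __start___soft_mask_table[];
	extern struct soft_mask_entry __stop___soft_mask_table[];

	/* Returns nonzero when nip lies in a soft-masked region, matching
	 * the 0/1 value the macro leaves in r12. */
	static int search_soft_mask_table(unsigned long nip)
	{
		struct soft_mask_entry *e;

		for (e = __start___soft_mask_table; e < __stop___soft_mask_table; e++) {
			if (nip >= e->start && nip < e->end)
				return 1;
		}
		return 0;
	}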
/*
* Restore all registers including H/SRR0/1 saved in a stack frame of a
* standard exception.
*/
-.macro EXCEPTION_RESTORE_REGS hsrr
+.macro EXCEPTION_RESTORE_REGS hsrr=0
/* Move original SRR0 and SRR1 into the respective regs */
ld r9,_MSR(r1)
- .if \hsrr == EXC_HV_OR_STD
- .error "EXC_HV_OR_STD Not implemented for EXCEPTION_RESTORE_REGS"
- .endif
+ li r10,0
.if \hsrr
mtspr SPRN_HSRR1,r9
+ stb r10,PACAHSRR_VALID(r13)
.else
mtspr SPRN_SRR1,r9
+ stb r10,PACASRR_VALID(r13)
.endif
ld r9,_NIP(r1)
.if \hsrr
@@ -636,61 +696,76 @@ END_FTR_SECTION_NESTED(CPU_FTR_CFAR, CPU_FTR_CFAR, 66)
mtlr r9
ld r9,_CCR(r1)
mtcr r9
- REST_8GPRS(2, r1)
- REST_4GPRS(10, r1)
+ REST_GPRS(2, 13, r1)
REST_GPR(0, r1)
/* restore original r1. */
ld r1,GPR1(r1)
.endm
-#define RUNLATCH_ON \
-BEGIN_FTR_SECTION \
- ld r3, PACA_THREAD_INFO(r13); \
- ld r4,TI_LOCAL_FLAGS(r3); \
- andi. r0,r4,_TLF_RUNLATCH; \
- beql ppc64_runlatch_on_trampoline; \
-END_FTR_SECTION_IFSET(CPU_FTR_CTRL)
-
/*
- * When the idle code in power4_idle puts the CPU into NAP mode,
- * it has to do so in a loop, and relies on the external interrupt
- * and decrementer interrupt entry code to get it out of the loop.
- * It sets the _TLF_NAPPING bit in current_thread_info()->local_flags
- * to signal that it is in the loop and needs help to get out.
+ * EARLY_BOOT_FIXUP - Fix real-mode interrupt with wrong endian in early boot.
+ *
+ * There's a short window during boot where although the kernel is running
+ * little endian, any exceptions will cause the CPU to switch back to big
+ * endian. For example a WARN() boils down to a trap instruction, which will
+ * cause a program check, and we end up here but with the CPU in big endian
+ * mode. The first instruction of the program check handler (in GEN_INT_ENTRY
+ * below) is an mtsprg, which when executed in the wrong endian is an lhzu with
+ * a ~3GB displacement from r3. The content of r3 is random, so that is a load
+ * from some random location, and depending on the system can easily lead to a
+ * checkstop, or an infinitely recursive page fault.
+ *
+ * So to handle that case we have a trampoline here that can detect we are in
+ * the wrong endian and flip us back to the correct endian. We can't flip
+ * MSR[LE] using mtmsr, so we have to use rfid. That requires backing up SRR0/1
+ * as well as a GPR. To do that we use SPRG0/2/3, as SPRG1 is already used for
+ * the paca. SPRG3 is user readable, but this trampoline is only active very
+ * early in boot, and SPRG3 will be reinitialised in vdso_getcpu_init() before
+ * userspace starts.
*/
-#ifdef CONFIG_PPC_970_NAP
-#define FINISH_NAP \
-BEGIN_FTR_SECTION \
- ld r11, PACA_THREAD_INFO(r13); \
- ld r9,TI_LOCAL_FLAGS(r11); \
- andi. r10,r9,_TLF_NAPPING; \
- bnel power4_fixup_nap; \
-END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
-#else
-#define FINISH_NAP
+.macro EARLY_BOOT_FIXUP
+BEGIN_FTR_SECTION
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+ tdi 0,0,0x48 // Trap never, or in reverse endian: b . + 8
+ b 2f // Skip trampoline if endian is correct
+ .long 0xa643707d // mtsprg 0, r11 Backup r11
+ .long 0xa6027a7d // mfsrr0 r11
+ .long 0xa643727d // mtsprg 2, r11 Backup SRR0 in SPRG2
+ .long 0xa6027b7d // mfsrr1 r11
+ .long 0xa643737d // mtsprg 3, r11 Backup SRR1 in SPRG3
+ .long 0xa600607d // mfmsr r11
+ .long 0x01006b69 // xori r11, r11, 1 Invert MSR[LE]
+ .long 0xa6037b7d // mtsrr1 r11
+ /*
+ * This is 'li r11,1f' where 1f is the absolute address of that
+ * label, byteswapped into the SI field of the instruction.
+ */
+ .long 0x00006039 | \
+ ((ABS_ADDR(1f, real_vectors) & 0x00ff) << 24) | \
+ ((ABS_ADDR(1f, real_vectors) & 0xff00) << 8)
+ .long 0xa6037a7d // mtsrr0 r11
+ .long 0x2400004c // rfid
+1:
+ mfsprg r11, 3
+ mtsrr1 r11 // Restore SRR1
+ mfsprg r11, 2
+ mtsrr0 r11 // Restore SRR0
+ mfsprg r11, 0 // Restore r11
+2:
#endif
-
-#define EXC_COMMON(name, realvec, hdlr) \
- EXC_COMMON_BEGIN(name); \
- INT_COMMON realvec, PACA_EXGEN, 1, 1, 1, 0, 0 ; \
- bl save_nvgprs; \
- addi r3,r1,STACK_FRAME_OVERHEAD; \
- bl hdlr; \
- b ret_from_except
-
-/*
- * Like EXC_COMMON, but for exceptions that can occur in the idle task and
- * therefore need the special idle handling (finish nap and runlatch)
- */
-#define EXC_COMMON_ASYNC(name, realvec, hdlr) \
- EXC_COMMON_BEGIN(name); \
- INT_COMMON realvec, PACA_EXGEN, 1, 1, 1, 0, 0 ; \
- FINISH_NAP; \
- RUNLATCH_ON; \
- addi r3,r1,STACK_FRAME_OVERHEAD; \
- bl hdlr; \
- b ret_from_except_lite
-
+ /*
+ * program check could hit at any time, and pseries can not block
+ * MSR[ME] in early boot. So check if there is anything useful in r13
+ * yet, and spin forever if not.
+ */
+ mtsprg 0, r11
+ mfcr r11
+ cmpdi r13, 0
+ beq .
+ mtcr r11
+ mfsprg r11, 0
+END_FTR_SECTION(0, 1) // nop out after boot
+.endm
/*
* There are a few constraints to be concerned with.
@@ -716,6 +791,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
* guarantee they will be delivered virtually. Some conditions (see the ISA)
* cause exceptions to be delivered in real mode.
*
+ * The scv instructions are a special case. They get a 0x3000 offset applied.
+ * scv exceptions have unique reentrancy properties, see below.
+ *
* It's impossible to receive interrupts below 0x300 via AIL.
*
* KVM: None of the virtual exceptions are from the guest. Anything that
@@ -725,8 +803,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
* We layout physical memory as follows:
* 0x0000 - 0x00ff : Secondary processor spin code
* 0x0100 - 0x18ff : Real mode pSeries interrupt vectors
- * 0x1900 - 0x3fff : Real mode trampolines
- * 0x4000 - 0x58ff : Relon (IR=1,DR=1) mode pSeries interrupt vectors
+ * 0x1900 - 0x2fff : Real mode trampolines
+ * 0x3000 - 0x58ff : Relon (IR=1,DR=1) mode pSeries interrupt vectors
* 0x5900 - 0x6fff : Relon mode trampolines
* 0x7000 - 0x7fff : FWNMI data area
* 0x8000 - .... : Common interrupt handlers, remaining early
@@ -737,8 +815,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
* vectors there.
*/
OPEN_FIXED_SECTION(real_vectors, 0x0100, 0x1900)
-OPEN_FIXED_SECTION(real_trampolines, 0x1900, 0x4000)
-OPEN_FIXED_SECTION(virt_vectors, 0x4000, 0x5900)
+OPEN_FIXED_SECTION(real_trampolines, 0x1900, 0x3000)
+OPEN_FIXED_SECTION(virt_vectors, 0x3000, 0x5900)
OPEN_FIXED_SECTION(virt_trampolines, 0x5900, 0x7000)
#ifdef CONFIG_PPC_POWERNV
@@ -774,10 +852,131 @@ USE_FIXED_SECTION(real_vectors)
.globl __start_interrupts
__start_interrupts:
+/**
+ * Interrupt 0x3000 - System Call Vectored Interrupt (syscall).
+ * This is a synchronous interrupt invoked with the "scv" instruction. The
+ * system call does not alter the HV bit, so it is directed to the OS.
+ *
+ * Handling:
+ * scv instructions enter the kernel without changing EE, RI, ME, or HV.
+ * In particular, this means we can take a maskable interrupt at any point
+ * in the scv handler, which is unlike any other interrupt. This is solved
+ * by treating the instruction addresses in the handler as being soft-masked,
+ * by adding a SOFT_MASK_TABLE entry for them.
+ *
+ * AIL-0 mode scv exceptions go to 0x17000-0x17fff, but we set AIL-3 and
+ * ensure scv is never executed with relocation off, which means AIL-0
+ * should never happen.
+ *
+ * Before leaving the following inside-__end_soft_masked text, at least one of the
+ * following must be true:
+ * - MSR[PR]=1 (i.e., return to userspace)
+ * - MSR_EE|MSR_RI is clear (no reentrant exceptions)
+ * - Standard kernel environment is set up (stack, paca, etc)
+ *
+ * KVM:
+ * These interrupts do not elevate HV 0->1, so HV is not involved. PR KVM
+ * ensures that FSCR[SCV] is disabled whenever it has to force AIL off.
+ *
+ * Call convention:
+ *
+ * syscall register convention is in Documentation/powerpc/syscall64-abi.rst
+ */
+EXC_VIRT_BEGIN(system_call_vectored, 0x3000, 0x1000)
+ /* SCV 0 */
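+ /*
+ * Stash user r13 in r9, LR in r11 and CTR in r12, and mark irqs
+ * soft-disabled before reaching the common handler. scv does not
+ * clear MSR[EE], so only the soft mask protects this path (see the
+ * SOFT_MASK_TABLE entry below).
+ */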
+ mr r9,r13
+ GET_PACA(r13)
+ mflr r11
+ mfctr r12
+ li r10,IRQS_ALL_DISABLED
+ stb r10,PACAIRQSOFTMASK(r13)
+#ifdef CONFIG_RELOCATABLE
+ b system_call_vectored_tramp
+#else
+ b system_call_vectored_common
+#endif
+ nop
+
+ /* SCV 1 - 127 */
+ .rept 127
+ mr r9,r13
+ GET_PACA(r13)
+ mflr r11
+ mfctr r12
+ li r10,IRQS_ALL_DISABLED
+ stb r10,PACAIRQSOFTMASK(r13)
+ li r0,-1 /* cause failure */
+#ifdef CONFIG_RELOCATABLE
+ b system_call_vectored_sigill_tramp
+#else
+ b system_call_vectored_sigill
+#endif
+ .endr
+EXC_VIRT_END(system_call_vectored, 0x3000, 0x1000)
+
+// Treat scv vectors as soft-masked, see comment above.
+// Use absolute values rather than labels here, so they don't get relocated,
+// because this code runs unrelocated.
+SOFT_MASK_TABLE(0xc000000000003000, 0xc000000000004000)
+
+#ifdef CONFIG_RELOCATABLE
+TRAMP_VIRT_BEGIN(system_call_vectored_tramp)
+ __LOAD_HANDLER(r10, system_call_vectored_common, virt_trampolines)
+ mtctr r10
+ bctr
+
+TRAMP_VIRT_BEGIN(system_call_vectored_sigill_tramp)
+ __LOAD_HANDLER(r10, system_call_vectored_sigill, virt_trampolines)
+ mtctr r10
+ bctr
+#endif
+
+
/* No virt vectors corresponding with 0x0..0x100 */
EXC_VIRT_NONE(0x4000, 0x100)
+/**
+ * Interrupt 0x100 - System Reset Interrupt (SRESET aka NMI).
+ * This is a non-maskable, asynchronous interrupt always taken in real-mode.
+ * It is caused by:
+ * - Wake from power-saving state, on powernv.
+ * - An NMI from another CPU, triggered by firmware or hypercall.
+ * - As crash/debug signal injected from BMC, firmware or hypervisor.
+ *
+ * Handling:
+ * Power-save wakeup is the only performance critical path, so this is
+ * determined as quickly as possible first. In this case volatile registers
+ * can be discarded and SPRs like CFAR don't need to be read.
+ *
+ * If not a powersave wakeup, then it's run as a regular interrupt, however
+ * it uses its own stack and PACA save area to preserve the regular kernel
+ * environment for debugging.
+ *
+ * This interrupt is not maskable, so triggering it when MSR[RI] is clear,
+ * or SCRATCH0 is in use, etc. may cause a crash. It's also not entirely
+ * correct to switch to virtual mode to run the regular interrupt handler
+ * because it might be interrupted when the MMU is in a bad state (e.g., SLB
+ * is clear).
+ *
+ * FWNMI:
+ * PAPR specifies a "fwnmi" facility which sends the sreset to a different
+ * entry point with a different register set up. Some hypervisors will
+ * send the sreset to 0x100 in the guest if it is not fwnmi capable.
+ *
+ * KVM:
+ * Unlike most SRR interrupts, this may be taken by the host while executing
+ * in a guest, so a KVM test is required. KVM will pull the CPU out of guest
+ * mode and then raise the sreset.
+ */
+INT_DEFINE_BEGIN(system_reset)
+ IVEC=0x100
+ IAREA=PACA_EXNMI
+ IVIRT=0 /* no virt entry point */
+ ISTACK=0
+ IKVM_REAL=1
+INT_DEFINE_END(system_reset)
+
EXC_REAL_BEGIN(system_reset, 0x100, 0x100)
#ifdef CONFIG_PPC_P7_NAP
/*
@@ -815,11 +1014,8 @@ BEGIN_FTR_SECTION
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
#endif
- INT_HANDLER system_reset, 0x100, area=PACA_EXNMI, ri=0, kvm=1
+ GEN_INT_ENTRY system_reset, virt=0
/*
- * MSR_RI is not enabled, because PACA_EXNMI and nmi stack is
- * being used, so a nested NMI exception would corrupt it.
- *
* In theory, we should not enable relocation here if it was disabled
* in SRR1, because the MMU may not be configured to support it (e.g.,
* SLB may have been cleared). In practice, there should only be a few
@@ -828,14 +1024,15 @@ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
*/
EXC_REAL_END(system_reset, 0x100, 0x100)
EXC_VIRT_NONE(0x4100, 0x100)
-INT_KVM_HANDLER system_reset 0x100, EXC_STD, PACA_EXNMI, 0
#ifdef CONFIG_PPC_P7_NAP
TRAMP_REAL_BEGIN(system_reset_idle_wake)
/* We are waking up from idle, so may clobber any volatile register */
cmpwi cr1,r5,2
bltlr cr1 /* no state loss, return to idle caller with r3=SRR1 */
- BRANCH_TO_C000(r12, DOTSYM(idle_return_gpr_loss))
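+ /*
+ * idle_return_gpr_loss lives outside the head section, so load its
+ * address with __LOAD_FAR_HANDLER and branch via CTR.
+ */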
+ __LOAD_FAR_HANDLER(r12, DOTSYM(idle_return_gpr_loss), real_trampolines)
+ mtctr r12
+ bctr
#endif
#ifdef CONFIG_PPC_PSERIES
@@ -843,42 +1040,26 @@ TRAMP_REAL_BEGIN(system_reset_idle_wake)
* Vectors for the FWNMI option. Share common code.
*/
TRAMP_REAL_BEGIN(system_reset_fwnmi)
- /* See comment at system_reset exception, don't turn on RI */
- INT_HANDLER system_reset, 0x100, area=PACA_EXNMI, ri=0
+ GEN_INT_ENTRY system_reset, virt=0
#endif /* CONFIG_PPC_PSERIES */
EXC_COMMON_BEGIN(system_reset_common)
+ __GEN_COMMON_ENTRY system_reset
/*
- * Increment paca->in_nmi then enable MSR_RI. SLB or MCE will be able
- * to recover, but nested NMI will notice in_nmi and not recover
- * because of the use of the NMI stack. in_nmi reentrancy is tested in
- * system_reset_exception.
+ * Increment paca->in_nmi. When the interrupt entry wrapper later
+ * enable MSR_RI, then SLB or MCE will be able to recover, but a nested
+ * NMI will notice in_nmi and not recover because of the use of the NMI
+ * stack. in_nmi reentrancy is tested in system_reset_exception.
*/
lhz r10,PACA_IN_NMI(r13)
addi r10,r10,1
sth r10,PACA_IN_NMI(r13)
- li r10,MSR_RI
- mtmsrd r10,1
mr r10,r1
ld r1,PACA_NMI_EMERG_SP(r13)
subi r1,r1,INT_FRAME_SIZE
- INT_COMMON 0x100, PACA_EXNMI, 0, 1, 0, 0, 0
- bl save_nvgprs
- /*
- * Set IRQS_ALL_DISABLED unconditionally so arch_irqs_disabled does
- * the right thing. We do not want to reconcile because that goes
- * through irq tracing which we don't want in NMI.
- *
- * Save PACAIRQHAPPENED because some code will do a hard disable
- * (e.g., xmon). So we want to restore this back to where it was
- * when we return. DAR is unused in the stack, so save it there.
- */
- li r10,IRQS_ALL_DISABLED
- stb r10,PACAIRQSOFTMASK(r13)
- lbz r10,PACAIRQHAPPENED(r13)
- std r10,_DAR(r1)
+ __GEN_COMMON_BODY system_reset
addi r3,r1,STACK_FRAME_OVERHEAD
bl system_reset_exception
@@ -894,36 +1075,86 @@ EXC_COMMON_BEGIN(system_reset_common)
subi r10,r10,1
sth r10,PACA_IN_NMI(r13)
- /*
- * Restore soft mask settings.
- */
- ld r10,_DAR(r1)
- stb r10,PACAIRQHAPPENED(r13)
- ld r10,SOFTE(r1)
- stb r10,PACAIRQSOFTMASK(r13)
-
- EXCEPTION_RESTORE_REGS EXC_STD
+ kuap_kernel_restore r9, r10
+ EXCEPTION_RESTORE_REGS
RFI_TO_USER_OR_KERNEL
+/**
+ * Interrupt 0x200 - Machine Check Interrupt (MCE).
+ * This is a non-maskable interrupt always taken in real-mode. It can be
+ * synchronous or asynchronous, caused by hardware or software, and it may be
+ * taken in a power-saving state.
+ *
+ * Handling:
+ * Similarly to system reset, this uses its own stack and PACA save area,
+ * the difference is re-entrancy is allowed on the machine check stack.
+ *
+ * machine_check_early is run in real mode, and carefully decodes the
+ * machine check and tries to handle it (e.g., flush the SLB if there was an
+ * error detected there), determines if it was recoverable and logs the
+ * event.
+ *
+ * This early code does not "reconcile" irq soft-mask state like SRESET or
+ * regular interrupts do, so irqs_disabled() among other things may not work
+ * properly (irq disable/enable already doesn't work because irq tracing can
+ * not work in real mode).
+ *
+ * Then, depending on the execution context when the interrupt is taken, there
+ * are 3 main actions:
+ * - Executing in kernel mode. The event is queued with irq_work, which means
+ * it is handled when it is next safe to do so (i.e., the kernel has enabled
+ * interrupts), which could be immediately when the interrupt returns. This
+ * avoids nasty issues like switching to virtual mode when the MMU is in a
+ * bad state, or when executing OPAL code. (SRESET is exposed to such issues,
+ * but it has different priorities). Check to see if the CPU was in power
+ * save, and return via the wake up code if it was.
+ *
+ * - Executing in user mode. machine_check_exception is run like a normal
+ * interrupt handler, which processes the data generated by the early handler.
+ *
+ * - Executing in guest mode. The interrupt is run with its KVM test, and
+ * branches to KVM to deal with. KVM may queue the event for the host
+ * to report later.
+ *
+ * This interrupt is not maskable, so if it triggers when MSR[RI] is clear,
+ * or SCRATCH0 is in use, it may cause a crash.
+ *
+ * KVM:
+ * See SRESET.
+ */
+INT_DEFINE_BEGIN(machine_check_early)
+ IVEC=0x200
+ IAREA=PACA_EXMC
+ IVIRT=0 /* no virt entry point */
+ IREALMODE_COMMON=1
+ ISTACK=0
+ IDAR=1
+ IDSISR=1
+ IKUAP=0 /* We don't touch AMR here, we never go to virtual mode */
+INT_DEFINE_END(machine_check_early)
+
+INT_DEFINE_BEGIN(machine_check)
+ IVEC=0x200
+ IAREA=PACA_EXMC
+ IVIRT=0 /* no virt entry point */
+ IDAR=1
+ IDSISR=1
+ IKVM_REAL=1
+INT_DEFINE_END(machine_check)
+
EXC_REAL_BEGIN(machine_check, 0x200, 0x100)
- INT_HANDLER machine_check, 0x200, early=1, area=PACA_EXMC, dar=1, dsisr=1
- /*
- * MSR_RI is not enabled, because PACA_EXMC is being used, so a
- * nested machine check corrupts it. machine_check_common enables
- * MSR_RI.
- */
+ EARLY_BOOT_FIXUP
+ GEN_INT_ENTRY machine_check_early, virt=0
EXC_REAL_END(machine_check, 0x200, 0x100)
EXC_VIRT_NONE(0x4200, 0x100)
#ifdef CONFIG_PPC_PSERIES
TRAMP_REAL_BEGIN(machine_check_fwnmi)
/* See comment at machine_check exception, don't turn on RI */
- INT_HANDLER machine_check, 0x200, early=1, area=PACA_EXMC, dar=1, dsisr=1
+ GEN_INT_ENTRY machine_check_early, virt=0
#endif
-INT_KVM_HANDLER machine_check 0x200, EXC_STD, PACA_EXMC, 1
-
#define MACHINE_CHECK_HANDLER_WINDUP \
/* Clear MSR_RI before setting SRR0 and SRR1. */\
li r9,0; \
@@ -932,12 +1163,10 @@ INT_KVM_HANDLER machine_check 0x200, EXC_STD, PACA_EXMC, 1
lhz r12,PACA_IN_MCE(r13); \
subi r12,r12,1; \
sth r12,PACA_IN_MCE(r13); \
- EXCEPTION_RESTORE_REGS EXC_STD
+ EXCEPTION_RESTORE_REGS
EXC_COMMON_BEGIN(machine_check_early_common)
- mtctr r10 /* Restore ctr */
- mfspr r11,SPRN_SRR0
- mfspr r12,SPRN_SRR1
+ __GEN_REALMODE_COMMON_ENTRY machine_check_early
/*
* Switch to mc_emergency stack and handle re-entrancy (we limit
@@ -974,17 +1203,15 @@ EXC_COMMON_BEGIN(machine_check_early_common)
bgt cr1,unrecoverable_mce /* Check if we hit limit of 4 */
subi r1,r1,INT_FRAME_SIZE /* alloc stack frame */
- /* We don't touch AMR here, we never go to virtual mode */
- INT_COMMON 0x200, PACA_EXMC, 0, 0, 0, 1, 1
+ __GEN_COMMON_BODY machine_check_early
BEGIN_FTR_SECTION
bl enable_machine_check
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
- li r10,MSR_RI
- mtmsrd r10,1
-
- bl save_nvgprs
addi r3,r1,STACK_FRAME_OVERHEAD
+BEGIN_FTR_SECTION
+ bl machine_check_early_boot
+END_FTR_SECTION(0, 1) // nop out after boot
bl machine_check_early
std r3,RESULT(r1) /* Save result */
ld r12,_MSR(r1)
@@ -1009,7 +1236,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
/*
* Check if we are coming from guest. If yes, then run the normal
* exception handler which will take the
- * machine_check_kvm->kvmppc_interrupt branch to deliver the MC event
+ * machine_check_kvm->kvm_interrupt branch to deliver the MC event
* to guest.
*/
lbz r11,HSTATE_IN_GUEST(r13)
@@ -1063,23 +1290,18 @@ BEGIN_FTR_SECTION
mtspr SPRN_CFAR,r10
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
MACHINE_CHECK_HANDLER_WINDUP
- /* See comment at machine_check exception, don't turn on RI */
- INT_HANDLER machine_check, 0x200, area=PACA_EXMC, ri=0, dar=1, dsisr=1, kvm=1
+ GEN_INT_ENTRY machine_check, virt=0
EXC_COMMON_BEGIN(machine_check_common)
/*
* Machine check is different because we use a different
* save area: PACA_EXMC instead of PACA_EXGEN.
*/
- INT_COMMON 0x200, PACA_EXMC, 1, 1, 1, 1, 1
- FINISH_NAP
- /* Enable MSR_RI when finished with PACA_EXMC */
- li r10,MSR_RI
- mtmsrd r10,1
- bl save_nvgprs
+ GEN_COMMON machine_check
addi r3,r1,STACK_FRAME_OVERHEAD
- bl machine_check_exception
- b ret_from_except
+ bl machine_check_exception_async
+ b interrupt_return_srr
+
#ifdef CONFIG_PPC_P7_NAP
/*
@@ -1090,17 +1312,19 @@ EXC_COMMON_BEGIN(machine_check_idle_common)
bl machine_check_queue_event
/*
- * We have not used any non-volatile GPRs here, and as a rule
- * most exception code including machine check does not.
- * Therefore PACA_NAPSTATELOST does not need to be set. Idle
- * wakeup will restore volatile registers.
+ * GPR-loss wakeups are relatively straightforward, because the
+ * idle sleep code has saved all non-volatile registers on its
+ * own stack, and r1 in PACAR1.
*
- * Load the original SRR1 into r3 for pnv_powersave_wakeup_mce.
+ * For no-loss wakeups the r1 and lr registers used by the
+ * early machine check handler have to be restored first. r2 is
+ * the kernel TOC, so no need to restore it.
*
* Then decrement MCE nesting after finishing with the stack.
*/
ld r3,_MSR(r1)
ld r4,_LINK(r1)
+ ld r1,GPR1(r1)
lhz r11,PACA_IN_MCE(r13)
subi r11,r11,1
@@ -1109,7 +1333,7 @@ EXC_COMMON_BEGIN(machine_check_idle_common)
mtlr r4
rlwinm r10,r3,47-31,30,31
cmpwi cr1,r10,2
- bltlr cr1 /* no state loss, return to idle caller */
+ bltlr cr1 /* no state loss, return to idle caller with r3=SRR1 */
b idle_return_gpr_loss
#endif
@@ -1131,7 +1355,15 @@ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
andc r10,r10,r3
mtmsrd r10
- /* Invoke machine_check_exception to print MCE event and panic. */
+ lhz r12,PACA_IN_MCE(r13)
+ subi r12,r12,1
+ sth r12,PACA_IN_MCE(r13)
+
+ /*
+ * Invoke machine_check_exception to print MCE event and panic.
+ * This is the NMI version of the handler because we are called from
+ * the early handler which is a true NMI.
+ */
addi r3,r1,STACK_FRAME_OVERHEAD
bl machine_check_exception
@@ -1144,148 +1376,327 @@ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
b .
+/**
+ * Interrupt 0x300 - Data Storage Interrupt (DSI).
+ * This is a synchronous interrupt generated due to a data access exception,
+ * e.g., a load or store which does not have a valid page table entry with
+ * permissions. DAWR matches also fault here, as do RC updates, and minor misc
+ * errors e.g., copy/paste, AMO, certain invalid CI accesses, etc.
+ *
+ * Handling:
+ * - Hash MMU
+ * Go to do_hash_fault, which attempts to fill the HPT from an entry in the
+ * Linux page table. Hash faults can hit in kernel mode in a fairly
+ * arbitrary state (e.g., interrupts disabled, locks held) when accessing
+ * "non-bolted" regions, e.g., vmalloc space. However these should always be
+ * backed by Linux page table entries.
+ *
+ * If no entry is found the Linux page fault handler is invoked (by
+ * do_hash_fault). Linux page faults can happen in kernel mode due to user
+ * copy operations of course.
+ *
+ * KVM: The KVM HDSI handler may perform a load with MSR[DR]=1 in guest
+ * MMU context, which may cause a DSI in the host, which must go to the
+ * KVM handler. MSR[IR] is not enabled, so the real-mode handler will
+ * always be used regardless of AIL setting.
+ *
+ * - Radix MMU
+ * The hardware loads from the Linux page table directly, so a fault goes
+ * immediately to Linux page fault.
+ *
+ * Conditions like DAWR match are handled on the way in to Linux page fault.
+ */
+INT_DEFINE_BEGIN(data_access)
+ IVEC=0x300
+ IDAR=1
+ IDSISR=1
+ IKVM_REAL=1
+INT_DEFINE_END(data_access)
+
EXC_REAL_BEGIN(data_access, 0x300, 0x80)
- INT_HANDLER data_access, 0x300, ool=1, dar=1, dsisr=1, kvm=1
+ GEN_INT_ENTRY data_access, virt=0
EXC_REAL_END(data_access, 0x300, 0x80)
EXC_VIRT_BEGIN(data_access, 0x4300, 0x80)
- INT_HANDLER data_access, 0x300, virt=1, dar=1, dsisr=1
+ GEN_INT_ENTRY data_access, virt=1
EXC_VIRT_END(data_access, 0x4300, 0x80)
-INT_KVM_HANDLER data_access, 0x300, EXC_STD, PACA_EXGEN, 1
EXC_COMMON_BEGIN(data_access_common)
- /*
- * Here r13 points to the paca, r9 contains the saved CR,
- * SRR0 and SRR1 are saved in r11 and r12,
- * r9 - r13 are saved in paca->exgen.
- * EX_DAR and EX_DSISR have saved DAR/DSISR
- */
- INT_COMMON 0x300, PACA_EXGEN, 1, 1, 1, 1, 1
- ld r4,_DAR(r1)
- ld r5,_DSISR(r1)
+ GEN_COMMON data_access
+ ld r4,_DSISR(r1)
+ addi r3,r1,STACK_FRAME_OVERHEAD
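+ /*
+ * A DAWR/DABR match is flagged in DSISR; send it to do_break rather
+ * than the page fault path.
+ */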
+ andis. r0,r4,DSISR_DABRMATCH@h
+ bne- 1f
+#ifdef CONFIG_PPC_64S_HASH_MMU
BEGIN_MMU_FTR_SECTION
- ld r6,_MSR(r1)
- li r3,0x300
- b do_hash_page /* Try to handle as hpte fault */
+ bl do_hash_fault
MMU_FTR_SECTION_ELSE
- b handle_page_fault
+ bl do_page_fault
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
+#else
+ bl do_page_fault
+#endif
+ b interrupt_return_srr
+
+1: bl do_break
+ /*
+ * do_break() may have changed the NV GPRS while handling a breakpoint.
+ * If so, we need to restore them with their updated values.
+ */
+ REST_NVGPRS(r1)
+ b interrupt_return_srr
+
+/**
+ * Interrupt 0x380 - Data Segment Interrupt (DSLB).
+ * This is a synchronous interrupt in response to an MMU fault missing SLB
+ * entry for HPT, or an address outside RPT translation range.
+ *
+ * Handling:
+ * - HPT:
+ * This refills the SLB, or reports an access fault similarly to a bad page
+ * fault. When coming from user-mode, the SLB handler may access any kernel
+ * data, though it may itself take a DSLB. When coming from kernel mode,
+ * recursive faults must be avoided so access is restricted to the kernel
+ * image text/data, kernel stack, and any data allocated below
+ * ppc64_bolted_size (first segment). The kernel handler must avoid stomping
+ * on user-handler data structures.
+ *
+ * KVM: Same as 0x300, DSLB must test for KVM guest.
+ */
+INT_DEFINE_BEGIN(data_access_slb)
+ IVEC=0x380
+ IDAR=1
+ IKVM_REAL=1
+INT_DEFINE_END(data_access_slb)
EXC_REAL_BEGIN(data_access_slb, 0x380, 0x80)
- INT_HANDLER data_access_slb, 0x380, ool=1, area=PACA_EXSLB, dar=1, kvm=1
+ GEN_INT_ENTRY data_access_slb, virt=0
EXC_REAL_END(data_access_slb, 0x380, 0x80)
EXC_VIRT_BEGIN(data_access_slb, 0x4380, 0x80)
- INT_HANDLER data_access_slb, 0x380, virt=1, area=PACA_EXSLB, dar=1
+ GEN_INT_ENTRY data_access_slb, virt=1
EXC_VIRT_END(data_access_slb, 0x4380, 0x80)
-INT_KVM_HANDLER data_access_slb, 0x380, EXC_STD, PACA_EXSLB, 1
EXC_COMMON_BEGIN(data_access_slb_common)
- INT_COMMON 0x380, PACA_EXSLB, 1, 1, 0, 1, 0
- ld r4,_DAR(r1)
- addi r3,r1,STACK_FRAME_OVERHEAD
+ GEN_COMMON data_access_slb
+#ifdef CONFIG_PPC_64S_HASH_MMU
BEGIN_MMU_FTR_SECTION
/* HPT case, do SLB fault */
+ addi r3,r1,STACK_FRAME_OVERHEAD
bl do_slb_fault
cmpdi r3,0
bne- 1f
- b fast_exception_return
+ b fast_interrupt_return_srr
1: /* Error case */
MMU_FTR_SECTION_ELSE
/* Radix case, access is outside page table range */
li r3,-EFAULT
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
+#else
+ li r3,-EFAULT
+#endif
std r3,RESULT(r1)
- bl save_nvgprs
- RECONCILE_IRQ_STATE(r10, r11)
- ld r4,_DAR(r1)
- ld r5,RESULT(r1)
addi r3,r1,STACK_FRAME_OVERHEAD
- bl do_bad_slb_fault
- b ret_from_except
+ bl do_bad_segment_interrupt
+ b interrupt_return_srr
+/**
+ * Interrupt 0x400 - Instruction Storage Interrupt (ISI).
+ * This is a synchronous interrupt in response to an MMU fault due to an
+ * instruction fetch.
+ *
+ * Handling:
+ * Similar to DSI, though in response to fetch. The faulting address is found
+ * in SRR0 (rather than DAR), and status in SRR1 (rather than DSISR).
+ */
+INT_DEFINE_BEGIN(instruction_access)
+ IVEC=0x400
+ IISIDE=1
+ IDAR=1
+ IDSISR=1
+#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
+ IKVM_REAL=1
+#endif
+INT_DEFINE_END(instruction_access)
+
EXC_REAL_BEGIN(instruction_access, 0x400, 0x80)
- INT_HANDLER instruction_access, 0x400, kvm=1
+ GEN_INT_ENTRY instruction_access, virt=0
EXC_REAL_END(instruction_access, 0x400, 0x80)
EXC_VIRT_BEGIN(instruction_access, 0x4400, 0x80)
- INT_HANDLER instruction_access, 0x400, virt=1
+ GEN_INT_ENTRY instruction_access, virt=1
EXC_VIRT_END(instruction_access, 0x4400, 0x80)
-INT_KVM_HANDLER instruction_access, 0x400, EXC_STD, PACA_EXGEN, 0
EXC_COMMON_BEGIN(instruction_access_common)
- INT_COMMON 0x400, PACA_EXGEN, 1, 1, 1, 2, 2
- ld r4,_DAR(r1)
- ld r5,_DSISR(r1)
+ GEN_COMMON instruction_access
+ addi r3,r1,STACK_FRAME_OVERHEAD
+#ifdef CONFIG_PPC_64S_HASH_MMU
BEGIN_MMU_FTR_SECTION
- ld r6,_MSR(r1)
- li r3,0x400
- b do_hash_page /* Try to handle as hpte fault */
+ bl do_hash_fault
MMU_FTR_SECTION_ELSE
- b handle_page_fault
+ bl do_page_fault
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
+#else
+ bl do_page_fault
+#endif
+ b interrupt_return_srr
+
+/**
+ * Interrupt 0x480 - Instruction Segment Interrupt (ISLB).
+ * This is a synchronous interrupt in response to an MMU fault due to an
+ * instruction fetch.
+ *
+ * Handling:
+ * Similar to DSLB, though in response to fetch. The faulting address is found
+ * in SRR0 (rather than DAR).
+ */
+INT_DEFINE_BEGIN(instruction_access_slb)
+ IVEC=0x480
+ IISIDE=1
+ IDAR=1
+#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
+ IKVM_REAL=1
+#endif
+INT_DEFINE_END(instruction_access_slb)
EXC_REAL_BEGIN(instruction_access_slb, 0x480, 0x80)
- INT_HANDLER instruction_access_slb, 0x480, area=PACA_EXSLB, kvm=1
+ GEN_INT_ENTRY instruction_access_slb, virt=0
EXC_REAL_END(instruction_access_slb, 0x480, 0x80)
EXC_VIRT_BEGIN(instruction_access_slb, 0x4480, 0x80)
- INT_HANDLER instruction_access_slb, 0x480, virt=1, area=PACA_EXSLB
+ GEN_INT_ENTRY instruction_access_slb, virt=1
EXC_VIRT_END(instruction_access_slb, 0x4480, 0x80)
-INT_KVM_HANDLER instruction_access_slb, 0x480, EXC_STD, PACA_EXSLB, 0
EXC_COMMON_BEGIN(instruction_access_slb_common)
- INT_COMMON 0x480, PACA_EXSLB, 1, 1, 0, 2, 0
- ld r4,_DAR(r1)
- addi r3,r1,STACK_FRAME_OVERHEAD
+ GEN_COMMON instruction_access_slb
+#ifdef CONFIG_PPC_64S_HASH_MMU
BEGIN_MMU_FTR_SECTION
/* HPT case, do SLB fault */
+ addi r3,r1,STACK_FRAME_OVERHEAD
bl do_slb_fault
cmpdi r3,0
bne- 1f
- b fast_exception_return
+ b fast_interrupt_return_srr
1: /* Error case */
MMU_FTR_SECTION_ELSE
/* Radix case, access is outside page table range */
li r3,-EFAULT
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
+#else
+ li r3,-EFAULT
+#endif
std r3,RESULT(r1)
- bl save_nvgprs
- RECONCILE_IRQ_STATE(r10, r11)
- ld r4,_DAR(r1)
- ld r5,RESULT(r1)
addi r3,r1,STACK_FRAME_OVERHEAD
- bl do_bad_slb_fault
- b ret_from_except
+ bl do_bad_segment_interrupt
+ b interrupt_return_srr
+
+
+/**
+ * Interrupt 0x500 - External Interrupt.
+ * This is an asynchronous maskable interrupt in response to an "external
+ * exception" from the interrupt controller or hypervisor (e.g., device
+ * interrupt). It is maskable in hardware by clearing MSR[EE], and
+ * soft-maskable with IRQS_DISABLED mask (i.e., local_irq_disable()).
+ *
+ * When running in HV mode, Linux sets up the LPCR[LPES] bit such that
+ * interrupts are delivered with HSRR registers, while guests use SRRs,
+ * which requires IHSRR_IF_HVMODE.
+ *
+ * On bare metal POWER9 and later, Linux sets the LPCR[HVICE] bit such that
+ * external interrupts are delivered as Hypervisor Virtualization Interrupts
+ * rather than External Interrupts.
+ *
+ * Handling:
+ * This calls into Linux IRQ handler. NVGPRs are not saved to reduce overhead,
+ * because registers at the time of the interrupt are not so important as it is
+ * asynchronous.
+ *
+ * If soft masked, the masked handler will note the pending interrupt for
+ * replay, and clear MSR[EE] in the interrupted context.
+ *
+ * CFAR is not required because this is an asynchronous interrupt that in
+ * general won't have much bearing on the state of the CPU, with the possible
+ * exception of crash/debug IPIs, but those are generally moving to use SRESET
+ * IPIs. Unless this is an HV interrupt and KVM HV is possible, in which case
+ * it may be exiting the guest and need CFAR to be saved.
+ */
+INT_DEFINE_BEGIN(hardware_interrupt)
+ IVEC=0x500
+ IHSRR_IF_HVMODE=1
+ IMASK=IRQS_DISABLED
+ IKVM_REAL=1
+ IKVM_VIRT=1
+ ICFAR=0
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+ ICFAR_IF_HVMODE=1
+#endif
+INT_DEFINE_END(hardware_interrupt)
EXC_REAL_BEGIN(hardware_interrupt, 0x500, 0x100)
- INT_HANDLER hardware_interrupt, 0x500, hsrr=EXC_HV_OR_STD, bitmask=IRQS_DISABLED, kvm=1
+ GEN_INT_ENTRY hardware_interrupt, virt=0
EXC_REAL_END(hardware_interrupt, 0x500, 0x100)
EXC_VIRT_BEGIN(hardware_interrupt, 0x4500, 0x100)
- INT_HANDLER hardware_interrupt, 0x500, virt=1, hsrr=EXC_HV_OR_STD, bitmask=IRQS_DISABLED, kvm=1
+ GEN_INT_ENTRY hardware_interrupt, virt=1
EXC_VIRT_END(hardware_interrupt, 0x4500, 0x100)
-INT_KVM_HANDLER hardware_interrupt, 0x500, EXC_HV_OR_STD, PACA_EXGEN, 0
-EXC_COMMON_ASYNC(hardware_interrupt_common, 0x500, do_IRQ)
+EXC_COMMON_BEGIN(hardware_interrupt_common)
+ GEN_COMMON hardware_interrupt
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ bl do_IRQ
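+ /*
+ * IHSRR_IF_HVMODE: when running in HV mode externals arrive via the
+ * HSRRs, otherwise via the SRRs, so pick the matching return path.
+ */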
+ BEGIN_FTR_SECTION
+ b interrupt_return_hsrr
+ FTR_SECTION_ELSE
+ b interrupt_return_srr
+ ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
+/**
+ * Interrupt 0x600 - Alignment Interrupt
+ * This is a synchronous interrupt in response to data alignment fault.
+ */
+INT_DEFINE_BEGIN(alignment)
+ IVEC=0x600
+ IDAR=1
+ IDSISR=1
+#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
+ IKVM_REAL=1
+#endif
+INT_DEFINE_END(alignment)
+
EXC_REAL_BEGIN(alignment, 0x600, 0x100)
- INT_HANDLER alignment, 0x600, dar=1, dsisr=1, kvm=1
+ GEN_INT_ENTRY alignment, virt=0
EXC_REAL_END(alignment, 0x600, 0x100)
EXC_VIRT_BEGIN(alignment, 0x4600, 0x100)
- INT_HANDLER alignment, 0x600, virt=1, dar=1, dsisr=1
+ GEN_INT_ENTRY alignment, virt=1
EXC_VIRT_END(alignment, 0x4600, 0x100)
-INT_KVM_HANDLER alignment, 0x600, EXC_STD, PACA_EXGEN, 0
EXC_COMMON_BEGIN(alignment_common)
- INT_COMMON 0x600, PACA_EXGEN, 1, 1, 1, 1, 1
- bl save_nvgprs
+ GEN_COMMON alignment
addi r3,r1,STACK_FRAME_OVERHEAD
bl alignment_exception
- b ret_from_except
+ REST_NVGPRS(r1) /* instruction emulation may change GPRs */
+ b interrupt_return_srr
+
+/**
+ * Interrupt 0x700 - Program Interrupt (program check).
+ * This is a synchronous interrupt in response to various instruction faults:
+ * traps, privilege errors, TM errors, floating point exceptions.
+ *
+ * Handling:
+ * This interrupt may use the "emergency stack" in some cases when being taken
+ * from kernel context, which complicates handling.
+ */
+INT_DEFINE_BEGIN(program_check)
+ IVEC=0x700
+#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
+ IKVM_REAL=1
+#endif
+INT_DEFINE_END(program_check)
EXC_REAL_BEGIN(program_check, 0x700, 0x100)
- INT_HANDLER program_check, 0x700, kvm=1
+ EARLY_BOOT_FIXUP
+ GEN_INT_ENTRY program_check, virt=0
EXC_REAL_END(program_check, 0x700, 0x100)
EXC_VIRT_BEGIN(program_check, 0x4700, 0x100)
- INT_HANDLER program_check, 0x700, virt=1
+ GEN_INT_ENTRY program_check, virt=1
EXC_VIRT_END(program_check, 0x4700, 0x100)
-INT_KVM_HANDLER program_check, 0x700, EXC_STD, PACA_EXGEN, 0
EXC_COMMON_BEGIN(program_check_common)
+ __GEN_COMMON_ENTRY program_check
+
/*
* It's possible to receive a TM Bad Thing type program check with
* userspace register values (in particular r1), but with SRR1 reporting
@@ -1296,43 +1707,61 @@ EXC_COMMON_BEGIN(program_check_common)
*/
andi. r10,r12,MSR_PR
- bne 2f /* If userspace, go normal path */
+ bne .Lnormal_stack /* If userspace, go normal path */
andis. r10,r12,(SRR1_PROGTM)@h
- bne 1f /* If TM, emergency */
+ bne .Lemergency_stack /* If TM, emergency */
cmpdi r1,-INT_FRAME_SIZE /* check if r1 is in userspace */
- blt 2f /* normal path if not */
+ blt .Lnormal_stack /* normal path if not */
/* Use the emergency stack */
-1: andi. r10,r12,MSR_PR /* Set CR0 correctly for label */
+.Lemergency_stack:
+ andi. r10,r12,MSR_PR /* Set CR0 correctly for label */
/* 3 in EXCEPTION_PROLOG_COMMON */
mr r10,r1 /* Save r1 */
ld r1,PACAEMERGSP(r13) /* Use emergency stack */
subi r1,r1,INT_FRAME_SIZE /* alloc stack frame */
- INT_COMMON 0x700, PACA_EXGEN, 0, 1, 1, 0, 0
- b 3f
-2:
- INT_COMMON 0x700, PACA_EXGEN, 1, 1, 1, 0, 0
-3:
- bl save_nvgprs
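+ /*
+ * r1 already points at the emergency stack frame, so expand the
+ * common body with ISTACK=0 to avoid switching stacks again.
+ */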
+ __ISTACK(program_check)=0
+ __GEN_COMMON_BODY program_check
+ b .Ldo_program_check
+
+.Lnormal_stack:
+ __ISTACK(program_check)=1
+ __GEN_COMMON_BODY program_check
+
+.Ldo_program_check:
addi r3,r1,STACK_FRAME_OVERHEAD
bl program_check_exception
- b ret_from_except
+ REST_NVGPRS(r1) /* instruction emulation may change GPRs */
+ b interrupt_return_srr
+/*
+ * Interrupt 0x800 - Floating-Point Unavailable Interrupt.
+ * This is a synchronous interrupt in response to executing an fp instruction
+ * with MSR[FP]=0.
+ *
+ * Handling:
+ * This will load FP registers and enable the FP bit if coming from userspace,
+ * otherwise report a bad kernel use of FP.
+ */
+INT_DEFINE_BEGIN(fp_unavailable)
+ IVEC=0x800
+#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
+ IKVM_REAL=1
+#endif
+INT_DEFINE_END(fp_unavailable)
+
EXC_REAL_BEGIN(fp_unavailable, 0x800, 0x100)
- INT_HANDLER fp_unavailable, 0x800, kvm=1
+ GEN_INT_ENTRY fp_unavailable, virt=0
EXC_REAL_END(fp_unavailable, 0x800, 0x100)
EXC_VIRT_BEGIN(fp_unavailable, 0x4800, 0x100)
- INT_HANDLER fp_unavailable, 0x800, virt=1
+ GEN_INT_ENTRY fp_unavailable, virt=1
EXC_VIRT_END(fp_unavailable, 0x4800, 0x100)
-INT_KVM_HANDLER fp_unavailable, 0x800, EXC_STD, PACA_EXGEN, 0
EXC_COMMON_BEGIN(fp_unavailable_common)
- INT_COMMON 0x800, PACA_EXGEN, 1, 1, 0, 0, 0
+ GEN_COMMON fp_unavailable
bne 1f /* if from user, just load it up */
- bl save_nvgprs
- RECONCILE_IRQ_STATE(r10, r11)
addi r3,r1,STACK_FRAME_OVERHEAD
bl kernel_fp_unavailable_exception
0: trap
@@ -1348,64 +1777,164 @@ BEGIN_FTR_SECTION
END_FTR_SECTION_IFSET(CPU_FTR_TM)
#endif
bl load_up_fpu
- b fast_exception_return
+ b fast_interrupt_return_srr
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2: /* User process was in a transaction */
- bl save_nvgprs
- RECONCILE_IRQ_STATE(r10, r11)
addi r3,r1,STACK_FRAME_OVERHEAD
bl fp_unavailable_tm
- b ret_from_except
+ b interrupt_return_srr
#endif
+/**
+ * Interrupt 0x900 - Decrementer Interrupt.
+ * This is an asynchronous interrupt in response to a decrementer exception
+ * (e.g., DEC has wrapped below zero). It is maskable in hardware by clearing
+ * MSR[EE], and soft-maskable with IRQS_DISABLED mask (i.e.,
+ * local_irq_disable()).
+ *
+ * Handling:
+ * This calls into Linux timer handler. NVGPRs are not saved (see 0x500).
+ *
+ * If soft masked, the masked handler will note the pending interrupt for
+ * replay, and bump the decrementer to a high value, leaving MSR[EE] enabled
+ * in the interrupted context.
+ * If PPC_WATCHDOG is configured, the soft masked handler will actually set
+ * things back up to run soft_nmi_interrupt as a regular interrupt handler
+ * on the emergency stack.
+ *
+ * CFAR is not required because this is asynchronous (see hardware_interrupt).
+ * A watchdog interrupt may like to have CFAR, but usually the interesting
+ * branch is long gone by that point (e.g., infinite loop).
+ */
+INT_DEFINE_BEGIN(decrementer)
+ IVEC=0x900
+ IMASK=IRQS_DISABLED
+#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
+ IKVM_REAL=1
+#endif
+ ICFAR=0
+INT_DEFINE_END(decrementer)
+
EXC_REAL_BEGIN(decrementer, 0x900, 0x80)
- INT_HANDLER decrementer, 0x900, ool=1, bitmask=IRQS_DISABLED, kvm=1
+ GEN_INT_ENTRY decrementer, virt=0
EXC_REAL_END(decrementer, 0x900, 0x80)
EXC_VIRT_BEGIN(decrementer, 0x4900, 0x80)
- INT_HANDLER decrementer, 0x900, virt=1, bitmask=IRQS_DISABLED
+ GEN_INT_ENTRY decrementer, virt=1
EXC_VIRT_END(decrementer, 0x4900, 0x80)
-INT_KVM_HANDLER decrementer, 0x900, EXC_STD, PACA_EXGEN, 0
-EXC_COMMON_ASYNC(decrementer_common, 0x900, timer_interrupt)
+EXC_COMMON_BEGIN(decrementer_common)
+ GEN_COMMON decrementer
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ bl timer_interrupt
+ b interrupt_return_srr
+
+/**
+ * Interrupt 0x980 - Hypervisor Decrementer Interrupt.
+ * This is an asynchronous interrupt, similar to 0x900 but for the HDEC
+ * register.
+ *
+ * Handling:
+ * Linux does not use this outside KVM where it's used to keep a host timer
+ * while the guest is given control of DEC. It should normally be caught by
+ * the KVM test and routed there.
+ */
+INT_DEFINE_BEGIN(hdecrementer)
+ IVEC=0x980
+ IHSRR=1
+ ISTACK=0
+ IKVM_REAL=1
+ IKVM_VIRT=1
+INT_DEFINE_END(hdecrementer)
EXC_REAL_BEGIN(hdecrementer, 0x980, 0x80)
- INT_HANDLER hdecrementer, 0x980, hsrr=EXC_HV, kvm=1
+ GEN_INT_ENTRY hdecrementer, virt=0
EXC_REAL_END(hdecrementer, 0x980, 0x80)
EXC_VIRT_BEGIN(hdecrementer, 0x4980, 0x80)
- INT_HANDLER hdecrementer, 0x980, virt=1, hsrr=EXC_HV, kvm=1
+ GEN_INT_ENTRY hdecrementer, virt=1
EXC_VIRT_END(hdecrementer, 0x4980, 0x80)
-INT_KVM_HANDLER hdecrementer, 0x980, EXC_HV, PACA_EXGEN, 0
-EXC_COMMON(hdecrementer_common, 0x980, hdec_interrupt)
+EXC_COMMON_BEGIN(hdecrementer_common)
+ __GEN_COMMON_ENTRY hdecrementer
+ /*
+ * Hypervisor decrementer interrupts not caught by the KVM test
+ * shouldn't occur but are sometimes left pending on exit from a KVM
+ * guest. We don't need to do anything to clear them, as they are
+ * edge-triggered.
+ *
+ * Be careful to avoid touching the kernel stack.
+ */
+ li r10,0
+ stb r10,PACAHSRR_VALID(r13)
+ ld r10,PACA_EXGEN+EX_CTR(r13)
+ mtctr r10
+ mtcrf 0x80,r9
+ ld r9,PACA_EXGEN+EX_R9(r13)
+ ld r10,PACA_EXGEN+EX_R10(r13)
+ ld r11,PACA_EXGEN+EX_R11(r13)
+ ld r12,PACA_EXGEN+EX_R12(r13)
+ ld r13,PACA_EXGEN+EX_R13(r13)
+ HRFI_TO_KERNEL
+/**
+ * Interrupt 0xa00 - Directed Privileged Doorbell Interrupt.
+ * This is an asynchronous interrupt in response to a msgsndp doorbell.
+ * It is maskable in hardware by clearing MSR[EE], and soft-maskable with
+ * IRQS_DISABLED mask (i.e., local_irq_disable()).
+ *
+ * Handling:
+ * Guests may use this for IPIs between threads in a core if the
+ * hypervisor supports it. NVGPRS are not saved (see 0x500).
+ *
+ * If soft masked, the masked handler will note the pending interrupt for
+ * replay, leaving MSR[EE] enabled in the interrupted context because the
+ * doorbells are edge triggered.
+ *
+ * CFAR is not required, similarly to hardware_interrupt.
+ */
+INT_DEFINE_BEGIN(doorbell_super)
+ IVEC=0xa00
+ IMASK=IRQS_DISABLED
+#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
+ IKVM_REAL=1
+#endif
+ ICFAR=0
+INT_DEFINE_END(doorbell_super)
+
EXC_REAL_BEGIN(doorbell_super, 0xa00, 0x100)
- INT_HANDLER doorbell_super, 0xa00, bitmask=IRQS_DISABLED, kvm=1
+ GEN_INT_ENTRY doorbell_super, virt=0
EXC_REAL_END(doorbell_super, 0xa00, 0x100)
EXC_VIRT_BEGIN(doorbell_super, 0x4a00, 0x100)
- INT_HANDLER doorbell_super, 0xa00, virt=1, bitmask=IRQS_DISABLED
+ GEN_INT_ENTRY doorbell_super, virt=1
EXC_VIRT_END(doorbell_super, 0x4a00, 0x100)
-INT_KVM_HANDLER doorbell_super, 0xa00, EXC_STD, PACA_EXGEN, 0
+EXC_COMMON_BEGIN(doorbell_super_common)
+ GEN_COMMON doorbell_super
+ addi r3,r1,STACK_FRAME_OVERHEAD
#ifdef CONFIG_PPC_DOORBELL
-EXC_COMMON_ASYNC(doorbell_super_common, 0xa00, doorbell_exception)
+ bl doorbell_exception
#else
-EXC_COMMON_ASYNC(doorbell_super_common, 0xa00, unknown_exception)
+ bl unknown_async_exception
#endif
+ b interrupt_return_srr
EXC_REAL_NONE(0xb00, 0x100)
EXC_VIRT_NONE(0x4b00, 0x100)
-/*
- * system call / hypercall (0xc00, 0x4c00)
- *
- * The system call exception is invoked with "sc 0" and does not alter HV bit.
- *
- * The hypercall is invoked with "sc 1" and sets HV=1.
+/**
+ * Interrupt 0xc00 - System Call Interrupt (syscall, hcall).
+ * This is a synchronous interrupt invoked with the "sc" instruction. The
+ * system call is invoked with "sc 0" and does not alter the HV bit, so it
+ * is directed to the currently running OS. The hypercall is invoked with
+ * "sc 1" and it sets HV=1, so it elevates to hypervisor.
*
* In HPT, sc 1 always goes to 0xc00 real mode. In RADIX, sc 1 can go to
* 0x4c00 virtual mode.
*
+ * Handling:
+ * If the KVM test fires then it was due to a hypercall and is accordingly
+ * routed to KVM. Otherwise this executes a normal Linux system call.
+ *
* Call convention:
*
* syscall and hypercalls register conventions are documented in
@@ -1417,6 +1946,13 @@ EXC_VIRT_NONE(0x4b00, 0x100)
* without saving, though xer is not a good idea to use, as hardware may
* interpret some bits so it may be costly to change them.
*/
+INT_DEFINE_BEGIN(system_call)
+ IVEC=0xc00
+ IKVM_REAL=1
+ IKVM_VIRT=1
+ ICFAR=0
+INT_DEFINE_END(system_call)
+
.macro SYSTEM_CALL virt
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
/*
@@ -1431,7 +1967,7 @@ EXC_VIRT_NONE(0x4b00, 0x100)
GET_PACA(r13)
std r10,PACA_EXGEN+EX_R10(r13)
INTERRUPT_TO_KERNEL
- KVMTEST system_call EXC_STD 0xc00 /* uses r10, branch to system_call_kvm */
+ KVMTEST system_call kvm_hcall /* uses r10, branch to kvm_hcall */
mfctr r9
#else
mr r9,r13
@@ -1453,17 +1989,12 @@ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)
HMT_MEDIUM
.if ! \virt
- __LOAD_HANDLER(r10, system_call_common)
- mtspr SPRN_SRR0,r10
- ld r10,PACAKMSR(r13)
- mtspr SPRN_SRR1,r10
- RFI_TO_KERNEL
- b . /* prevent speculative execution */
+ __LOAD_HANDLER(r10, system_call_common_real, real_vectors)
+ mtctr r10
+ bctr
.else
- li r10,MSR_RI
- mtmsrd r10,1 /* Set RI (EE=0) */
#ifdef CONFIG_RELOCATABLE
- __LOAD_HANDLER(r10, system_call_common)
+ __LOAD_HANDLER(r10, system_call_common, virt_vectors)
mtctr r10
bctr
#else
@@ -1490,108 +2021,213 @@ EXC_VIRT_BEGIN(system_call, 0x4c00, 0x100)
EXC_VIRT_END(system_call, 0x4c00, 0x100)
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
- /*
- * This is a hcall, so register convention is as above, with these
- * differences:
- * r13 = PACA
- * ctr = orig r13
- * orig r10 saved in PACA
- */
-TRAMP_KVM_BEGIN(system_call_kvm)
+TRAMP_REAL_BEGIN(kvm_hcall)
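+ /*
+ * hcall register convention at this point: r13 = PACA, CTR holds the
+ * original r13, and the original r10 was already saved to
+ * PACA_EXGEN+EX_R10 by the SYSTEM_CALL macro.
+ */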
+ std r9,PACA_EXGEN+EX_R9(r13)
+ std r11,PACA_EXGEN+EX_R11(r13)
+ std r12,PACA_EXGEN+EX_R12(r13)
+ mfcr r9
+ mfctr r10
+ std r10,PACA_EXGEN+EX_R13(r13)
+ li r10,0
+ std r10,PACA_EXGEN+EX_CFAR(r13)
+ std r10,PACA_EXGEN+EX_CTR(r13)
/*
* Save the PPR (on systems that support it) before changing to
* HMT_MEDIUM. That allows the KVM code to save that value into the
* guest state (it is the guest's PPR value).
*/
- OPT_GET_SPR(r10, SPRN_PPR, CPU_FTR_HAS_PPR)
+BEGIN_FTR_SECTION
+ mfspr r10,SPRN_PPR
+ std r10,PACA_EXGEN+EX_PPR(r13)
+END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
+
HMT_MEDIUM
- OPT_SAVE_REG_TO_PACA(PACA_EXGEN+EX_PPR, r10, CPU_FTR_HAS_PPR)
- mfctr r10
- SET_SCRATCH0(r10)
- std r9,PACA_EXGEN+EX_R9(r13)
- mfcr r9
- KVM_HANDLER 0xc00, EXC_STD, PACA_EXGEN, 0
+
+#ifdef CONFIG_RELOCATABLE
+ /*
+ * Requires __LOAD_FAR_HANDLER because kvmppc_hcall lives
+ * outside the head section.
+ */
+ __LOAD_FAR_HANDLER(r10, kvmppc_hcall, real_trampolines)
+ mtctr r10
+ bctr
+#else
+ b kvmppc_hcall
+#endif
#endif
+/**
+ * Interrupt 0xd00 - Trace Interrupt.
+ * This is a synchronous interrupt in response to instruction step or
+ * breakpoint faults.
+ */
+INT_DEFINE_BEGIN(single_step)
+ IVEC=0xd00
+#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
+ IKVM_REAL=1
+#endif
+INT_DEFINE_END(single_step)
EXC_REAL_BEGIN(single_step, 0xd00, 0x100)
- INT_HANDLER single_step, 0xd00, kvm=1
+ GEN_INT_ENTRY single_step, virt=0
EXC_REAL_END(single_step, 0xd00, 0x100)
EXC_VIRT_BEGIN(single_step, 0x4d00, 0x100)
- INT_HANDLER single_step, 0xd00, virt=1
+ GEN_INT_ENTRY single_step, virt=1
EXC_VIRT_END(single_step, 0x4d00, 0x100)
-INT_KVM_HANDLER single_step, 0xd00, EXC_STD, PACA_EXGEN, 0
-EXC_COMMON(single_step_common, 0xd00, single_step_exception)
+EXC_COMMON_BEGIN(single_step_common)
+ GEN_COMMON single_step
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ bl single_step_exception
+ b interrupt_return_srr
+
+/**
+ * Interrupt 0xe00 - Hypervisor Data Storage Interrupt (HDSI).
+ * This is a synchronous interrupt in response to an MMU fault caused by a
+ * guest data access.
+ *
+ * Handling:
+ * This should always get routed to KVM. In radix MMU mode, this is caused
+ * by a guest nested radix access that can't be performed due to the
+ * partition scope page table. In hash mode, this can be caused by guests
+ * running with translation disabled (virtual real mode) or with VPM enabled.
+ * KVM will update the page table structures or disallow the access.
+ */
+INT_DEFINE_BEGIN(h_data_storage)
+ IVEC=0xe00
+ IHSRR=1
+ IDAR=1
+ IDSISR=1
+ IKVM_REAL=1
+ IKVM_VIRT=1
+INT_DEFINE_END(h_data_storage)
EXC_REAL_BEGIN(h_data_storage, 0xe00, 0x20)
- INT_HANDLER h_data_storage, 0xe00, ool=1, hsrr=EXC_HV, dar=1, dsisr=1, kvm=1
+ GEN_INT_ENTRY h_data_storage, virt=0, ool=1
EXC_REAL_END(h_data_storage, 0xe00, 0x20)
EXC_VIRT_BEGIN(h_data_storage, 0x4e00, 0x20)
- INT_HANDLER h_data_storage, 0xe00, ool=1, virt=1, hsrr=EXC_HV, dar=1, dsisr=1, kvm=1
+ GEN_INT_ENTRY h_data_storage, virt=1, ool=1
EXC_VIRT_END(h_data_storage, 0x4e00, 0x20)
-INT_KVM_HANDLER h_data_storage, 0xe00, EXC_HV, PACA_EXGEN, 1
EXC_COMMON_BEGIN(h_data_storage_common)
- INT_COMMON 0xe00, PACA_EXGEN, 1, 1, 1, 1, 1
- bl save_nvgprs
+ GEN_COMMON h_data_storage
addi r3,r1,STACK_FRAME_OVERHEAD
BEGIN_MMU_FTR_SECTION
- ld r4,_DAR(r1)
- li r5,SIGSEGV
- bl bad_page_fault
+ bl do_bad_page_fault_segv
MMU_FTR_SECTION_ELSE
bl unknown_exception
ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_TYPE_RADIX)
- b ret_from_except
+ b interrupt_return_hsrr
+
+/**
+ * Interrupt 0xe20 - Hypervisor Instruction Storage Interrupt (HISI).
+ * This is a synchronous interrupt in response to an MMU fault caused by a
+ * guest instruction fetch, similar to HDSI.
+ */
+INT_DEFINE_BEGIN(h_instr_storage)
+ IVEC=0xe20
+ IHSRR=1
+ IKVM_REAL=1
+ IKVM_VIRT=1
+INT_DEFINE_END(h_instr_storage)
EXC_REAL_BEGIN(h_instr_storage, 0xe20, 0x20)
- INT_HANDLER h_instr_storage, 0xe20, ool=1, hsrr=EXC_HV, kvm=1
+ GEN_INT_ENTRY h_instr_storage, virt=0, ool=1
EXC_REAL_END(h_instr_storage, 0xe20, 0x20)
EXC_VIRT_BEGIN(h_instr_storage, 0x4e20, 0x20)
- INT_HANDLER h_instr_storage, 0xe20, ool=1, virt=1, hsrr=EXC_HV, kvm=1
+ GEN_INT_ENTRY h_instr_storage, virt=1, ool=1
EXC_VIRT_END(h_instr_storage, 0x4e20, 0x20)
-INT_KVM_HANDLER h_instr_storage, 0xe20, EXC_HV, PACA_EXGEN, 0
-EXC_COMMON(h_instr_storage_common, 0xe20, unknown_exception)
+EXC_COMMON_BEGIN(h_instr_storage_common)
+ GEN_COMMON h_instr_storage
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ bl unknown_exception
+ b interrupt_return_hsrr
+
+/**
+ * Interrupt 0xe40 - Hypervisor Emulation Assistance Interrupt.
+ */
+INT_DEFINE_BEGIN(emulation_assist)
+ IVEC=0xe40
+ IHSRR=1
+ IKVM_REAL=1
+ IKVM_VIRT=1
+INT_DEFINE_END(emulation_assist)
EXC_REAL_BEGIN(emulation_assist, 0xe40, 0x20)
- INT_HANDLER emulation_assist, 0xe40, ool=1, hsrr=EXC_HV, kvm=1
+ GEN_INT_ENTRY emulation_assist, virt=0, ool=1
EXC_REAL_END(emulation_assist, 0xe40, 0x20)
EXC_VIRT_BEGIN(emulation_assist, 0x4e40, 0x20)
- INT_HANDLER emulation_assist, 0xe40, ool=1, virt=1, hsrr=EXC_HV, kvm=1
+ GEN_INT_ENTRY emulation_assist, virt=1, ool=1
EXC_VIRT_END(emulation_assist, 0x4e40, 0x20)
-INT_KVM_HANDLER emulation_assist, 0xe40, EXC_HV, PACA_EXGEN, 0
-EXC_COMMON(emulation_assist_common, 0xe40, emulation_assist_interrupt)
+EXC_COMMON_BEGIN(emulation_assist_common)
+ GEN_COMMON emulation_assist
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ bl emulation_assist_interrupt
+ REST_NVGPRS(r1) /* instruction emulation may change GPRs */
+ b interrupt_return_hsrr
-/*
- * hmi_exception trampoline is a special case. It jumps to hmi_exception_early
- * first, and then eventaully from there to the trampoline to get into virtual
- * mode.
+/**
+ * Interrupt 0xe60 - Hypervisor Maintenance Interrupt (HMI).
+ * This is an asynchronous interrupt caused by a Hypervisor Maintenance
+ * Exception. It is always taken in real mode but uses HSRR registers
+ * unlike SRESET and MCE.
+ *
+ * It is maskable in hardware by clearing MSR[EE], and partially soft-maskable
+ * with IRQS_DISABLED mask (i.e., local_irq_disable()).
+ *
+ * Handling:
+ * This is a special case: it is handled similarly to machine checks, with an
+ * initial real mode handler that is not soft-masked, which attempts to fix the
+ * problem. Then a regular handler which is soft-maskable and reports the
+ * problem.
+ *
+ * The emergency stack is used for the early real mode handler.
+ *
+ * XXX: unclear why MCE and HMI schemes could not be made common, e.g.,
+ * either use soft-masking for the MCE, or use irq_work for the HMI.
+ *
+ * KVM:
+ * Unlike MCE, this calls into KVM without calling the real mode handler
+ * first.
*/
+INT_DEFINE_BEGIN(hmi_exception_early)
+ IVEC=0xe60
+ IHSRR=1
+ IREALMODE_COMMON=1
+ ISTACK=0
+ IKUAP=0 /* We don't touch AMR here, we never go to virtual mode */
+ IKVM_REAL=1
+INT_DEFINE_END(hmi_exception_early)
+
+INT_DEFINE_BEGIN(hmi_exception)
+ IVEC=0xe60
+ IHSRR=1
+ IMASK=IRQS_DISABLED
+ IKVM_REAL=1
+INT_DEFINE_END(hmi_exception)
+
EXC_REAL_BEGIN(hmi_exception, 0xe60, 0x20)
- INT_HANDLER hmi_exception, 0xe60, ool=1, early=1, hsrr=EXC_HV, ri=0, kvm=1
+ GEN_INT_ENTRY hmi_exception_early, virt=0, ool=1
EXC_REAL_END(hmi_exception, 0xe60, 0x20)
EXC_VIRT_NONE(0x4e60, 0x20)
-INT_KVM_HANDLER hmi_exception, 0xe60, EXC_HV, PACA_EXGEN, 0
+
EXC_COMMON_BEGIN(hmi_exception_early_common)
- mtctr r10 /* Restore ctr */
- mfspr r11,SPRN_HSRR0 /* Save HSRR0 */
- mfspr r12,SPRN_HSRR1 /* Save HSRR1 */
+ __GEN_REALMODE_COMMON_ENTRY hmi_exception_early
+
mr r10,r1 /* Save r1 */
ld r1,PACAEMERGSP(r13) /* Use emergency stack for realmode */
subi r1,r1,INT_FRAME_SIZE /* alloc stack frame */
- /* We don't touch AMR here, we never go to virtual mode */
- INT_COMMON 0xe60, PACA_EXGEN, 0, 0, 0, 0, 0
+ __GEN_COMMON_BODY hmi_exception_early
addi r3,r1,STACK_FRAME_OVERHEAD
bl hmi_exception_realmode
cmpdi cr0,r3,0
bne 1f
- EXCEPTION_RESTORE_REGS EXC_HV
+ EXCEPTION_RESTORE_REGS hsrr=1
HRFI_TO_USER_OR_KERNEL
1:
@@ -1599,41 +2235,84 @@ EXC_COMMON_BEGIN(hmi_exception_early_common)
* Go to virtual mode and pull the HMI event information from
* firmware.
*/
- EXCEPTION_RESTORE_REGS EXC_HV
- INT_HANDLER hmi_exception, 0xe60, hsrr=EXC_HV, bitmask=IRQS_DISABLED, kvm=1
+ EXCEPTION_RESTORE_REGS hsrr=1
+ GEN_INT_ENTRY hmi_exception, virt=0
EXC_COMMON_BEGIN(hmi_exception_common)
- INT_COMMON 0xe60, PACA_EXGEN, 1, 1, 1, 0, 0
- FINISH_NAP
- RUNLATCH_ON
- bl save_nvgprs
+ GEN_COMMON hmi_exception
addi r3,r1,STACK_FRAME_OVERHEAD
bl handle_hmi_exception
- b ret_from_except
+ b interrupt_return_hsrr
+/**
+ * Interrupt 0xe80 - Directed Hypervisor Doorbell Interrupt.
+ * This is an asynchronous interrupt in response to a msgsnd doorbell.
+ * Similar to the 0xa00 doorbell but for host rather than guest.
+ *
+ * CFAR is not required (similar to doorbell_interrupt), unless KVM HV
+ * is enabled, in which case it may be a guest exit. Most PowerNV kernels
+ * include KVM support so it would be nice if this could be dynamically
+ * patched out if KVM was not currently running any guests.
+ */
+INT_DEFINE_BEGIN(h_doorbell)
+ IVEC=0xe80
+ IHSRR=1
+ IMASK=IRQS_DISABLED
+ IKVM_REAL=1
+ IKVM_VIRT=1
+#ifndef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+ ICFAR=0
+#endif
+INT_DEFINE_END(h_doorbell)
+
EXC_REAL_BEGIN(h_doorbell, 0xe80, 0x20)
- INT_HANDLER h_doorbell, 0xe80, ool=1, hsrr=EXC_HV, bitmask=IRQS_DISABLED, kvm=1
+ GEN_INT_ENTRY h_doorbell, virt=0, ool=1
EXC_REAL_END(h_doorbell, 0xe80, 0x20)
EXC_VIRT_BEGIN(h_doorbell, 0x4e80, 0x20)
- INT_HANDLER h_doorbell, 0xe80, ool=1, virt=1, hsrr=EXC_HV, bitmask=IRQS_DISABLED, kvm=1
+ GEN_INT_ENTRY h_doorbell, virt=1, ool=1
EXC_VIRT_END(h_doorbell, 0x4e80, 0x20)
-INT_KVM_HANDLER h_doorbell, 0xe80, EXC_HV, PACA_EXGEN, 0
+EXC_COMMON_BEGIN(h_doorbell_common)
+ GEN_COMMON h_doorbell
+ addi r3,r1,STACK_FRAME_OVERHEAD
#ifdef CONFIG_PPC_DOORBELL
-EXC_COMMON_ASYNC(h_doorbell_common, 0xe80, doorbell_exception)
+ bl doorbell_exception
#else
-EXC_COMMON_ASYNC(h_doorbell_common, 0xe80, unknown_exception)
+ bl unknown_async_exception
#endif
+ b interrupt_return_hsrr
+/**
+ * Interrupt 0xea0 - Hypervisor Virtualization Interrupt.
+ * This is an asynchronous interrupt in response to an "external exception".
+ * Similar to 0x500 but for host only.
+ *
+ * Like h_doorbell, CFAR is only required for KVM HV because this can be
+ * a guest exit.
+ */
+INT_DEFINE_BEGIN(h_virt_irq)
+ IVEC=0xea0
+ IHSRR=1
+ IMASK=IRQS_DISABLED
+ IKVM_REAL=1
+ IKVM_VIRT=1
+#ifndef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+ ICFAR=0
+#endif
+INT_DEFINE_END(h_virt_irq)
+
EXC_REAL_BEGIN(h_virt_irq, 0xea0, 0x20)
- INT_HANDLER h_virt_irq, 0xea0, ool=1, hsrr=EXC_HV, bitmask=IRQS_DISABLED, kvm=1
+ GEN_INT_ENTRY h_virt_irq, virt=0, ool=1
EXC_REAL_END(h_virt_irq, 0xea0, 0x20)
EXC_VIRT_BEGIN(h_virt_irq, 0x4ea0, 0x20)
- INT_HANDLER h_virt_irq, 0xea0, ool=1, virt=1, hsrr=EXC_HV, bitmask=IRQS_DISABLED, kvm=1
+ GEN_INT_ENTRY h_virt_irq, virt=1, ool=1
EXC_VIRT_END(h_virt_irq, 0x4ea0, 0x20)
-INT_KVM_HANDLER h_virt_irq, 0xea0, EXC_HV, PACA_EXGEN, 0
-EXC_COMMON_ASYNC(h_virt_irq_common, 0xea0, do_IRQ)
+EXC_COMMON_BEGIN(h_virt_irq_common)
+ GEN_COMMON h_virt_irq
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ bl do_IRQ
+ b interrupt_return_hsrr
EXC_REAL_NONE(0xec0, 0x20)
@@ -1642,25 +2321,79 @@ EXC_REAL_NONE(0xee0, 0x20)
EXC_VIRT_NONE(0x4ee0, 0x20)
+/*
+ * Interrupt 0xf00 - Performance Monitor Interrupt (PMI, PMU).
+ * This is an asynchronous interrupt in response to a PMU exception.
+ * It is maskable in hardware by clearing MSR[EE], and soft-maskable with
+ * IRQS_PMI_DISABLED mask (NOTE: NOT local_irq_disable()).
+ *
+ * Handling:
+ * This calls into the perf subsystem.
+ *
+ * Like the watchdog soft-nmi, it appears as an NMI interrupt to Linux, in that it
+ * runs under local_irq_disable. However it may be soft-masked in
+ * powerpc-specific code.
+ *
+ * If soft masked, the masked handler will note the pending interrupt for
+ * replay, and clear MSR[EE] in the interrupted context.
+ *
+ * CFAR is not used by perf interrupts so not required.
+ */
+INT_DEFINE_BEGIN(performance_monitor)
+ IVEC=0xf00
+ IMASK=IRQS_PMI_DISABLED
+#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
+ IKVM_REAL=1
+#endif
+ ICFAR=0
+INT_DEFINE_END(performance_monitor)
+
EXC_REAL_BEGIN(performance_monitor, 0xf00, 0x20)
- INT_HANDLER performance_monitor, 0xf00, ool=1, bitmask=IRQS_PMI_DISABLED, kvm=1
+ GEN_INT_ENTRY performance_monitor, virt=0, ool=1
EXC_REAL_END(performance_monitor, 0xf00, 0x20)
EXC_VIRT_BEGIN(performance_monitor, 0x4f00, 0x20)
- INT_HANDLER performance_monitor, 0xf00, ool=1, virt=1, bitmask=IRQS_PMI_DISABLED
+ GEN_INT_ENTRY performance_monitor, virt=1, ool=1
EXC_VIRT_END(performance_monitor, 0x4f00, 0x20)
-INT_KVM_HANDLER performance_monitor, 0xf00, EXC_STD, PACA_EXGEN, 0
-EXC_COMMON_ASYNC(performance_monitor_common, 0xf00, performance_monitor_exception)
+EXC_COMMON_BEGIN(performance_monitor_common)
+ GEN_COMMON performance_monitor
+ addi r3,r1,STACK_FRAME_OVERHEAD
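+ /*
+ * If irqs were soft-enabled when the PMI hit, handle it as a regular
+ * async interrupt. Otherwise treat it as an NMI: call the NMI variant
+ * and return directly via EXCEPTION_RESTORE_REGS/RFI rather than
+ * through interrupt_return.
+ */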
+ lbz r4,PACAIRQSOFTMASK(r13)
+ cmpdi r4,IRQS_ENABLED
+ bne 1f
+ bl performance_monitor_exception_async
+ b interrupt_return_srr
+1:
+ bl performance_monitor_exception_nmi
+ /* Clear MSR_RI before setting SRR0 and SRR1. */
+ li r9,0
+ mtmsrd r9,1
+
+ kuap_kernel_restore r9, r10
+
+ EXCEPTION_RESTORE_REGS hsrr=0
+ RFI_TO_KERNEL
+/**
+ * Interrupt 0xf20 - Vector Unavailable Interrupt.
+ * This is a synchronous interrupt in response to
+ * executing a vector (or altivec) instruction with MSR[VEC]=0.
+ * Similar to FP unavailable.
+ */
+INT_DEFINE_BEGIN(altivec_unavailable)
+ IVEC=0xf20
+#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
+ IKVM_REAL=1
+#endif
+INT_DEFINE_END(altivec_unavailable)
EXC_REAL_BEGIN(altivec_unavailable, 0xf20, 0x20)
- INT_HANDLER altivec_unavailable, 0xf20, ool=1, kvm=1
+ GEN_INT_ENTRY altivec_unavailable, virt=0, ool=1
EXC_REAL_END(altivec_unavailable, 0xf20, 0x20)
EXC_VIRT_BEGIN(altivec_unavailable, 0x4f20, 0x20)
- INT_HANDLER altivec_unavailable, 0xf20, ool=1, virt=1
+ GEN_INT_ENTRY altivec_unavailable, virt=1, ool=1
EXC_VIRT_END(altivec_unavailable, 0x4f20, 0x20)
-INT_KVM_HANDLER altivec_unavailable, 0xf20, EXC_STD, PACA_EXGEN, 0
EXC_COMMON_BEGIN(altivec_unavailable_common)
- INT_COMMON 0xf20, PACA_EXGEN, 1, 1, 0, 0, 0
+ GEN_COMMON altivec_unavailable
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
beq 1f
@@ -1674,34 +2407,42 @@ BEGIN_FTR_SECTION
END_FTR_SECTION_NESTED(CPU_FTR_TM, CPU_FTR_TM, 69)
#endif
bl load_up_altivec
- b fast_exception_return
+ b fast_interrupt_return_srr
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2: /* User process was in a transaction */
- bl save_nvgprs
- RECONCILE_IRQ_STATE(r10, r11)
addi r3,r1,STACK_FRAME_OVERHEAD
bl altivec_unavailable_tm
- b ret_from_except
+ b interrupt_return_srr
#endif
1:
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
- bl save_nvgprs
- RECONCILE_IRQ_STATE(r10, r11)
addi r3,r1,STACK_FRAME_OVERHEAD
bl altivec_unavailable_exception
- b ret_from_except
+ b interrupt_return_srr
+/**
+ * Interrupt 0xf40 - VSX Unavailable Interrupt.
+ * This is a synchronous interrupt in response to
+ * executing a VSX instruction with MSR[VSX]=0.
+ * Similar to FP unavailable.
+ */
+INT_DEFINE_BEGIN(vsx_unavailable)
+ IVEC=0xf40
+#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
+ IKVM_REAL=1
+#endif
+INT_DEFINE_END(vsx_unavailable)
+
EXC_REAL_BEGIN(vsx_unavailable, 0xf40, 0x20)
- INT_HANDLER vsx_unavailable, 0xf40, ool=1, kvm=1
+ GEN_INT_ENTRY vsx_unavailable, virt=0, ool=1
EXC_REAL_END(vsx_unavailable, 0xf40, 0x20)
EXC_VIRT_BEGIN(vsx_unavailable, 0x4f40, 0x20)
- INT_HANDLER vsx_unavailable, 0xf40, ool=1, virt=1
+ GEN_INT_ENTRY vsx_unavailable, virt=1, ool=1
EXC_VIRT_END(vsx_unavailable, 0x4f40, 0x20)
-INT_KVM_HANDLER vsx_unavailable, 0xf40, EXC_STD, PACA_EXGEN, 0
EXC_COMMON_BEGIN(vsx_unavailable_common)
- INT_COMMON 0xf40, PACA_EXGEN, 1, 1, 0, 0, 0
+ GEN_COMMON vsx_unavailable
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
beq 1f
@@ -1717,40 +2458,72 @@ BEGIN_FTR_SECTION
b load_up_vsx
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2: /* User process was in a transaction */
- bl save_nvgprs
- RECONCILE_IRQ_STATE(r10, r11)
addi r3,r1,STACK_FRAME_OVERHEAD
bl vsx_unavailable_tm
- b ret_from_except
+ b interrupt_return_srr
#endif
1:
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
- bl save_nvgprs
- RECONCILE_IRQ_STATE(r10, r11)
addi r3,r1,STACK_FRAME_OVERHEAD
bl vsx_unavailable_exception
- b ret_from_except
+ b interrupt_return_srr
+/**
+ * Interrupt 0xf60 - Facility Unavailable Interrupt.
+ * This is a synchronous interrupt in response to
+ * executing an instruction without access to the facility that can be
+ * resolved by the OS (e.g., FSCR, MSR).
+ * Similar to FP unavailable.
+ */
+INT_DEFINE_BEGIN(facility_unavailable)
+ IVEC=0xf60
+#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
+ IKVM_REAL=1
+#endif
+INT_DEFINE_END(facility_unavailable)
+
EXC_REAL_BEGIN(facility_unavailable, 0xf60, 0x20)
- INT_HANDLER facility_unavailable, 0xf60, ool=1, kvm=1
+ GEN_INT_ENTRY facility_unavailable, virt=0, ool=1
EXC_REAL_END(facility_unavailable, 0xf60, 0x20)
EXC_VIRT_BEGIN(facility_unavailable, 0x4f60, 0x20)
- INT_HANDLER facility_unavailable, 0xf60, ool=1, virt=1
+ GEN_INT_ENTRY facility_unavailable, virt=1, ool=1
EXC_VIRT_END(facility_unavailable, 0x4f60, 0x20)
-INT_KVM_HANDLER facility_unavailable, 0xf60, EXC_STD, PACA_EXGEN, 0
-EXC_COMMON(facility_unavailable_common, 0xf60, facility_unavailable_exception)
+EXC_COMMON_BEGIN(facility_unavailable_common)
+ GEN_COMMON facility_unavailable
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ bl facility_unavailable_exception
+ REST_NVGPRS(r1) /* instruction emulation may change GPRs */
+ b interrupt_return_srr
+
+/**
+ * Interrupt 0xf80 - Hypervisor Facility Unavailable Interrupt.
+ * This is a synchronous interrupt in response to
+ * executing an instruction without access to the facility that can only
+ * be resolved in HV mode (e.g., HFSCR).
+ * Similar to FP unavailable.
+ */
+INT_DEFINE_BEGIN(h_facility_unavailable)
+ IVEC=0xf80
+ IHSRR=1
+ IKVM_REAL=1
+ IKVM_VIRT=1
+INT_DEFINE_END(h_facility_unavailable)
EXC_REAL_BEGIN(h_facility_unavailable, 0xf80, 0x20)
- INT_HANDLER h_facility_unavailable, 0xf80, ool=1, hsrr=EXC_HV, kvm=1
+ GEN_INT_ENTRY h_facility_unavailable, virt=0, ool=1
EXC_REAL_END(h_facility_unavailable, 0xf80, 0x20)
EXC_VIRT_BEGIN(h_facility_unavailable, 0x4f80, 0x20)
- INT_HANDLER h_facility_unavailable, 0xf80, ool=1, virt=1, hsrr=EXC_HV, kvm=1
+ GEN_INT_ENTRY h_facility_unavailable, virt=1, ool=1
EXC_VIRT_END(h_facility_unavailable, 0x4f80, 0x20)
-INT_KVM_HANDLER h_facility_unavailable, 0xf80, EXC_HV, PACA_EXGEN, 0
-EXC_COMMON(h_facility_unavailable_common, 0xf80, facility_unavailable_exception)
+EXC_COMMON_BEGIN(h_facility_unavailable_common)
+ GEN_COMMON h_facility_unavailable
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ bl facility_unavailable_exception
+ REST_NVGPRS(r1) /* XXX Shouldn't be necessary in practice */
+ b interrupt_return_hsrr
EXC_REAL_NONE(0xfa0, 0x20)
@@ -1766,56 +2539,94 @@ EXC_REAL_NONE(0x1100, 0x100)
EXC_VIRT_NONE(0x5100, 0x100)
#ifdef CONFIG_CBE_RAS
+INT_DEFINE_BEGIN(cbe_system_error)
+ IVEC=0x1200
+ IHSRR=1
+INT_DEFINE_END(cbe_system_error)
+
EXC_REAL_BEGIN(cbe_system_error, 0x1200, 0x100)
- INT_HANDLER cbe_system_error, 0x1200, ool=1, hsrr=EXC_HV, kvm=1
+ GEN_INT_ENTRY cbe_system_error, virt=0
EXC_REAL_END(cbe_system_error, 0x1200, 0x100)
EXC_VIRT_NONE(0x5200, 0x100)
-INT_KVM_HANDLER cbe_system_error, 0x1200, EXC_HV, PACA_EXGEN, 1
-EXC_COMMON(cbe_system_error_common, 0x1200, cbe_system_error_exception)
+EXC_COMMON_BEGIN(cbe_system_error_common)
+ GEN_COMMON cbe_system_error
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ bl cbe_system_error_exception
+ b interrupt_return_hsrr
+
#else /* CONFIG_CBE_RAS */
EXC_REAL_NONE(0x1200, 0x100)
EXC_VIRT_NONE(0x5200, 0x100)
#endif
+/**
+ * Interrupt 0x1300 - Instruction Address Breakpoint Interrupt.
+ * This was removed from the ISA before 2.01, which is the earliest
+ * 64-bit BookS ISA supported; however, the G5 / 970 implements this
+ * interrupt with a non-architected feature available through the support
+ * processor interface.
+ */
+INT_DEFINE_BEGIN(instruction_breakpoint)
+ IVEC=0x1300
+#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
+ IKVM_REAL=1
+#endif
+INT_DEFINE_END(instruction_breakpoint)
EXC_REAL_BEGIN(instruction_breakpoint, 0x1300, 0x100)
- INT_HANDLER instruction_breakpoint, 0x1300, kvm=1
+ GEN_INT_ENTRY instruction_breakpoint, virt=0
EXC_REAL_END(instruction_breakpoint, 0x1300, 0x100)
EXC_VIRT_BEGIN(instruction_breakpoint, 0x5300, 0x100)
- INT_HANDLER instruction_breakpoint, 0x1300, virt=1
+ GEN_INT_ENTRY instruction_breakpoint, virt=1
EXC_VIRT_END(instruction_breakpoint, 0x5300, 0x100)
-INT_KVM_HANDLER instruction_breakpoint, 0x1300, EXC_STD, PACA_EXGEN, 1
-EXC_COMMON(instruction_breakpoint_common, 0x1300, instruction_breakpoint_exception)
+EXC_COMMON_BEGIN(instruction_breakpoint_common)
+ GEN_COMMON instruction_breakpoint
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ bl instruction_breakpoint_exception
+ b interrupt_return_srr
EXC_REAL_NONE(0x1400, 0x100)
EXC_VIRT_NONE(0x5400, 0x100)
-EXC_REAL_BEGIN(denorm_exception_hv, 0x1500, 0x100)
- INT_HANDLER denorm_exception_hv, 0x1500, early=2, hsrr=EXC_HV
+/**
+ * Interrupt 0x1500 - Soft Patch Interrupt
+ *
+ * Handling:
+ * This is an implementation specific interrupt which can be used for a
+ * range of exceptions.
+ *
+ * This interrupt handler is unique in that it runs the denormal assist
+ * code even for guests (and even in guest context) without going to KVM,
+ * for speed. POWER9 does not raise denorm exceptions, so this handling
+ * could be phased out in the future to reduce the number of special cases.
+ */
+INT_DEFINE_BEGIN(denorm_exception)
+ IVEC=0x1500
+ IHSRR=1
+ IBRANCH_TO_COMMON=0
+ IKVM_REAL=1
+INT_DEFINE_END(denorm_exception)
+
+EXC_REAL_BEGIN(denorm_exception, 0x1500, 0x100)
+ GEN_INT_ENTRY denorm_exception, virt=0
#ifdef CONFIG_PPC_DENORMALISATION
- mfspr r10,SPRN_HSRR1
- andis. r10,r10,(HSRR1_DENORM)@h /* denorm? */
+ andis. r10,r12,(HSRR1_DENORM)@h /* denorm? */
bne+ denorm_assist
#endif
- KVMTEST denorm_exception_hv, EXC_HV 0x1500
- INT_SAVE_SRR_AND_JUMP denorm_common, EXC_HV, 1
-EXC_REAL_END(denorm_exception_hv, 0x1500, 0x100)
-
+ GEN_BRANCH_TO_COMMON denorm_exception, virt=0
+EXC_REAL_END(denorm_exception, 0x1500, 0x100)
#ifdef CONFIG_PPC_DENORMALISATION
EXC_VIRT_BEGIN(denorm_exception, 0x5500, 0x100)
- INT_HANDLER denorm_exception, 0x1500, 0, 2, 1, EXC_HV, PACA_EXGEN, 1, 0, 0, 0, 0
- mfspr r10,SPRN_HSRR1
- andis. r10,r10,(HSRR1_DENORM)@h /* denorm? */
+ GEN_INT_ENTRY denorm_exception, virt=1
+ andis. r10,r12,(HSRR1_DENORM)@h /* denorm? */
bne+ denorm_assist
- INT_VIRT_SAVE_SRR_AND_JUMP denorm_common, EXC_HV
+ GEN_BRANCH_TO_COMMON denorm_exception, virt=1
EXC_VIRT_END(denorm_exception, 0x5500, 0x100)
#else
EXC_VIRT_NONE(0x5500, 0x100)
#endif
-INT_KVM_HANDLER denorm_exception_hv, 0x1500, EXC_HV, PACA_EXGEN, 0
-
#ifdef CONFIG_PPC_DENORMALISATION
TRAMP_REAL_BEGIN(denorm_assist)
BEGIN_FTR_SECTION
@@ -1872,11 +2683,16 @@ denorm_done:
mtspr SPRN_HSRR0,r11
mtcrf 0x80,r9
ld r9,PACA_EXGEN+EX_R9(r13)
- RESTORE_PPR_PACA(PACA_EXGEN, r10)
+BEGIN_FTR_SECTION
+ ld r10,PACA_EXGEN+EX_PPR(r13)
+ mtspr SPRN_PPR,r10
+END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
BEGIN_FTR_SECTION
ld r10,PACA_EXGEN+EX_CFAR(r13)
mtspr SPRN_CFAR,r10
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
+ li r10,0
+ stb r10,PACAHSRR_VALID(r13)
ld r10,PACA_EXGEN+EX_R10(r13)
ld r11,PACA_EXGEN+EX_R11(r13)
ld r12,PACA_EXGEN+EX_R12(r13)
@@ -1885,43 +2701,76 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
b .
#endif
-EXC_COMMON(denorm_common, 0x1500, unknown_exception)
+EXC_COMMON_BEGIN(denorm_exception_common)
+ GEN_COMMON denorm_exception
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ bl unknown_exception
+ b interrupt_return_hsrr
#ifdef CONFIG_CBE_RAS
+INT_DEFINE_BEGIN(cbe_maintenance)
+ IVEC=0x1600
+ IHSRR=1
+INT_DEFINE_END(cbe_maintenance)
+
EXC_REAL_BEGIN(cbe_maintenance, 0x1600, 0x100)
- INT_HANDLER cbe_maintenance, 0x1600, ool=1, hsrr=EXC_HV, kvm=1
+ GEN_INT_ENTRY cbe_maintenance, virt=0
EXC_REAL_END(cbe_maintenance, 0x1600, 0x100)
EXC_VIRT_NONE(0x5600, 0x100)
-INT_KVM_HANDLER cbe_maintenance, 0x1600, EXC_HV, PACA_EXGEN, 1
-EXC_COMMON(cbe_maintenance_common, 0x1600, cbe_maintenance_exception)
+EXC_COMMON_BEGIN(cbe_maintenance_common)
+ GEN_COMMON cbe_maintenance
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ bl cbe_maintenance_exception
+ b interrupt_return_hsrr
+
#else /* CONFIG_CBE_RAS */
EXC_REAL_NONE(0x1600, 0x100)
EXC_VIRT_NONE(0x5600, 0x100)
#endif
+INT_DEFINE_BEGIN(altivec_assist)
+ IVEC=0x1700
+#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
+ IKVM_REAL=1
+#endif
+INT_DEFINE_END(altivec_assist)
+
EXC_REAL_BEGIN(altivec_assist, 0x1700, 0x100)
- INT_HANDLER altivec_assist, 0x1700, kvm=1
+ GEN_INT_ENTRY altivec_assist, virt=0
EXC_REAL_END(altivec_assist, 0x1700, 0x100)
EXC_VIRT_BEGIN(altivec_assist, 0x5700, 0x100)
- INT_HANDLER altivec_assist, 0x1700, virt=1
+ GEN_INT_ENTRY altivec_assist, virt=1
EXC_VIRT_END(altivec_assist, 0x5700, 0x100)
-INT_KVM_HANDLER altivec_assist, 0x1700, EXC_STD, PACA_EXGEN, 0
+EXC_COMMON_BEGIN(altivec_assist_common)
+ GEN_COMMON altivec_assist
+ addi r3,r1,STACK_FRAME_OVERHEAD
#ifdef CONFIG_ALTIVEC
-EXC_COMMON(altivec_assist_common, 0x1700, altivec_assist_exception)
+ bl altivec_assist_exception
+ REST_NVGPRS(r1) /* instruction emulation may change GPRs */
#else
-EXC_COMMON(altivec_assist_common, 0x1700, unknown_exception)
+ bl unknown_exception
#endif
+ b interrupt_return_srr
#ifdef CONFIG_CBE_RAS
+INT_DEFINE_BEGIN(cbe_thermal)
+ IVEC=0x1800
+ IHSRR=1
+INT_DEFINE_END(cbe_thermal)
+
EXC_REAL_BEGIN(cbe_thermal, 0x1800, 0x100)
- INT_HANDLER cbe_thermal, 0x1800, ool=1, hsrr=EXC_HV, kvm=1
+ GEN_INT_ENTRY cbe_thermal, virt=0
EXC_REAL_END(cbe_thermal, 0x1800, 0x100)
EXC_VIRT_NONE(0x5800, 0x100)
-INT_KVM_HANDLER cbe_thermal, 0x1800, EXC_HV, PACA_EXGEN, 1
-EXC_COMMON(cbe_thermal_common, 0x1800, cbe_thermal_exception)
+EXC_COMMON_BEGIN(cbe_thermal_common)
+ GEN_COMMON cbe_thermal
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ bl cbe_thermal_exception
+ b interrupt_return_hsrr
+
#else /* CONFIG_CBE_RAS */
EXC_REAL_NONE(0x1800, 0x100)
EXC_VIRT_NONE(0x5800, 0x100)
@@ -1930,14 +2779,11 @@ EXC_VIRT_NONE(0x5800, 0x100)
#ifdef CONFIG_PPC_WATCHDOG
-#define MASKED_DEC_HANDLER_LABEL 3f
-
-#define MASKED_DEC_HANDLER(_H) \
-3: /* soft-nmi */ \
- std r12,PACA_EXGEN+EX_R12(r13); \
- GET_SCRATCH0(r10); \
- std r10,PACA_EXGEN+EX_R13(r13); \
- INT_SAVE_SRR_AND_JUMP soft_nmi_common, _H, 1
+INT_DEFINE_BEGIN(soft_nmi)
+ IVEC=0x900
+ ISTACK=0
+ ICFAR=0
+INT_DEFINE_END(soft_nmi)
/*
* Branch to soft_nmi_interrupt using the emergency stack. The emergency
@@ -1952,20 +2798,25 @@ EXC_COMMON_BEGIN(soft_nmi_common)
mr r10,r1
ld r1,PACAEMERGSP(r13)
subi r1,r1,INT_FRAME_SIZE
- INT_COMMON 0x900, PACA_EXGEN, 0, 1, 1, 0, 0
- bl save_nvgprs
+ __GEN_COMMON_BODY soft_nmi
+
addi r3,r1,STACK_FRAME_OVERHEAD
bl soft_nmi_interrupt
- b ret_from_except
-#else /* CONFIG_PPC_WATCHDOG */
-#define MASKED_DEC_HANDLER_LABEL 2f /* normal return */
-#define MASKED_DEC_HANDLER(_H)
+ /* Clear MSR_RI before setting SRR0 and SRR1. */
+ li r9,0
+ mtmsrd r9,1
+
+ kuap_kernel_restore r9, r10
+
+ EXCEPTION_RESTORE_REGS hsrr=0
+ RFI_TO_KERNEL
+
#endif /* CONFIG_PPC_WATCHDOG */
/*
* An interrupt came in while soft-disabled. We set paca->irq_happened, then:
- * - If it was a decrementer interrupt, we bump the dec to max and and return.
+ * - If it was a decrementer interrupt, we bump the dec to max and return.
* - If it was a doorbell we return immediately since doorbells are edge
* triggered and won't automatically refire.
* - If it was a HMI we return immediately since we handled it in realmode
@@ -1973,49 +2824,89 @@ EXC_COMMON_BEGIN(soft_nmi_common)
* - Else it is one of PACA_IRQ_MUST_HARD_MASK, so hard disable and return.
* This is called with r10 containing the value to OR to the paca field.
*/
-.macro MASKED_INTERRUPT hsrr
+.macro MASKED_INTERRUPT hsrr=0
.if \hsrr
masked_Hinterrupt:
.else
masked_interrupt:
.endif
- std r11,PACA_EXGEN+EX_R11(r13)
- lbz r11,PACAIRQHAPPENED(r13)
- or r11,r11,r10
- stb r11,PACAIRQHAPPENED(r13)
+ stw r9,PACA_EXGEN+EX_CCR(r13)
+#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
+ /*
+ * Ensure there was no previous MUST_HARD_MASK interrupt or
+ * HARD_DIS setting. If this does fire, the interrupt is still
+ * masked and MSR[EE] will be cleared on return, so no need to
+ * panic, but somebody probably enabled MSR[EE] under
+ * PACA_IRQ_HARD_DIS, mtmsr(mfmsr() | MSR_x) being a common
+ * cause.
+ */
+ lbz r9,PACAIRQHAPPENED(r13)
+ andi. r9,r9,(PACA_IRQ_MUST_HARD_MASK|PACA_IRQ_HARD_DIS)
+0: tdnei r9,0
+ EMIT_WARN_ENTRY 0b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
+#endif
+ lbz r9,PACAIRQHAPPENED(r13)
+ or r9,r9,r10
+ stb r9,PACAIRQHAPPENED(r13)
+
+ .if ! \hsrr
cmpwi r10,PACA_IRQ_DEC
bne 1f
- lis r10,0x7fff
- ori r10,r10,0xffff
- mtspr SPRN_DEC,r10
- b MASKED_DEC_HANDLER_LABEL
+ LOAD_REG_IMMEDIATE(r9, 0x7fffffff)
+ mtspr SPRN_DEC,r9
+#ifdef CONFIG_PPC_WATCHDOG
+ lwz r9,PACA_EXGEN+EX_CCR(r13)
+ b soft_nmi_common
+#else
+ b 2f
+#endif
+ .endif
+
1: andi. r10,r10,PACA_IRQ_MUST_HARD_MASK
beq 2f
+ xori r12,r12,MSR_EE /* clear MSR_EE */
.if \hsrr
- mfspr r10,SPRN_HSRR1
- xori r10,r10,MSR_EE /* clear MSR_EE */
- mtspr SPRN_HSRR1,r10
+ mtspr SPRN_HSRR1,r12
.else
- mfspr r10,SPRN_SRR1
- xori r10,r10,MSR_EE /* clear MSR_EE */
- mtspr SPRN_SRR1,r10
+ mtspr SPRN_SRR1,r12
.endif
- ori r11,r11,PACA_IRQ_HARD_DIS
- stb r11,PACAIRQHAPPENED(r13)
+ ori r9,r9,PACA_IRQ_HARD_DIS
+ stb r9,PACAIRQHAPPENED(r13)
2: /* done */
+ li r9,0
+ .if \hsrr
+ stb r9,PACAHSRR_VALID(r13)
+ .else
+ stb r9,PACASRR_VALID(r13)
+ .endif
+
+ SEARCH_RESTART_TABLE
+ cmpdi r12,0
+ beq 3f
+ .if \hsrr
+ mtspr SPRN_HSRR0,r12
+ .else
+ mtspr SPRN_SRR0,r12
+ .endif
+3:
+
+ ld r9,PACA_EXGEN+EX_CTR(r13)
+ mtctr r9
+ lwz r9,PACA_EXGEN+EX_CCR(r13)
mtcrf 0x80,r9
std r1,PACAR1(r13)
ld r9,PACA_EXGEN+EX_R9(r13)
ld r10,PACA_EXGEN+EX_R10(r13)
ld r11,PACA_EXGEN+EX_R11(r13)
- /* returns to kernel where r13 must be set up, so don't restore it */
+ ld r12,PACA_EXGEN+EX_R12(r13)
+ ld r13,PACA_EXGEN+EX_R13(r13)
+ /* May return to masked low address where r13 is not set up */
.if \hsrr
HRFI_TO_KERNEL
.else
RFI_TO_KERNEL
.endif
b .
- MASKED_DEC_HANDLER(\hsrr\())
.endm
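The masked-interrupt policy described in the comment above is latch-and-replay: the macro only records the event in paca->irq_happened (hard-disabling if required) and delivery happens later, once the soft-masked section re-enables interrupts. A rough C-level sketch of the pairing, with replay_pending_interrupt() as a hypothetical stand-in for the replay loop:

	/* interrupt side (the MASKED_INTERRUPT macro above) */
	local_paca->irq_happened |= reason;	/* e.g. PACA_IRQ_DEC, PACA_IRQ_DBELL */

	/* later, when the kernel soft-enables again */
	if (local_paca->irq_happened & ~PACA_IRQ_HARD_DIS)
		replay_pending_interrupt();	/* hypothetical: re-delivers the latched source */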
TRAMP_REAL_BEGIN(stf_barrier_fallback)
@@ -2031,15 +2922,8 @@ TRAMP_REAL_BEGIN(stf_barrier_fallback)
.endr
blr
-TRAMP_REAL_BEGIN(rfi_flush_fallback)
- SET_SCRATCH0(r13);
- GET_PACA(r13);
- std r1,PACA_EXRFI+EX_R12(r13)
- ld r1,PACAKSAVE(r13)
- std r9,PACA_EXRFI+EX_R9(r13)
- std r10,PACA_EXRFI+EX_R10(r13)
- std r11,PACA_EXRFI+EX_R11(r13)
- mfctr r9
+/* Clobbers r10, r11, ctr */
+.macro L1D_DISPLACEMENT_FLUSH
ld r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13)
ld r11,PACA_L1D_FLUSH_SIZE(r13)
srdi r11,r11,(7 + 3) /* 128 byte lines, unrolled 8x */
@@ -2050,7 +2934,7 @@ TRAMP_REAL_BEGIN(rfi_flush_fallback)
sync
/*
- * The load adresses are at staggered offsets within cachelines,
+ * The load addresses are at staggered offsets within cachelines,
* which suits some pipelines better (on others it should not
* hurt).
*/
@@ -2065,7 +2949,49 @@ TRAMP_REAL_BEGIN(rfi_flush_fallback)
ld r11,(0x80 + 8)*7(r10)
addi r10,r10,0x80*8
bdnz 1b
+.endm
+TRAMP_REAL_BEGIN(entry_flush_fallback)
+ std r9,PACA_EXRFI+EX_R9(r13)
+ std r10,PACA_EXRFI+EX_R10(r13)
+ std r11,PACA_EXRFI+EX_R11(r13)
+ mfctr r9
+ L1D_DISPLACEMENT_FLUSH
+ mtctr r9
+ ld r9,PACA_EXRFI+EX_R9(r13)
+ ld r10,PACA_EXRFI+EX_R10(r13)
+ ld r11,PACA_EXRFI+EX_R11(r13)
+ blr
+
+/*
+ * The SCV entry flush happens with interrupts enabled, so it must disable
+ * them to prevent EXRFI being clobbered by NMIs (e.g., soft_nmi_common). r10
+ * (containing LR) does not need to be preserved here because scv entry
+ * puts 0 in the pt_regs, and CTR can be clobbered for the same reason.
+ */
+TRAMP_REAL_BEGIN(scv_entry_flush_fallback)
+ li r10,0
+ mtmsrd r10,1
+ lbz r10,PACAIRQHAPPENED(r13)
+ ori r10,r10,PACA_IRQ_HARD_DIS
+ stb r10,PACAIRQHAPPENED(r13)
+ std r11,PACA_EXRFI+EX_R11(r13)
+ L1D_DISPLACEMENT_FLUSH
+ ld r11,PACA_EXRFI+EX_R11(r13)
+ li r10,MSR_RI
+ mtmsrd r10,1
+ blr
+
+TRAMP_REAL_BEGIN(rfi_flush_fallback)
+ SET_SCRATCH0(r13);
+ GET_PACA(r13);
+ std r1,PACA_EXRFI+EX_R12(r13)
+ ld r1,PACAKSAVE(r13)
+ std r9,PACA_EXRFI+EX_R9(r13)
+ std r10,PACA_EXRFI+EX_R10(r13)
+ std r11,PACA_EXRFI+EX_R11(r13)
+ mfctr r9
+ L1D_DISPLACEMENT_FLUSH
mtctr r9
ld r9,PACA_EXRFI+EX_R9(r13)
ld r10,PACA_EXRFI+EX_R10(r13)
@@ -2083,6 +3009,22 @@ TRAMP_REAL_BEGIN(hrfi_flush_fallback)
std r10,PACA_EXRFI+EX_R10(r13)
std r11,PACA_EXRFI+EX_R11(r13)
mfctr r9
+ L1D_DISPLACEMENT_FLUSH
+ mtctr r9
+ ld r9,PACA_EXRFI+EX_R9(r13)
+ ld r10,PACA_EXRFI+EX_R10(r13)
+ ld r11,PACA_EXRFI+EX_R11(r13)
+ ld r1,PACA_EXRFI+EX_R12(r13)
+ GET_SCRATCH0(r13);
+ hrfid
+
+TRAMP_REAL_BEGIN(rfscv_flush_fallback)
+ /* system call volatile */
+ mr r7,r13
+ GET_PACA(r13);
+ mr r8,r1
+ ld r1,PACAKSAVE(r13)
+ mfctr r9
ld r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13)
ld r11,PACA_L1D_FLUSH_SIZE(r13)
srdi r11,r11,(7 + 3) /* 128 byte lines, unrolled 8x */
@@ -2110,79 +3052,45 @@ TRAMP_REAL_BEGIN(hrfi_flush_fallback)
bdnz 1b
mtctr r9
- ld r9,PACA_EXRFI+EX_R9(r13)
- ld r10,PACA_EXRFI+EX_R10(r13)
- ld r11,PACA_EXRFI+EX_R11(r13)
- ld r1,PACA_EXRFI+EX_R12(r13)
- GET_SCRATCH0(r13);
- hrfid
+ li r9,0
+ li r10,0
+ li r11,0
+ mr r1,r8
+ mr r13,r7
+ RFSCV
-/*
- * Real mode exceptions actually use this too, but alternate
- * instruction code patches (which end up in the common .text area)
- * cannot reach these if they are put there.
- */
-USE_FIXED_SECTION(virt_trampolines)
- MASKED_INTERRUPT EXC_STD
- MASKED_INTERRUPT EXC_HV
+USE_TEXT_SECTION()
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
-TRAMP_REAL_BEGIN(kvmppc_skip_interrupt)
+kvm_interrupt:
/*
- * Here all GPRs are unchanged from when the interrupt happened
- * except for r13, which is saved in SPRG_SCRATCH0.
+ * The conditional branch in KVMTEST can't reach all the way,
+ * so make a stub.
*/
- mfspr r13, SPRN_SRR0
- addi r13, r13, 4
- mtspr SPRN_SRR0, r13
- GET_SCRATCH0(r13)
- RFI_TO_KERNEL
- b .
-
-TRAMP_REAL_BEGIN(kvmppc_skip_Hinterrupt)
- /*
- * Here all GPRs are unchanged from when the interrupt happened
- * except for r13, which is saved in SPRG_SCRATCH0.
- */
- mfspr r13, SPRN_HSRR0
- addi r13, r13, 4
- mtspr SPRN_HSRR0, r13
- GET_SCRATCH0(r13)
- HRFI_TO_KERNEL
- b .
+ b kvmppc_interrupt
#endif
-/*
- * Ensure that any handlers that get invoked from the exception prologs
- * above are below the first 64KB (0x10000) of the kernel image because
- * the prologs assemble the addresses of these handlers using the
- * LOAD_HANDLER macro, which uses an ori instruction.
- */
-
-/*** Common interrupt handlers ***/
-
-
- /*
- * Relocation-on interrupts: A subset of the interrupts can be delivered
- * with IR=1/DR=1, if AIL==2 and MSR.HV won't be changed by delivering
- * it. Addresses are the same as the original interrupt addresses, but
- * offset by 0xc000000000004000.
- * It's impossible to receive interrupts below 0x300 via this mechanism.
- * KVM: None of these traps are from the guest ; anything that escalated
- * to HV=1 from HV=0 is delivered via real mode handlers.
- */
+_GLOBAL(do_uaccess_flush)
+ UACCESS_FLUSH_FIXUP_SECTION
+ nop
+ nop
+ nop
+ blr
+ L1D_DISPLACEMENT_FLUSH
+ blr
+_ASM_NOKPROBE_SYMBOL(do_uaccess_flush)
+EXPORT_SYMBOL(do_uaccess_flush)
- /*
- * This uses the standard macro, since the original 0x300 vector
- * only has extra guff for STAB-based processors -- which never
- * come here.
- */
-EXC_COMMON_BEGIN(ppc64_runlatch_on_trampoline)
- b __ppc64_runlatch_on
+MASKED_INTERRUPT
+MASKED_INTERRUPT hsrr=1
USE_FIXED_SECTION(virt_trampolines)
/*
+ * All code below __end_soft_masked is treated as soft-masked. If
+ * any code runs here with MSR[EE]=1, it must then cope with a pending
+ * soft interrupt being raised (i.e., by ensuring it is replayed).
+ *
* The __end_interrupts marker must be past the out-of-line (OOL)
* handlers, so that they are copied to real address 0x100 when running
* a relocatable kernel. This ensures they can be reached from the short
@@ -2192,25 +3100,7 @@ USE_FIXED_SECTION(virt_trampolines)
.align 7
.globl __end_interrupts
__end_interrupts:
-DEFINE_FIXED_SYMBOL(__end_interrupts)
-
-#ifdef CONFIG_PPC_970_NAP
- /*
- * Called by exception entry code if _TLF_NAPPING was set, this clears
- * the NAPPING flag, and redirects the exception exit to
- * power4_fixup_nap_return.
- */
- .globl power4_fixup_nap
-EXC_COMMON_BEGIN(power4_fixup_nap)
- andc r9,r9,r10
- std r9,TI_LOCAL_FLAGS(r11)
- LOAD_REG_ADDR(r10, power4_idle_nap_return)
- std r10,_NIP(r1)
- blr
-
-power4_idle_nap_return:
- blr
-#endif
+DEFINE_FIXED_SYMBOL(__end_interrupts, virt_trampolines)
CLOSE_FIXED_SECTION(real_vectors);
CLOSE_FIXED_SECTION(real_trampolines);
@@ -2220,7 +3110,7 @@ CLOSE_FIXED_SECTION(virt_trampolines);
USE_TEXT_SECTION()
/* MSR[RI] should be clear because this uses SRR[01] */
-enable_machine_check:
+_GLOBAL(enable_machine_check)
mflr r0
bcl 20,31,$+4
0: mflr r3
@@ -2247,161 +3137,3 @@ disable_machine_check:
RFI_TO_KERNEL
1: mtlr r0
blr
-
-/*
- * Hash table stuff
- */
- .balign IFETCH_ALIGN_BYTES
-do_hash_page:
-#ifdef CONFIG_PPC_BOOK3S_64
- lis r0,(DSISR_BAD_FAULT_64S | DSISR_DABRMATCH | DSISR_KEYFAULT)@h
- ori r0,r0,DSISR_BAD_FAULT_64S@l
- and. r0,r5,r0 /* weird error? */
- bne- handle_page_fault /* if not, try to insert a HPTE */
- ld r11, PACA_THREAD_INFO(r13)
- lwz r0,TI_PREEMPT(r11) /* If we're in an "NMI" */
- andis. r0,r0,NMI_MASK@h /* (i.e. an irq when soft-disabled) */
- bne 77f /* then don't call hash_page now */
-
- /*
- * r3 contains the trap number
- * r4 contains the faulting address
- * r5 contains dsisr
- * r6 msr
- *
- * at return r3 = 0 for success, 1 for page fault, negative for error
- */
- bl __hash_page /* build HPTE if possible */
- cmpdi r3,0 /* see if __hash_page succeeded */
-
- /* Success */
- beq fast_exc_return_irq /* Return from exception on success */
-
- /* Error */
- blt- 13f
-
- /* Reload DAR/DSISR into r4/r5 for the DABR check below */
- ld r4,_DAR(r1)
- ld r5,_DSISR(r1)
-#endif /* CONFIG_PPC_BOOK3S_64 */
-
-/* Here we have a page fault that hash_page can't handle. */
-handle_page_fault:
-11: andis. r0,r5,DSISR_DABRMATCH@h
- bne- handle_dabr_fault
- addi r3,r1,STACK_FRAME_OVERHEAD
- bl do_page_fault
- cmpdi r3,0
- beq+ ret_from_except_lite
- bl save_nvgprs
- mr r5,r3
- addi r3,r1,STACK_FRAME_OVERHEAD
- ld r4,_DAR(r1)
- bl bad_page_fault
- b ret_from_except
-
-/* We have a data breakpoint exception - handle it */
-handle_dabr_fault:
- bl save_nvgprs
- ld r4,_DAR(r1)
- ld r5,_DSISR(r1)
- addi r3,r1,STACK_FRAME_OVERHEAD
- bl do_break
- /*
- * do_break() may have changed the NV GPRS while handling a breakpoint.
- * If so, we need to restore them with their updated values. Don't use
- * ret_from_except_lite here.
- */
- b ret_from_except
-
-
-#ifdef CONFIG_PPC_BOOK3S_64
-/* We have a page fault that hash_page could handle but HV refused
- * the PTE insertion
- */
-13: bl save_nvgprs
- mr r5,r3
- addi r3,r1,STACK_FRAME_OVERHEAD
- ld r4,_DAR(r1)
- bl low_hash_fault
- b ret_from_except
-#endif
-
-/*
- * We come here as a result of a DSI at a point where we don't want
- * to call hash_page, such as when we are accessing memory (possibly
- * user memory) inside a PMU interrupt that occurred while interrupts
- * were soft-disabled. We want to invoke the exception handler for
- * the access, or panic if there isn't a handler.
- */
-77: bl save_nvgprs
- addi r3,r1,STACK_FRAME_OVERHEAD
- li r5,SIGSEGV
- bl bad_page_fault
- b ret_from_except
-
-/*
- * When doorbell is triggered from system reset wakeup, the message is
- * not cleared, so it would fire again when EE is enabled.
- *
- * When coming from local_irq_enable, there may be the same problem if
- * we were hard disabled.
- *
- * Execute msgclr to clear pending exceptions before handling it.
- */
-h_doorbell_common_msgclr:
- LOAD_REG_IMMEDIATE(r3, PPC_DBELL_MSGTYPE << (63-36))
- PPC_MSGCLR(3)
- b h_doorbell_common
-
-doorbell_super_common_msgclr:
- LOAD_REG_IMMEDIATE(r3, PPC_DBELL_MSGTYPE << (63-36))
- PPC_MSGCLRP(3)
- b doorbell_super_common
-
-/*
- * Called from arch_local_irq_enable when an interrupt needs
- * to be resent. r3 contains 0x500, 0x900, 0xa00 or 0xe80 to indicate
- * which kind of interrupt. MSR:EE is already off. We generate a
- * stackframe like if a real interrupt had happened.
- *
- * Note: While MSR:EE is off, we need to make sure that _MSR
- * in the generated frame has EE set to 1 or the exception
- * handler will not properly re-enable them.
- *
- * Note that we don't specify LR as the NIP (return address) for
- * the interrupt because that would unbalance the return branch
- * predictor.
- */
-_GLOBAL(__replay_interrupt)
- /* We are going to jump to the exception common code which
- * will retrieve various register values from the PACA which
- * we don't give a damn about, so we don't bother storing them.
- */
- mfmsr r12
- LOAD_REG_ADDR(r11, replay_interrupt_return)
- mfcr r9
- ori r12,r12,MSR_EE
- cmpwi r3,0x900
- beq decrementer_common
- cmpwi r3,0x500
-BEGIN_FTR_SECTION
- beq h_virt_irq_common
-FTR_SECTION_ELSE
- beq hardware_interrupt_common
-ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_300)
- cmpwi r3,0xf00
- beq performance_monitor_common
-BEGIN_FTR_SECTION
- cmpwi r3,0xa00
- beq h_doorbell_common_msgclr
- cmpwi r3,0xe60
- beq hmi_exception_common
-FTR_SECTION_ELSE
- cmpwi r3,0xa00
- beq doorbell_super_common_msgclr
-ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
-replay_interrupt_return:
- blr
-
-_ASM_NOKPROBE_SYMBOL(__replay_interrupt)
diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c
index ff0114aeba9b..ea0a073abd96 100644
--- a/arch/powerpc/kernel/fadump.c
+++ b/arch/powerpc/kernel/fadump.c
@@ -24,22 +24,45 @@
#include <linux/slab.h>
#include <linux/cma.h>
#include <linux/hugetlb.h>
+#include <linux/debugfs.h>
+#include <linux/of.h>
+#include <linux/of_fdt.h>
-#include <asm/debugfs.h>
#include <asm/page.h>
-#include <asm/prom.h>
#include <asm/fadump.h>
#include <asm/fadump-internal.h>
#include <asm/setup.h>
+#include <asm/interrupt.h>
+
+/*
+ * The CPU that acquired the lock to trigger the fadump crash should
+ * wait for the other CPUs to enter.
+ *
+ * The timeout is in milliseconds.
+ */
+#define CRASH_TIMEOUT 500
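The timeout is consumed further down in crash_fadump(); condensed, the wait on the crashing CPU amounts to the loop below (mirroring the code added later in this patch):

	msecs = CRASH_TIMEOUT;
	while (atomic_read(&cpus_in_fadump) < ncpus && --msecs > 0)
		mdelay(1);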
static struct fw_dump fw_dump;
static void __init fadump_reserve_crash_area(u64 base);
#ifndef CONFIG_PRESERVE_FA_DUMP
+
+static struct kobject *fadump_kobj;
+
+static atomic_t cpus_in_fadump;
static DEFINE_MUTEX(fadump_mutex);
-struct fadump_mrange_info crash_mrange_info = { "crash", NULL, 0, 0, 0 };
-struct fadump_mrange_info reserved_mrange_info = { "reserved", NULL, 0, 0, 0 };
+
+static struct fadump_mrange_info crash_mrange_info = { "crash", NULL, 0, 0, 0, false };
+
+#define RESERVED_RNGS_SZ 16384 /* 16K - 128 entries */
+#define RESERVED_RNGS_CNT (RESERVED_RNGS_SZ / \
+ sizeof(struct fadump_memory_range))
+static struct fadump_memory_range rngs[RESERVED_RNGS_CNT];
+static struct fadump_mrange_info
+reserved_mrange_info = { "reserved", rngs, RESERVED_RNGS_SZ, 0, RESERVED_RNGS_CNT, true };
+
+static void __init early_init_dt_scan_reserved_ranges(unsigned long node);
#ifdef CONFIG_CMA
static struct cma *fadump_cma;
@@ -51,13 +74,13 @@ static struct cma *fadump_cma;
* The total size of fadump reserved memory covers for boot memory size
* + cpu data size + hpte size and metadata.
* Initialize only the area equivalent to boot memory size for CMA use.
- * The reamining portion of fadump reserved memory will be not given
- * to CMA and pages for thoes will stay reserved. boot memory size is
+ * The remaining portion of fadump reserved memory will not be given
+ * to CMA and pages for those will stay reserved. Boot memory size is
 * aligned per CMA requirement to satisfy cma_init_reserved_mem() call.
* But for some reason even if it fails we still have the memory reservation
* with us and we can still continue doing fadump.
*/
-int __init fadump_cma_init(void)
+static int __init fadump_cma_init(void)
{
unsigned long long base, size;
int rc;
@@ -91,6 +114,12 @@ int __init fadump_cma_init(void)
}
/*
+ * If CMA activation fails, keep the pages reserved, instead of
+ * exposing them to buddy allocator. Same as 'fadump=nocma' case.
+ */
+ cma_reserve_pages_on_error(fadump_cma);
+
+ /*
* So we now have successfully initialized cma area for fadump.
*/
pr_info("Initialized 0x%lx bytes cma area at %ldMB from 0x%lx "
@@ -108,6 +137,11 @@ static int __init fadump_cma_init(void) { return 1; }
int __init early_init_dt_scan_fw_dump(unsigned long node, const char *uname,
int depth, void *data)
{
+ if (depth == 0) {
+ early_init_dt_scan_reserved_ranges(node);
+ return 0;
+ }
+
if (depth != 1)
return 0;
@@ -164,13 +198,13 @@ int is_fadump_active(void)
*/
static bool is_fadump_mem_area_contiguous(u64 d_start, u64 d_end)
{
- struct memblock_region *reg;
+ phys_addr_t reg_start, reg_end;
bool ret = false;
- u64 start, end;
+ u64 i, start, end;
- for_each_memblock(memory, reg) {
- start = max_t(u64, d_start, reg->base);
- end = min_t(u64, d_end, (reg->base + reg->size));
+ for_each_mem_range(i, &reg_start, &reg_end) {
+ start = max_t(u64, d_start, reg_start);
+ end = min_t(u64, d_end, reg_end);
if (d_start < end) {
/* Memory hole from d_start to start */
if (start > d_start)
@@ -224,7 +258,7 @@ bool is_fadump_reserved_mem_contiguous(void)
}
/* Print firmware assisted dump configurations for debugging purpose. */
-static void fadump_show_config(void)
+static void __init fadump_show_config(void)
{
int i;
@@ -265,7 +299,7 @@ static void fadump_show_config(void)
* that is required for a kernel to boot successfully.
*
*/
-static inline u64 fadump_calculate_reserve_size(void)
+static __init u64 fadump_calculate_reserve_size(void)
{
u64 base, size, bootmem_min;
int ret;
@@ -326,12 +360,17 @@ static inline u64 fadump_calculate_reserve_size(void)
* Calculate the total memory size required to be reserved for
* firmware-assisted dump registration.
*/
-static unsigned long get_fadump_area_size(void)
+static unsigned long __init get_fadump_area_size(void)
{
unsigned long size = 0;
size += fw_dump.cpu_state_data_size;
size += fw_dump.hpte_region_size;
+ /*
+ * Account for pagesize alignment of boot memory area destination address.
+	 * This facilitates mmap reading of the first kernel's memory.
+ */
+ size = PAGE_ALIGN(size);
size += fw_dump.boot_memory_size;
size += sizeof(struct fadump_crash_info_header);
size += sizeof(struct elfhdr); /* ELF core header.*/
@@ -395,44 +434,106 @@ static int __init add_boot_mem_regions(unsigned long mstart,
static int __init fadump_get_boot_mem_regions(void)
{
- unsigned long base, size, cur_size, hole_size, last_end;
+ unsigned long size, cur_size, hole_size, last_end;
unsigned long mem_size = fw_dump.boot_memory_size;
- struct memblock_region *reg;
+ phys_addr_t reg_start, reg_end;
int ret = 1;
+ u64 i;
fw_dump.boot_mem_regs_cnt = 0;
last_end = 0;
hole_size = 0;
cur_size = 0;
- for_each_memblock(memory, reg) {
- base = reg->base;
- size = reg->size;
- hole_size += (base - last_end);
+ for_each_mem_range(i, &reg_start, &reg_end) {
+ size = reg_end - reg_start;
+ hole_size += (reg_start - last_end);
if ((cur_size + size) >= mem_size) {
size = (mem_size - cur_size);
- ret = add_boot_mem_regions(base, size);
+ ret = add_boot_mem_regions(reg_start, size);
break;
}
mem_size -= size;
cur_size += size;
- ret = add_boot_mem_regions(base, size);
+ ret = add_boot_mem_regions(reg_start, size);
if (!ret)
break;
- last_end = base + size;
+ last_end = reg_end;
}
fw_dump.boot_mem_top = PAGE_ALIGN(fw_dump.boot_memory_size + hole_size);
return ret;
}
+/*
+ * Returns true if the given range overlaps with the reserved memory ranges
+ * starting at idx, and updates idx to the index of the overlapping range.
+ * Returns false otherwise.
+ */
+static bool __init overlaps_reserved_ranges(u64 base, u64 end, int *idx)
+{
+ bool ret = false;
+ int i;
+
+ for (i = *idx; i < reserved_mrange_info.mem_range_cnt; i++) {
+ u64 rbase = reserved_mrange_info.mem_ranges[i].base;
+ u64 rend = rbase + reserved_mrange_info.mem_ranges[i].size;
+
+ if (end <= rbase)
+ break;
+
+ if ((end > rbase) && (base < rend)) {
+ *idx = i;
+ ret = true;
+ break;
+ }
+ }
+
+ return ret;
+}
+
+/*
+ * Locate a suitable memory area to reserve memory for FADump. While at it,
+ * look up reserved-ranges & avoid overlap with them, as they are used by F/W.
+ */
+static u64 __init fadump_locate_reserve_mem(u64 base, u64 size)
+{
+ struct fadump_memory_range *mrngs;
+ phys_addr_t mstart, mend;
+ int idx = 0;
+ u64 i, ret = 0;
+
+ mrngs = reserved_mrange_info.mem_ranges;
+ for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE,
+ &mstart, &mend, NULL) {
+ pr_debug("%llu) mstart: %llx, mend: %llx, base: %llx\n",
+ i, mstart, mend, base);
+
+ if (mstart > base)
+ base = PAGE_ALIGN(mstart);
+
+ while ((mend > base) && ((mend - base) >= size)) {
+ if (!overlaps_reserved_ranges(base, base+size, &idx)) {
+ ret = base;
+ goto out;
+ }
+
+ base = mrngs[idx].base + mrngs[idx].size;
+ base = PAGE_ALIGN(base);
+ }
+ }
+
+out:
+ return ret;
+}
+
int __init fadump_reserve_mem(void)
{
- u64 base, size, mem_boundary, bootmem_min, align = PAGE_SIZE;
- bool is_memblock_bottom_up = memblock_bottom_up();
+ u64 base, size, mem_boundary, bootmem_min;
int ret = 1;
if (!fw_dump.fadump_enabled)
@@ -453,9 +554,9 @@ int __init fadump_reserve_mem(void)
PAGE_ALIGN(fadump_calculate_reserve_size());
#ifdef CONFIG_CMA
if (!fw_dump.nocma) {
- align = FADUMP_CMA_ALIGNMENT;
fw_dump.boot_memory_size =
- ALIGN(fw_dump.boot_memory_size, align);
+ ALIGN(fw_dump.boot_memory_size,
+ CMA_MIN_ALIGNMENT_BYTES);
}
#endif
@@ -523,13 +624,9 @@ int __init fadump_reserve_mem(void)
* Reserve memory at an offset closer to bottom of the RAM to
* minimize the impact of memory hot-remove operation.
*/
- memblock_set_bottom_up(true);
- base = memblock_find_in_range(base, mem_boundary, size, align);
+ base = fadump_locate_reserve_mem(base, size);
- /* Restore the previous allocation mode */
- memblock_set_bottom_up(is_memblock_bottom_up);
-
- if (!base) {
+ if (!base || (base + size > mem_boundary)) {
pr_err("Failed to find memory chunk for reservation!\n");
goto error_out;
}
@@ -594,8 +691,11 @@ early_param("fadump_reserve_mem", early_fadump_reserve_mem);
void crash_fadump(struct pt_regs *regs, const char *str)
{
+ unsigned int msecs;
struct fadump_crash_info_header *fdh = NULL;
int old_cpu, this_cpu;
+ /* Do not include first CPU */
+ unsigned int ncpus = num_online_cpus() - 1;
if (!should_fadump_crash())
return;
@@ -611,6 +711,8 @@ void crash_fadump(struct pt_regs *regs, const char *str)
old_cpu = cmpxchg(&crashing_cpu, -1, this_cpu);
if (old_cpu != -1) {
+ atomic_inc(&cpus_in_fadump);
+
/*
* We can't loop here indefinitely. Wait as long as fadump
* is in force. If we race with fadump un-registration this
@@ -632,12 +734,22 @@ void crash_fadump(struct pt_regs *regs, const char *str)
else
ppc_save_regs(&fdh->regs);
- fdh->online_mask = *cpu_online_mask;
+ fdh->cpu_mask = *cpu_online_mask;
+
+ /*
+ * If we came in via system reset, wait a while for the secondary
+ * CPUs to enter.
+ */
+ if (TRAP(&(fdh->regs)) == INTERRUPT_SYSTEM_RESET) {
+ msecs = CRASH_TIMEOUT;
+ while ((atomic_read(&cpus_in_fadump) < ncpus) && (--msecs > 0))
+ mdelay(1);
+ }
fw_dump.ops->fadump_trigger(fdh, str);
}
-u32 *fadump_regs_to_elf_notes(u32 *buf, struct pt_regs *regs)
+u32 *__init fadump_regs_to_elf_notes(u32 *buf, struct pt_regs *regs)
{
struct elf_prstatus prstatus;
@@ -646,18 +758,16 @@ u32 *fadump_regs_to_elf_notes(u32 *buf, struct pt_regs *regs)
* FIXME: How do i get PID? Do I really need it?
* prstatus.pr_pid = ????
*/
- elf_core_copy_kernel_regs(&prstatus.pr_reg, regs);
+ elf_core_copy_regs(&prstatus.pr_reg, regs);
buf = append_elf_note(buf, CRASH_CORE_NOTE_NAME, NT_PRSTATUS,
&prstatus, sizeof(prstatus));
return buf;
}
-void fadump_update_elfcore_header(char *bufp)
+void __init fadump_update_elfcore_header(char *bufp)
{
- struct elfhdr *elf;
struct elf_phdr *phdr;
- elf = (struct elfhdr *)bufp;
bufp += sizeof(struct elfhdr);
/* First note is a place holder for cpu notes info. */
@@ -672,7 +782,7 @@ void fadump_update_elfcore_header(char *bufp)
return;
}
-static void *fadump_alloc_buffer(unsigned long size)
+static void *__init fadump_alloc_buffer(unsigned long size)
{
unsigned long count, i;
struct page *page;
@@ -694,7 +804,7 @@ static void fadump_free_buffer(unsigned long vaddr, unsigned long size)
free_reserved_area((void *)vaddr, (void *)(vaddr + size), -1, NULL);
}
-s32 fadump_setup_cpu_notes_buf(u32 num_cpus)
+s32 __init fadump_setup_cpu_notes_buf(u32 num_cpus)
{
/* Allocate buffer to hold cpu crash notes. */
fw_dump.cpu_notes_buf_size = num_cpus * sizeof(note_buf_t);
@@ -726,10 +836,14 @@ void fadump_free_cpu_notes_buf(void)
static void fadump_free_mem_ranges(struct fadump_mrange_info *mrange_info)
{
+ if (mrange_info->is_static) {
+ mrange_info->mem_range_cnt = 0;
+ return;
+ }
+
kfree(mrange_info->mem_ranges);
- mrange_info->mem_ranges = NULL;
- mrange_info->mem_ranges_sz = 0;
- mrange_info->max_mem_ranges = 0;
+ memset((void *)((u64)mrange_info + RNG_NAME_SZ), 0,
+ (sizeof(struct fadump_mrange_info) - RNG_NAME_SZ));
}
/*
@@ -759,7 +873,6 @@ static int fadump_alloc_mem_ranges(struct fadump_mrange_info *mrange_info)
sizeof(struct fadump_memory_range));
return 0;
}
-
static inline int fadump_add_mem_range(struct fadump_mrange_info *mrange_info,
u64 base, u64 end)
{
@@ -778,7 +891,12 @@ static inline int fadump_add_mem_range(struct fadump_mrange_info *mrange_info,
start = mem_ranges[mrange_info->mem_range_cnt - 1].base;
size = mem_ranges[mrange_info->mem_range_cnt - 1].size;
- if ((start + size) == base)
+ /*
+ * Boot memory area needs separate PT_LOAD segment(s) as it
+ * is moved to a different location at the time of crash.
+ * So, fold only if the region is not boot memory area.
+ */
+ if ((start + size) == base && start >= fw_dump.boot_mem_top)
is_adjacent = true;
}
if (!is_adjacent) {
@@ -786,6 +904,12 @@ static inline int fadump_add_mem_range(struct fadump_mrange_info *mrange_info,
if (mrange_info->mem_range_cnt == mrange_info->max_mem_ranges) {
int ret;
+ if (mrange_info->is_static) {
+ pr_err("Reached array size limit for %s memory ranges\n",
+ mrange_info->name);
+ return -ENOSPC;
+ }
+
ret = fadump_alloc_mem_ranges(mrange_info);
if (ret)
return ret;
@@ -854,11 +978,14 @@ static int fadump_init_elfcore_header(char *bufp)
elf->e_entry = 0;
elf->e_phoff = sizeof(struct elfhdr);
elf->e_shoff = 0;
-#if defined(_CALL_ELF)
- elf->e_flags = _CALL_ELF;
-#else
- elf->e_flags = 0;
-#endif
+
+ if (IS_ENABLED(CONFIG_PPC64_ELF_ABI_V2))
+ elf->e_flags = 2;
+ else if (IS_ENABLED(CONFIG_PPC64_ELF_ABI_V1))
+ elf->e_flags = 1;
+ else
+ elf->e_flags = 0;
+
elf->e_ehsize = sizeof(struct elfhdr);
elf->e_phentsize = sizeof(struct elf_phdr);
elf->e_phnum = 0;
@@ -875,9 +1002,8 @@ static int fadump_init_elfcore_header(char *bufp)
*/
static int fadump_setup_crash_memory_ranges(void)
{
- struct memblock_region *reg;
- u64 start, end;
- int i, ret;
+ u64 i, start, end;
+ int ret;
pr_debug("Setup crash memory ranges.\n");
crash_mrange_info.mem_range_cnt = 0;
@@ -895,10 +1021,7 @@ static int fadump_setup_crash_memory_ranges(void)
return ret;
}
- for_each_memblock(memory, reg) {
- start = (u64)reg->base;
- end = start + (u64)reg->size;
-
+ for_each_mem_range(i, &start, &end) {
/*
* skip the memory chunk that is already added
* (0 through boot_memory_top).
@@ -1054,6 +1177,11 @@ static unsigned long init_fadump_header(unsigned long addr)
fdh->elfcorehdr_addr = addr;
/* We will set the crashing cpu id in crash_fadump() during crash. */
fdh->crashing_cpu = FADUMP_CPU_UNKNOWN;
+ /*
+	 * When the LPAR is terminated by PHYP, ensure all possible CPUs'
+ * register data is processed while exporting the vmcore.
+ */
+ fdh->cpu_mask = *cpu_possible_mask;
return addr;
}
@@ -1132,14 +1260,17 @@ static void fadump_free_reserved_memory(unsigned long start_pfn,
*/
static void fadump_release_reserved_area(u64 start, u64 end)
{
+ unsigned long reg_spfn, reg_epfn;
u64 tstart, tend, spfn, epfn;
- struct memblock_region *reg;
+ int i;
spfn = PHYS_PFN(start);
epfn = PHYS_PFN(end);
- for_each_memblock(memory, reg) {
- tstart = max_t(u64, spfn, memblock_region_memory_base_pfn(reg));
- tend = min_t(u64, epfn, memblock_region_memory_end_pfn(reg));
+
+ for_each_mem_pfn_range(i, MAX_NUMNODES, &reg_spfn, &reg_epfn, NULL) {
+ tstart = max_t(u64, spfn, reg_spfn);
+ tend = min_t(u64, epfn, reg_epfn);
+
if (tstart < tend) {
fadump_free_reserved_memory(tstart, tend);
@@ -1158,7 +1289,6 @@ static void fadump_release_reserved_area(u64 start, u64 end)
static void sort_and_merge_mem_ranges(struct fadump_mrange_info *mrange_info)
{
struct fadump_memory_range *mem_ranges;
- struct fadump_memory_range tmp_range;
u64 base, size;
int i, j, idx;
@@ -1173,11 +1303,8 @@ static void sort_and_merge_mem_ranges(struct fadump_mrange_info *mrange_info)
if (mem_ranges[idx].base > mem_ranges[j].base)
idx = j;
}
- if (idx != i) {
- tmp_range = mem_ranges[idx];
- mem_ranges[idx] = mem_ranges[i];
- mem_ranges[i] = tmp_range;
- }
+ if (idx != i)
+ swap(mem_ranges[idx], mem_ranges[i]);
}
/* Merge adjacent reserved ranges */
@@ -1202,20 +1329,19 @@ static void sort_and_merge_mem_ranges(struct fadump_mrange_info *mrange_info)
* Scan reserved-ranges to consider them while reserving/releasing
* memory for FADump.
*/
-static inline int fadump_scan_reserved_mem_ranges(void)
+static void __init early_init_dt_scan_reserved_ranges(unsigned long node)
{
- struct device_node *root;
const __be32 *prop;
int len, ret = -1;
unsigned long i;
- root = of_find_node_by_path("/");
- if (!root)
- return ret;
+ /* reserved-ranges already scanned */
+ if (reserved_mrange_info.mem_range_cnt != 0)
+ return;
- prop = of_get_property(root, "reserved-ranges", &len);
+ prop = of_get_flat_dt_prop(node, "reserved-ranges", &len);
if (!prop)
- return ret;
+ return;
/*
* Each reserved range is an (address,size) pair, 2 cells each,
@@ -1237,7 +1363,8 @@ static inline int fadump_scan_reserved_mem_ranges(void)
}
}
- return ret;
+ /* Compact reserved ranges */
+ sort_and_merge_mem_ranges(&reserved_mrange_info);
}
/*
@@ -1251,32 +1378,21 @@ static void fadump_release_memory(u64 begin, u64 end)
u64 ra_start, ra_end, tstart;
int i, ret;
- fadump_scan_reserved_mem_ranges();
-
ra_start = fw_dump.reserve_dump_area_start;
ra_end = ra_start + fw_dump.reserve_dump_area_size;
/*
- * Add reserved dump area to reserved ranges list
- * and exclude all these ranges while releasing memory.
+ * If reserved ranges array limit is hit, overwrite the last reserved
+ * memory range with reserved dump area to ensure it is excluded from
+ * the memory being released (reused for next FADump registration).
*/
- ret = fadump_add_mem_range(&reserved_mrange_info, ra_start, ra_end);
- if (ret != 0) {
- /*
- * Not enough memory to setup reserved ranges but the system is
- * running shortage of memory. So, release all the memory except
- * Reserved dump area (reused for next fadump registration).
- */
- if (begin < ra_end && end > ra_start) {
- if (begin < ra_start)
- fadump_release_reserved_area(begin, ra_start);
- if (end > ra_end)
- fadump_release_reserved_area(ra_end, end);
- } else
- fadump_release_reserved_area(begin, end);
+ if (reserved_mrange_info.mem_range_cnt ==
+ reserved_mrange_info.max_mem_ranges)
+ reserved_mrange_info.mem_range_cnt--;
+ ret = fadump_add_mem_range(&reserved_mrange_info, ra_start, ra_end);
+ if (ret != 0)
return;
- }
/* Get the reserved ranges list in order first. */
sort_and_merge_mem_ranges(&reserved_mrange_info);
@@ -1323,9 +1439,9 @@ static void fadump_invalidate_release_mem(void)
fw_dump.ops->fadump_init_mem_struct(&fw_dump);
}
-static ssize_t fadump_release_memory_store(struct kobject *kobj,
- struct kobj_attribute *attr,
- const char *buf, size_t count)
+static ssize_t release_mem_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
{
int input = -1;
@@ -1350,23 +1466,40 @@ static ssize_t fadump_release_memory_store(struct kobject *kobj,
return count;
}
-static ssize_t fadump_enabled_show(struct kobject *kobj,
- struct kobj_attribute *attr,
- char *buf)
+/* Release the reserved memory and disable the FADump */
+static void __init unregister_fadump(void)
+{
+ fadump_cleanup();
+ fadump_release_memory(fw_dump.reserve_dump_area_start,
+ fw_dump.reserve_dump_area_size);
+ fw_dump.fadump_enabled = 0;
+ kobject_put(fadump_kobj);
+}
+
+static ssize_t enabled_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
{
return sprintf(buf, "%d\n", fw_dump.fadump_enabled);
}
-static ssize_t fadump_register_show(struct kobject *kobj,
- struct kobj_attribute *attr,
- char *buf)
+static ssize_t mem_reserved_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%ld\n", fw_dump.reserve_dump_area_size);
+}
+
+static ssize_t registered_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
{
return sprintf(buf, "%d\n", fw_dump.dump_registered);
}
-static ssize_t fadump_register_store(struct kobject *kobj,
- struct kobj_attribute *attr,
- const char *buf, size_t count)
+static ssize_t registered_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
{
int ret = 0;
int input = -1;
@@ -1418,45 +1551,82 @@ static int fadump_region_show(struct seq_file *m, void *private)
return 0;
}
-static struct kobj_attribute fadump_release_attr = __ATTR(fadump_release_mem,
- 0200, NULL,
- fadump_release_memory_store);
-static struct kobj_attribute fadump_attr = __ATTR(fadump_enabled,
- 0444, fadump_enabled_show,
- NULL);
-static struct kobj_attribute fadump_register_attr = __ATTR(fadump_registered,
- 0644, fadump_register_show,
- fadump_register_store);
+static struct kobj_attribute release_attr = __ATTR_WO(release_mem);
+static struct kobj_attribute enable_attr = __ATTR_RO(enabled);
+static struct kobj_attribute register_attr = __ATTR_RW(registered);
+static struct kobj_attribute mem_reserved_attr = __ATTR_RO(mem_reserved);
+
+static struct attribute *fadump_attrs[] = {
+ &enable_attr.attr,
+ &register_attr.attr,
+ &mem_reserved_attr.attr,
+ NULL,
+};
+
+ATTRIBUTE_GROUPS(fadump);
DEFINE_SHOW_ATTRIBUTE(fadump_region);
-static void fadump_init_files(void)
+static void __init fadump_init_files(void)
{
- struct dentry *debugfs_file;
int rc = 0;
- rc = sysfs_create_file(kernel_kobj, &fadump_attr.attr);
- if (rc)
- printk(KERN_ERR "fadump: unable to create sysfs file"
- " fadump_enabled (%d)\n", rc);
+ fadump_kobj = kobject_create_and_add("fadump", kernel_kobj);
+ if (!fadump_kobj) {
+ pr_err("failed to create fadump kobject\n");
+ return;
+ }
+
+ debugfs_create_file("fadump_region", 0444, arch_debugfs_dir, NULL,
+ &fadump_region_fops);
- rc = sysfs_create_file(kernel_kobj, &fadump_register_attr.attr);
- if (rc)
- printk(KERN_ERR "fadump: unable to create sysfs file"
- " fadump_registered (%d)\n", rc);
+ if (fw_dump.dump_active) {
+ rc = sysfs_create_file(fadump_kobj, &release_attr.attr);
+ if (rc)
+ pr_err("unable to create release_mem sysfs file (%d)\n",
+ rc);
+ }
- debugfs_file = debugfs_create_file("fadump_region", 0444,
- powerpc_debugfs_root, NULL,
- &fadump_region_fops);
- if (!debugfs_file)
- printk(KERN_ERR "fadump: unable to create debugfs file"
- " fadump_region\n");
+ rc = sysfs_create_groups(fadump_kobj, fadump_groups);
+ if (rc) {
+ pr_err("sysfs group creation failed (%d), unregistering FADump",
+ rc);
+ unregister_fadump();
+ return;
+ }
+
+ /*
+	 * The FADump sysfs files have moved from kernel_kobj to fadump_kobj, so
+	 * create symlinks at the old location to maintain backward compatibility:
+ *
+ * - fadump_enabled -> fadump/enabled
+ * - fadump_registered -> fadump/registered
+ * - fadump_release_mem -> fadump/release_mem
+ */
+ rc = compat_only_sysfs_link_entry_to_kobj(kernel_kobj, fadump_kobj,
+ "enabled", "fadump_enabled");
+ if (rc) {
+ pr_err("unable to create fadump_enabled symlink (%d)", rc);
+ return;
+ }
+
+ rc = compat_only_sysfs_link_entry_to_kobj(kernel_kobj, fadump_kobj,
+ "registered",
+ "fadump_registered");
+ if (rc) {
+ pr_err("unable to create fadump_registered symlink (%d)", rc);
+ sysfs_remove_link(kernel_kobj, "fadump_enabled");
+ return;
+ }
if (fw_dump.dump_active) {
- rc = sysfs_create_file(kernel_kobj, &fadump_release_attr.attr);
+ rc = compat_only_sysfs_link_entry_to_kobj(kernel_kobj,
+ fadump_kobj,
+ "release_mem",
+ "fadump_release_mem");
if (rc)
- printk(KERN_ERR "fadump: unable to create sysfs file"
- " fadump_release_mem (%d)\n", rc);
+ pr_err("unable to create fadump_release_mem symlink (%d)",
+ rc);
}
return;
}
@@ -1487,13 +1657,28 @@ int __init setup_fadump(void)
if (fw_dump.ops->fadump_process(&fw_dump) < 0)
fadump_invalidate_release_mem();
}
- /* Initialize the kernel dump memory structure for FAD registration. */
- else if (fw_dump.reserve_dump_area_size)
+ /* Initialize the kernel dump memory structure and register with f/w */
+ else if (fw_dump.reserve_dump_area_size) {
fw_dump.ops->fadump_init_mem_struct(&fw_dump);
+ register_fadump();
+ }
+
+ /*
+	 * In case of panic, fadump is triggered via the ppc_panic_event()
+	 * panic notifier. Setting crash_kexec_post_notifiers to 'true'
+	 * lets the panic() function take the crash-friendly path before
+	 * panic notifiers are invoked.
+ */
+ crash_kexec_post_notifiers = true;
return 1;
}
-subsys_initcall(setup_fadump);
+/*
+ * Use subsys_initcall_sync() here because of a dependency on
+ * crash_save_vmcoreinfo_init(), which must run first so that vmcoreinfo
+ * initialization is done before registering with f/w.
+ */
+subsys_initcall_sync(setup_fadump);
#else /* !CONFIG_PRESERVE_FA_DUMP */
/* Scan the Firmware Assisted dump configuration details. */
@@ -1531,12 +1716,10 @@ int __init fadump_reserve_mem(void)
/* Preserve everything above the base address */
static void __init fadump_reserve_crash_area(u64 base)
{
- struct memblock_region *reg;
- u64 mstart, msize;
+ u64 i, mstart, mend, msize;
- for_each_memblock(memory, reg) {
- mstart = reg->base;
- msize = reg->size;
+ for_each_mem_range(i, &mstart, &mend) {
+ msize = mend - mstart;
if ((mstart + msize) < base)
continue;
diff --git a/arch/powerpc/kernel/firmware.c b/arch/powerpc/kernel/firmware.c
index cc4a5e3f51f1..20328f72f9f2 100644
--- a/arch/powerpc/kernel/firmware.c
+++ b/arch/powerpc/kernel/firmware.c
@@ -11,8 +11,31 @@
#include <linux/export.h>
#include <linux/cache.h>
+#include <linux/of.h>
#include <asm/firmware.h>
+#include <asm/kvm_guest.h>
+#ifdef CONFIG_PPC64
unsigned long powerpc_firmware_features __read_mostly;
EXPORT_SYMBOL_GPL(powerpc_firmware_features);
+#endif
+
+#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_KVM_GUEST)
+DEFINE_STATIC_KEY_FALSE(kvm_guest);
+int __init check_kvm_guest(void)
+{
+ struct device_node *hyper_node;
+
+ hyper_node = of_find_node_by_path("/hypervisor");
+ if (!hyper_node)
+ return 0;
+
+ if (of_device_is_compatible(hyper_node, "linux,kvm"))
+ static_branch_enable(&kvm_guest);
+
+ of_node_put(hyper_node);
+ return 0;
+}
+core_initcall(check_kvm_guest); // before kvm_guest_init()
+#endif
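Callers are expected to test the resulting static key cheaply; a sketch of the wrapper assumed to live in asm/kvm_guest.h (not shown in this hunk):

	static inline bool is_kvm_guest(void)
	{
		return static_branch_unlikely(&kvm_guest);
	}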
diff --git a/arch/powerpc/kernel/fpu.S b/arch/powerpc/kernel/fpu.S
index 3235a8da6af7..f71f2bbd4de6 100644
--- a/arch/powerpc/kernel/fpu.S
+++ b/arch/powerpc/kernel/fpu.S
@@ -12,7 +12,6 @@
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/mmu.h>
-#include <asm/pgtable.h>
#include <asm/cputable.h>
#include <asm/cache.h>
#include <asm/thread_info.h>
@@ -82,21 +81,22 @@ EXPORT_SYMBOL(store_fp_state)
*/
_GLOBAL(load_up_fpu)
mfmsr r5
+#ifdef CONFIG_PPC_BOOK3S_64
+ /* interrupt doesn't set MSR[RI] and HPT can fault on current access */
+ ori r5,r5,MSR_FP|MSR_RI
+#else
ori r5,r5,MSR_FP
+#endif
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
oris r5,r5,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
- SYNC
MTMSRD(r5) /* enable use of fpu now */
isync
/* enable use of FP after return */
#ifdef CONFIG_PPC32
- mfspr r5,SPRN_SPRG_THREAD /* current task's THREAD (phys) */
-#ifdef CONFIG_VMAP_STACK
- tovirt(r5, r5)
-#endif
+ addi r5,r2,THREAD
lwz r4,THREAD_FPEXC_MODE(r5)
ori r9,r9,MSR_FP /* enable FP for current */
or r9,r9,r4
@@ -107,10 +107,12 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
ori r12,r12,MSR_FP
or r12,r12,r4
std r12,_MSR(r1)
+#ifdef CONFIG_PPC_BOOK3S_64
+ li r4,0
+ stb r4,PACASRR_VALID(r13)
#endif
- /* Don't care if r4 overflows, this is desired behaviour */
- lbz r4,THREAD_LOAD_FP(r5)
- addi r4,r4,1
+#endif
+ li r4,1
stb r4,THREAD_LOAD_FP(r5)
addi r10,r5,THREAD_FPSTATE
lfd fr0,FPSTATE_FPSCR(r10)
@@ -119,6 +121,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
/* restore registers and return */
/* we haven't used ctr or xer or lr */
blr
+_ASM_NOKPROBE_SYMBOL(load_up_fpu)
/*
* save_fpu(tsk)
@@ -136,18 +139,3 @@ _GLOBAL(save_fpu)
mffs fr0
stfd fr0,FPSTATE_FPSCR(r6)
blr
-
-/*
- * These are used in the alignment trap handler when emulating
- * single-precision loads and stores.
- */
-
-_GLOBAL(cvt_fd)
- lfs 0,0(r3)
- stfd 0,0(r4)
- blr
-
-_GLOBAL(cvt_df)
- lfd 0,0(r3)
- stfs 0,0(r4)
- blr
diff --git a/arch/powerpc/kernel/head_32.h b/arch/powerpc/kernel/head_32.h
index 9db162f79fe6..c3286260a7d1 100644
--- a/arch/powerpc/kernel/head_32.h
+++ b/arch/powerpc/kernel/head_32.h
@@ -10,84 +10,82 @@
* We assume sprg3 has the physical address of the current
* task's thread_struct.
*/
-.macro EXCEPTION_PROLOG handle_dar_dsisr=0
+.macro EXCEPTION_PROLOG trapno name handle_dar_dsisr=0
EXCEPTION_PROLOG_0 handle_dar_dsisr=\handle_dar_dsisr
EXCEPTION_PROLOG_1
- EXCEPTION_PROLOG_2 handle_dar_dsisr=\handle_dar_dsisr
+ EXCEPTION_PROLOG_2 \trapno \name handle_dar_dsisr=\handle_dar_dsisr
.endm
.macro EXCEPTION_PROLOG_0 handle_dar_dsisr=0
mtspr SPRN_SPRG_SCRATCH0,r10
mtspr SPRN_SPRG_SCRATCH1,r11
-#ifdef CONFIG_VMAP_STACK
mfspr r10, SPRN_SPRG_THREAD
.if \handle_dar_dsisr
+#ifdef CONFIG_40x
+ mfspr r11, SPRN_DEAR
+#else
mfspr r11, SPRN_DAR
+#endif
stw r11, DAR(r10)
+#ifdef CONFIG_40x
+ mfspr r11, SPRN_ESR
+#else
mfspr r11, SPRN_DSISR
+#endif
stw r11, DSISR(r10)
.endif
mfspr r11, SPRN_SRR0
stw r11, SRR0(r10)
-#endif
mfspr r11, SPRN_SRR1 /* check whether user or kernel */
-#ifdef CONFIG_VMAP_STACK
stw r11, SRR1(r10)
-#endif
mfcr r10
andi. r11, r11, MSR_PR
.endm
-.macro EXCEPTION_PROLOG_1 for_rtas=0
-#ifdef CONFIG_VMAP_STACK
- .ifeq \for_rtas
- li r11, MSR_KERNEL & ~(MSR_IR | MSR_RI) /* can take DTLB miss */
- mtmsr r11
- isync
- .endif
- subi r11, r1, INT_FRAME_SIZE /* use r1 if kernel */
-#else
- tophys(r11,r1) /* use tophys(r1) if kernel */
- subi r11, r11, INT_FRAME_SIZE /* alloc exc. frame */
-#endif
+.macro EXCEPTION_PROLOG_1
+ mtspr SPRN_SPRG_SCRATCH2,r1
+ subi r1, r1, INT_FRAME_SIZE /* use r1 if kernel */
beq 1f
- mfspr r11,SPRN_SPRG_THREAD
- tovirt_vmstack r11, r11
- lwz r11,TASK_STACK-THREAD(r11)
- addi r11, r11, THREAD_SIZE - INT_FRAME_SIZE
- tophys_novmstack r11, r11
+ mfspr r1,SPRN_SPRG_THREAD
+ lwz r1,TASK_STACK-THREAD(r1)
+ addi r1, r1, THREAD_SIZE - INT_FRAME_SIZE
1:
#ifdef CONFIG_VMAP_STACK
- mtcrf 0x7f, r11
- bt 32 - THREAD_ALIGN_SHIFT, stack_overflow
+ mtcrf 0x3f, r1
+ bt 32 - THREAD_ALIGN_SHIFT, vmap_stack_overflow
#endif
.endm
-.macro EXCEPTION_PROLOG_2 handle_dar_dsisr=0
-#if defined(CONFIG_VMAP_STACK) && defined(CONFIG_PPC_BOOK3S)
-BEGIN_MMU_FTR_SECTION
- mtcr r10
-FTR_SECTION_ELSE
- stw r10, _CCR(r11)
-ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_HPTE_TABLE)
-#else
- stw r10,_CCR(r11) /* save registers */
+.macro EXCEPTION_PROLOG_2 trapno name handle_dar_dsisr=0
+#ifdef CONFIG_PPC_8xx
+ .if \handle_dar_dsisr
+ li r11, RPN_PATTERN
+ mtspr SPRN_DAR, r11 /* Tag DAR, to be used in DTLB Error */
+ .endif
#endif
- mfspr r10, SPRN_SPRG_SCRATCH0
+ LOAD_REG_IMMEDIATE(r11, MSR_KERNEL & ~MSR_RI) /* re-enable MMU */
+ mtspr SPRN_SRR1, r11
+ lis r11, 1f@h
+ ori r11, r11, 1f@l
+ mtspr SPRN_SRR0, r11
+ mfspr r11, SPRN_SPRG_SCRATCH2
+ rfi
+
+ .text
+\name\()_virt:
+1:
+ stw r11,GPR1(r1)
+ stw r11,0(r1)
+ mr r11, r1
+ stw r10,_CCR(r11) /* save registers */
stw r12,GPR12(r11)
stw r9,GPR9(r11)
- stw r10,GPR10(r11)
-#if defined(CONFIG_VMAP_STACK) && defined(CONFIG_PPC_BOOK3S)
-BEGIN_MMU_FTR_SECTION
- mfcr r10
- stw r10, _CCR(r11)
-END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
-#endif
+ mfspr r10,SPRN_SPRG_SCRATCH0
mfspr r12,SPRN_SPRG_SCRATCH1
+ stw r10,GPR10(r11)
stw r12,GPR11(r11)
mflr r10
stw r10,_LINK(r11)
-#ifdef CONFIG_VMAP_STACK
mfspr r12, SPRN_SPRG_THREAD
tovirt(r12, r12)
.if \handle_dar_dsisr
@@ -97,185 +95,76 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
stw r10, _DSISR(r11)
.endif
lwz r9, SRR1(r12)
-#if defined(CONFIG_VMAP_STACK) && defined(CONFIG_PPC_BOOK3S)
-BEGIN_MMU_FTR_SECTION
- andi. r10, r9, MSR_PR
-END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
-#endif
lwz r12, SRR0(r12)
-#else
- mfspr r12,SPRN_SRR0
- mfspr r9,SPRN_SRR1
-#endif
- stw r1,GPR1(r11)
- stw r1,0(r11)
- tovirt_novmstack r1, r11 /* set new kernel sp */
#ifdef CONFIG_40x
rlwinm r9,r9,0,14,12 /* clear MSR_WE (necessary?) */
+#elif defined(CONFIG_PPC_8xx)
+ mtspr SPRN_EID, r2 /* Set MSR_RI */
#else
-#ifdef CONFIG_VMAP_STACK
- li r10, MSR_KERNEL & ~MSR_IR /* can take exceptions */
-#else
- li r10,MSR_KERNEL & ~(MSR_IR|MSR_DR) /* can take exceptions */
-#endif
+ li r10, MSR_KERNEL /* can take exceptions */
mtmsr r10 /* (except for mach check in rtas) */
#endif
- stw r0,GPR0(r11)
- lis r10,STACK_FRAME_REGS_MARKER@ha /* exception frame marker */
- addi r10,r10,STACK_FRAME_REGS_MARKER@l
- stw r10,8(r11)
- SAVE_4GPRS(3, r11)
- SAVE_2GPRS(7, r11)
+ COMMON_EXCEPTION_PROLOG_END \trapno
+_ASM_NOKPROBE_SYMBOL(\name\()_virt)
.endm
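The key change in EXCEPTION_PROLOG_2 is that the prolog no longer builds the frame in real mode: it loads SRR1 with MSR_KERNEL & ~MSR_RI and SRR0 with the virtual address of the local 1: label, and a single rfi both re-enables translation and lands on the \name\()_virt continuation. A hedged C model of what that rfi does (illustrative only):

	struct cpu_state { unsigned long pc, msr; };

	/* rfi loads MSR from SRR1 and the next PC from SRR0 in one step, so
	 * there is no window where the PC is virtual but translation is off. */
	static void rfi_model(struct cpu_state *cpu,
			      unsigned long srr0, unsigned long srr1)
	{
		cpu->msr = srr1;	/* MSR_KERNEL & ~MSR_RI here */
		cpu->pc  = srr0;	/* virtual address of the 1:/_virt label */
	}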
-.macro SYSCALL_ENTRY trapno
- mfspr r12,SPRN_SPRG_THREAD
-#ifdef CONFIG_VMAP_STACK
- mfspr r9, SPRN_SRR0
- mfspr r11, SPRN_SRR1
- stw r9, SRR0(r12)
- stw r11, SRR1(r12)
-#endif
- mfcr r10
- lwz r11,TASK_STACK-THREAD(r12)
- rlwinm r10,r10,0,4,2 /* Clear SO bit in CR */
- addi r11, r11, THREAD_SIZE - INT_FRAME_SIZE
-#ifdef CONFIG_VMAP_STACK
- li r9, MSR_KERNEL & ~(MSR_IR | MSR_RI) /* can take DTLB miss */
- mtmsr r9
- isync
-#endif
- tovirt_vmstack r12, r12
- tophys_novmstack r11, r11
- mflr r9
- stw r10,_CCR(r11) /* save registers */
- stw r9, _LINK(r11)
-#ifdef CONFIG_VMAP_STACK
- lwz r10, SRR0(r12)
- lwz r9, SRR1(r12)
-#else
- mfspr r10,SPRN_SRR0
- mfspr r9,SPRN_SRR1
-#endif
- stw r1,GPR1(r11)
- stw r1,0(r11)
- tovirt_novmstack r1, r11 /* set new kernel sp */
- stw r10,_NIP(r11)
-#ifdef CONFIG_40x
- rlwinm r9,r9,0,14,12 /* clear MSR_WE (necessary?) */
-#else
-#ifdef CONFIG_VMAP_STACK
- LOAD_REG_IMMEDIATE(r10, MSR_KERNEL & ~MSR_IR) /* can take exceptions */
-#else
- LOAD_REG_IMMEDIATE(r10, MSR_KERNEL & ~(MSR_IR|MSR_DR)) /* can take exceptions */
-#endif
- mtmsr r10 /* (except for mach check in rtas) */
-#endif
+.macro COMMON_EXCEPTION_PROLOG_END trapno
+ stw r0,GPR0(r1)
lis r10,STACK_FRAME_REGS_MARKER@ha /* exception frame marker */
- stw r2,GPR2(r11)
addi r10,r10,STACK_FRAME_REGS_MARKER@l
- stw r9,_MSR(r11)
- li r2, \trapno + 1
- stw r10,8(r11)
- stw r2,_TRAP(r11)
- SAVE_GPR(0, r11)
- SAVE_4GPRS(3, r11)
- SAVE_2GPRS(7, r11)
- addi r11,r1,STACK_FRAME_OVERHEAD
- addi r2,r12,-THREAD
- stw r11,PT_REGS(r12)
-#if defined(CONFIG_40x)
- /* Check to see if the dbcr0 register is set up to debug. Use the
- internal debug mode bit to do this. */
- lwz r12,THREAD_DBCR0(r12)
- andis. r12,r12,DBCR0_IDM@h
-#endif
- ACCOUNT_CPU_USER_ENTRY(r2, r11, r12)
-#if defined(CONFIG_40x)
- beq+ 3f
- /* From user and task is ptraced - load up global dbcr0 */
- li r12,-1 /* clear all pending debug events */
- mtspr SPRN_DBSR,r12
- lis r11,global_dbcr0@ha
- tophys(r11,r11)
- addi r11,r11,global_dbcr0@l
- lwz r12,0(r11)
- mtspr SPRN_DBCR0,r12
- lwz r12,4(r11)
- addi r12,r12,-1
- stw r12,4(r11)
-#endif
-
-3:
- tovirt_novmstack r2, r2 /* set r2 to current */
- lis r11, transfer_to_syscall@h
- ori r11, r11, transfer_to_syscall@l
-#ifdef CONFIG_TRACE_IRQFLAGS
- /*
- * If MSR is changing we need to keep interrupts disabled at this point
- * otherwise we might risk taking an interrupt before we tell lockdep
- * they are enabled.
- */
- LOAD_REG_IMMEDIATE(r10, MSR_KERNEL)
- rlwimi r10, r9, 0, MSR_EE
-#else
- LOAD_REG_IMMEDIATE(r10, MSR_KERNEL | MSR_EE)
-#endif
-#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
- mtspr SPRN_NRI, r0
-#endif
- mtspr SPRN_SRR1,r10
- mtspr SPRN_SRR0,r11
- SYNC
- RFI /* jump to handler, enable MMU */
-.endm
-
-.macro save_dar_dsisr_on_stack reg1, reg2, sp
-#ifndef CONFIG_VMAP_STACK
- mfspr \reg1, SPRN_DAR
- mfspr \reg2, SPRN_DSISR
- stw \reg1, _DAR(\sp)
- stw \reg2, _DSISR(\sp)
-#endif
-.endm
-
-.macro get_and_save_dar_dsisr_on_stack reg1, reg2, sp
-#ifdef CONFIG_VMAP_STACK
- lwz \reg1, _DAR(\sp)
- lwz \reg2, _DSISR(\sp)
-#else
- save_dar_dsisr_on_stack \reg1, \reg2, \sp
-#endif
+ stw r10,8(r1)
+ li r10, \trapno
+ stw r10,_TRAP(r1)
+ SAVE_GPRS(3, 8, r1)
+ SAVE_NVGPRS(r1)
+ stw r2,GPR2(r1)
+ stw r12,_NIP(r1)
+ stw r9,_MSR(r1)
+ mfctr r10
+ mfspr r2,SPRN_SPRG_THREAD
+ stw r10,_CTR(r1)
+ tovirt(r2, r2)
+ mfspr r10,SPRN_XER
+ addi r2, r2, -THREAD
+ stw r10,_XER(r1)
+ addi r3,r1,STACK_FRAME_OVERHEAD
.endm
-.macro tovirt_vmstack dst, src
-#ifdef CONFIG_VMAP_STACK
- tovirt(\dst, \src)
-#else
- .ifnc \dst, \src
- mr \dst, \src
- .endif
+.macro prepare_transfer_to_handler
+#ifdef CONFIG_PPC_BOOK3S_32
+ andi. r12,r9,MSR_PR
+ bne 777f
+ bl prepare_transfer_to_handler
+#ifdef CONFIG_PPC_KUEP
+ b 778f
+777:
+ bl __kuep_lock
+778:
#endif
-.endm
-
-.macro tovirt_novmstack dst, src
-#ifndef CONFIG_VMAP_STACK
- tovirt(\dst, \src)
-#else
- .ifnc \dst, \src
- mr \dst, \src
- .endif
+777:
#endif
.endm
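prepare_transfer_to_handler splits on MSR_PR in the saved SRR1: interrupts taken from kernel mode go through the prepare_transfer_to_handler helper, interrupts from user space only need to re-lock KUEP. A rough C equivalent of the branch structure (a sketch; the helper names below are stand-ins for the assembly routines called above):

	#define MSR_PR 0x4000				/* problem state bit */

	extern void prepare_transfer_to_handler_helper(void);	/* stand-in name */
	extern void __kuep_lock(void);

	static void prepare_transfer(unsigned long srr1, int kuep_enabled)
	{
		if (!(srr1 & MSR_PR))
			prepare_transfer_to_handler_helper();	/* from kernel */
		else if (kuep_enabled)
			__kuep_lock();				/* from user */
	}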
-.macro tophys_novmstack dst, src
-#ifndef CONFIG_VMAP_STACK
- tophys(\dst, \src)
-#else
- .ifnc \dst, \src
- mr \dst, \src
- .endif
-#endif
+.macro SYSCALL_ENTRY trapno
+ mfspr r9, SPRN_SRR1
+ mfspr r12, SPRN_SRR0
+ LOAD_REG_IMMEDIATE(r11, MSR_KERNEL) /* can take exceptions */
+ lis r10, 1f@h
+ ori r10, r10, 1f@l
+ mtspr SPRN_SRR1, r11
+ mtspr SPRN_SRR0, r10
+ mfspr r10,SPRN_SPRG_THREAD
+ mr r11, r1
+ lwz r1,TASK_STACK-THREAD(r10)
+ tovirt(r10, r10)
+ addi r1, r1, THREAD_SIZE - INT_FRAME_SIZE
+ rfi
+1:
+ stw r12,_NIP(r1)
+ mfcr r12
+ rlwinm r12,r12,0,4,2 /* Clear SO bit in CR */
+ stw r12,_CCR(r1)
+ b transfer_to_syscall /* jump to handler */
.endm
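SYSCALL_ENTRY uses the same rfi trick: SRR0/SRR1 are kept in r12/r9, the MMU is re-enabled, the stack pointer is switched to the task's kernel stack, and then NIP plus a CR image with CR0[SO] cleared are stored before branching to transfer_to_syscall. The rlwinm mask written out in C (a sketch):

	/* rlwinm r12,r12,0,4,2 keeps every CR bit except bit 3 (IBM numbering),
	 * i.e. it clears CR0[SO] so the syscall starts out flagged as successful. */
	static unsigned long syscall_entry_ccr(unsigned long cr)
	{
		return cr & ~0x10000000UL;
	}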
/*
@@ -291,61 +180,43 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
*/
#ifdef CONFIG_PPC_BOOK3S
#define START_EXCEPTION(n, label) \
+ __HEAD; \
. = n; \
DO_KVM n; \
label:
#else
#define START_EXCEPTION(n, label) \
+ __HEAD; \
. = n; \
label:
#endif
-#define EXCEPTION(n, label, hdlr, xfer) \
+#define EXCEPTION(n, label, hdlr) \
START_EXCEPTION(n, label) \
- EXCEPTION_PROLOG; \
- addi r3,r1,STACK_FRAME_OVERHEAD; \
- xfer(n, hdlr)
-
-#define EXC_XFER_TEMPLATE(hdlr, trap, msr, tfer, ret) \
- li r10,trap; \
- stw r10,_TRAP(r11); \
- LOAD_REG_IMMEDIATE(r10, msr); \
- bl tfer; \
- .long hdlr; \
- .long ret
-
-#define EXC_XFER_STD(n, hdlr) \
- EXC_XFER_TEMPLATE(hdlr, n, MSR_KERNEL, transfer_to_handler_full, \
- ret_from_except_full)
-
-#define EXC_XFER_LITE(n, hdlr) \
- EXC_XFER_TEMPLATE(hdlr, n+1, MSR_KERNEL, transfer_to_handler, \
- ret_from_except)
+ EXCEPTION_PROLOG n label; \
+ prepare_transfer_to_handler; \
+ bl hdlr; \
+ b interrupt_return
.macro vmap_stack_overflow_exception
-#ifdef CONFIG_VMAP_STACK
+ __HEAD
+vmap_stack_overflow:
#ifdef CONFIG_SMP
- mfspr r11, SPRN_SPRG_THREAD
- tovirt(r11, r11)
- lwz r11, TASK_CPU - THREAD(r11)
- slwi r11, r11, 3
- addis r11, r11, emergency_ctx@ha
+ mfspr r1, SPRN_SPRG_THREAD
+ lwz r1, TASK_CPU - THREAD(r1)
+ slwi r1, r1, 3
+ addis r1, r1, emergency_ctx-PAGE_OFFSET@ha
#else
- lis r11, emergency_ctx@ha
-#endif
- lwz r11, emergency_ctx@l(r11)
- cmpwi cr1, r11, 0
- bne cr1, 1f
- lis r11, init_thread_union@ha
- addi r11, r11, init_thread_union@l
-1: addi r11, r11, THREAD_SIZE - INT_FRAME_SIZE
- EXCEPTION_PROLOG_2
- SAVE_NVGPRS(r11)
- addi r3, r1, STACK_FRAME_OVERHEAD
- EXC_XFER_STD(0, stack_overflow_exception)
-#endif
+ lis r1, emergency_ctx-PAGE_OFFSET@ha
+#endif
+ lwz r1, emergency_ctx-PAGE_OFFSET@l(r1)
+ addi r1, r1, THREAD_SIZE - INT_FRAME_SIZE
+ EXCEPTION_PROLOG_2 0 vmap_stack_overflow
+ prepare_transfer_to_handler
+ bl stack_overflow_exception
+ b interrupt_return
.endm
#endif /* __HEAD_32_H__ */
diff --git a/arch/powerpc/kernel/head_40x.S b/arch/powerpc/kernel/head_40x.S
index 9bb663977e84..088f500896c7 100644
--- a/arch/powerpc/kernel/head_40x.S
+++ b/arch/powerpc/kernel/head_40x.S
@@ -26,17 +26,17 @@
*/
#include <linux/init.h>
+#include <linux/pgtable.h>
+#include <linux/sizes.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
-#include <asm/pgtable.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/ptrace.h>
#include <asm/export.h>
-#include <asm/asm-405.h>
#include "head_32.h"
@@ -53,8 +53,8 @@
* This is all going to change RSN when we add bi_recs....... -- Dan
*/
__HEAD
-_ENTRY(_stext);
-_ENTRY(_start);
+_GLOBAL(_stext);
+_GLOBAL(_start);
mr r31,r3 /* save device tree ptr */
@@ -73,7 +73,6 @@ turn_on_mmu:
lis r0,start_here@h
ori r0,r0,start_here@l
mtspr SPRN_SRR0,r0
- SYNC
rfi /* enables MMU */
b . /* prevent prefetch past rfi */
@@ -83,15 +82,19 @@ turn_on_mmu:
*/
. = 0xc0
crit_save:
-_ENTRY(crit_r10)
+_GLOBAL(crit_r10)
.space 4
-_ENTRY(crit_r11)
+_GLOBAL(crit_r11)
.space 4
-_ENTRY(crit_srr0)
+_GLOBAL(crit_srr0)
.space 4
-_ENTRY(crit_srr1)
+_GLOBAL(crit_srr1)
.space 4
-_ENTRY(saved_ksp_limit)
+_GLOBAL(crit_r1)
+ .space 4
+_GLOBAL(crit_dear)
+ .space 4
+_GLOBAL(crit_esr)
.space 4
/*
@@ -102,42 +105,62 @@ _ENTRY(saved_ksp_limit)
* Instead we use a couple of words of memory at low physical addresses.
* This is OK since we don't support SMP on these processors.
*/
-#define CRITICAL_EXCEPTION_PROLOG \
- stw r10,crit_r10@l(0); /* save two registers to work with */\
- stw r11,crit_r11@l(0); \
- mfcr r10; /* save CR in r10 for now */\
- mfspr r11,SPRN_SRR3; /* check whether user or kernel */\
- andi. r11,r11,MSR_PR; \
- lis r11,critirq_ctx@ha; \
- tophys(r11,r11); \
- lwz r11,critirq_ctx@l(r11); \
- beq 1f; \
- /* COMING FROM USER MODE */ \
- mfspr r11,SPRN_SPRG_THREAD; /* if from user, start at top of */\
- lwz r11,TASK_STACK-THREAD(r11); /* this thread's kernel stack */\
-1: addi r11,r11,THREAD_SIZE-INT_FRAME_SIZE; /* Alloc an excpt frm */\
- tophys(r11,r11); \
- stw r10,_CCR(r11); /* save various registers */\
- stw r12,GPR12(r11); \
- stw r9,GPR9(r11); \
- mflr r10; \
- stw r10,_LINK(r11); \
- mfspr r12,SPRN_DEAR; /* save DEAR and ESR in the frame */\
- stw r12,_DEAR(r11); /* since they may have had stuff */\
- mfspr r9,SPRN_ESR; /* in them at the point where the */\
- stw r9,_ESR(r11); /* exception was taken */\
- mfspr r12,SPRN_SRR2; \
- stw r1,GPR1(r11); \
- mfspr r9,SPRN_SRR3; \
- stw r1,0(r11); \
- tovirt(r1,r11); \
- rlwinm r9,r9,0,14,12; /* clear MSR_WE (necessary?) */\
- stw r0,GPR0(r11); \
- lis r10, STACK_FRAME_REGS_MARKER@ha; /* exception frame marker */\
- addi r10, r10, STACK_FRAME_REGS_MARKER@l; \
- stw r10, 8(r11); \
- SAVE_4GPRS(3, r11); \
- SAVE_2GPRS(7, r11)
+.macro CRITICAL_EXCEPTION_PROLOG trapno name
+ stw r10,crit_r10@l(0) /* save two registers to work with */
+ stw r11,crit_r11@l(0)
+ mfspr r10,SPRN_SRR0
+ mfspr r11,SPRN_SRR1
+ stw r10,crit_srr0@l(0)
+ stw r11,crit_srr1@l(0)
+ mfspr r10,SPRN_DEAR
+ mfspr r11,SPRN_ESR
+ stw r10,crit_dear@l(0)
+ stw r11,crit_esr@l(0)
+ mfcr r10 /* save CR in r10 for now */
+ mfspr r11,SPRN_SRR3 /* check whether user or kernel */
+ andi. r11,r11,MSR_PR
+ lis r11,(critirq_ctx-PAGE_OFFSET)@ha
+ lwz r11,(critirq_ctx-PAGE_OFFSET)@l(r11)
+ beq 1f
+ /* COMING FROM USER MODE */
+ mfspr r11,SPRN_SPRG_THREAD /* if from user, start at top of */
+ lwz r11,TASK_STACK-THREAD(r11) /* this thread's kernel stack */
+1: stw r1,crit_r1@l(0)
+ addi r1,r11,THREAD_SIZE-INT_FRAME_SIZE /* Alloc an excpt frm */
+ LOAD_REG_IMMEDIATE(r11, MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)) /* re-enable MMU */
+ mtspr SPRN_SRR1, r11
+ lis r11, 1f@h
+ ori r11, r11, 1f@l
+ mtspr SPRN_SRR0, r11
+ rfi
+
+ .text
+1:
+\name\()_virt:
+ lwz r11,crit_r1@l(0)
+ stw r11,GPR1(r1)
+ stw r11,0(r1)
+ mr r11,r1
+ stw r10,_CCR(r11) /* save various registers */
+ stw r12,GPR12(r11)
+ stw r9,GPR9(r11)
+ mflr r10
+ stw r10,_LINK(r11)
+ lis r9,PAGE_OFFSET@ha
+ lwz r10,crit_r10@l(r9)
+ lwz r12,crit_r11@l(r9)
+ stw r10,GPR10(r11)
+ stw r12,GPR11(r11)
+ lwz r12,crit_dear@l(r9)
+ lwz r9,crit_esr@l(r9)
+ stw r12,_DEAR(r11) /* save DEAR and ESR in the frame */
+ stw r9,_ESR(r11) /* as captured at exception entry */
+ mfspr r12,SPRN_SRR2
+ mfspr r9,SPRN_SRR3
+ rlwinm r9,r9,0,14,12 /* clear MSR_WE (necessary?) */
+ COMMON_EXCEPTION_PROLOG_END \trapno + 2
+_ASM_NOKPROBE_SYMBOL(\name\()_virt)
+.endm
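A critical interrupt can arrive while a normal prolog is still using the SPRG scratch registers, so the critical prolog stashes everything it needs in the fixed crit_* words instead; 40x is never SMP, so one static area is enough, as the comment above notes. Viewed as a C struct, the save area is roughly (a sketch mirroring the crit_* layout):

	/* single instance at a fixed low physical address (crit_save at 0xc0) */
	struct crit_save {
		unsigned long r10, r11;		/* two working registers */
		unsigned long srr0, srr1;	/* interrupted PC and MSR */
		unsigned long r1;		/* interrupted stack pointer */
		unsigned long dear, esr;	/* fault address and cause */
	};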
/*
* State at this point:
@@ -157,10 +180,10 @@ _ENTRY(saved_ksp_limit)
*/
#define CRITICAL_EXCEPTION(n, label, hdlr) \
START_EXCEPTION(n, label); \
- CRITICAL_EXCEPTION_PROLOG; \
- addi r3,r1,STACK_FRAME_OVERHEAD; \
- EXC_XFER_TEMPLATE(hdlr, n+2, (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), \
- crit_transfer_to_handler, ret_from_crit_exc)
+ CRITICAL_EXCEPTION_PROLOG n label; \
+ prepare_transfer_to_handler; \
+ bl hdlr; \
+ b ret_from_crit_exc
/*
* 0x0100 - Critical Interrupt Exception
@@ -176,191 +199,71 @@ _ENTRY(saved_ksp_limit)
* 0x0300 - Data Storage Exception
* This happens for just a few reasons. U0 set (but we don't do that),
* or zone protection fault (user violation, write to protected page).
- * If this is just an update of modified status, we do that quickly
- * and exit. Otherwise, we call heavywight functions to do the work.
+ * The other Data TLB exceptions bail out to this point
+ * if they can't resolve the lightweight TLB fault.
*/
START_EXCEPTION(0x0300, DataStorage)
- mtspr SPRN_SPRG_SCRATCH0, r10 /* Save some working registers */
- mtspr SPRN_SPRG_SCRATCH1, r11
-#ifdef CONFIG_403GCX
- stw r12, 0(r0)
- stw r9, 4(r0)
- mfcr r11
- mfspr r12, SPRN_PID
- stw r11, 8(r0)
- stw r12, 12(r0)
-#else
- mtspr SPRN_SPRG_SCRATCH3, r12
- mtspr SPRN_SPRG_SCRATCH4, r9
- mfcr r11
- mfspr r12, SPRN_PID
- mtspr SPRN_SPRG_SCRATCH6, r11
- mtspr SPRN_SPRG_SCRATCH5, r12
-#endif
-
- /* First, check if it was a zone fault (which means a user
- * tried to access a kernel or read-protected page - always
- * a SEGV). All other faults here must be stores, so no
- * need to check ESR_DST as well. */
- mfspr r10, SPRN_ESR
- andis. r10, r10, ESR_DIZ@h
- bne 2f
-
- mfspr r10, SPRN_DEAR /* Get faulting address */
-
- /* If we are faulting a kernel address, we have to use the
- * kernel page tables.
- */
- lis r11, PAGE_OFFSET@h
- cmplw r10, r11
- blt+ 3f
- lis r11, swapper_pg_dir@h
- ori r11, r11, swapper_pg_dir@l
- li r9, 0
- mtspr SPRN_PID, r9 /* TLB will have 0 TID */
- b 4f
-
- /* Get the PGD for the current thread.
- */
-3:
- mfspr r11,SPRN_SPRG_THREAD
- lwz r11,PGDIR(r11)
-4:
- tophys(r11, r11)
- rlwimi r11, r10, 12, 20, 29 /* Create L1 (pgdir/pmd) address */
- lwz r11, 0(r11) /* Get L1 entry */
- rlwinm. r12, r11, 0, 0, 19 /* Extract L2 (pte) base address */
- beq 2f /* Bail if no table */
-
- rlwimi r12, r10, 22, 20, 29 /* Compute PTE address */
- lwz r11, 0(r12) /* Get Linux PTE */
-
- andi. r9, r11, _PAGE_RW /* Is it writeable? */
- beq 2f /* Bail if not */
-
- /* Update 'changed'.
- */
- ori r11, r11, _PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_HWWRITE
- stw r11, 0(r12) /* Update Linux page table */
-
- /* Most of the Linux PTE is ready to load into the TLB LO.
- * We set ZSEL, where only the LS-bit determines user access.
- * We set execute, because we don't have the granularity to
- * properly set this at the page level (Linux problem).
- * If shared is set, we cause a zero PID->TID load.
- * Many of these bits are software only. Bits we don't set
- * here we (properly should) assume have the appropriate value.
- */
- li r12, 0x0ce2
- andc r11, r11, r12 /* Make sure 20, 21 are zero */
-
- /* find the TLB index that caused the fault. It has to be here.
- */
- tlbsx r9, 0, r10
-
- tlbwe r11, r9, TLB_DATA /* Load TLB LO */
-
- /* Done...restore registers and get out of here.
- */
-#ifdef CONFIG_403GCX
- lwz r12, 12(r0)
- lwz r11, 8(r0)
- mtspr SPRN_PID, r12
- mtcr r11
- lwz r9, 4(r0)
- lwz r12, 0(r0)
-#else
- mfspr r12, SPRN_SPRG_SCRATCH5
- mfspr r11, SPRN_SPRG_SCRATCH6
- mtspr SPRN_PID, r12
- mtcr r11
- mfspr r9, SPRN_SPRG_SCRATCH4
- mfspr r12, SPRN_SPRG_SCRATCH3
-#endif
- mfspr r11, SPRN_SPRG_SCRATCH1
- mfspr r10, SPRN_SPRG_SCRATCH0
- PPC405_ERR77_SYNC
- rfi /* Should sync shadow TLBs */
- b . /* prevent prefetch past rfi */
-
-2:
- /* The bailout. Restore registers to pre-exception conditions
- * and call the heavyweights to help us out.
- */
-#ifdef CONFIG_403GCX
- lwz r12, 12(r0)
- lwz r11, 8(r0)
- mtspr SPRN_PID, r12
- mtcr r11
- lwz r9, 4(r0)
- lwz r12, 0(r0)
-#else
- mfspr r12, SPRN_SPRG_SCRATCH5
- mfspr r11, SPRN_SPRG_SCRATCH6
- mtspr SPRN_PID, r12
- mtcr r11
- mfspr r9, SPRN_SPRG_SCRATCH4
- mfspr r12, SPRN_SPRG_SCRATCH3
-#endif
- mfspr r11, SPRN_SPRG_SCRATCH1
- mfspr r10, SPRN_SPRG_SCRATCH0
- b DataAccess
+ EXCEPTION_PROLOG 0x300 DataStorage handle_dar_dsisr=1
+ prepare_transfer_to_handler
+ bl do_page_fault
+ b interrupt_return
/*
* 0x0400 - Instruction Storage Exception
* This is caused by a fetch from non-execute or guarded pages.
*/
START_EXCEPTION(0x0400, InstructionAccess)
- EXCEPTION_PROLOG
- mr r4,r12 /* Pass SRR0 as arg2 */
- stw r4, _DEAR(r11)
- li r5,0 /* Pass zero as arg3 */
- EXC_XFER_LITE(0x400, handle_page_fault)
+ EXCEPTION_PROLOG 0x400 InstructionAccess
+ li r5,0
+ stw r5, _ESR(r11) /* Zero ESR */
+ stw r12, _DEAR(r11) /* SRR0 as DEAR */
+ prepare_transfer_to_handler
+ bl do_page_fault
+ b interrupt_return
/* 0x0500 - External Interrupt Exception */
- EXCEPTION(0x0500, HardwareInterrupt, do_IRQ, EXC_XFER_LITE)
+ EXCEPTION(0x0500, HardwareInterrupt, do_IRQ)
/* 0x0600 - Alignment Exception */
START_EXCEPTION(0x0600, Alignment)
- EXCEPTION_PROLOG
- mfspr r4,SPRN_DEAR /* Grab the DEAR and save it */
- stw r4,_DEAR(r11)
- addi r3,r1,STACK_FRAME_OVERHEAD
- EXC_XFER_STD(0x600, alignment_exception)
+ EXCEPTION_PROLOG 0x600 Alignment handle_dar_dsisr=1
+ prepare_transfer_to_handler
+ bl alignment_exception
+ REST_NVGPRS(r1)
+ b interrupt_return
/* 0x0700 - Program Exception */
START_EXCEPTION(0x0700, ProgramCheck)
- EXCEPTION_PROLOG
- mfspr r4,SPRN_ESR /* Grab the ESR and save it */
- stw r4,_ESR(r11)
- addi r3,r1,STACK_FRAME_OVERHEAD
- EXC_XFER_STD(0x700, program_check_exception)
+ EXCEPTION_PROLOG 0x700 ProgramCheck handle_dar_dsisr=1
+ prepare_transfer_to_handler
+ bl program_check_exception
+ REST_NVGPRS(r1)
+ b interrupt_return
- EXCEPTION(0x0800, Trap_08, unknown_exception, EXC_XFER_STD)
- EXCEPTION(0x0900, Trap_09, unknown_exception, EXC_XFER_STD)
- EXCEPTION(0x0A00, Trap_0A, unknown_exception, EXC_XFER_STD)
- EXCEPTION(0x0B00, Trap_0B, unknown_exception, EXC_XFER_STD)
+ EXCEPTION(0x0800, Trap_08, unknown_exception)
+ EXCEPTION(0x0900, Trap_09, unknown_exception)
+ EXCEPTION(0x0A00, Trap_0A, unknown_exception)
+ EXCEPTION(0x0B00, Trap_0B, unknown_exception)
/* 0x0C00 - System Call Exception */
START_EXCEPTION(0x0C00, SystemCall)
SYSCALL_ENTRY 0xc00
+/* Trap_0D is commented out to get more space for system call exception */
- EXCEPTION(0x0D00, Trap_0D, unknown_exception, EXC_XFER_STD)
- EXCEPTION(0x0E00, Trap_0E, unknown_exception, EXC_XFER_STD)
- EXCEPTION(0x0F00, Trap_0F, unknown_exception, EXC_XFER_STD)
+/* EXCEPTION(0x0D00, Trap_0D, unknown_exception) */
+ EXCEPTION(0x0E00, Trap_0E, unknown_exception)
+ EXCEPTION(0x0F00, Trap_0F, unknown_exception)
/* 0x1000 - Programmable Interval Timer (PIT) Exception */
- . = 0x1000
+ START_EXCEPTION(0x1000, DecrementerTrap)
b Decrementer
-/* 0x1010 - Fixed Interval Timer (FIT) Exception
-*/
- . = 0x1010
+/* 0x1010 - Fixed Interval Timer (FIT) Exception */
+ START_EXCEPTION(0x1010, FITExceptionTrap)
b FITException
-/* 0x1020 - Watchdog Timer (WDT) Exception
-*/
- . = 0x1020
+/* 0x1020 - Watchdog Timer (WDT) Exception */
+ START_EXCEPTION(0x1020, WDTExceptionTrap)
b WDTException
/* 0x1100 - Data TLB Miss Exception
@@ -369,23 +272,13 @@ _ENTRY(saved_ksp_limit)
* load TLB entries from the page table if they exist.
*/
START_EXCEPTION(0x1100, DTLBMiss)
- mtspr SPRN_SPRG_SCRATCH0, r10 /* Save some working registers */
- mtspr SPRN_SPRG_SCRATCH1, r11
-#ifdef CONFIG_403GCX
- stw r12, 0(r0)
- stw r9, 4(r0)
- mfcr r11
- mfspr r12, SPRN_PID
- stw r11, 8(r0)
- stw r12, 12(r0)
-#else
+ mtspr SPRN_SPRG_SCRATCH5, r10 /* Save some working registers */
+ mtspr SPRN_SPRG_SCRATCH6, r11
mtspr SPRN_SPRG_SCRATCH3, r12
mtspr SPRN_SPRG_SCRATCH4, r9
- mfcr r11
- mfspr r12, SPRN_PID
- mtspr SPRN_SPRG_SCRATCH6, r11
- mtspr SPRN_SPRG_SCRATCH5, r12
-#endif
+ mfcr r12
+ mfspr r9, SPRN_PID
+ rlwimi r12, r9, 0, 0xff
mfspr r10, SPRN_DEAR /* Get faulting address */
/* If we are faulting a kernel address, we have to use the
@@ -405,31 +298,37 @@ _ENTRY(saved_ksp_limit)
3:
mfspr r11,SPRN_SPRG_THREAD
lwz r11,PGDIR(r11)
+#ifdef CONFIG_PPC_KUAP
+ rlwinm. r9, r9, 0, 0xff
+ beq 5f /* Kuap fault */
+#endif
4:
tophys(r11, r11)
rlwimi r11, r10, 12, 20, 29 /* Create L1 (pgdir/pmd) address */
- lwz r12, 0(r11) /* Get L1 entry */
- andi. r9, r12, _PMD_PRESENT /* Check if it points to a PTE page */
+ lwz r11, 0(r11) /* Get L1 entry */
+ andi. r9, r11, _PMD_PRESENT /* Check if it points to a PTE page */
beq 2f /* Bail if no table */
- rlwimi r12, r10, 22, 20, 29 /* Compute PTE address */
- lwz r11, 0(r12) /* Get Linux PTE */
- andi. r9, r11, _PAGE_PRESENT
- beq 5f
+ rlwimi r11, r10, 22, 20, 29 /* Compute PTE address */
+ lwz r11, 0(r11) /* Get Linux PTE */
+ li r9, _PAGE_PRESENT | _PAGE_ACCESSED
+ andc. r9, r9, r11 /* Check permission */
+ bne 5f
- ori r11, r11, _PAGE_ACCESSED
- stw r11, 0(r12)
+ rlwinm r9, r11, 1, _PAGE_RW /* dirty => rw */
+ and r9, r9, r11 /* hwwrite = dirty & rw */
+ rlwimi r11, r9, 0, _PAGE_RW /* replace rw by hwwrite */
/* Create TLB tag. This is the faulting address plus a static
* set of bits. These are size, valid, E, U0.
*/
- li r12, 0x00c0
- rlwimi r10, r12, 0, 20, 31
+ li r9, 0x00c0
+ rlwimi r10, r9, 0, 20, 31
b finish_tlb_load
2: /* Check for possible large-page pmd entry */
- rlwinm. r9, r12, 2, 22, 24
+ rlwinm. r9, r11, 2, 22, 24
beq 5f
/* Create TLB tag. This is the faulting address, plus a static
@@ -437,7 +336,6 @@ _ENTRY(saved_ksp_limit)
*/
ori r9, r9, 0x40
rlwimi r10, r9, 0, 20, 31
- mr r11, r12
b finish_tlb_load
@@ -445,47 +343,26 @@ _ENTRY(saved_ksp_limit)
/* The bailout. Restore registers to pre-exception conditions
* and call the heavyweights to help us out.
*/
-#ifdef CONFIG_403GCX
- lwz r12, 12(r0)
- lwz r11, 8(r0)
- mtspr SPRN_PID, r12
- mtcr r11
- lwz r9, 4(r0)
- lwz r12, 0(r0)
-#else
- mfspr r12, SPRN_SPRG_SCRATCH5
- mfspr r11, SPRN_SPRG_SCRATCH6
mtspr SPRN_PID, r12
- mtcr r11
+ mtcrf 0x80, r12
mfspr r9, SPRN_SPRG_SCRATCH4
mfspr r12, SPRN_SPRG_SCRATCH3
-#endif
- mfspr r11, SPRN_SPRG_SCRATCH1
- mfspr r10, SPRN_SPRG_SCRATCH0
- b DataAccess
+ mfspr r11, SPRN_SPRG_SCRATCH6
+ mfspr r10, SPRN_SPRG_SCRATCH5
+ b DataStorage
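Two things changed in the fast path above: the permission test is now one andc against a mask of required bits, and the write-enable bit handed to the hardware is folded from DIRTY & RW, so the first store to a clean page still traps and lets the generic fault code mark it dirty. In C terms (a sketch; bit values are taken from pte-40x.h but treat them as illustrative, and the fold relies on _PAGE_RW being _PAGE_DIRTY shifted left by one, exactly what the rlwinm/rlwimi pair assumes):

	#define _PAGE_PRESENT	0x002
	#define _PAGE_ACCESSED	0x400
	#define _PAGE_DIRTY	0x080
	#define _PAGE_RW	0x100	/* must equal _PAGE_DIRTY << 1 for the fold */

	static int dtlb_fast_path(unsigned long pte, unsigned long *tlb_pte)
	{
		unsigned long required = _PAGE_PRESENT | _PAGE_ACCESSED;
		unsigned long hwwrite;

		if (required & ~pte)			/* andc. r9,r9,r11 ; bne 5f */
			return 0;			/* bail out to DataStorage */

		hwwrite = (pte << 1) & pte & _PAGE_RW;	/* dirty & rw, in the RW slot */
		*tlb_pte = (pte & ~_PAGE_RW) | hwwrite;	/* hardware write enable */
		return 1;
	}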
/* 0x1200 - Instruction TLB Miss Exception
* Nearly the same as above, except we get our information from different
* registers and bailout to a different point.
*/
START_EXCEPTION(0x1200, ITLBMiss)
- mtspr SPRN_SPRG_SCRATCH0, r10 /* Save some working registers */
- mtspr SPRN_SPRG_SCRATCH1, r11
-#ifdef CONFIG_403GCX
- stw r12, 0(r0)
- stw r9, 4(r0)
- mfcr r11
- mfspr r12, SPRN_PID
- stw r11, 8(r0)
- stw r12, 12(r0)
-#else
+ mtspr SPRN_SPRG_SCRATCH5, r10 /* Save some working registers */
+ mtspr SPRN_SPRG_SCRATCH6, r11
mtspr SPRN_SPRG_SCRATCH3, r12
mtspr SPRN_SPRG_SCRATCH4, r9
- mfcr r11
- mfspr r12, SPRN_PID
- mtspr SPRN_SPRG_SCRATCH6, r11
- mtspr SPRN_SPRG_SCRATCH5, r12
-#endif
+ mfcr r12
+ mfspr r9, SPRN_PID
+ rlwimi r12, r9, 0, 0xff
mfspr r10, SPRN_SRR0 /* Get faulting address */
/* If we are faulting a kernel address, we have to use the
@@ -505,31 +382,37 @@ _ENTRY(saved_ksp_limit)
3:
mfspr r11,SPRN_SPRG_THREAD
lwz r11,PGDIR(r11)
+#ifdef CONFIG_PPC_KUAP
+ rlwinm. r9, r9, 0, 0xff
+ beq 5f /* Kuap fault */
+#endif
4:
tophys(r11, r11)
rlwimi r11, r10, 12, 20, 29 /* Create L1 (pgdir/pmd) address */
- lwz r12, 0(r11) /* Get L1 entry */
- andi. r9, r12, _PMD_PRESENT /* Check if it points to a PTE page */
+ lwz r11, 0(r11) /* Get L1 entry */
+ andi. r9, r11, _PMD_PRESENT /* Check if it points to a PTE page */
beq 2f /* Bail if no table */
- rlwimi r12, r10, 22, 20, 29 /* Compute PTE address */
- lwz r11, 0(r12) /* Get Linux PTE */
- andi. r9, r11, _PAGE_PRESENT
- beq 5f
+ rlwimi r11, r10, 22, 20, 29 /* Compute PTE address */
+ lwz r11, 0(r11) /* Get Linux PTE */
+ li r9, _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC
+ andc. r9, r9, r11 /* Check permission */
+ bne 5f
- ori r11, r11, _PAGE_ACCESSED
- stw r11, 0(r12)
+ rlwinm r9, r11, 1, _PAGE_RW /* dirty => rw */
+ and r9, r9, r11 /* hwwrite = dirty & rw */
+ rlwimi r11, r9, 0, _PAGE_RW /* replace rw by hwwrite */
/* Create TLB tag. This is the faulting address plus a static
* set of bits. These are size, valid, E, U0.
*/
- li r12, 0x00c0
- rlwimi r10, r12, 0, 20, 31
+ li r9, 0x00c0
+ rlwimi r10, r9, 0, 20, 31
b finish_tlb_load
2: /* Check for possible large-page pmd entry */
- rlwinm. r9, r12, 2, 22, 24
+ rlwinm. r9, r11, 2, 22, 24
beq 5f
/* Create TLB tag. This is the faulting address, plus a static
@@ -537,7 +420,6 @@ _ENTRY(saved_ksp_limit)
*/
ori r9, r9, 0x40
rlwimi r10, r9, 0, 20, 31
- mr r11, r12
b finish_tlb_load
@@ -545,44 +427,27 @@ _ENTRY(saved_ksp_limit)
/* The bailout. Restore registers to pre-exception conditions
* and call the heavyweights to help us out.
*/
-#ifdef CONFIG_403GCX
- lwz r12, 12(r0)
- lwz r11, 8(r0)
- mtspr SPRN_PID, r12
- mtcr r11
- lwz r9, 4(r0)
- lwz r12, 0(r0)
-#else
- mfspr r12, SPRN_SPRG_SCRATCH5
- mfspr r11, SPRN_SPRG_SCRATCH6
mtspr SPRN_PID, r12
- mtcr r11
+ mtcrf 0x80, r12
mfspr r9, SPRN_SPRG_SCRATCH4
mfspr r12, SPRN_SPRG_SCRATCH3
-#endif
- mfspr r11, SPRN_SPRG_SCRATCH1
- mfspr r10, SPRN_SPRG_SCRATCH0
+ mfspr r11, SPRN_SPRG_SCRATCH6
+ mfspr r10, SPRN_SPRG_SCRATCH5
b InstructionAccess
- EXCEPTION(0x1300, Trap_13, unknown_exception, EXC_XFER_STD)
- EXCEPTION(0x1400, Trap_14, unknown_exception, EXC_XFER_STD)
- EXCEPTION(0x1500, Trap_15, unknown_exception, EXC_XFER_STD)
- EXCEPTION(0x1600, Trap_16, unknown_exception, EXC_XFER_STD)
-#ifdef CONFIG_IBM405_ERR51
- /* 405GP errata 51 */
- START_EXCEPTION(0x1700, Trap_17)
- b DTLBMiss
-#else
- EXCEPTION(0x1700, Trap_17, unknown_exception, EXC_XFER_STD)
-#endif
- EXCEPTION(0x1800, Trap_18, unknown_exception, EXC_XFER_STD)
- EXCEPTION(0x1900, Trap_19, unknown_exception, EXC_XFER_STD)
- EXCEPTION(0x1A00, Trap_1A, unknown_exception, EXC_XFER_STD)
- EXCEPTION(0x1B00, Trap_1B, unknown_exception, EXC_XFER_STD)
- EXCEPTION(0x1C00, Trap_1C, unknown_exception, EXC_XFER_STD)
- EXCEPTION(0x1D00, Trap_1D, unknown_exception, EXC_XFER_STD)
- EXCEPTION(0x1E00, Trap_1E, unknown_exception, EXC_XFER_STD)
- EXCEPTION(0x1F00, Trap_1F, unknown_exception, EXC_XFER_STD)
+ EXCEPTION(0x1300, Trap_13, unknown_exception)
+ EXCEPTION(0x1400, Trap_14, unknown_exception)
+ EXCEPTION(0x1500, Trap_15, unknown_exception)
+ EXCEPTION(0x1600, Trap_16, unknown_exception)
+ EXCEPTION(0x1700, Trap_17, unknown_exception)
+ EXCEPTION(0x1800, Trap_18, unknown_exception)
+ EXCEPTION(0x1900, Trap_19, unknown_exception)
+ EXCEPTION(0x1A00, Trap_1A, unknown_exception)
+ EXCEPTION(0x1B00, Trap_1B, unknown_exception)
+ EXCEPTION(0x1C00, Trap_1C, unknown_exception)
+ EXCEPTION(0x1D00, Trap_1D, unknown_exception)
+ EXCEPTION(0x1E00, Trap_1E, unknown_exception)
+ EXCEPTION(0x1F00, Trap_1F, unknown_exception)
/* Check for a single step debug exception while in an exception
* handler before state has been saved. This is to catch the case
@@ -599,7 +464,7 @@ _ENTRY(saved_ksp_limit)
*/
/* 0x2000 - Debug Exception */
START_EXCEPTION(0x2000, DebugTrap)
- CRITICAL_EXCEPTION_PROLOG
+ CRITICAL_EXCEPTION_PROLOG 0x2000 DebugTrap
/*
* If this is a single step or branch-taken exception in an
@@ -635,50 +500,41 @@ _ENTRY(saved_ksp_limit)
lwz r12,GPR12(r11)
lwz r10,crit_r10@l(0)
lwz r11,crit_r11@l(0)
- PPC405_ERR77_SYNC
rfci
b .
/* continue normal handling for a critical exception... */
2: mfspr r4,SPRN_DBSR
- addi r3,r1,STACK_FRAME_OVERHEAD
- EXC_XFER_TEMPLATE(DebugException, 0x2002, \
- (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), \
- crit_transfer_to_handler, ret_from_crit_exc)
+ stw r4,_ESR(r11) /* DebugException takes DBSR in _ESR */
+ prepare_transfer_to_handler
+ bl DebugException
+ b ret_from_crit_exc
/* Programmable Interval Timer (PIT) Exception. (from 0x1000) */
+ __HEAD
Decrementer:
- EXCEPTION_PROLOG
+ EXCEPTION_PROLOG 0x1000 Decrementer
lis r0,TSR_PIS@h
mtspr SPRN_TSR,r0 /* Clear the PIT exception */
- addi r3,r1,STACK_FRAME_OVERHEAD
- EXC_XFER_LITE(0x1000, timer_interrupt)
+ prepare_transfer_to_handler
+ bl timer_interrupt
+ b interrupt_return
/* Fixed Interval Timer (FIT) Exception. (from 0x1010) */
+ __HEAD
FITException:
- EXCEPTION_PROLOG
- addi r3,r1,STACK_FRAME_OVERHEAD;
- EXC_XFER_STD(0x1010, unknown_exception)
+ EXCEPTION_PROLOG 0x1010 FITException
+ prepare_transfer_to_handler
+ bl unknown_exception
+ b interrupt_return
/* Watchdog Timer (WDT) Exception. (from 0x1020) */
+ __HEAD
WDTException:
- CRITICAL_EXCEPTION_PROLOG;
- addi r3,r1,STACK_FRAME_OVERHEAD;
- EXC_XFER_TEMPLATE(WatchdogException, 0x1020+2,
- (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)),
- crit_transfer_to_handler, ret_from_crit_exc)
-
-/*
- * The other Data TLB exceptions bail out to this point
- * if they can't resolve the lightweight TLB fault.
- */
-DataAccess:
- EXCEPTION_PROLOG
- mfspr r5,SPRN_ESR /* Grab the ESR, save it, pass arg3 */
- stw r5,_ESR(r11)
- mfspr r4,SPRN_DEAR /* Grab the DEAR, save it, pass arg2 */
- stw r4, _DEAR(r11)
- EXC_XFER_LITE(0x300, handle_page_fault)
+ CRITICAL_EXCEPTION_PROLOG 0x1020 WDTException
+ prepare_transfer_to_handler
+ bl WatchdogException
+ b ret_from_crit_exc
/* Other PowerPC processors, namely those derived from the 6xx-series
* have vectors from 0x2100 through 0x2F00 defined, but marked as reserved.
@@ -686,12 +542,13 @@ DataAccess:
* reserved.
*/
+ __HEAD
/* Damn, I came up one instruction too many to fit into the
* exception space :-). Both the instruction and data TLB
* miss get to this point to load the TLB.
* r10 - TLB_TAG value
* r11 - Linux PTE
- * r12, r9 - available to use
+ * r9 - available to use
* PID - loaded with proper value when we get here
* Upon exit, we reload everything and RFI.
* Actually, it will fit now, but oh well.....a common place
@@ -700,45 +557,31 @@ DataAccess:
tlb_4xx_index:
.long 0
finish_tlb_load:
- /* load the next available TLB index.
- */
- lwz r9, tlb_4xx_index@l(0)
- addi r9, r9, 1
- andi. r9, r9, (PPC40X_TLB_SIZE-1)
- stw r9, tlb_4xx_index@l(0)
-
-6:
/*
* Clear out the software-only bits in the PTE to generate the
 * TLB_DATA value. These are the bottom 2 bits of the RPN, the
* top 3 bits of the zone field, and M.
*/
- li r12, 0x0ce2
- andc r11, r11, r12
+ li r9, 0x0ce2
+ andc r11, r11, r9
+
+ /* load the next available TLB index. */
+ lwz r9, tlb_4xx_index@l(0)
+ addi r9, r9, 1
+ andi. r9, r9, PPC40X_TLB_SIZE - 1
+ stw r9, tlb_4xx_index@l(0)
tlbwe r11, r9, TLB_DATA /* Load TLB LO */
tlbwe r10, r9, TLB_TAG /* Load TLB HI */
/* Done...restore registers and get out of here.
*/
-#ifdef CONFIG_403GCX
- lwz r12, 12(r0)
- lwz r11, 8(r0)
- mtspr SPRN_PID, r12
- mtcr r11
- lwz r9, 4(r0)
- lwz r12, 0(r0)
-#else
- mfspr r12, SPRN_SPRG_SCRATCH5
- mfspr r11, SPRN_SPRG_SCRATCH6
mtspr SPRN_PID, r12
- mtcr r11
+ mtcrf 0x80, r12
mfspr r9, SPRN_SPRG_SCRATCH4
mfspr r12, SPRN_SPRG_SCRATCH3
-#endif
- mfspr r11, SPRN_SPRG_SCRATCH1
- mfspr r10, SPRN_SPRG_SCRATCH0
- PPC405_ERR77_SYNC
+ mfspr r11, SPRN_SPRG_SCRATCH6
+ mfspr r10, SPRN_SPRG_SCRATCH5
rfi /* Should sync shadow TLBs */
b . /* prevent prefetch past rfi */
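finish_tlb_load keeps the simple software round-robin replacement policy; the index update just moved after the PTE cleanup so the whole sequence only needs r9. The policy itself in C (a sketch; PPC40X_TLB_SIZE is assumed to be the number of software-loaded TLB entries, 64 on these cores, and must be a power of two for the mask to wrap):

	#define PPC40X_TLB_SIZE 64		/* assumed entry count */

	static unsigned int tlb_4xx_index;

	static unsigned int next_tlb_slot(void)
	{
		tlb_4xx_index = (tlb_4xx_index + 1) & (PPC40X_TLB_SIZE - 1);
		return tlb_4xx_index;		/* slot for the next tlbwe pair */
	}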
@@ -801,7 +644,7 @@ start_here:
ori r6, r6, swapper_pg_dir@l
lis r5, abatron_pteptrs@h
ori r5, r5, abatron_pteptrs@l
- stw r5, 0xf0(r0) /* Must match your Abatron config file */
+ stw r5, 0xf0(0) /* Must match your Abatron config file */
tophys(r5,r5)
stw r6, 0(r5)
@@ -816,7 +659,7 @@ start_here:
b . /* prevent prefetch past rfi */
/* Set up the initial MMU state so we can do the first level of
- * kernel initialization. This maps the first 16 MBytes of memory 1:1
+ * kernel initialization. This maps the first 32 MBytes of memory 1:1
* virtual to physical and more importantly sets the cache mode.
*/
initial_mmu:
@@ -853,6 +696,12 @@ initial_mmu:
tlbwe r4,r0,TLB_DATA /* Load the data portion of the entry */
tlbwe r3,r0,TLB_TAG /* Load the tag portion of the entry */
+ li r0,62 /* TLB slot 62 */
+ addis r4,r4,SZ_16M@h
+ addis r3,r3,SZ_16M@h
+ tlbwe r4,r0,TLB_DATA /* Load the data portion of the entry */
+ tlbwe r3,r0,TLB_TAG /* Load the tag portion of the entry */
+
isync
/* Establish the exception vector base
@@ -867,39 +716,3 @@ _GLOBAL(abort)
mfspr r13,SPRN_DBCR0
oris r13,r13,DBCR0_RST_SYSTEM@h
mtspr SPRN_DBCR0,r13
-
-_GLOBAL(set_context)
-
-#ifdef CONFIG_BDI_SWITCH
- /* Context switch the PTE pointer for the Abatron BDI2000.
- * The PGDIR is the second parameter.
- */
- lis r5, abatron_pteptrs@ha
- stw r4, abatron_pteptrs@l + 0x4(r5)
-#endif
- sync
- mtspr SPRN_PID,r3
- isync /* Need an isync to flush shadow */
- /* TLBs after changing PID */
- blr
-
-/* We put a few things here that have to be page-aligned. This stuff
- * goes at the beginning of the data segment, which is page-aligned.
- */
- .data
- .align 12
- .globl sdata
-sdata:
- .globl empty_zero_page
-empty_zero_page:
- .space 4096
-EXPORT_SYMBOL(empty_zero_page)
- .globl swapper_pg_dir
-swapper_pg_dir:
- .space PGD_TABLE_SIZE
-
-/* Room for two PTE pointers, usually the kernel and current user pointers
- * to their respective root page table.
- */
-abatron_pteptrs:
- .space 8
diff --git a/arch/powerpc/kernel/head_44x.S b/arch/powerpc/kernel/head_44x.S
index 51dd01a27314..f15cb9fdb692 100644
--- a/arch/powerpc/kernel/head_44x.S
+++ b/arch/powerpc/kernel/head_44x.S
@@ -25,10 +25,10 @@
*/
#include <linux/init.h>
+#include <linux/pgtable.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
-#include <asm/pgtable.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
@@ -52,8 +52,8 @@
*
*/
__HEAD
-_ENTRY(_stext);
-_ENTRY(_start);
+_GLOBAL(_stext);
+_GLOBAL(_start);
/*
* Reserve a word at a fixed location to store the address
* of abatron_pteptrs
@@ -70,7 +70,7 @@ _ENTRY(_start);
* address.
* r21 will be loaded with the physical runtime address of _stext
*/
- bl 0f /* Get our runtime address */
+ bcl 20,31,$+4 /* Get our runtime address */
0: mflr r21 /* Make it accessible */
addis r21,r21,(_stext - 0b)@ha
addi r21,r21,(_stext - 0b)@l /* Get our current runtime base */
@@ -263,8 +263,7 @@ interrupt_base:
INSTRUCTION_STORAGE_EXCEPTION
/* External Input Interrupt */
- EXCEPTION(0x0500, BOOKE_INTERRUPT_EXTERNAL, ExternalInput, \
- do_IRQ, EXC_XFER_LITE)
+ EXCEPTION(0x0500, BOOKE_INTERRUPT_EXTERNAL, ExternalInput, do_IRQ)
/* Alignment Interrupt */
ALIGNMENT_EXCEPTION
@@ -277,7 +276,7 @@ interrupt_base:
FP_UNAVAILABLE_EXCEPTION
#else
EXCEPTION(0x2010, BOOKE_INTERRUPT_FP_UNAVAIL, \
- FloatingPointUnavailable, unknown_exception, EXC_XFER_STD)
+ FloatingPointUnavailable, unknown_exception)
#endif
/* System Call Interrupt */
START_EXCEPTION(SystemCall)
@@ -285,15 +284,14 @@ interrupt_base:
/* Auxiliary Processor Unavailable Interrupt */
EXCEPTION(0x2020, BOOKE_INTERRUPT_AP_UNAVAIL, \
- AuxillaryProcessorUnavailable, unknown_exception, EXC_XFER_STD)
+ AuxillaryProcessorUnavailable, unknown_exception)
/* Decrementer Interrupt */
DECREMENTER_EXCEPTION
/* Fixed Internal Timer Interrupt */
/* TODO: Add FIT support */
- EXCEPTION(0x1010, BOOKE_INTERRUPT_FIT, FixedIntervalTimer, \
- unknown_exception, EXC_XFER_STD)
+ EXCEPTION(0x1010, BOOKE_INTERRUPT_FIT, FixedIntervalTimer, unknown_exception)
/* Watchdog Timer Interrupt */
/* TODO: Add watchdog support */
@@ -336,6 +334,10 @@ interrupt_base:
mfspr r12,SPRN_MMUCR
mfspr r13,SPRN_PID /* Get PID */
rlwimi r12,r13,0,24,31 /* Set TID */
+#ifdef CONFIG_PPC_KUAP
+ cmpwi r13,0
+ beq 2f /* KUAP Fault */
+#endif
4:
mtspr SPRN_MMUCR,r12
@@ -376,7 +378,7 @@ interrupt_base:
/* Load the next available TLB index */
lwz r13,tlb_44x_index@l(r10)
- bne 2f /* Bail if permission mismach */
+ bne 2f /* Bail if permission mismatch */
/* Increment, rollover, and store TLB index */
addi r13,r13,1
@@ -446,6 +448,10 @@ interrupt_base:
mfspr r12,SPRN_MMUCR
mfspr r13,SPRN_PID /* Get PID */
rlwimi r12,r13,0,24,31 /* Set TID */
+#ifdef CONFIG_PPC_KUAP
+ cmpwi r13,0
+ beq 2f /* KUAP Fault */
+#endif
4:
mtspr SPRN_MMUCR,r12
@@ -471,7 +477,7 @@ interrupt_base:
/* Load the next available TLB index */
lwz r13,tlb_44x_index@l(r10)
- bne 2f /* Bail if permission mismach */
+ bne 2f /* Bail if permission mismatch */
/* Increment, rollover, and store TLB index */
addi r13,r13,1
@@ -534,6 +540,7 @@ finish_tlb_load_44x:
andi. r10,r12,_PAGE_USER /* User page ? */
beq 1f /* nope, leave U bits empty */
rlwimi r11,r11,3,26,28 /* yes, copy S bits to U */
+ rlwinm r11,r11,0,~PPC44x_TLB_SX /* Clear SX if User page */
1: tlbwe r11,r13,PPC44x_TLB_ATTRIB /* Write ATTRIB */
/* Done...restore registers and get out of here.
@@ -573,6 +580,10 @@ finish_tlb_load_44x:
3: mfspr r11,SPRN_SPRG3
lwz r11,PGDIR(r11)
mfspr r12,SPRN_PID /* Get PID */
+#ifdef CONFIG_PPC_KUAP
+ cmpwi r12,0
+ beq 2f /* KUAP Fault */
+#endif
4: mtspr SPRN_MMUCR,r12 /* Set MMUCR */
/* Mask of required permission bits. Note that while we
@@ -670,6 +681,10 @@ finish_tlb_load_44x:
3: mfspr r11,SPRN_SPRG_THREAD
lwz r11,PGDIR(r11)
mfspr r12,SPRN_PID /* Get PID */
+#ifdef CONFIG_PPC_KUAP
+ cmpwi r12,0
+ beq 2f /* KUAP Fault */
+#endif
4: mtspr SPRN_MMUCR,r12 /* Set MMUCR */
/* Make up the required permissions */
@@ -745,6 +760,7 @@ finish_tlb_load_47x:
andi. r10,r12,_PAGE_USER /* User page ? */
beq 1f /* nope, leave U bits empty */
rlwimi r11,r11,3,26,28 /* yes, copy S bits to U */
+ rlwinm r11,r11,0,~PPC47x_TLB2_SX /* Clear SX if User page */
1: tlbwe r11,r13,2
/* Done...restore registers and get out of here.
@@ -782,20 +798,6 @@ _GLOBAL(__fixup_440A_mcheck)
sync
blr
-_GLOBAL(set_context)
-
-#ifdef CONFIG_BDI_SWITCH
- /* Context switch the PTE pointer for the Abatron BDI2000.
- * The PGDIR is the second parameter.
- */
- lis r5, abatron_pteptrs@h
- ori r5, r5, abatron_pteptrs@l
- stw r4, 0x4(r5)
-#endif
- mtspr SPRN_PID,r3
- isync /* Force context change */
- blr
-
/*
* Init CPU state. This is called at boot time or for secondary CPUs
* to setup initial TLB entries, setup IVORs, etc...
@@ -861,7 +863,7 @@ _GLOBAL(init_cpu_state)
wmmucr: mtspr SPRN_MMUCR,r3 /* Put MMUCR */
sync
- bl invstr /* Find our address */
+ bcl 20,31,$+4 /* Find our address */
invstr: mflr r5 /* Make it accessible */
tlbsx r23,0,r5 /* Find entry we are in */
li r4,0 /* Start at TLB entry 0 */
@@ -1053,7 +1055,7 @@ head_start_47x:
sync
/* Find the entry we are running from */
- bl 1f
+ bcl 20,31,$+4
1: mflr r23
tlbsx r23,0,r23
tlbre r24,r23,0
@@ -1241,34 +1243,8 @@ head_start_common:
isync
blr
-/*
- * We put a few things here that have to be page-aligned. This stuff
- * goes at the beginning of the data segment, which is page-aligned.
- */
- .data
- .align PAGE_SHIFT
- .globl sdata
-sdata:
- .globl empty_zero_page
-empty_zero_page:
- .space PAGE_SIZE
-EXPORT_SYMBOL(empty_zero_page)
-
-/*
- * To support >32-bit physical addresses, we use an 8KB pgdir.
- */
- .globl swapper_pg_dir
-swapper_pg_dir:
- .space PGD_TABLE_SIZE
-
-/*
- * Room for two PTE pointers, usually the kernel and current user pointers
- * to their respective root page table.
- */
-abatron_pteptrs:
- .space 8
-
#ifdef CONFIG_SMP
+ .data
.align 12
temp_boot_stack:
.space 1024
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index ad79fddb974d..dedcc6fe2263 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -41,6 +41,11 @@
#include <asm/ppc-opcode.h>
#include <asm/export.h>
#include <asm/feature-fixups.h>
+#ifdef CONFIG_PPC_BOOK3S
+#include <asm/exception-64s.h>
+#else
+#include <asm/exception-64e.h>
+#endif
/* The physical memory is laid out such that the secondary processor
* spin code sits at 0x0000...0x00ff. On server, the vectors follow
@@ -106,7 +111,7 @@ __secondary_hold_acknowledge:
#ifdef CONFIG_RELOCATABLE
/* This flag is set to 1 by a loader if the kernel should run
* at the loaded address instead of the linked address. This
- * is used by kexec-tools to keep the the kdump kernel in the
+ * is used by kexec-tools to keep the kdump kernel in the
* crash_kernel region. The loader is responsible for
* observing the alignment requirement.
*/
@@ -121,7 +126,7 @@ __secondary_hold_acknowledge:
. = 0x5c
.globl __run_at_load
__run_at_load:
-DEFINE_FIXED_SYMBOL(__run_at_load)
+DEFINE_FIXED_SYMBOL(__run_at_load, first_256B)
.long RUN_AT_LOAD_DEFAULT
#endif
@@ -138,7 +143,7 @@ DEFINE_FIXED_SYMBOL(__run_at_load)
.globl __secondary_hold
__secondary_hold:
FIXUP_ENDIAN
-#ifndef CONFIG_PPC_BOOK3E
+#ifndef CONFIG_PPC_BOOK3E_64
mfmsr r24
ori r24,r24,MSR_RI
mtmsrd r24 /* RI on */
@@ -151,20 +156,20 @@ __secondary_hold:
/* Tell the master cpu we're here */
/* Relocation is off & we are located at an address less */
/* than 0x100, so only need to grab low order offset. */
- std r24,(ABS_ADDR(__secondary_hold_acknowledge))(0)
+ std r24,(ABS_ADDR(__secondary_hold_acknowledge, first_256B))(0)
sync
li r26,0
-#ifdef CONFIG_PPC_BOOK3E
+#ifdef CONFIG_PPC_BOOK3E_64
tovirt(r26,r26)
#endif
/* All secondary cpus wait here until told to start. */
-100: ld r12,(ABS_ADDR(__secondary_hold_spinloop))(r26)
+100: ld r12,(ABS_ADDR(__secondary_hold_spinloop, first_256B))(r26)
cmpdi 0,r12,0
beq 100b
#if defined(CONFIG_SMP) || defined(CONFIG_KEXEC_CORE)
-#ifdef CONFIG_PPC_BOOK3E
+#ifdef CONFIG_PPC_BOOK3E_64
tovirt(r12,r12)
#endif
mtctr r12
@@ -173,7 +178,7 @@ __secondary_hold:
* it may be the case that other platforms have r4 right to
* begin with, this gives us some safety in case it is not
*/
-#ifdef CONFIG_PPC_BOOK3E
+#ifdef CONFIG_PPC_BOOK3E_64
mr r4,r25
#else
li r4,0
@@ -187,12 +192,6 @@ __secondary_hold:
#endif
CLOSE_FIXED_SECTION(first_256B)
-/* This value is used to mark exception frames on the stack. */
- .section ".toc","aw"
-exception_marker:
- .tc ID_72656773_68657265[TC],0x7265677368657265
- .previous
-
/*
* On server, we include the exception vectors code here as it
* relies on absolute addressing which is only possible within
@@ -206,7 +205,9 @@ OPEN_TEXT_SECTION(0x100)
USE_TEXT_SECTION()
-#ifdef CONFIG_PPC_BOOK3E
+#include "interrupt_64.S"
+
+#ifdef CONFIG_PPC_BOOK3E_64
/*
* The booting_thread_hwid holds the thread id we want to boot in cpu
* hotplug case. It is set by cpu hotplug code, and is invalid by default.
@@ -300,9 +301,6 @@ _GLOBAL(fsl_secondary_thread_init)
rlwimi r3, r3, 30, 2, 30
mtspr SPRN_PIR, r3
1:
-#endif
-
-_GLOBAL(generic_secondary_thread_init)
mr r24,r3
/* turn on 64-bit mode */
@@ -312,13 +310,13 @@ _GLOBAL(generic_secondary_thread_init)
bl relative_toc
tovirt(r2,r2)
-#ifdef CONFIG_PPC_BOOK3E
/* Book3E initialization */
mr r3,r24
bl book3e_secondary_thread_init
-#endif
b generic_secondary_common_init
+#endif /* CONFIG_PPC_BOOK3E_64 */
+
/*
* On pSeries and most other platforms, secondary processors spin
* in the following code.
@@ -340,7 +338,7 @@ _GLOBAL(generic_secondary_smp_init)
bl relative_toc
tovirt(r2,r2)
-#ifdef CONFIG_PPC_BOOK3E
+#ifdef CONFIG_PPC_BOOK3E_64
/* Book3E initialization */
mr r3,r24
mr r4,r25
@@ -395,8 +393,12 @@ generic_secondary_common_init:
#else
LOAD_REG_ADDR(r8, paca_ptrs) /* Load paca_ptrs pointe */
ld r8,0(r8) /* Get base vaddr of array */
+#if (NR_CPUS == 1) || defined(CONFIG_FORCE_NR_CPUS)
+ LOAD_REG_IMMEDIATE(r7, NR_CPUS)
+#else
LOAD_REG_ADDR(r7, nr_cpu_ids) /* Load nr_cpu_ids address */
lwz r7,0(r7) /* also the max paca allocated */
+#endif
li r5,0 /* logical cpu id */
1:
sldi r9,r5,3 /* get paca_ptrs[] index from cpu id */
@@ -412,7 +414,7 @@ generic_secondary_common_init:
b kexec_wait /* next kernel might do better */
2: SET_PACA(r13)
-#ifdef CONFIG_PPC_BOOK3E
+#ifdef CONFIG_PPC_BOOK3E_64
addi r12,r13,PACA_EXTLB /* and TLB exc frame in another */
mtspr SPRN_SPRG_TLB_EXFRAME,r12
#endif
@@ -420,13 +422,17 @@ generic_secondary_common_init:
/* From now on, r24 is expected to be logical cpuid */
mr r24,r5
+ /* Create a temp kernel stack for use before relocation is on. */
+ ld r1,PACAEMERGSP(r13)
+ subi r1,r1,STACK_FRAME_OVERHEAD
+
/* See if we need to call a cpu state restore handler */
LOAD_REG_ADDR(r23, cur_cpu_spec)
ld r23,0(r23)
ld r12,CPU_SPEC_RESTORE(r23)
cmpdi 0,r12,0
beq 3f
-#ifdef PPC64_ELF_ABI_v1
+#ifdef CONFIG_PPC64_ELF_ABI_V1
ld r12,0(r12)
#endif
mtctr r12
@@ -448,10 +454,6 @@ generic_secondary_common_init:
sync /* order paca.run and cur_cpu_spec */
isync /* In case code patching happened */
- /* Create a temp kernel stack for use before relocation is on. */
- ld r1,PACAEMERGSP(r13)
- subi r1,r1,STACK_FRAME_OVERHEAD
-
b __secondary_start
#endif /* SMP */
@@ -489,6 +491,9 @@ __start_initialization_multiplatform:
/* Make sure we are running in 64 bits mode */
bl enable_64b_mode
+ /* Zero r13 (paca) so early program check / mce don't use it */
+ li r13,0
+
/* Get TOC pointer (current runtime address) */
bl relative_toc
@@ -514,7 +519,7 @@ __start_initialization_multiplatform:
mr r29,r9
#endif
-#ifdef CONFIG_PPC_BOOK3E
+#ifdef CONFIG_PPC_BOOK3E_64
bl start_initialization_book3e
b __after_prom_start
#else
@@ -535,8 +540,9 @@ __start_initialization_multiplatform:
/* Switch off MMU if not already off */
bl __mmu_off
b __after_prom_start
-#endif /* CONFIG_PPC_BOOK3E */
+#endif /* CONFIG_PPC_BOOK3E_64 */
+__REF
__boot_from_prom:
#ifdef CONFIG_PPC_OF_BOOT_TRAMPOLINE
/* Save parameters */
@@ -574,17 +580,18 @@ __boot_from_prom:
/* We never return. We also hit that trap if trying to boot
* from OF while CONFIG_PPC_OF_BOOT_TRAMPOLINE isn't selected */
trap
+ .previous
__after_prom_start:
#ifdef CONFIG_RELOCATABLE
/* process relocations for the final address of the kernel */
lis r25,PAGE_OFFSET@highest /* compute virtual base of kernel */
sldi r25,r25,32
-#if defined(CONFIG_PPC_BOOK3E)
+#if defined(CONFIG_PPC_BOOK3E_64)
tovirt(r26,r26) /* on booke, we already run at PAGE_OFFSET */
#endif
lwz r7,(FIXED_SYMBOL_ABS_ADDR(__run_at_load))(r26)
-#if defined(CONFIG_PPC_BOOK3E)
+#if defined(CONFIG_PPC_BOOK3E_64)
tophys(r26,r26)
#endif
cmplwi cr0,r7,1 /* flagged to stay where we are ? */
@@ -592,7 +599,7 @@ __after_prom_start:
add r25,r25,r26
1: mr r3,r25
bl relocate
-#if defined(CONFIG_PPC_BOOK3E)
+#if defined(CONFIG_PPC_BOOK3E_64)
/* IVPR needs to be set after relocation. */
bl init_core_book3e
#endif
@@ -606,11 +613,11 @@ __after_prom_start:
* Note: This process overwrites the OF exception vectors.
*/
li r3,0 /* target addr */
-#ifdef CONFIG_PPC_BOOK3E
+#ifdef CONFIG_PPC_BOOK3E_64
tovirt(r3,r3) /* on booke, we already run at PAGE_OFFSET */
#endif
mr. r4,r26 /* In some cases the loader may */
-#if defined(CONFIG_PPC_BOOK3E)
+#if defined(CONFIG_PPC_BOOK3E_64)
tovirt(r4,r4)
#endif
beq 9f /* have already put us at zero */
@@ -623,14 +630,14 @@ __after_prom_start:
* variable __run_at_load, if it is set the kernel is treated as relocatable
* kernel, otherwise it will be moved to PHYSICAL_START
*/
-#if defined(CONFIG_PPC_BOOK3E)
+#if defined(CONFIG_PPC_BOOK3E_64)
tovirt(r26,r26) /* on booke, we already run at PAGE_OFFSET */
#endif
lwz r7,(FIXED_SYMBOL_ABS_ADDR(__run_at_load))(r26)
cmplwi cr0,r7,1
bne 3f
-#ifdef CONFIG_PPC_BOOK3E
+#ifdef CONFIG_PPC_BOOK3E_64
LOAD_REG_ADDR(r5, __end_interrupts)
LOAD_REG_ADDR(r11, _stext)
sub r5,r5,r11
@@ -642,15 +649,15 @@ __after_prom_start:
3:
#endif
/* # bytes of memory to copy */
- lis r5,(ABS_ADDR(copy_to_here))@ha
- addi r5,r5,(ABS_ADDR(copy_to_here))@l
+ lis r5,(ABS_ADDR(copy_to_here, text))@ha
+ addi r5,r5,(ABS_ADDR(copy_to_here, text))@l
bl copy_and_flush /* copy the first n bytes */
/* this includes the code being */
/* executed here. */
/* Jump to the copy of this code that we just made */
- addis r8,r3,(ABS_ADDR(4f))@ha
- addi r12,r8,(ABS_ADDR(4f))@l
+ addis r8,r3,(ABS_ADDR(4f, text))@ha
+ addi r12,r8,(ABS_ADDR(4f, text))@l
mtctr r12
bctr
@@ -662,8 +669,8 @@ p_end: .8byte _end - copy_to_here
* Now copy the rest of the kernel up to _end, add
* _end - copy_to_here to the copy limit and run again.
*/
- addis r8,r26,(ABS_ADDR(p_end))@ha
- ld r8,(ABS_ADDR(p_end))@l(r8)
+ addis r8,r26,(ABS_ADDR(p_end, text))@ha
+ ld r8,(ABS_ADDR(p_end, text))@l(r8)
add r5,r5,r8
5: bl copy_and_flush /* copy the rest */
@@ -705,6 +712,8 @@ _GLOBAL(copy_and_flush)
isync
blr
+_ASM_NOKPROBE_SYMBOL(copy_and_flush); /* Called in real mode */
+
.align 8
copy_to_here:
@@ -830,7 +839,7 @@ __secondary_start:
mtspr SPRN_SRR0,r3
mtspr SPRN_SRR1,r4
- RFI
+ RFI_TO_KERNEL
b . /* prevent speculative execution */
/*
@@ -839,7 +848,7 @@ __secondary_start:
* before going into C code.
*/
start_secondary_prolog:
- ld r2,PACATOC(r13)
+ LOAD_PACA_TOC()
li r3,0
std r3,0(r1) /* Zero the stack frame pointer */
bl start_secondary
@@ -862,12 +871,11 @@ _GLOBAL(start_secondary_resume)
*/
enable_64b_mode:
mfmsr r11 /* grab the current MSR */
-#ifdef CONFIG_PPC_BOOK3E
+#ifdef CONFIG_PPC_BOOK3E_64
oris r11,r11,0x8000 /* CM bit set, we'll set ICM later */
mtmsr r11
-#else /* CONFIG_PPC_BOOK3E */
- li r12,(MSR_64BIT | MSR_ISF)@highest
- sldi r12,r12,48
+#else /* CONFIG_PPC_BOOK3E_64 */
+ LOAD_REG_IMMEDIATE(r12, MSR_64BIT)
or r11,r11,r12
mtmsrd r11
isync
@@ -896,7 +904,7 @@ _GLOBAL(relative_toc)
blr
.balign 8
-p_toc: .8byte __toc_start + 0x8000 - 0b
+p_toc: .8byte .TOC. - 0b
/*
* This is where the main kernel code starts.
@@ -932,7 +940,7 @@ start_here_multiplatform:
std r29,8(r11);
#endif
-#ifndef CONFIG_PPC_BOOK3E
+#ifndef CONFIG_PPC_BOOK3E_64
mfmsr r6
ori r6,r6,MSR_RI
mtmsrd r6 /* RI on */
@@ -945,15 +953,8 @@ start_here_multiplatform:
std r0,0(r4)
#endif
- /* The following gets the stack set up with the regs */
- /* pointing to the real addr of the kernel stack. This is */
- /* all done to support the C function call below which sets */
- /* up the htab. This is done because we have relocated the */
- /* kernel but are still running in real mode. */
-
- LOAD_REG_ADDR(r3,init_thread_union)
-
/* set up a stack pointer */
+ LOAD_REG_ADDR(r3,init_thread_union)
LOAD_REG_IMMEDIATE(r1,THREAD_SIZE)
add r1,r3,r1
li r0,0
@@ -964,6 +965,9 @@ start_here_multiplatform:
* and SLB setup before we turn on relocation.
*/
+#ifdef CONFIG_KASAN
+ bl kasan_early_init
+#endif
/* Restore parameters passed from prom_init/kexec */
mr r3,r31
LOAD_REG_ADDR(r12, DOTSYM(early_setup))
@@ -974,10 +978,9 @@ start_here_multiplatform:
ld r4,PACAKMSR(r13)
mtspr SPRN_SRR0,r3
mtspr SPRN_SRR1,r4
- RFI
+ RFI_TO_KERNEL
b . /* prevent speculative execution */
- .previous
/* This is where all platforms converge execution */
start_here_common:
@@ -985,7 +988,7 @@ start_here_common:
std r1,PACAKSAVE(r13)
/* Load the TOC (virtual address) */
- ld r2,PACATOC(r13)
+ LOAD_PACA_TOC()
/* Mark interrupts soft and hard disabled (they might be enabled
* in the PACA when doing hotplug)
@@ -999,25 +1002,6 @@ start_here_common:
bl start_kernel
/* Not reached */
- trap
+0: trap
EMIT_BUG_ENTRY 0b, __FILE__, __LINE__, 0
-
-/*
- * We put a few things here that have to be page-aligned.
- * This stuff goes at the beginning of the bss, which is page-aligned.
- */
- .section ".bss"
-/*
- * pgd dir should be aligned to PGD_TABLE_SIZE which is 64K.
- * We will need to find a better way to fix this
- */
- .align 16
-
- .globl swapper_pg_dir
-swapper_pg_dir:
- .space PGD_TABLE_SIZE
-
- .globl empty_zero_page
-empty_zero_page:
- .space PAGE_SIZE
-EXPORT_SYMBOL(empty_zero_page)
+ .previous
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_85xx.S
index 840af004041e..52c0ab416326 100644
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_85xx.S
@@ -28,10 +28,10 @@
#include <linux/init.h>
#include <linux/threads.h>
+#include <linux/pgtable.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
-#include <asm/pgtable.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
@@ -54,8 +54,8 @@
*
*/
__HEAD
-_ENTRY(_stext);
-_ENTRY(_start);
+_GLOBAL(_stext);
+_GLOBAL(_start);
/*
* Reserve a word at a fixed location to store the address
* of abatron_pteptrs
@@ -79,7 +79,7 @@ _ENTRY(_start);
mr r23,r3
mr r25,r4
- bl 0f
+ bcl 20,31,$+4
0: mflr r8
addis r3,r8,(is_second_reloc - 0b)@ha
lwz r19,(is_second_reloc - 0b)@l(r3)
@@ -113,7 +113,7 @@ _ENTRY(_start);
1:
/*
- * We have the runtime (virutal) address of our base.
+ * We have the runtime (virtual) address of our base.
* We calculate our shift of offset from a 64M page.
* We could map the 64M page we belong to at PAGE_OFFSET and
* get going from there.
@@ -129,7 +129,7 @@ _ENTRY(_start);
/*
* For the second relocation, we already set the right tlb entries
- * for the kernel space, so skip the code in fsl_booke_entry_mapping.S
+ * for the kernel space, so skip the code in 85xx_entry_mapping.S
*/
cmpwi r19,1
beq set_ivor
@@ -154,12 +154,12 @@ _ENTRY(_start);
* if needed
*/
-_ENTRY(__early_start)
+_GLOBAL(__early_start)
LOAD_REG_ADDR_PIC(r20, kernstart_virt_addr)
lwz r20,0(r20)
#define ENTRY_MAPPING_BOOT_SETUP
-#include "fsl_booke_entry_mapping.S"
+#include "85xx_entry_mapping.S"
#undef ENTRY_MAPPING_BOOT_SETUP
set_ivor:
@@ -187,9 +187,6 @@ set_ivor:
/* Setup the defaults for TLB entries */
li r2,(MAS4_TSIZED(BOOK3E_PAGESZ_4K))@l
-#ifdef CONFIG_E200
- oris r2,r2,MAS4_TLBSELD(1)@h
-#endif
mtspr SPRN_MAS4, r2
#if !defined(CONFIG_BDI_SWITCH)
@@ -362,33 +359,30 @@ interrupt_base:
CRITICAL_EXCEPTION(0x0100, CRITICAL, CriticalInput, unknown_exception)
/* Machine Check Interrupt */
-#ifdef CONFIG_E200
- /* no RFMCI, MCSRRs on E200 */
- CRITICAL_EXCEPTION(0x0200, MACHINE_CHECK, MachineCheck, \
- machine_check_exception)
-#else
MCHECK_EXCEPTION(0x0200, MachineCheck, machine_check_exception)
-#endif
/* Data Storage Interrupt */
START_EXCEPTION(DataStorage)
- NORMAL_EXCEPTION_PROLOG(DATA_STORAGE)
- mfspr r5,SPRN_ESR /* Grab the ESR, save it, pass arg3 */
+ NORMAL_EXCEPTION_PROLOG(0x300, DATA_STORAGE)
+ mfspr r5,SPRN_ESR /* Grab the ESR, save it */
stw r5,_ESR(r11)
- mfspr r4,SPRN_DEAR /* Grab the DEAR, save it, pass arg2 */
+ mfspr r4,SPRN_DEAR /* Grab the DEAR, save it */
+ stw r4, _DEAR(r11)
andis. r10,r5,(ESR_ILK|ESR_DLK)@h
bne 1f
- stw r4, _DEAR(r11)
- EXC_XFER_LITE(0x0300, handle_page_fault)
+ prepare_transfer_to_handler
+ bl do_page_fault
+ b interrupt_return
1:
- addi r3,r1,STACK_FRAME_OVERHEAD
- EXC_XFER_LITE(0x0300, CacheLockingException)
+ prepare_transfer_to_handler
+ bl CacheLockingException
+ b interrupt_return
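The DataStorage split on e500 is unchanged in substance: ESR is read once, cache-locking violations go to CacheLockingException, everything else is an ordinary page fault, now routed through the common prepare_transfer_to_handler / interrupt_return path. Routing sketch in C (bit values and prototypes are assumptions for illustration):

	#define ESR_DLK 0x00200000	/* data cache locking violation (assumed) */
	#define ESR_ILK 0x00100000	/* instruction cache locking violation (assumed) */

	struct pt_regs;
	extern void do_page_fault(struct pt_regs *regs);
	extern void CacheLockingException(struct pt_regs *regs);

	static void data_storage(struct pt_regs *regs, unsigned long esr)
	{
		if (esr & (ESR_DLK | ESR_ILK))
			CacheLockingException(regs);
		else
			do_page_fault(regs);
	}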
/* Instruction Storage Interrupt */
INSTRUCTION_STORAGE_EXCEPTION
/* External Input Interrupt */
- EXCEPTION(0x0500, EXTERNAL, ExternalInput, do_IRQ, EXC_XFER_LITE)
+ EXCEPTION(0x0500, EXTERNAL, ExternalInput, do_IRQ)
/* Alignment Interrupt */
ALIGNMENT_EXCEPTION
@@ -400,14 +394,7 @@ interrupt_base:
#ifdef CONFIG_PPC_FPU
FP_UNAVAILABLE_EXCEPTION
#else
-#ifdef CONFIG_E200
- /* E200 treats 'normal' floating point instructions as FP Unavail exception */
- EXCEPTION(0x0800, FP_UNAVAIL, FloatingPointUnavailable, \
- program_check_exception, EXC_XFER_STD)
-#else
- EXCEPTION(0x0800, FP_UNAVAIL, FloatingPointUnavailable, \
- unknown_exception, EXC_XFER_STD)
-#endif
+ EXCEPTION(0x0800, FP_UNAVAIL, FloatingPointUnavailable, unknown_exception)
#endif
/* System Call Interrupt */
@@ -415,16 +402,14 @@ interrupt_base:
SYSCALL_ENTRY 0xc00 BOOKE_INTERRUPT_SYSCALL SPRN_SRR1
/* Auxiliary Processor Unavailable Interrupt */
- EXCEPTION(0x2900, AP_UNAVAIL, AuxillaryProcessorUnavailable, \
- unknown_exception, EXC_XFER_STD)
+ EXCEPTION(0x2900, AP_UNAVAIL, AuxillaryProcessorUnavailable, unknown_exception)
/* Decrementer Interrupt */
DECREMENTER_EXCEPTION
/* Fixed Internal Timer Interrupt */
/* TODO: Add FIT support */
- EXCEPTION(0x3100, FIT, FixedIntervalTimer, \
- unknown_exception, EXC_XFER_STD)
+ EXCEPTION(0x3100, FIT, FixedIntervalTimer, unknown_exception)
/* Watchdog Timer Interrupt */
#ifdef CONFIG_BOOKE_WDT
@@ -477,6 +462,12 @@ END_BTB_FLUSH_SECTION
mfspr r11,SPRN_SPRG_THREAD
lwz r11,PGDIR(r11)
+#ifdef CONFIG_PPC_KUAP
+ mfspr r12, SPRN_MAS1
+ rlwinm. r12,r12,0,0x3fff0000
+ beq 2f /* KUAP fault */
+#endif
+
4:
/* Mask of required permission bits. Note that while we
* do copy ESR:ST to _PAGE_RW position as trying to write
@@ -512,7 +503,7 @@ END_BTB_FLUSH_SECTION
#endif
#endif
- bne 2f /* Bail if permission/valid mismach */
+ bne 2f /* Bail if permission/valid mismatch */
/* Jump to common tlb load */
b finish_tlb_load
@@ -586,6 +577,12 @@ END_BTB_FLUSH_SECTION
mfspr r11,SPRN_SPRG_THREAD
lwz r11,PGDIR(r11)
+#ifdef CONFIG_PPC_KUAP
+ mfspr r12, SPRN_MAS1
+ rlwinm. r12,r12,0,0x3fff0000
+ beq 2f /* KUAP fault */
+#endif
+
/* Make up the required permissions for user code */
#ifdef CONFIG_PTE_64BIT
li r13,_PAGE_PRESENT | _PAGE_BAP_UX
@@ -607,7 +604,7 @@ END_BTB_FLUSH_SECTION
#endif
#endif
- bne 2f /* Bail if permission mismach */
+ bne 2f /* Bail if permission mismatch */
/* Jump to common TLB load point */
b finish_tlb_load
@@ -625,42 +622,48 @@ END_BTB_FLUSH_SECTION
mfspr r10, SPRN_SPRG_RSCRATCH0
b InstructionStorage
-/* Define SPE handlers for e200 and e500v2 */
+/* Define SPE handlers for e500v2 */
#ifdef CONFIG_SPE
/* SPE Unavailable */
START_EXCEPTION(SPEUnavailable)
- NORMAL_EXCEPTION_PROLOG(SPE_UNAVAIL)
+ NORMAL_EXCEPTION_PROLOG(0x2010, SPE_UNAVAIL)
beq 1f
bl load_up_spe
b fast_exception_return
-1: addi r3,r1,STACK_FRAME_OVERHEAD
- EXC_XFER_LITE(0x2010, KernelSPE)
+1: prepare_transfer_to_handler
+ bl KernelSPE
+ b interrupt_return
#elif defined(CONFIG_SPE_POSSIBLE)
- EXCEPTION(0x2020, SPE_UNAVAIL, SPEUnavailable, \
- unknown_exception, EXC_XFER_STD)
+ EXCEPTION(0x2020, SPE_UNAVAIL, SPEUnavailable, unknown_exception)
#endif /* CONFIG_SPE_POSSIBLE */
/* SPE Floating Point Data */
#ifdef CONFIG_SPE
- EXCEPTION(0x2030, SPE_FP_DATA, SPEFloatingPointData,
- SPEFloatingPointException, EXC_XFER_STD)
+ START_EXCEPTION(SPEFloatingPointData)
+ NORMAL_EXCEPTION_PROLOG(0x2030, SPE_FP_DATA)
+ prepare_transfer_to_handler
+ bl SPEFloatingPointException
+ REST_NVGPRS(r1)
+ b interrupt_return
/* SPE Floating Point Round */
- EXCEPTION(0x2050, SPE_FP_ROUND, SPEFloatingPointRound, \
- SPEFloatingPointRoundException, EXC_XFER_STD)
+ START_EXCEPTION(SPEFloatingPointRound)
+ NORMAL_EXCEPTION_PROLOG(0x2050, SPE_FP_ROUND)
+ prepare_transfer_to_handler
+ bl SPEFloatingPointRoundException
+ REST_NVGPRS(r1)
+ b interrupt_return
#elif defined(CONFIG_SPE_POSSIBLE)
- EXCEPTION(0x2040, SPE_FP_DATA, SPEFloatingPointData,
- unknown_exception, EXC_XFER_STD)
- EXCEPTION(0x2050, SPE_FP_ROUND, SPEFloatingPointRound, \
- unknown_exception, EXC_XFER_STD)
+ EXCEPTION(0x2040, SPE_FP_DATA, SPEFloatingPointData, unknown_exception)
+ EXCEPTION(0x2050, SPE_FP_ROUND, SPEFloatingPointRound, unknown_exception)
#endif /* CONFIG_SPE_POSSIBLE */
/* Performance Monitor */
EXCEPTION(0x2060, PERFORMANCE_MONITOR, PerformanceMonitor, \
- performance_monitor_exception, EXC_XFER_STD)
+ performance_monitor_exception)
- EXCEPTION(0x2070, DOORBELL, Doorbell, doorbell_exception, EXC_XFER_STD)
+ EXCEPTION(0x2070, DOORBELL, Doorbell, doorbell_exception)
CRITICAL_EXCEPTION(0x2080, DOORBELL_CRITICAL, \
CriticalDoorbell, unknown_exception)
@@ -675,10 +678,10 @@ END_BTB_FLUSH_SECTION
unknown_exception)
/* Hypercall */
- EXCEPTION(0, HV_SYSCALL, Hypercall, unknown_exception, EXC_XFER_STD)
+ EXCEPTION(0, HV_SYSCALL, Hypercall, unknown_exception)
/* Embedded Hypervisor Privilege */
- EXCEPTION(0, HV_PRIV, Ehvpriv, unknown_exception, EXC_XFER_STD)
+ EXCEPTION(0, HV_PRIV, Ehvpriv, unknown_exception)
interrupt_end:
@@ -786,6 +789,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_BIG_PHYS)
andi. r10, r11, _PAGE_USER /* Test for _PAGE_USER */
slwi r10, r12, 1
or r10, r10, r12
+ rlwinm r10, r10, 0, ~_PAGE_EXEC /* Clear SX on user pages */
iseleq r12, r12, r10
rlwimi r13, r12, 0, 20, 31 /* Get RPN from PTE, merge w/ perms */
mtspr SPRN_MAS3, r13
@@ -807,31 +811,6 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_BIG_PHYS)
#endif
3: mtspr SPRN_MAS2, r12
-#ifdef CONFIG_E200
- /* Round robin TLB1 entries assignment */
- mfspr r12, SPRN_MAS0
-
- /* Extract TLB1CFG(NENTRY) */
- mfspr r11, SPRN_TLB1CFG
- andi. r11, r11, 0xfff
-
- /* Extract MAS0(NV) */
- andi. r13, r12, 0xfff
- addi r13, r13, 1
- cmpw 0, r13, r11
- addi r12, r12, 1
-
- /* check if we need to wrap */
- blt 7f
-
- /* wrap back to first free tlbcam entry */
- lis r13, tlbcam_index@ha
- lwz r13, tlbcam_index@l(r13)
- rlwimi r12, r13, 0, 20, 31
-7:
- mtspr SPRN_MAS0,r12
-#endif /* CONFIG_E200 */
-
tlb_write_entry:
tlbwe
@@ -892,9 +871,9 @@ KernelSPE:
ori r3,r3,87f@l
mr r4,r2 /* current */
lwz r5,_NIP(r1)
- bl printk
+ bl _printk
#endif
- b ret_from_except
+ b interrupt_return
#ifdef CONFIG_PRINTK
87: .string "SPE used in kernel (task=%p, pc=%x) \n"
#endif
@@ -933,22 +912,7 @@ get_phys_addr:
* Global functions
*/
-#ifdef CONFIG_E200
-/* Adjust or setup IVORs for e200 */
-_GLOBAL(__setup_e200_ivors)
- li r3,DebugDebug@l
- mtspr SPRN_IVOR15,r3
- li r3,SPEUnavailable@l
- mtspr SPRN_IVOR32,r3
- li r3,SPEFloatingPointData@l
- mtspr SPRN_IVOR33,r3
- li r3,SPEFloatingPointRound@l
- mtspr SPRN_IVOR34,r3
- sync
- blr
-#endif
-
-#ifdef CONFIG_E500
+#ifdef CONFIG_PPC_E500
#ifndef CONFIG_PPC_E500MC
/* Adjust or setup IVORs for e500v1/v2 */
_GLOBAL(__setup_e500_ivors)
@@ -991,7 +955,7 @@ _GLOBAL(__setup_ehv_ivors)
sync
blr
#endif /* CONFIG_PPC_E500MC */
-#endif /* CONFIG_E500 */
+#endif /* CONFIG_PPC_E500 */
#ifdef CONFIG_SPE
/*
@@ -1034,20 +998,6 @@ _GLOBAL(abort)
mtspr SPRN_DBCR0,r13
isync
-_GLOBAL(set_context)
-
-#ifdef CONFIG_BDI_SWITCH
- /* Context switch the PTE pointer for the Abatron BDI2000.
- * The PGDIR is the second parameter.
- */
- lis r5, abatron_pteptrs@h
- ori r5, r5, abatron_pteptrs@l
- stw r4, 0x4(r5)
-#endif
- mtspr SPRN_PID,r3
- isync /* Force context change */
- blr
-
#ifdef CONFIG_SMP
/* When we get here, r24 needs to hold the CPU # */
.globl __secondary_start
@@ -1195,7 +1145,7 @@ _GLOBAL(switch_to_as1)
bne 1b
/* Get the tlb entry used by the current running code */
- bl 0f
+ bcl 20,31,$+4
0: mflr r4
tlbsx 0,r4
@@ -1229,7 +1179,7 @@ _GLOBAL(switch_to_as1)
_GLOBAL(restore_to_as0)
mflr r0
- bl 0f
+ bcl 20,31,$+4
0: mflr r9
addi r9,r9,1f - 0b
@@ -1275,26 +1225,3 @@ _GLOBAL(restore_to_as0)
*/
3: mr r3,r5
bl _start
-
-/*
- * We put a few things here that have to be page-aligned. This stuff
- * goes at the beginning of the data segment, which is page-aligned.
- */
- .data
- .align 12
- .globl sdata
-sdata:
- .globl empty_zero_page
-empty_zero_page:
- .space 4096
-EXPORT_SYMBOL(empty_zero_page)
- .globl swapper_pg_dir
-swapper_pg_dir:
- .space PGD_TABLE_SIZE
-
-/*
- * Room for two PTE pointers, usually the kernel and current user pointers
- * to their respective root page table.
- */
-abatron_pteptrs:
- .space 8
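
The KUAP hunks added to the DTLB and ITLB miss handlers above read MAS1, mask its TID field with 0x3fff0000, and branch to the fault exit when the field is zero ("KUAP fault"). A rough, non-authoritative C rendering of just that test follows; the macro and function names are illustrative, not kernel definitions:

    #include <stdbool.h>
    #include <stdint.h>

    #define MAS1_TID_FIELD 0x3fff0000u        /* mask used by the rlwinm. above */

    /* Mirrors the "rlwinm. ; beq 2f (KUAP fault)" sequence: skip the TLB
     * reload and take the fault path when the TID in MAS1 is zero. */
    static bool kuap_blocks_tlb_reload(uint32_t mas1)
    {
            return (mas1 & MAS1_TID_FIELD) == 0;
    }

The check costs a few instructions per miss and is compiled out entirely when CONFIG_PPC_KUAP is not set.
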
diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
index 073a651787df..0b05f2be66b9 100644
--- a/arch/powerpc/kernel/head_8xx.S
+++ b/arch/powerpc/kernel/head_8xx.S
@@ -16,11 +16,12 @@
#include <linux/init.h>
#include <linux/magic.h>
+#include <linux/pgtable.h>
+#include <linux/sizes.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/cache.h>
-#include <asm/pgtable.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
@@ -28,23 +29,7 @@
#include <asm/ptrace.h>
#include <asm/export.h>
#include <asm/code-patching-asm.h>
-
-#include "head_32.h"
-
-#if CONFIG_TASK_SIZE <= 0x80000000 && CONFIG_PAGE_OFFSET >= 0x80000000
-/* By simply checking Address >= 0x80000000, we know if its a kernel address */
-#define SIMPLE_KERNEL_ADDRESS 1
-#endif
-
-/*
- * We need an ITLB miss handler for kernel addresses if:
- * - Either we have modules
- * - Or we have not pinned the first 8M
- */
-#if defined(CONFIG_MODULES) || !defined(CONFIG_PIN_TLB_TEXT) || \
- defined(CONFIG_DEBUG_PAGEALLOC)
-#define ITLB_MISS_KERNEL 1
-#endif
+#include <asm/interrupt.h>
/*
* Value for the bits that have fixed value in RPN entries.
@@ -52,12 +37,24 @@
*/
#define RPN_PATTERN 0x00f0
+#include "head_32.h"
+
+.macro compare_to_kernel_boundary scratch, addr
+#if CONFIG_TASK_SIZE <= 0x80000000 && CONFIG_PAGE_OFFSET >= 0x80000000
+/* By simply checking Address >= 0x80000000, we know if its a kernel address */
+ not. \scratch, \addr
+#else
+ rlwinm \scratch, \addr, 16, 0xfff8
+ cmpli cr0, \scratch, PAGE_OFFSET@h
+#endif
+.endm
+
#define PAGE_SHIFT_512K 19
#define PAGE_SHIFT_8M 23
__HEAD
-_ENTRY(_stext);
-_ENTRY(_start);
+_GLOBAL(_stext);
+_GLOBAL(_start);
/* MPC8xx
* This port was done on an MBX board with an 860. Right now I only
@@ -122,56 +119,54 @@ instruction_counter:
#endif
/* System reset */
- EXCEPTION(0x100, Reset, system_reset_exception, EXC_XFER_STD)
+ EXCEPTION(INTERRUPT_SYSTEM_RESET, Reset, system_reset_exception)
/* Machine check */
- . = 0x200
-MachineCheck:
- EXCEPTION_PROLOG handle_dar_dsisr=1
- save_dar_dsisr_on_stack r4, r5, r11
- li r6, RPN_PATTERN
- mtspr SPRN_DAR, r6 /* Tag DAR, to be used in DTLB Error */
- addi r3,r1,STACK_FRAME_OVERHEAD
- EXC_XFER_STD(0x200, machine_check_exception)
+ START_EXCEPTION(INTERRUPT_MACHINE_CHECK, MachineCheck)
+ EXCEPTION_PROLOG INTERRUPT_MACHINE_CHECK MachineCheck handle_dar_dsisr=1
+ prepare_transfer_to_handler
+ bl machine_check_exception
+ b interrupt_return
/* External interrupt */
- EXCEPTION(0x500, HardwareInterrupt, do_IRQ, EXC_XFER_LITE)
+ EXCEPTION(INTERRUPT_EXTERNAL, HardwareInterrupt, do_IRQ)
/* Alignment exception */
- . = 0x600
-Alignment:
- EXCEPTION_PROLOG handle_dar_dsisr=1
- save_dar_dsisr_on_stack r4, r5, r11
- li r6, RPN_PATTERN
- mtspr SPRN_DAR, r6 /* Tag DAR, to be used in DTLB Error */
- addi r3,r1,STACK_FRAME_OVERHEAD
- b .Lalignment_exception_ool
+ START_EXCEPTION(INTERRUPT_ALIGNMENT, Alignment)
+ EXCEPTION_PROLOG INTERRUPT_ALIGNMENT Alignment handle_dar_dsisr=1
+ prepare_transfer_to_handler
+ bl alignment_exception
+ REST_NVGPRS(r1)
+ b interrupt_return
/* Program check exception */
- EXCEPTION(0x700, ProgramCheck, program_check_exception, EXC_XFER_STD)
+ START_EXCEPTION(INTERRUPT_PROGRAM, ProgramCheck)
+ EXCEPTION_PROLOG INTERRUPT_PROGRAM ProgramCheck
+ prepare_transfer_to_handler
+ bl program_check_exception
+ REST_NVGPRS(r1)
+ b interrupt_return
/* Decrementer */
- EXCEPTION(0x900, Decrementer, timer_interrupt, EXC_XFER_LITE)
-
- /* With VMAP_STACK there's not enough room for this at 0x600 */
- . = 0xa00
-.Lalignment_exception_ool:
- EXC_XFER_STD(0x600, alignment_exception)
+ EXCEPTION(INTERRUPT_DECREMENTER, Decrementer, timer_interrupt)
/* System call */
- . = 0xc00
-SystemCall:
- SYSCALL_ENTRY 0xc00
+ START_EXCEPTION(INTERRUPT_SYSCALL, SystemCall)
+ SYSCALL_ENTRY INTERRUPT_SYSCALL
/* Single step - not used on 601 */
- EXCEPTION(0xd00, SingleStep, single_step_exception, EXC_XFER_STD)
+ EXCEPTION(INTERRUPT_TRACE, SingleStep, single_step_exception)
/* On the MPC8xx, this is a software emulation interrupt. It occurs
* for all unimplemented and illegal instructions.
*/
- EXCEPTION(0x1000, SoftEmu, program_check_exception, EXC_XFER_STD)
+ START_EXCEPTION(INTERRUPT_SOFT_EMU_8xx, SoftEmu)
+ EXCEPTION_PROLOG INTERRUPT_SOFT_EMU_8xx SoftEmu
+ prepare_transfer_to_handler
+ bl emulation_assist_interrupt
+ REST_NVGPRS(r1)
+ b interrupt_return
- . = 0x1100
/*
* For the MPC8xx, this is a software tablewalk to load the instruction
* TLB. The task switch loads the M_TWB register with the pointer to the first
@@ -184,72 +179,44 @@ SystemCall:
*/
#ifdef CONFIG_8xx_CPU15
-#define INVALIDATE_ADJACENT_PAGES_CPU15(addr) \
- addi addr, addr, PAGE_SIZE; \
- tlbie addr; \
- addi addr, addr, -(PAGE_SIZE << 1); \
- tlbie addr; \
- addi addr, addr, PAGE_SIZE
+#define INVALIDATE_ADJACENT_PAGES_CPU15(addr, tmp) \
+ addi tmp, addr, PAGE_SIZE; \
+ tlbie tmp; \
+ addi tmp, addr, -PAGE_SIZE; \
+ tlbie tmp
#else
-#define INVALIDATE_ADJACENT_PAGES_CPU15(addr)
+#define INVALIDATE_ADJACENT_PAGES_CPU15(addr, tmp)
#endif
-InstructionTLBMiss:
- mtspr SPRN_SPRG_SCRATCH0, r10
-#if defined(ITLB_MISS_KERNEL) || defined(CONFIG_SWAP)
- mtspr SPRN_SPRG_SCRATCH1, r11
-#endif
+ START_EXCEPTION(INTERRUPT_INST_TLB_MISS_8xx, InstructionTLBMiss)
+ mtspr SPRN_SPRG_SCRATCH2, r10
+ mtspr SPRN_M_TW, r11
/* If we are faulting a kernel address, we have to use the
* kernel page tables.
*/
mfspr r10, SPRN_SRR0 /* Get effective address of fault */
- INVALIDATE_ADJACENT_PAGES_CPU15(r10)
+ INVALIDATE_ADJACENT_PAGES_CPU15(r10, r11)
mtspr SPRN_MD_EPN, r10
- /* Only modules will cause ITLB Misses as we always
- * pin the first 8MB of kernel memory */
-#ifdef ITLB_MISS_KERNEL
+#ifdef CONFIG_MODULES
mfcr r11
-#if defined(SIMPLE_KERNEL_ADDRESS) && defined(CONFIG_PIN_TLB_TEXT)
- cmpi cr0, r10, 0 /* Address >= 0x80000000 */
-#else
- rlwinm r10, r10, 16, 0xfff8
- cmpli cr0, r10, PAGE_OFFSET@h
-#ifndef CONFIG_PIN_TLB_TEXT
- /* It is assumed that kernel code fits into the first 32M */
-0: cmpli cr7, r10, (PAGE_OFFSET + 0x2000000)@h
- patch_site 0b, patch__itlbmiss_linmem_top
-#endif
-#endif
+ compare_to_kernel_boundary r10, r10
#endif
mfspr r10, SPRN_M_TWB /* Get level 1 table */
-#ifdef ITLB_MISS_KERNEL
-#if defined(SIMPLE_KERNEL_ADDRESS) && defined(CONFIG_PIN_TLB_TEXT)
- bge+ 3f
-#else
+#ifdef CONFIG_MODULES
blt+ 3f
-#endif
-#ifndef CONFIG_PIN_TLB_TEXT
- blt cr7, ITLBMissLinear
-#endif
rlwinm r10, r10, 0, 20, 31
oris r10, r10, (swapper_pg_dir - PAGE_OFFSET)@ha
3:
+ mtcr r11
#endif
- lwz r10, (swapper_pg_dir-PAGE_OFFSET)@l(r10) /* Get level 1 entry */
- mtspr SPRN_MI_TWC, r10 /* Set segment attributes */
-
- mtspr SPRN_MD_TWC, r10
+ lwz r11, (swapper_pg_dir-PAGE_OFFSET)@l(r10) /* Get level 1 entry */
+ mtspr SPRN_MD_TWC, r11
mfspr r10, SPRN_MD_TWC
lwz r10, 0(r10) /* Get the pte */
-#ifdef ITLB_MISS_KERNEL
- mtcr r11
-#endif
-#ifdef CONFIG_SWAP
- rlwinm r11, r10, 32-5, _PAGE_PRESENT
- and r11, r11, r10
- rlwimi r10, r11, 0, _PAGE_PRESENT
-#endif
+ rlwimi r11, r10, 0, _PAGE_GUARDED | _PAGE_ACCESSED
+ rlwimi r11, r10, 32 - 9, _PMD_PAGE_512K
+ mtspr SPRN_MI_TWC, r11
/* The Linux PTE won't go exactly into the MMU TLB.
* Software indicator bits 20 and 23 must be clear.
* Software indicator bits 22, 24, 25, 26, and 27 must be
@@ -262,10 +229,8 @@ InstructionTLBMiss:
mtspr SPRN_MI_RPN, r10 /* Update TLB entry */
/* Restore registers */
-0: mfspr r10, SPRN_SPRG_SCRATCH0
-#if defined(ITLB_MISS_KERNEL) || defined(CONFIG_SWAP)
- mfspr r11, SPRN_SPRG_SCRATCH1
-#endif
+0: mfspr r10, SPRN_SPRG_SCRATCH2
+ mfspr r11, SPRN_M_TW
rfi
patch_site 0b, patch__itlbmiss_exit_1
@@ -274,43 +239,13 @@ InstructionTLBMiss:
0: lwz r10, (itlb_miss_counter - PAGE_OFFSET)@l(0)
addi r10, r10, 1
stw r10, (itlb_miss_counter - PAGE_OFFSET)@l(0)
- mfspr r10, SPRN_SPRG_SCRATCH0
-#if defined(ITLB_MISS_KERNEL) || defined(CONFIG_SWAP)
- mfspr r11, SPRN_SPRG_SCRATCH1
-#endif
- rfi
-#endif
-
-#ifndef CONFIG_PIN_TLB_TEXT
-ITLBMissLinear:
- mtcr r11
-#if defined(CONFIG_STRICT_KERNEL_RWX) && CONFIG_ETEXT_SHIFT < 23
- patch_site 0f, patch__itlbmiss_linmem_top8
-
- mfspr r10, SPRN_SRR0
-0: subis r11, r10, (PAGE_OFFSET - 0x80000000)@ha
- rlwinm r11, r11, 4, MI_PS8MEG ^ MI_PS512K
- ori r11, r11, MI_PS512K | MI_SVALID
- rlwinm r10, r10, 0, 0x0ff80000 /* 8xx supports max 256Mb RAM */
-#else
- /* Set 8M byte page and mark it valid */
- li r11, MI_PS8MEG | MI_SVALID
- rlwinm r10, r10, 20, 0x0f800000 /* 8xx supports max 256Mb RAM */
-#endif
- mtspr SPRN_MI_TWC, r11
- ori r10, r10, 0xf0 | MI_SPS16K | _PAGE_SH | _PAGE_DIRTY | \
- _PAGE_PRESENT
- mtspr SPRN_MI_RPN, r10 /* Update TLB entry */
-
-0: mfspr r10, SPRN_SPRG_SCRATCH0
- mfspr r11, SPRN_SPRG_SCRATCH1
+ mfspr r10, SPRN_SPRG_SCRATCH2
+ mfspr r11, SPRN_M_TW
rfi
- patch_site 0b, patch__itlbmiss_exit_2
#endif
- . = 0x1200
-DataStoreTLBMiss:
- mtspr SPRN_DAR, r10
+ START_EXCEPTION(INTERRUPT_DATA_TLB_MISS_8xx, DataStoreTLBMiss)
+ mtspr SPRN_SPRG_SCRATCH2, r10
mtspr SPRN_M_TW, r11
mfcr r11
@@ -318,21 +253,9 @@ DataStoreTLBMiss:
* kernel page tables.
*/
mfspr r10, SPRN_MD_EPN
- rlwinm r10, r10, 16, 0xfff8
- cmpli cr0, r10, PAGE_OFFSET@h
-#ifndef CONFIG_PIN_TLB_IMMR
- cmpli cr6, r10, VIRT_IMMR_BASE@h
-#endif
-0: cmpli cr7, r10, (PAGE_OFFSET + 0x2000000)@h
- patch_site 0b, patch__dtlbmiss_linmem_top
-
+ compare_to_kernel_boundary r10, r10
mfspr r10, SPRN_M_TWB /* Get level 1 table */
blt+ 3f
-#ifndef CONFIG_PIN_TLB_IMMR
-0: beq- cr6, DTLBMissIMMR
- patch_site 0b, patch__dtlbmiss_immr_jmp
-#endif
- blt cr7, DTLBMissLinear
rlwinm r10, r10, 0, 20, 31
oris r10, r10, (swapper_pg_dir - PAGE_OFFSET)@ha
3:
@@ -343,29 +266,16 @@ DataStoreTLBMiss:
mfspr r10, SPRN_MD_TWC
lwz r10, 0(r10) /* Get the pte */
- /* Insert the Guarded flag into the TWC from the Linux PTE.
+ /* Insert Guarded and Accessed flags into the TWC from the Linux PTE.
* It is bit 27 of both the Linux PTE and the TWC (at least
* I got that right :-). It will be better when we can put
* this into the Linux pgd/pmd and load it in the operation
* above.
*/
- rlwimi r11, r10, 0, _PAGE_GUARDED
+ rlwimi r11, r10, 0, _PAGE_GUARDED | _PAGE_ACCESSED
+ rlwimi r11, r10, 32 - 9, _PMD_PAGE_512K
mtspr SPRN_MD_TWC, r11
- /* Both _PAGE_ACCESSED and _PAGE_PRESENT has to be set.
- * We also need to know if the insn is a load/store, so:
- * Clear _PAGE_PRESENT and load that which will
- * trap into DTLB Error with store bit set accordinly.
- */
- /* PRESENT=0x1, ACCESSED=0x20
- * r11 = ((r10 & PRESENT) & ((r10 & ACCESSED) >> 5));
- * r10 = (r10 & ~PRESENT) | r11;
- */
-#ifdef CONFIG_SWAP
- rlwinm r11, r10, 32-5, _PAGE_PRESENT
- and r11, r11, r10
- rlwimi r10, r11, 0, _PAGE_PRESENT
-#endif
/* The Linux PTE won't go exactly into the MMU TLB.
* Software indicator bits 24, 25, 26, and 27 must be
* set. All other Linux PTE bits control the behavior
@@ -374,165 +284,97 @@ DataStoreTLBMiss:
li r11, RPN_PATTERN
rlwimi r10, r11, 0, 24, 27 /* Set 24-27 */
mtspr SPRN_MD_RPN, r10 /* Update TLB entry */
+ mtspr SPRN_DAR, r11 /* Tag DAR */
/* Restore registers */
-0: mfspr r10, SPRN_DAR
- mtspr SPRN_DAR, r11 /* Tag DAR */
+0: mfspr r10, SPRN_SPRG_SCRATCH2
mfspr r11, SPRN_M_TW
rfi
patch_site 0b, patch__dtlbmiss_exit_1
-DTLBMissIMMR:
- mtcr r11
- /* Set 512k byte guarded page and mark it valid */
- li r10, MD_PS512K | MD_GUARDED | MD_SVALID
- mtspr SPRN_MD_TWC, r10
- mfspr r10, SPRN_IMMR /* Get current IMMR */
- rlwinm r10, r10, 0, 0xfff80000 /* Get 512 kbytes boundary */
- ori r10, r10, 0xf0 | MD_SPS16K | _PAGE_SH | _PAGE_DIRTY | \
- _PAGE_PRESENT | _PAGE_NO_CACHE
- mtspr SPRN_MD_RPN, r10 /* Update TLB entry */
-
- li r11, RPN_PATTERN
-
-0: mfspr r10, SPRN_DAR
- mtspr SPRN_DAR, r11 /* Tag DAR */
+#ifdef CONFIG_PERF_EVENTS
+ patch_site 0f, patch__dtlbmiss_perf
+0: lwz r10, (dtlb_miss_counter - PAGE_OFFSET)@l(0)
+ addi r10, r10, 1
+ stw r10, (dtlb_miss_counter - PAGE_OFFSET)@l(0)
+ mfspr r10, SPRN_SPRG_SCRATCH2
mfspr r11, SPRN_M_TW
rfi
- patch_site 0b, patch__dtlbmiss_exit_2
-
-DTLBMissLinear:
- mtcr r11
- rlwinm r10, r10, 20, 0x0f800000 /* 8xx supports max 256Mb RAM */
-#if defined(CONFIG_STRICT_KERNEL_RWX) && CONFIG_DATA_SHIFT < 23
- patch_site 0f, patch__dtlbmiss_romem_top8
-
-0: subis r11, r10, (PAGE_OFFSET - 0x80000000)@ha
- rlwinm r11, r11, 0, 0xff800000
- neg r10, r11
- or r11, r11, r10
- rlwinm r11, r11, 4, MI_PS8MEG ^ MI_PS512K
- ori r11, r11, MI_PS512K | MI_SVALID
- mfspr r10, SPRN_MD_EPN
- rlwinm r10, r10, 0, 0x0ff80000 /* 8xx supports max 256Mb RAM */
-#else
- /* Set 8M byte page and mark it valid */
- li r11, MD_PS8MEG | MD_SVALID
#endif
- mtspr SPRN_MD_TWC, r11
-#ifdef CONFIG_STRICT_KERNEL_RWX
- patch_site 0f, patch__dtlbmiss_romem_top
-
-0: subis r11, r10, 0
- rlwimi r10, r11, 11, _PAGE_RO
-#endif
- ori r10, r10, 0xf0 | MD_SPS16K | _PAGE_SH | _PAGE_DIRTY | \
- _PAGE_PRESENT
- mtspr SPRN_MD_RPN, r10 /* Update TLB entry */
-
- li r11, RPN_PATTERN
-
-0: mfspr r10, SPRN_DAR
- mtspr SPRN_DAR, r11 /* Tag DAR */
- mfspr r11, SPRN_M_TW
- rfi
- patch_site 0b, patch__dtlbmiss_exit_3
/* This is an instruction TLB error on the MPC8xx. This could be due
* to many reasons, such as executing guarded memory or illegal instruction
* addresses. There is nothing to do but handle a big time error fault.
*/
- . = 0x1300
-InstructionTLBError:
- EXCEPTION_PROLOG
- mr r4,r12
+ START_EXCEPTION(INTERRUPT_INST_TLB_ERROR_8xx, InstructionTLBError)
+ /* 0x400 is InstructionAccess exception, needed by bad_page_fault() */
+ EXCEPTION_PROLOG INTERRUPT_INST_STORAGE InstructionTLBError
andis. r5,r9,DSISR_SRR1_MATCH_32S@h /* Filter relevant SRR1 bits */
andis. r10,r9,SRR1_ISI_NOPT@h
beq+ .Litlbie
- tlbie r4
- /* 0x400 is InstructionAccess exception, needed by bad_page_fault() */
+ tlbie r12
.Litlbie:
- stw r4, _DAR(r11)
- EXC_XFER_LITE(0x400, handle_page_fault)
+ stw r12, _DAR(r11)
+ stw r5, _DSISR(r11)
+ prepare_transfer_to_handler
+ bl do_page_fault
+ b interrupt_return
/* This is the data TLB error on the MPC8xx. This could be due to
* many reasons, including a dirty update to a pte. We bail out to
* a higher level function that can handle it.
*/
- . = 0x1400
-DataTLBError:
+ START_EXCEPTION(INTERRUPT_DATA_TLB_ERROR_8xx, DataTLBError)
EXCEPTION_PROLOG_0 handle_dar_dsisr=1
mfspr r11, SPRN_DAR
cmpwi cr1, r11, RPN_PATTERN
beq- cr1, FixupDAR /* must be a buggy dcbX, icbi insn. */
DARFixed:/* Return from dcbx instruction bug workaround */
-#ifdef CONFIG_VMAP_STACK
- li r11, RPN_PATTERN
- mtspr SPRN_DAR, r11 /* Tag DAR, to be used in DTLB Error */
-#endif
EXCEPTION_PROLOG_1
- EXCEPTION_PROLOG_2 handle_dar_dsisr=1
- get_and_save_dar_dsisr_on_stack r4, r5, r11
+ /* 0x300 is DataAccess exception, needed by bad_page_fault() */
+ EXCEPTION_PROLOG_2 INTERRUPT_DATA_STORAGE DataTLBError handle_dar_dsisr=1
+ lwz r4, _DAR(r11)
+ lwz r5, _DSISR(r11)
andis. r10,r5,DSISR_NOHPTE@h
beq+ .Ldtlbie
tlbie r4
.Ldtlbie:
-#ifndef CONFIG_VMAP_STACK
- li r10,RPN_PATTERN
- mtspr SPRN_DAR,r10 /* Tag DAR, to be used in DTLB Error */
-#endif
- /* 0x300 is DataAccess exception, needed by bad_page_fault() */
- EXC_XFER_LITE(0x300, handle_page_fault)
+ prepare_transfer_to_handler
+ bl do_page_fault
+ b interrupt_return
-/* Called from DataStoreTLBMiss when perf TLB misses events are activated */
-#ifdef CONFIG_PERF_EVENTS
- patch_site 0f, patch__dtlbmiss_perf
-0: lwz r10, (dtlb_miss_counter - PAGE_OFFSET)@l(0)
- addi r10, r10, 1
- stw r10, (dtlb_miss_counter - PAGE_OFFSET)@l(0)
- mfspr r10, SPRN_DAR
- mtspr SPRN_DAR, r11 /* Tag DAR */
- mfspr r11, SPRN_M_TW
- rfi
-#endif
-
-stack_overflow:
+#ifdef CONFIG_VMAP_STACK
vmap_stack_overflow_exception
+#endif
/* On the MPC8xx, these next four traps are used for development
* support of breakpoints and such. Someday I will get around to
* using them.
*/
-do_databreakpoint:
- EXCEPTION_PROLOG_1
- EXCEPTION_PROLOG_2 handle_dar_dsisr=1
- addi r3,r1,STACK_FRAME_OVERHEAD
- mfspr r4,SPRN_BAR
- stw r4,_DAR(r11)
-#ifdef CONFIG_VMAP_STACK
- lwz r5,_DSISR(r11)
-#else
- mfspr r5,SPRN_DSISR
-#endif
- EXC_XFER_STD(0x1c00, do_break)
-
- . = 0x1c00
-DataBreakpoint:
+ START_EXCEPTION(INTERRUPT_DATA_BREAKPOINT_8xx, DataBreakpoint)
EXCEPTION_PROLOG_0 handle_dar_dsisr=1
mfspr r11, SPRN_SRR0
cmplwi cr1, r11, (.Ldtlbie - PAGE_OFFSET)@l
cmplwi cr7, r11, (.Litlbie - PAGE_OFFSET)@l
cror 4*cr1+eq, 4*cr1+eq, 4*cr7+eq
- bne cr1, do_databreakpoint
+ bne cr1, 1f
mtcr r10
mfspr r10, SPRN_SPRG_SCRATCH0
mfspr r11, SPRN_SPRG_SCRATCH1
rfi
+1: EXCEPTION_PROLOG_1
+ EXCEPTION_PROLOG_2 INTERRUPT_DATA_BREAKPOINT_8xx DataBreakpoint handle_dar_dsisr=1
+ mfspr r4,SPRN_BAR
+ stw r4,_DAR(r11)
+ prepare_transfer_to_handler
+ bl do_break
+ REST_NVGPRS(r1)
+ b interrupt_return
+
#ifdef CONFIG_PERF_EVENTS
- . = 0x1d00
-InstructionBreakpoint:
+ START_EXCEPTION(INTERRUPT_INST_BREAKPOINT_8xx, InstructionBreakpoint)
mtspr SPRN_SPRG_SCRATCH0, r10
lwz r10, (instruction_counter - PAGE_OFFSET)@l(0)
addi r10, r10, -1
@@ -543,11 +385,12 @@ InstructionBreakpoint:
mfspr r10, SPRN_SPRG_SCRATCH0
rfi
#else
- EXCEPTION(0x1d00, Trap_1d, unknown_exception, EXC_XFER_STD)
+ EXCEPTION(INTERRUPT_INST_BREAKPOINT_8xx, Trap_1d, unknown_exception)
#endif
- EXCEPTION(0x1e00, Trap_1e, unknown_exception, EXC_XFER_STD)
- EXCEPTION(0x1f00, Trap_1f, unknown_exception, EXC_XFER_STD)
+ EXCEPTION(0x1e00, Trap_1e, unknown_exception)
+ EXCEPTION(0x1f00, Trap_1f, unknown_exception)
+ __HEAD
. = 0x2000
/* This is the procedure to calculate the data EA for buggy dcbx,dcbi instructions
@@ -563,14 +406,9 @@ FixupDAR:/* Entry point for dcbx workaround. */
cmpli cr1, r11, PAGE_OFFSET@h
mfspr r11, SPRN_M_TWB /* Get level 1 table */
blt+ cr1, 3f
- rlwinm r11, r10, 16, 0xfff8
-
-0: cmpli cr7, r11, (PAGE_OFFSET + 0x1800000)@h
- patch_site 0b, patch__fixupdar_linmem_top
/* create physical page address from effective address */
tophys(r11, r10)
- blt- cr7, 201f
mfspr r11, SPRN_M_TWB /* Get level 1 table */
rlwinm r11, r11, 0, 20, 31
oris r11, r11, (swapper_pg_dir - PAGE_OFFSET)@ha
@@ -581,7 +419,6 @@ FixupDAR:/* Entry point for dcbx workaround. */
mfspr r11, SPRN_MD_TWC
lwz r11, 0(r11) /* Get the pte */
bt 28,200f /* bit 28 = Large page (8M) */
- bt 29,202f /* bit 29 = Large page (8M or 512K) */
/* concat physical page address(r11) and page offset(r10) */
rlwimi r11, r10, 0, 32 - PAGE_SHIFT, 31
201: lwz r11,0(r11)
@@ -608,11 +445,6 @@ FixupDAR:/* Entry point for dcbx workaround. */
rlwimi r11, r10, 0, 32 - PAGE_SHIFT_8M, 31
b 201b
-202:
- /* concat physical page address(r11) and page offset(r10) */
- rlwimi r11, r10, 0, 32 - PAGE_SHIFT_512K, 31
- b 201b
-
144: mfspr r10, SPRN_DSISR
rlwinm r10, r10,0,7,5 /* Clear store bit for buggy dcbst insn */
mtspr SPRN_DSISR, r10
@@ -668,14 +500,10 @@ FixupDAR:/* Entry point for dcbx workaround. */
152:
mfdar r11
mtctr r11 /* restore ctr reg from DAR */
-#ifdef CONFIG_VMAP_STACK
mfspr r11, SPRN_SPRG_THREAD
stw r10, DAR(r11)
mfspr r10, SPRN_DSISR
stw r10, DSISR(r11)
-#else
- mtdar r10 /* save fault EA to DAR */
-#endif
mfspr r10,SPRN_M_TW
b DARFixed /* Go back to normal TLB handling */
@@ -747,6 +575,27 @@ start_here:
rfi
/* Load up the kernel context */
2:
+#ifdef CONFIG_PIN_TLB_IMMR
+ lis r0, MD_TWAM@h
+ oris r0, r0, 0x1f00
+ mtspr SPRN_MD_CTR, r0
+ LOAD_REG_IMMEDIATE(r0, VIRT_IMMR_BASE | MD_EVALID)
+ tlbie r0
+ mtspr SPRN_MD_EPN, r0
+ LOAD_REG_IMMEDIATE(r0, MD_SVALID | MD_PS512K | MD_GUARDED)
+ mtspr SPRN_MD_TWC, r0
+ mfspr r0, SPRN_IMMR
+ rlwinm r0, r0, 0, 0xfff80000
+ ori r0, r0, 0xf0 | _PAGE_DIRTY | _PAGE_SPS | _PAGE_SH | \
+ _PAGE_NO_CACHE | _PAGE_PRESENT
+ mtspr SPRN_MD_RPN, r0
+ lis r0, (MD_TWAM | MD_RSV4I)@h
+ mtspr SPRN_MD_CTR, r0
+#endif
+#if !defined(CONFIG_PIN_TLB_DATA) && !defined(CONFIG_PIN_TLB_IMMR)
+ lis r0, MD_TWAM@h
+ mtspr SPRN_MD_CTR, r0
+#endif
tlbia /* Clear all TLB entries */
sync /* wait for tlbia/tlbie to finish */
@@ -779,17 +628,10 @@ start_here:
initial_mmu:
li r8, 0
mtspr SPRN_MI_CTR, r8 /* remove PINNED ITLB entries */
- lis r10, MD_RESETVAL@h
-#ifndef CONFIG_8xx_COPYBACK
- oris r10, r10, MD_WTDEF@h
-#endif
+ lis r10, MD_TWAM@h
mtspr SPRN_MD_CTR, r10 /* remove PINNED DTLB entries */
tlbia /* Invalidate all TLB entries */
-#ifdef CONFIG_PIN_TLB_DATA
- oris r10, r10, MD_RSV4I@h
- mtspr SPRN_MD_CTR, r10 /* Set data TLB control */
-#endif
lis r8, MI_APG_INIT@h /* Set protection modes */
ori r8, r8, MI_APG_INIT@l
@@ -798,55 +640,32 @@ initial_mmu:
ori r8, r8, MD_APG_INIT@l
mtspr SPRN_MD_AP, r8
- /* Map a 512k page for the IMMR to get the processor
- * internal registers (among other things).
- */
-#ifdef CONFIG_PIN_TLB_IMMR
- oris r10, r10, MD_RSV4I@h
- ori r10, r10, 0x1c00
- mtspr SPRN_MD_CTR, r10
-
- mfspr r9, 638 /* Get current IMMR */
- andis. r9, r9, 0xfff8 /* Get 512 kbytes boundary */
-
- lis r8, VIRT_IMMR_BASE@h /* Create vaddr for TLB */
- ori r8, r8, MD_EVALID /* Mark it valid */
- mtspr SPRN_MD_EPN, r8
- li r8, MD_PS512K | MD_GUARDED /* Set 512k byte page */
- ori r8, r8, MD_SVALID /* Make it valid */
- mtspr SPRN_MD_TWC, r8
- mr r8, r9 /* Create paddr for TLB */
- ori r8, r8, MI_BOOTINIT|0x2 /* Inhibit cache -- Cort */
- mtspr SPRN_MD_RPN, r8
-#endif
-
- /* Now map the lower RAM (up to 32 Mbytes) into the ITLB. */
-#ifdef CONFIG_PIN_TLB_TEXT
+ /* Map the lower RAM (up to 32 Mbytes) into the ITLB and DTLB */
lis r8, MI_RSV4I@h
ori r8, r8, 0x1c00
-#endif
+ oris r12, r10, MD_RSV4I@h
+ ori r12, r12, 0x1c00
li r9, 4 /* up to 4 pages of 8M */
mtctr r9
lis r9, KERNELBASE@h /* Create vaddr for TLB */
- li r10, MI_PS8MEG | MI_SVALID /* Set 8M byte page */
+ li r10, MI_PS8MEG | _PMD_ACCESSED | MI_SVALID
li r11, MI_BOOTINIT /* Create RPN for address 0 */
- lis r12, _einittext@h
- ori r12, r12, _einittext@l
1:
-#ifdef CONFIG_PIN_TLB_TEXT
mtspr SPRN_MI_CTR, r8 /* Set instruction MMU control */
addi r8, r8, 0x100
-#endif
-
ori r0, r9, MI_EVALID /* Mark it valid */
mtspr SPRN_MI_EPN, r0
mtspr SPRN_MI_TWC, r10
mtspr SPRN_MI_RPN, r11 /* Store TLB entry */
+ mtspr SPRN_MD_CTR, r12
+ addi r12, r12, 0x100
+ mtspr SPRN_MD_EPN, r0
+ mtspr SPRN_MD_TWC, r10
+ mtspr SPRN_MD_RPN, r11
addis r9, r9, 0x80
addis r11, r11, 0x80
- cmpl cr0, r9, r12
- bdnzf gt, 1b
+ bdnz 1b
/* Since the cache is enabled according to the information we
* just loaded into the TLB, invalidate and enable the caches here.
@@ -857,17 +676,7 @@ initial_mmu:
mtspr SPRN_DC_CST, r8
lis r8, IDC_ENABLE@h
mtspr SPRN_IC_CST, r8
-#ifdef CONFIG_8xx_COPYBACK
- mtspr SPRN_DC_CST, r8
-#else
- /* For a debug option, I left this here to easily enable
- * the write through cache mode
- */
- lis r8, DC_SFWT@h
- mtspr SPRN_DC_CST, r8
- lis r8, IDC_ENABLE@h
mtspr SPRN_DC_CST, r8
-#endif
/* Disable debug mode entry on breakpoints */
mfspr r8, SPRN_DER
#ifdef CONFIG_PERF_EVENTS
@@ -878,28 +687,103 @@ initial_mmu:
mtspr SPRN_DER, r8
blr
-
-/*
- * We put a few things here that have to be page-aligned.
- * This stuff goes at the beginning of the data segment,
- * which is page-aligned.
- */
- .data
- .globl sdata
-sdata:
- .globl empty_zero_page
- .align PAGE_SHIFT
-empty_zero_page:
- .space PAGE_SIZE
-EXPORT_SYMBOL(empty_zero_page)
-
- .globl swapper_pg_dir
-swapper_pg_dir:
- .space PGD_TABLE_SIZE
-
-/* Room for two PTE table poiners, usually the kernel and current user
- * pointer to their respective root page table (pgdir).
- */
- .globl abatron_pteptrs
-abatron_pteptrs:
- .space 8
+_GLOBAL(mmu_pin_tlb)
+ lis r9, (1f - PAGE_OFFSET)@h
+ ori r9, r9, (1f - PAGE_OFFSET)@l
+ mfmsr r10
+ mflr r11
+ li r12, MSR_KERNEL & ~(MSR_IR | MSR_DR | MSR_RI)
+ rlwinm r0, r10, 0, ~MSR_RI
+ rlwinm r0, r0, 0, ~MSR_EE
+ mtmsr r0
+ isync
+ .align 4
+ mtspr SPRN_SRR0, r9
+ mtspr SPRN_SRR1, r12
+ rfi
+1:
+ li r5, 0
+ lis r6, MD_TWAM@h
+ mtspr SPRN_MI_CTR, r5
+ mtspr SPRN_MD_CTR, r6
+ tlbia
+
+ LOAD_REG_IMMEDIATE(r5, 28 << 8)
+ LOAD_REG_IMMEDIATE(r6, PAGE_OFFSET)
+ LOAD_REG_IMMEDIATE(r7, MI_SVALID | MI_PS8MEG | _PMD_ACCESSED)
+ LOAD_REG_IMMEDIATE(r8, 0xf0 | _PAGE_RO | _PAGE_SPS | _PAGE_SH | _PAGE_PRESENT)
+ LOAD_REG_ADDR(r9, _sinittext)
+ li r0, 4
+ mtctr r0
+
+2: ori r0, r6, MI_EVALID
+ mtspr SPRN_MI_CTR, r5
+ mtspr SPRN_MI_EPN, r0
+ mtspr SPRN_MI_TWC, r7
+ mtspr SPRN_MI_RPN, r8
+ addi r5, r5, 0x100
+ addis r6, r6, SZ_8M@h
+ addis r8, r8, SZ_8M@h
+ cmplw r6, r9
+ bdnzt lt, 2b
+ lis r0, MI_RSV4I@h
+ mtspr SPRN_MI_CTR, r0
+
+ LOAD_REG_IMMEDIATE(r5, 28 << 8 | MD_TWAM)
+#ifdef CONFIG_PIN_TLB_DATA
+ LOAD_REG_IMMEDIATE(r6, PAGE_OFFSET)
+ LOAD_REG_IMMEDIATE(r7, MI_SVALID | MI_PS8MEG | _PMD_ACCESSED)
+ li r8, 0
+#ifdef CONFIG_PIN_TLB_IMMR
+ li r0, 3
+#else
+ li r0, 4
+#endif
+ mtctr r0
+ cmpwi r4, 0
+ beq 4f
+ LOAD_REG_ADDR(r9, _sinittext)
+
+2: ori r0, r6, MD_EVALID
+ ori r12, r8, 0xf0 | _PAGE_RO | _PAGE_SPS | _PAGE_SH | _PAGE_PRESENT
+ mtspr SPRN_MD_CTR, r5
+ mtspr SPRN_MD_EPN, r0
+ mtspr SPRN_MD_TWC, r7
+ mtspr SPRN_MD_RPN, r12
+ addi r5, r5, 0x100
+ addis r6, r6, SZ_8M@h
+ addis r8, r8, SZ_8M@h
+ cmplw r6, r9
+ bdnzt lt, 2b
+4:
+2: ori r0, r6, MD_EVALID
+ ori r12, r8, 0xf0 | _PAGE_DIRTY | _PAGE_SPS | _PAGE_SH | _PAGE_PRESENT
+ mtspr SPRN_MD_CTR, r5
+ mtspr SPRN_MD_EPN, r0
+ mtspr SPRN_MD_TWC, r7
+ mtspr SPRN_MD_RPN, r12
+ addi r5, r5, 0x100
+ addis r6, r6, SZ_8M@h
+ addis r8, r8, SZ_8M@h
+ cmplw r6, r3
+ bdnzt lt, 2b
+#endif
+#ifdef CONFIG_PIN_TLB_IMMR
+ LOAD_REG_IMMEDIATE(r0, VIRT_IMMR_BASE | MD_EVALID)
+ LOAD_REG_IMMEDIATE(r7, MD_SVALID | MD_PS512K | MD_GUARDED | _PMD_ACCESSED)
+ mfspr r8, SPRN_IMMR
+ rlwinm r8, r8, 0, 0xfff80000
+ ori r8, r8, 0xf0 | _PAGE_DIRTY | _PAGE_SPS | _PAGE_SH | \
+ _PAGE_NO_CACHE | _PAGE_PRESENT
+ mtspr SPRN_MD_CTR, r5
+ mtspr SPRN_MD_EPN, r0
+ mtspr SPRN_MD_TWC, r7
+ mtspr SPRN_MD_RPN, r8
+#endif
+#if defined(CONFIG_PIN_TLB_IMMR) || defined(CONFIG_PIN_TLB_DATA)
+ lis r0, (MD_RSV4I | MD_TWAM)@h
+ mtspr SPRN_MD_CTR, r0
+#endif
+ mtspr SPRN_SRR1, r10
+ mtspr SPRN_SRR0, r11
+ rfi
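
Before moving to the next file, here is an illustrative C version of the compare_to_kernel_boundary macro introduced above and used by the 8xx ITLB and DTLB miss handlers. It sketches the two configurations only and is not kernel code; simple_layout stands for the TASK_SIZE <= 0x80000000 && PAGE_OFFSET >= 0x80000000 case:

    #include <stdbool.h>
    #include <stdint.h>

    static bool is_kernel_address(uint32_t addr, uint32_t page_offset, bool simple_layout)
    {
            if (simple_layout)
                    return addr >= 0x80000000u;   /* the single "not." test */
            /* the rlwinm/cmpli path: compare the high halves, 512K granular */
            return ((addr >> 16) & 0xfff8) >= (page_offset >> 16);
    }

In the miss handlers the result feeds a blt to the user-address path, so a kernel address falls through to the code that substitutes swapper_pg_dir as the page directory.
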
diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_book3s_32.S
index 97c887950c3c..519b60695167 100644
--- a/arch/powerpc/kernel/head_32.S
+++ b/arch/powerpc/kernel/head_book3s_32.S
@@ -17,10 +17,10 @@
*/
#include <linux/init.h>
+#include <linux/pgtable.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/mmu.h>
-#include <asm/pgtable.h>
#include <asm/cputable.h>
#include <asm/cache.h>
#include <asm/thread_info.h>
@@ -31,19 +31,10 @@
#include <asm/kvm_book3s_asm.h>
#include <asm/export.h>
#include <asm/feature-fixups.h>
+#include <asm/interrupt.h>
#include "head_32.h"
-/* 601 only have IBAT */
-#ifdef CONFIG_PPC_BOOK3S_601
-#define LOAD_BAT(n, reg, RA, RB) \
- li RA,0; \
- mtspr SPRN_IBAT##n##U,RA; \
- lwz RA,(n*16)+0(reg); \
- lwz RB,(n*16)+4(reg); \
- mtspr SPRN_IBAT##n##U,RA; \
- mtspr SPRN_IBAT##n##L,RB
-#else
#define LOAD_BAT(n, reg, RA, RB) \
/* see the comment for clear_bats() -- Cort */ \
li RA,0; \
@@ -57,19 +48,15 @@
lwz RB,(n*16)+12(reg); \
mtspr SPRN_DBAT##n##U,RA; \
mtspr SPRN_DBAT##n##L,RB
-#endif
__HEAD
- .stabs "arch/powerpc/kernel/",N_SO,0,0,0f
- .stabs "head_32.S",N_SO,0,0,0f
-0:
-_ENTRY(_stext);
+_GLOBAL(_stext);
/*
* _start is defined this way because the XCOFF loader in the OpenFirmware
* on the powermac expects the entry point to be a procedure descriptor.
*/
-_ENTRY(_start);
+_GLOBAL(_start);
/*
* These are here for legacy reasons, the kernel used to
* need to look like a coff function entry for the pmac
@@ -166,9 +153,8 @@ __after_mmu_off:
bl initial_bats
bl load_segment_registers
-#ifdef CONFIG_KASAN
+ bl reloc_offset
bl early_hash_table
-#endif
#if defined(CONFIG_BOOTX_TEXT)
bl setup_disp_bat
#endif
@@ -185,10 +171,8 @@ __after_mmu_off:
bl reloc_offset
li r24,0 /* cpu# */
bl call_setup_cpu /* Call setup_cpu for this CPU */
-#ifdef CONFIG_PPC_BOOK3S_32
bl reloc_offset
bl init_idle_6xx
-#endif /* CONFIG_PPC_BOOK3S_32 */
/*
@@ -219,8 +203,7 @@ turn_on_mmu:
lis r0,start_here@h
ori r0,r0,start_here@l
mtspr SPRN_SRR0,r0
- SYNC
- RFI /* enables MMU */
+ rfi /* enables MMU */
/*
* We need __secondary_hold as a place to hold the other cpus on
@@ -253,8 +236,8 @@ __secondary_hold_acknowledge:
/* System reset */
/* core99 pmac starts the secondary here by changing the vector, and
- putting it back to what it was (unknown_exception) when done. */
- EXCEPTION(0x100, Reset, unknown_exception, EXC_XFER_STD)
+ putting it back to what it was (unknown_async_exception) when done. */
+ EXCEPTION(INTERRUPT_SYSTEM_RESET, Reset, unknown_async_exception)
/* Machine check */
/*
@@ -270,97 +253,62 @@ __secondary_hold_acknowledge:
* pointer when we take an exception from supervisor mode.)
* -- paulus.
*/
- . = 0x200
- DO_KVM 0x200
-MachineCheck:
+ START_EXCEPTION(INTERRUPT_MACHINE_CHECK, MachineCheck)
EXCEPTION_PROLOG_0
-#ifdef CONFIG_VMAP_STACK
- li r11, MSR_KERNEL & ~(MSR_IR | MSR_RI) /* can take DTLB miss */
- mtmsr r11
- isync
-#endif
#ifdef CONFIG_PPC_CHRP
- mfspr r11, SPRN_SPRG_THREAD
- tovirt_vmstack r11, r11
- lwz r11, RTAS_SP(r11)
- cmpwi cr1, r11, 0
+ mtspr SPRN_SPRG_SCRATCH2,r1
+ mfspr r1, SPRN_SPRG_THREAD
+ lwz r1, RTAS_SP(r1)
+ cmpwi cr1, r1, 0
bne cr1, 7f
+ mfspr r1, SPRN_SPRG_SCRATCH2
#endif /* CONFIG_PPC_CHRP */
- EXCEPTION_PROLOG_1 for_rtas=1
-7: EXCEPTION_PROLOG_2
- addi r3,r1,STACK_FRAME_OVERHEAD
+ EXCEPTION_PROLOG_1
+7: EXCEPTION_PROLOG_2 0x200 MachineCheck
#ifdef CONFIG_PPC_CHRP
-#ifdef CONFIG_VMAP_STACK
- mfspr r4, SPRN_SPRG_THREAD
- tovirt(r4, r4)
- lwz r4, RTAS_SP(r4)
- cmpwi cr1, r4, 0
-#endif
- beq cr1, machine_check_tramp
- b machine_check_in_rtas
-#else
- b machine_check_tramp
+ beq cr1, 1f
+ twi 31, 0, 0
#endif
+1: prepare_transfer_to_handler
+ bl machine_check_exception
+ b interrupt_return
/* Data access exception. */
- . = 0x300
- DO_KVM 0x300
-DataAccess:
-#ifdef CONFIG_VMAP_STACK
- mtspr SPRN_SPRG_SCRATCH0,r10
- mfspr r10, SPRN_SPRG_THREAD
+ START_EXCEPTION(INTERRUPT_DATA_STORAGE, DataAccess)
+#ifdef CONFIG_PPC_BOOK3S_604
BEGIN_MMU_FTR_SECTION
+ mtspr SPRN_SPRG_SCRATCH2,r10
+ mfspr r10, SPRN_SPRG_THREAD
stw r11, THR11(r10)
mfspr r10, SPRN_DSISR
mfcr r11
-#ifdef CONFIG_PPC_KUAP
- andis. r10, r10, (DSISR_BAD_FAULT_32S | DSISR_DABRMATCH | DSISR_PROTFAULT)@h
-#else
andis. r10, r10, (DSISR_BAD_FAULT_32S | DSISR_DABRMATCH)@h
-#endif
mfspr r10, SPRN_SPRG_THREAD
beq hash_page_dsi
.Lhash_page_dsi_cont:
mtcr r11
lwz r11, THR11(r10)
-END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
- mtspr SPRN_SPRG_SCRATCH1,r11
- mfspr r11, SPRN_DAR
- stw r11, DAR(r10)
- mfspr r11, SPRN_DSISR
- stw r11, DSISR(r10)
- mfspr r11, SPRN_SRR0
- stw r11, SRR0(r10)
- mfspr r11, SPRN_SRR1 /* check whether user or kernel */
- stw r11, SRR1(r10)
- mfcr r10
- andi. r11, r11, MSR_PR
-
- EXCEPTION_PROLOG_1
- b handle_page_fault_tramp_1
-#else /* CONFIG_VMAP_STACK */
- EXCEPTION_PROLOG handle_dar_dsisr=1
- get_and_save_dar_dsisr_on_stack r4, r5, r11
-BEGIN_MMU_FTR_SECTION
-#ifdef CONFIG_PPC_KUAP
- andis. r0, r5, (DSISR_BAD_FAULT_32S | DSISR_DABRMATCH | DSISR_PROTFAULT)@h
-#else
- andis. r0, r5, (DSISR_BAD_FAULT_32S | DSISR_DABRMATCH)@h
-#endif
- bne handle_page_fault_tramp_2 /* if not, try to put a PTE */
- rlwinm r3, r5, 32 - 15, 21, 21 /* DSISR_STORE -> _PAGE_RW */
- bl hash_page
- b handle_page_fault_tramp_1
-FTR_SECTION_ELSE
- b handle_page_fault_tramp_2
+ mfspr r10, SPRN_SPRG_SCRATCH2
+MMU_FTR_SECTION_ELSE
+ b 1f
ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_HPTE_TABLE)
-#endif /* CONFIG_VMAP_STACK */
+#endif
+1: EXCEPTION_PROLOG_0 handle_dar_dsisr=1
+ EXCEPTION_PROLOG_1
+ EXCEPTION_PROLOG_2 INTERRUPT_DATA_STORAGE DataAccess handle_dar_dsisr=1
+ prepare_transfer_to_handler
+ lwz r5, _DSISR(r1)
+ andis. r0, r5, DSISR_DABRMATCH@h
+ bne- 1f
+ bl do_page_fault
+ b interrupt_return
+1: bl do_break
+ REST_NVGPRS(r1)
+ b interrupt_return
+
/* Instruction access exception. */
- . = 0x400
- DO_KVM 0x400
-InstructionAccess:
-#ifdef CONFIG_VMAP_STACK
+ START_EXCEPTION(INTERRUPT_INST_STORAGE, InstructionAccess)
mtspr SPRN_SPRG_SCRATCH0,r10
mtspr SPRN_SPRG_SCRATCH1,r11
mfspr r10, SPRN_SPRG_THREAD
@@ -369,50 +317,47 @@ InstructionAccess:
mfspr r11, SPRN_SRR1 /* check whether user or kernel */
stw r11, SRR1(r10)
mfcr r10
+#ifdef CONFIG_PPC_BOOK3S_604
BEGIN_MMU_FTR_SECTION
andis. r11, r11, SRR1_ISI_NOPT@h /* no pte found? */
bne hash_page_isi
.Lhash_page_isi_cont:
mfspr r11, SPRN_SRR1 /* check whether user or kernel */
END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
+#endif
andi. r11, r11, MSR_PR
EXCEPTION_PROLOG_1
- EXCEPTION_PROLOG_2
-#else /* CONFIG_VMAP_STACK */
- EXCEPTION_PROLOG
- andis. r0,r9,SRR1_ISI_NOPT@h /* no pte found? */
- beq 1f /* if so, try to put a PTE */
- li r3,0 /* into the hash table */
- mr r4,r12 /* SRR0 is fault address */
-BEGIN_MMU_FTR_SECTION
- bl hash_page
-END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
-#endif /* CONFIG_VMAP_STACK */
-1: mr r4,r12
+ EXCEPTION_PROLOG_2 INTERRUPT_INST_STORAGE InstructionAccess
andis. r5,r9,DSISR_SRR1_MATCH_32S@h /* Filter relevant SRR1 bits */
- stw r4, _DAR(r11)
- EXC_XFER_LITE(0x400, handle_page_fault)
+ stw r5, _DSISR(r11)
+ stw r12, _DAR(r11)
+ prepare_transfer_to_handler
+ bl do_page_fault
+ b interrupt_return
/* External interrupt */
- EXCEPTION(0x500, HardwareInterrupt, do_IRQ, EXC_XFER_LITE)
+ EXCEPTION(INTERRUPT_EXTERNAL, HardwareInterrupt, do_IRQ)
/* Alignment exception */
- . = 0x600
- DO_KVM 0x600
-Alignment:
- EXCEPTION_PROLOG handle_dar_dsisr=1
- save_dar_dsisr_on_stack r4, r5, r11
- addi r3,r1,STACK_FRAME_OVERHEAD
- b alignment_exception_tramp
+ START_EXCEPTION(INTERRUPT_ALIGNMENT, Alignment)
+ EXCEPTION_PROLOG INTERRUPT_ALIGNMENT Alignment handle_dar_dsisr=1
+ prepare_transfer_to_handler
+ bl alignment_exception
+ REST_NVGPRS(r1)
+ b interrupt_return
/* Program check exception */
- EXCEPTION(0x700, ProgramCheck, program_check_exception, EXC_XFER_STD)
+ START_EXCEPTION(INTERRUPT_PROGRAM, ProgramCheck)
+ EXCEPTION_PROLOG INTERRUPT_PROGRAM ProgramCheck
+ prepare_transfer_to_handler
+ bl program_check_exception
+ REST_NVGPRS(r1)
+ b interrupt_return
/* Floating-point unavailable */
- . = 0x800
- DO_KVM 0x800
-FPUnavailable:
+ START_EXCEPTION(0x800, FPUnavailable)
+#ifdef CONFIG_PPC_FPU
BEGIN_FTR_SECTION
/*
* Certain Freescale cores don't have a FPU and treat fp instructions
@@ -420,28 +365,29 @@ BEGIN_FTR_SECTION
*/
b ProgramCheck
END_FTR_SECTION_IFSET(CPU_FTR_FPU_UNAVAILABLE)
- EXCEPTION_PROLOG
+ EXCEPTION_PROLOG INTERRUPT_FP_UNAVAIL FPUnavailable
beq 1f
bl load_up_fpu /* if from user, just load it up */
b fast_exception_return
-1: addi r3,r1,STACK_FRAME_OVERHEAD
- EXC_XFER_LITE(0x800, kernel_fp_unavailable_exception)
+1: prepare_transfer_to_handler
+ bl kernel_fp_unavailable_exception
+ b interrupt_return
+#else
+ b ProgramCheck
+#endif
/* Decrementer */
- EXCEPTION(0x900, Decrementer, timer_interrupt, EXC_XFER_LITE)
+ EXCEPTION(INTERRUPT_DECREMENTER, Decrementer, timer_interrupt)
- EXCEPTION(0xa00, Trap_0a, unknown_exception, EXC_XFER_STD)
- EXCEPTION(0xb00, Trap_0b, unknown_exception, EXC_XFER_STD)
+ EXCEPTION(0xa00, Trap_0a, unknown_exception)
+ EXCEPTION(0xb00, Trap_0b, unknown_exception)
/* System call */
- . = 0xc00
- DO_KVM 0xc00
-SystemCall:
- SYSCALL_ENTRY 0xc00
+ START_EXCEPTION(INTERRUPT_SYSCALL, SystemCall)
+ SYSCALL_ENTRY INTERRUPT_SYSCALL
-/* Single step - not used on 601 */
- EXCEPTION(0xd00, SingleStep, single_step_exception, EXC_XFER_STD)
- EXCEPTION(0xe00, Trap_0e, unknown_exception, EXC_XFER_STD)
+ EXCEPTION(INTERRUPT_TRACE, SingleStep, single_step_exception)
+ EXCEPTION(0xe00, Trap_0e, unknown_exception)
/*
* The Altivec unavailable trap is at 0x0f20. Foo.
@@ -451,19 +397,18 @@ SystemCall:
* non-altivec kernel running on a machine with altivec just
* by executing an altivec instruction.
*/
- . = 0xf00
- DO_KVM 0xf00
+ START_EXCEPTION(INTERRUPT_PERFMON, PerformanceMonitorTrap)
b PerformanceMonitor
- . = 0xf20
- DO_KVM 0xf20
+ START_EXCEPTION(INTERRUPT_ALTIVEC_UNAVAIL, AltiVecUnavailableTrap)
b AltiVecUnavailable
+ __HEAD
/*
* Handle TLB miss for instruction on 603/603e.
* Note: we get an alternate set of r0 - r3 to use automatically.
*/
- . = 0x1000
+ . = INTERRUPT_INST_TLB_MISS_603
InstructionTLBMiss:
/*
* r0: scratch
@@ -473,19 +418,17 @@ InstructionTLBMiss:
*/
/* Get PTE (linux-style) and check access */
mfspr r3,SPRN_IMISS
-#if defined(CONFIG_MODULES) || defined(CONFIG_DEBUG_PAGEALLOC)
- lis r1,PAGE_OFFSET@h /* check if kernel address */
+#ifdef CONFIG_MODULES
+ lis r1, TASK_SIZE@h /* check if kernel address */
cmplw 0,r1,r3
#endif
- mfspr r2, SPRN_SPRG_PGDIR
-#ifdef CONFIG_SWAP
- li r1,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC
-#else
- li r1,_PAGE_PRESENT | _PAGE_EXEC
-#endif
-#if defined(CONFIG_MODULES) || defined(CONFIG_DEBUG_PAGEALLOC)
- bge- 112f
+ mfspr r2, SPRN_SDR1
+ li r1,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC | _PAGE_USER
+ rlwinm r2, r2, 28, 0xfffff000
+#ifdef CONFIG_MODULES
+ bgt- 112f
lis r2, (swapper_pg_dir - PAGE_OFFSET)@ha /* if kernel address, use */
+ li r1,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC
addi r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l /* kernel page table */
#endif
112: rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */
@@ -531,7 +474,7 @@ InstructionAddressInvalid:
/*
* Handle TLB miss for DATA Load operation on 603/603e
*/
- . = 0x1100
+ . = INTERRUPT_DATA_LOAD_TLB_MISS_603
DataLoadTLBMiss:
/*
* r0: scratch
@@ -541,16 +484,14 @@ DataLoadTLBMiss:
*/
/* Get PTE (linux-style) and check access */
mfspr r3,SPRN_DMISS
- lis r1,PAGE_OFFSET@h /* check if kernel address */
+ lis r1, TASK_SIZE@h /* check if kernel address */
cmplw 0,r1,r3
- mfspr r2, SPRN_SPRG_PGDIR
-#ifdef CONFIG_SWAP
- li r1, _PAGE_PRESENT | _PAGE_ACCESSED
-#else
- li r1, _PAGE_PRESENT
-#endif
- bge- 112f
+ mfspr r2, SPRN_SDR1
+ li r1, _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_USER
+ rlwinm r2, r2, 28, 0xfffff000
+ bgt- 112f
lis r2, (swapper_pg_dir - PAGE_OFFSET)@ha /* if kernel address, use */
+ li r1, _PAGE_PRESENT | _PAGE_ACCESSED
addi r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l /* kernel page table */
112: rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */
lwz r2,0(r2) /* get pmd entry */
@@ -560,22 +501,18 @@ DataLoadTLBMiss:
lwz r0,0(r2) /* get linux-style pte */
andc. r1,r1,r0 /* check access & ~permission */
bne- DataAddressInvalid /* return if access not permitted */
- /*
- * NOTE! We are assuming this is not an SMP system, otherwise
- * we would need to update the pte atomically with lwarx/stwcx.
- */
/* Convert linux-style PTE to low word of PPC-style PTE */
rlwinm r1,r0,32-9,30,30 /* _PAGE_RW -> PP msb */
rlwimi r0,r0,32-1,30,30 /* _PAGE_USER -> PP msb */
+ rlwimi r1,r0,32-3,24,24 /* _PAGE_RW -> _PAGE_DIRTY */
rlwimi r0,r0,32-1,31,31 /* _PAGE_USER -> PP lsb */
+ xori r1,r1,_PAGE_DIRTY /* clear dirty when not rw */
ori r1,r1,0xe04 /* clear out reserved bits */
andc r1,r0,r1 /* PP = user? rw? 1: 3: 0 */
BEGIN_FTR_SECTION
rlwinm r1,r1,0,~_PAGE_COHERENT /* clear M (coherence not required) */
END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
mtspr SPRN_RPA,r1
- mfspr r2,SPRN_SRR1 /* Need to restore CR0 */
- mtcrf 0x80,r2
BEGIN_MMU_FTR_SECTION
li r0,1
mfspr r1,SPRN_SPRG_603_LRU
@@ -587,9 +524,15 @@ BEGIN_MMU_FTR_SECTION
mfspr r2,SPRN_SRR1
rlwimi r2,r0,31-14,14,14
mtspr SPRN_SRR1,r2
-END_MMU_FTR_SECTION_IFSET(MMU_FTR_NEED_DTLB_SW_LRU)
+ mtcrf 0x80,r2
+ tlbld r3
+ rfi
+MMU_FTR_SECTION_ELSE
+ mfspr r2,SPRN_SRR1 /* Need to restore CR0 */
+ mtcrf 0x80,r2
tlbld r3
rfi
+ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_NEED_DTLB_SW_LRU)
DataAddressInvalid:
mfspr r3,SPRN_SRR1
rlwinm r1,r3,9,6,6 /* Get load/store bit */
@@ -611,7 +554,7 @@ DataAddressInvalid:
/*
* Handle TLB miss for DATA Store on 603/603e
*/
- . = 0x1200
+ . = INTERRUPT_DATA_STORE_TLB_MISS_603
DataStoreTLBMiss:
/*
* r0: scratch
@@ -621,16 +564,14 @@ DataStoreTLBMiss:
*/
/* Get PTE (linux-style) and check access */
mfspr r3,SPRN_DMISS
- lis r1,PAGE_OFFSET@h /* check if kernel address */
+ lis r1, TASK_SIZE@h /* check if kernel address */
cmplw 0,r1,r3
- mfspr r2, SPRN_SPRG_PGDIR
-#ifdef CONFIG_SWAP
- li r1, _PAGE_RW | _PAGE_DIRTY | _PAGE_PRESENT | _PAGE_ACCESSED
-#else
- li r1, _PAGE_RW | _PAGE_DIRTY | _PAGE_PRESENT
-#endif
- bge- 112f
+ mfspr r2, SPRN_SDR1
+ li r1, _PAGE_RW | _PAGE_DIRTY | _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_USER
+ rlwinm r2, r2, 28, 0xfffff000
+ bgt- 112f
lis r2, (swapper_pg_dir - PAGE_OFFSET)@ha /* if kernel address, use */
+ li r1, _PAGE_RW | _PAGE_DIRTY | _PAGE_PRESENT | _PAGE_ACCESSED
addi r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l /* kernel page table */
112: rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */
lwz r2,0(r2) /* get pmd entry */
@@ -640,10 +581,6 @@ DataStoreTLBMiss:
lwz r0,0(r2) /* get linux-style pte */
andc. r1,r1,r0 /* check access & ~permission */
bne- DataAddressInvalid /* return if access not permitted */
- /*
- * NOTE! We are assuming this is not an SMP system, otherwise
- * we would need to update the pte atomically with lwarx/stwcx.
- */
/* Convert linux-style PTE to low word of PPC-style PTE */
rlwimi r0,r0,32-2,31,31 /* _PAGE_USER -> PP lsb */
li r1,0xe06 /* clear out reserved bits & PP msb */
@@ -665,63 +602,58 @@ BEGIN_MMU_FTR_SECTION
mfspr r2,SPRN_SRR1
rlwimi r2,r0,31-14,14,14
mtspr SPRN_SRR1,r2
-END_MMU_FTR_SECTION_IFSET(MMU_FTR_NEED_DTLB_SW_LRU)
+ mtcrf 0x80,r2
+ tlbld r3
+ rfi
+MMU_FTR_SECTION_ELSE
+ mfspr r2,SPRN_SRR1 /* Need to restore CR0 */
+ mtcrf 0x80,r2
tlbld r3
rfi
+ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_NEED_DTLB_SW_LRU)
#ifndef CONFIG_ALTIVEC
#define altivec_assist_exception unknown_exception
#endif
- EXCEPTION(0x1300, Trap_13, instruction_breakpoint_exception, EXC_XFER_STD)
- EXCEPTION(0x1400, SMI, SMIException, EXC_XFER_STD)
- EXCEPTION(0x1500, Trap_15, unknown_exception, EXC_XFER_STD)
- EXCEPTION(0x1600, Trap_16, altivec_assist_exception, EXC_XFER_STD)
- EXCEPTION(0x1700, Trap_17, TAUException, EXC_XFER_STD)
- EXCEPTION(0x1800, Trap_18, unknown_exception, EXC_XFER_STD)
- EXCEPTION(0x1900, Trap_19, unknown_exception, EXC_XFER_STD)
- EXCEPTION(0x1a00, Trap_1a, unknown_exception, EXC_XFER_STD)
- EXCEPTION(0x1b00, Trap_1b, unknown_exception, EXC_XFER_STD)
- EXCEPTION(0x1c00, Trap_1c, unknown_exception, EXC_XFER_STD)
- EXCEPTION(0x1d00, Trap_1d, unknown_exception, EXC_XFER_STD)
- EXCEPTION(0x1e00, Trap_1e, unknown_exception, EXC_XFER_STD)
- EXCEPTION(0x1f00, Trap_1f, unknown_exception, EXC_XFER_STD)
- EXCEPTION(0x2000, RunMode, RunModeException, EXC_XFER_STD)
- EXCEPTION(0x2100, Trap_21, unknown_exception, EXC_XFER_STD)
- EXCEPTION(0x2200, Trap_22, unknown_exception, EXC_XFER_STD)
- EXCEPTION(0x2300, Trap_23, unknown_exception, EXC_XFER_STD)
- EXCEPTION(0x2400, Trap_24, unknown_exception, EXC_XFER_STD)
- EXCEPTION(0x2500, Trap_25, unknown_exception, EXC_XFER_STD)
- EXCEPTION(0x2600, Trap_26, unknown_exception, EXC_XFER_STD)
- EXCEPTION(0x2700, Trap_27, unknown_exception, EXC_XFER_STD)
- EXCEPTION(0x2800, Trap_28, unknown_exception, EXC_XFER_STD)
- EXCEPTION(0x2900, Trap_29, unknown_exception, EXC_XFER_STD)
- EXCEPTION(0x2a00, Trap_2a, unknown_exception, EXC_XFER_STD)
- EXCEPTION(0x2b00, Trap_2b, unknown_exception, EXC_XFER_STD)
- EXCEPTION(0x2c00, Trap_2c, unknown_exception, EXC_XFER_STD)
- EXCEPTION(0x2d00, Trap_2d, unknown_exception, EXC_XFER_STD)
- EXCEPTION(0x2e00, Trap_2e, unknown_exception, EXC_XFER_STD)
- EXCEPTION(0x2f00, Trap_2f, unknown_exception, EXC_XFER_STD)
-
- . = 0x3000
-
-machine_check_tramp:
- EXC_XFER_STD(0x200, machine_check_exception)
+#ifndef CONFIG_TAU_INT
+#define TAUException unknown_async_exception
+#endif
-alignment_exception_tramp:
- EXC_XFER_STD(0x600, alignment_exception)
+ EXCEPTION(0x1300, Trap_13, instruction_breakpoint_exception)
+ EXCEPTION(0x1400, SMI, SMIException)
+ EXCEPTION(0x1500, Trap_15, unknown_exception)
+ EXCEPTION(0x1600, Trap_16, altivec_assist_exception)
+ EXCEPTION(0x1700, Trap_17, TAUException)
+ EXCEPTION(0x1800, Trap_18, unknown_exception)
+ EXCEPTION(0x1900, Trap_19, unknown_exception)
+ EXCEPTION(0x1a00, Trap_1a, unknown_exception)
+ EXCEPTION(0x1b00, Trap_1b, unknown_exception)
+ EXCEPTION(0x1c00, Trap_1c, unknown_exception)
+ EXCEPTION(0x1d00, Trap_1d, unknown_exception)
+ EXCEPTION(0x1e00, Trap_1e, unknown_exception)
+ EXCEPTION(0x1f00, Trap_1f, unknown_exception)
+ EXCEPTION(0x2000, RunMode, RunModeException)
+ EXCEPTION(0x2100, Trap_21, unknown_exception)
+ EXCEPTION(0x2200, Trap_22, unknown_exception)
+ EXCEPTION(0x2300, Trap_23, unknown_exception)
+ EXCEPTION(0x2400, Trap_24, unknown_exception)
+ EXCEPTION(0x2500, Trap_25, unknown_exception)
+ EXCEPTION(0x2600, Trap_26, unknown_exception)
+ EXCEPTION(0x2700, Trap_27, unknown_exception)
+ EXCEPTION(0x2800, Trap_28, unknown_exception)
+ EXCEPTION(0x2900, Trap_29, unknown_exception)
+ EXCEPTION(0x2a00, Trap_2a, unknown_exception)
+ EXCEPTION(0x2b00, Trap_2b, unknown_exception)
+ EXCEPTION(0x2c00, Trap_2c, unknown_exception)
+ EXCEPTION(0x2d00, Trap_2d, unknown_exception)
+ EXCEPTION(0x2e00, Trap_2e, unknown_exception)
+ EXCEPTION(0x2f00, Trap_2f, unknown_exception)
-handle_page_fault_tramp_1:
-#ifdef CONFIG_VMAP_STACK
- EXCEPTION_PROLOG_2 handle_dar_dsisr=1
-#endif
- lwz r4, _DAR(r11)
- lwz r5, _DSISR(r11)
- /* fall through */
-handle_page_fault_tramp_2:
- EXC_XFER_LITE(0x300, handle_page_fault)
+ __HEAD
+ . = 0x3000
-#ifdef CONFIG_VMAP_STACK
+#ifdef CONFIG_PPC_BOOK3S_604
.macro save_regs_thread thread
stw r0, THR0(\thread)
stw r3, THR3(\thread)
@@ -785,45 +717,46 @@ fast_hash_page_return:
/* DSI */
mtcr r11
lwz r11, THR11(r10)
- mfspr r10, SPRN_SPRG_SCRATCH0
- SYNC
- RFI
+ mfspr r10, SPRN_SPRG_SCRATCH2
+ rfi
1: /* ISI */
mtcr r11
mfspr r11, SPRN_SPRG_SCRATCH1
mfspr r10, SPRN_SPRG_SCRATCH0
- SYNC
- RFI
+ rfi
+#endif /* CONFIG_PPC_BOOK3S_604 */
-stack_overflow:
+#ifdef CONFIG_VMAP_STACK
vmap_stack_overflow_exception
#endif
+ __HEAD
AltiVecUnavailable:
- EXCEPTION_PROLOG
+ EXCEPTION_PROLOG 0xf20 AltiVecUnavailable
#ifdef CONFIG_ALTIVEC
beq 1f
bl load_up_altivec /* if from user, just load it up */
b fast_exception_return
#endif /* CONFIG_ALTIVEC */
-1: addi r3,r1,STACK_FRAME_OVERHEAD
- EXC_XFER_LITE(0xf20, altivec_unavailable_exception)
+1: prepare_transfer_to_handler
+ bl altivec_unavailable_exception
+ b interrupt_return
+ __HEAD
PerformanceMonitor:
- EXCEPTION_PROLOG
- addi r3,r1,STACK_FRAME_OVERHEAD
- EXC_XFER_STD(0xf00, performance_monitor_exception)
+ EXCEPTION_PROLOG 0xf00 PerformanceMonitor
+ prepare_transfer_to_handler
+ bl performance_monitor_exception
+ b interrupt_return
+ __HEAD
/*
* This code is jumped to from the startup code to copy
* the kernel image to physical address PHYSICAL_START.
*/
relocate_kernel:
- addis r9,r26,klimit@ha /* fetch klimit */
- lwz r25,klimit@l(r9)
- addis r25,r25,-KERNELBASE@h
lis r3,PHYSICAL_START@h /* Destination base address */
li r6,0 /* Destination offset */
li r5,0x4000 /* # bytes of memory to copy */
@@ -831,7 +764,8 @@ relocate_kernel:
addi r0,r3,4f@l /* jump to the address of 4f */
mtctr r0 /* in copy and do the rest. */
bctr /* jump to the copy */
-4: mr r5,r25
+4: lis r5,_end-KERNELBASE@h
+ ori r5,r5,_end-KERNELBASE@l
bl copy_and_flush /* copy the rest */
b turn_on_mmu
@@ -841,7 +775,7 @@ relocate_kernel:
* r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset
* on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5.
*/
-_ENTRY(copy_and_flush)
+_GLOBAL(copy_and_flush)
addi r5,r5,-4
addi r6,r6,-4
4: li r0,L1_CACHE_BYTES/4
@@ -884,7 +818,6 @@ __secondary_start_pmac_0:
set to map the 0xf0000000 - 0xffffffff region */
mfmsr r0
rlwinm r0,r0,0,28,26 /* clear DR (0x10) */
- SYNC
mtmsr r0
isync
@@ -896,10 +829,8 @@ __secondary_start:
lis r3,-KERNELBASE@h
mr r4,r24
bl call_setup_cpu /* Call setup_cpu for this CPU */
-#ifdef CONFIG_PPC_BOOK3S_32
lis r3,-KERNELBASE@h
bl init_idle_6xx
-#endif /* CONFIG_PPC_BOOK3S_32 */
/* get current's stack and current */
lis r2,secondary_current@ha
@@ -922,9 +853,12 @@ __secondary_start:
tophys(r4,r2)
addi r4,r4,THREAD /* phys address of our thread_struct */
mtspr SPRN_SPRG_THREAD,r4
+BEGIN_MMU_FTR_SECTION
lis r4, (swapper_pg_dir - PAGE_OFFSET)@h
ori r4, r4, (swapper_pg_dir - PAGE_OFFSET)@l
- mtspr SPRN_SPRG_PGDIR, r4
+ rlwinm r4, r4, 4, 0xffff01ff
+ mtspr SPRN_SDR1, r4
+END_MMU_FTR_SECTION_IFCLR(MMU_FTR_HPTE_TABLE)
/* enable MMU and jump to start_secondary */
li r4,MSR_KERNEL
@@ -932,8 +866,7 @@ __secondary_start:
ori r3,r3,start_secondary@l
mtspr SPRN_SRR0,r3
mtspr SPRN_SRR1,r4
- SYNC
- RFI
+ rfi
#endif /* CONFIG_SMP */
#ifdef CONFIG_KVM_BOOK3S_HANDLER
@@ -941,21 +874,9 @@ __secondary_start:
#endif
/*
- * Those generic dummy functions are kept for CPUs not
- * included in CONFIG_PPC_BOOK3S_32
- */
-#if !defined(CONFIG_PPC_BOOK3S_32)
-_ENTRY(__save_cpu_setup)
- blr
-_ENTRY(__restore_cpu_setup)
- blr
-#endif /* !defined(CONFIG_PPC_BOOK3S_32) */
-
-/*
* Load stuff into the MMU. Intended to be called with
* IR=0 and DR=0.
*/
-#ifdef CONFIG_KASAN
early_hash_table:
sync /* Force all PTE updates to finish */
isync
@@ -967,7 +888,6 @@ early_hash_table:
ori r6, r6, 3 /* 256kB table */
mtspr SPRN_SDR1, r6
blr
-#endif
load_up_mmu:
sync /* Force all PTE updates to finish */
@@ -975,14 +895,15 @@ load_up_mmu:
tlbia /* Clear all TLB entries */
sync /* wait for tlbia/tlbie to finish */
TLBSYNC /* ... on all CPUs */
+BEGIN_MMU_FTR_SECTION
/* Load the SDR1 register (hash table base & size) */
lis r6,_SDR1@ha
tophys(r6,r6)
lwz r6,_SDR1@l(r6)
mtspr SPRN_SDR1,r6
+END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
-/* Load the BAT registers with the values set up by MMU_init.
- MMU_init takes care of whether we're on a 601 or not. */
+/* Load the BAT registers with the values set up by MMU_init. */
lis r3,BATS@ha
addi r3,r3,BATS@l
tophys(r3,r3)
@@ -998,15 +919,13 @@ BEGIN_MMU_FTR_SECTION
END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
blr
-load_segment_registers:
+_GLOBAL(load_segment_registers)
li r0, NUM_USER_SEGMENTS /* load up user segment register values */
mtctr r0 /* for context 0 */
- li r3, 0 /* Kp = 0, Ks = 0, VSID = 0 */
#ifdef CONFIG_PPC_KUEP
- oris r3, r3, SR_NX@h /* Set Nx */
-#endif
-#ifdef CONFIG_PPC_KUAP
- oris r3, r3, SR_KS@h /* Set Ks */
+ lis r3, SR_NX@h /* Kp = 0, Ks = 0, VSID = 0 */
+#else
+ li r3, 0 /* Kp = 0, Ks = 0, VSID = 0 */
#endif
li r4, 0
3: mtsrin r3, r4
@@ -1036,9 +955,12 @@ start_here:
tophys(r4,r2)
addi r4,r4,THREAD /* init task's THREAD */
mtspr SPRN_SPRG_THREAD,r4
+BEGIN_MMU_FTR_SECTION
lis r4, (swapper_pg_dir - PAGE_OFFSET)@h
ori r4, r4, (swapper_pg_dir - PAGE_OFFSET)@l
- mtspr SPRN_SPRG_PGDIR, r4
+ rlwinm r4, r4, 4, 0xffff01ff
+ mtspr SPRN_SDR1, r4
+END_MMU_FTR_SECTION_IFCLR(MMU_FTR_HPTE_TABLE)
/* stack */
lis r1,init_thread_union@ha
@@ -1057,11 +979,7 @@ start_here:
bl machine_init
bl __save_cpu_setup
bl MMU_init
-#ifdef CONFIG_KASAN
-BEGIN_MMU_FTR_SECTION
bl MMU_init_hw_patch
-END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
-#endif
/*
* Go back to running unmapped so we can load up new values
@@ -1076,8 +994,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
.align 4
mtspr SPRN_SRR0,r4
mtspr SPRN_SRR1,r3
- SYNC
- RFI
+ rfi
/* Load up the kernel context */
2: bl load_up_mmu
@@ -1088,7 +1005,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
*/
lis r5, abatron_pteptrs@h
ori r5, r5, abatron_pteptrs@l
- stw r5, 0xf0(r0) /* This much match your Abatron config */
+ stw r5, 0xf0(0) /* This must match your Abatron config */
lis r6, swapper_pg_dir@h
ori r6, r6, swapper_pg_dir@l
tophys(r5, r5)
@@ -1101,54 +1018,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
ori r3,r3,start_kernel@l
mtspr SPRN_SRR0,r3
mtspr SPRN_SRR1,r4
- SYNC
- RFI
-
-/*
- * void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next);
- *
- * Set up the segment registers for a new context.
- */
-_ENTRY(switch_mmu_context)
- lwz r3,MMCONTEXTID(r4)
- cmpwi cr0,r3,0
- blt- 4f
- mulli r3,r3,897 /* multiply context by skew factor */
- rlwinm r3,r3,4,8,27 /* VSID = (context & 0xfffff) << 4 */
-#ifdef CONFIG_PPC_KUEP
- oris r3, r3, SR_NX@h /* Set Nx */
-#endif
-#ifdef CONFIG_PPC_KUAP
- oris r3, r3, SR_KS@h /* Set Ks */
-#endif
- li r0,NUM_USER_SEGMENTS
- mtctr r0
-
- lwz r4, MM_PGD(r4)
-#ifdef CONFIG_BDI_SWITCH
- /* Context switch the PTE pointer for the Abatron BDI2000.
- * The PGDIR is passed as second argument.
- */
- lis r5, abatron_pteptrs@ha
- stw r4, abatron_pteptrs@l + 0x4(r5)
-#endif
- tophys(r4, r4)
- mtspr SPRN_SPRG_PGDIR, r4
- li r4,0
- isync
-3:
- mtsrin r3,r4
- addi r3,r3,0x111 /* next VSID */
- rlwinm r3,r3,0,8,3 /* clear out any overflow from VSID field */
- addis r4,r4,0x1000 /* address of next segment */
- bdnz 3b
- sync
- isync
- blr
-4: trap
- EMIT_BUG_ENTRY 4b,__FILE__,__LINE__,0
- blr
-EXPORT_SYMBOL(switch_mmu_context)
+ rfi
/*
* An undocumented "feature" of 604e requires that the v bit
@@ -1161,7 +1031,6 @@ EXPORT_SYMBOL(switch_mmu_context)
clear_bats:
li r10,0
-#ifndef CONFIG_PPC_BOOK3S_601
mtspr SPRN_DBAT0U,r10
mtspr SPRN_DBAT0L,r10
mtspr SPRN_DBAT1U,r10
@@ -1170,7 +1039,6 @@ clear_bats:
mtspr SPRN_DBAT2L,r10
mtspr SPRN_DBAT3U,r10
mtspr SPRN_DBAT3L,r10
-#endif
mtspr SPRN_IBAT0U,r10
mtspr SPRN_IBAT0L,r10
mtspr SPRN_IBAT1U,r10
@@ -1205,7 +1073,7 @@ BEGIN_MMU_FTR_SECTION
END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
blr
-_ENTRY(update_bats)
+_GLOBAL(update_bats)
lis r4, 1f@h
ori r4, r4, 1f@l
tophys(r4, r4)
@@ -1219,8 +1087,7 @@ _ENTRY(update_bats)
.align 4
mtspr SPRN_SRR0, r4
mtspr SPRN_SRR1, r3
- SYNC
- RFI
+ rfi
1: bl clear_bats
lis r3, BATS@ha
addi r3, r3, BATS@l
@@ -1239,8 +1106,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
mtmsr r3
mtspr SPRN_SRR0, r7
mtspr SPRN_SRR1, r6
- SYNC
- RFI
+ rfi
flush_tlbs:
lis r10, 0x40
@@ -1261,28 +1127,11 @@ mmu_off:
mtspr SPRN_SRR0,r4
mtspr SPRN_SRR1,r3
sync
- RFI
+ rfi
-/*
- * On 601, we use 3 BATs to map up to 24M of RAM at _PAGE_OFFSET
- * (we keep one for debugging) and on others, we use one 256M BAT.
- */
+/* We use one BAT to map up to 256M of RAM at _PAGE_OFFSET */
initial_bats:
lis r11,PAGE_OFFSET@h
-#ifdef CONFIG_PPC_BOOK3S_601
- ori r11,r11,4 /* set up BAT registers for 601 */
- li r8,0x7f /* valid, block length = 8MB */
- mtspr SPRN_IBAT0U,r11 /* N.B. 601 has valid bit in */
- mtspr SPRN_IBAT0L,r8 /* lower BAT register */
- addis r11,r11,0x800000@h
- addis r8,r8,0x800000@h
- mtspr SPRN_IBAT1U,r11
- mtspr SPRN_IBAT1L,r8
- addis r11,r11,0x800000@h
- addis r8,r8,0x800000@h
- mtspr SPRN_IBAT2U,r11
- mtspr SPRN_IBAT2L,r8
-#else
tophys(r8,r11)
#ifdef CONFIG_SMP
ori r8,r8,0x12 /* R/W access, M=1 */
@@ -1291,11 +1140,10 @@ initial_bats:
#endif /* CONFIG_SMP */
ori r11,r11,BL_256M<<2|0x2 /* set up BAT registers for 604 */
- mtspr SPRN_DBAT0L,r8 /* N.B. 6xx (not 601) have valid */
+ mtspr SPRN_DBAT0L,r8 /* N.B. 6xx have valid */
mtspr SPRN_DBAT0U,r11 /* bit in upper BAT register */
mtspr SPRN_IBAT0L,r8
mtspr SPRN_IBAT0U,r11
-#endif
isync
blr
@@ -1313,13 +1161,8 @@ setup_disp_bat:
beqlr
lwz r11,0(r8)
lwz r8,4(r8)
-#ifndef CONFIG_PPC_BOOK3S_601
mtspr SPRN_DBAT3L,r8
mtspr SPRN_DBAT3U,r11
-#else
- mtspr SPRN_IBAT3L,r8
- mtspr SPRN_IBAT3U,r11
-#endif
blr
#endif /* CONFIG_BOOTX_TEXT */
@@ -1358,61 +1201,4 @@ setup_usbgecko_bat:
blr
#endif
-#ifdef CONFIG_8260
-/* Jump into the system reset for the rom.
- * We first disable the MMU, and then jump to the ROM reset address.
- *
- * r3 is the board info structure, r4 is the location for starting.
- * I use this for building a small kernel that can load other kernels,
- * rather than trying to write or rely on a rom monitor that can tftp load.
- */
- .globl m8260_gorom
-m8260_gorom:
- mfmsr r0
- rlwinm r0,r0,0,17,15 /* clear MSR_EE in r0 */
- sync
- mtmsr r0
- sync
- mfspr r11, SPRN_HID0
- lis r10, 0
- ori r10,r10,HID0_ICE|HID0_DCE
- andc r11, r11, r10
- mtspr SPRN_HID0, r11
- isync
- li r5, MSR_ME|MSR_RI
- lis r6,2f@h
- addis r6,r6,-KERNELBASE@h
- ori r6,r6,2f@l
- mtspr SPRN_SRR0,r6
- mtspr SPRN_SRR1,r5
- isync
- sync
- rfi
-2:
- mtlr r4
- blr
-#endif
-
-
-/*
- * We put a few things here that have to be page-aligned.
- * This stuff goes at the beginning of the data segment,
- * which is page-aligned.
- */
.data
- .globl sdata
-sdata:
- .globl empty_zero_page
-empty_zero_page:
- .space 4096
-EXPORT_SYMBOL(empty_zero_page)
-
- .globl swapper_pg_dir
-swapper_pg_dir:
- .space PGD_TABLE_SIZE
-
-/* Room for two PTE pointers, usually the kernel and current user pointers
- * to their respective root page table.
- */
-abatron_pteptrs:
- .space 8
diff --git a/arch/powerpc/kernel/head_booke.h b/arch/powerpc/kernel/head_booke.h
index 37fc84ed90e3..1cb9d0f7cbf2 100644
--- a/arch/powerpc/kernel/head_booke.h
+++ b/arch/powerpc/kernel/head_booke.h
@@ -34,7 +34,7 @@
*/
#define THREAD_NORMSAVE(offset) (THREAD_NORMSAVES + (offset * 4))
-#ifdef CONFIG_PPC_FSL_BOOK3E
+#ifdef CONFIG_PPC_E500
#define BOOKE_CLEAR_BTB(reg) \
START_BTB_FLUSH_SECTION \
BTB_FLUSH(reg) \
@@ -44,7 +44,7 @@ END_BTB_FLUSH_SECTION
#endif
-#define NORMAL_EXCEPTION_PROLOG(intno) \
+#define NORMAL_EXCEPTION_PROLOG(trapno, intno) \
mtspr SPRN_SPRG_WSCRATCH0, r10; /* save one register */ \
mfspr r10, SPRN_SPRG_THREAD; \
stw r11, THREAD_NORMSAVE(0)(r10); \
@@ -53,6 +53,8 @@ END_BTB_FLUSH_SECTION
mfspr r11, SPRN_SRR1; \
DO_KVM BOOKE_INTERRUPT_##intno SPRN_SRR1; \
andi. r11, r11, MSR_PR; /* check whether user or kernel */\
+ LOAD_REG_IMMEDIATE(r11, MSR_KERNEL); \
+ mtmsr r11; \
mr r11, r1; \
beq 1f; \
BOOKE_CLEAR_BTB(r11) \
@@ -76,12 +78,38 @@ END_BTB_FLUSH_SECTION
stw r1, 0(r11); \
mr r1, r11; \
rlwinm r9,r9,0,14,12; /* clear MSR_WE (necessary?) */\
- stw r0,GPR0(r11); \
- lis r10, STACK_FRAME_REGS_MARKER@ha;/* exception frame marker */ \
- addi r10, r10, STACK_FRAME_REGS_MARKER@l; \
- stw r10, 8(r11); \
- SAVE_4GPRS(3, r11); \
- SAVE_2GPRS(7, r11)
+ COMMON_EXCEPTION_PROLOG_END trapno
+
+.macro COMMON_EXCEPTION_PROLOG_END trapno
+ stw r0,GPR0(r1)
+ lis r10, STACK_FRAME_REGS_MARKER@ha /* exception frame marker */
+ addi r10, r10, STACK_FRAME_REGS_MARKER@l
+ stw r10, 8(r1)
+ li r10, \trapno
+ stw r10,_TRAP(r1)
+ SAVE_GPRS(3, 8, r1)
+ SAVE_NVGPRS(r1)
+ stw r2,GPR2(r1)
+ stw r12,_NIP(r1)
+ stw r9,_MSR(r1)
+ mfctr r10
+ mfspr r2,SPRN_SPRG_THREAD
+ stw r10,_CTR(r1)
+ tovirt(r2, r2)
+ mfspr r10,SPRN_XER
+ addi r2, r2, -THREAD
+ stw r10,_XER(r1)
+ addi r3,r1,STACK_FRAME_OVERHEAD
+.endm
+
+.macro prepare_transfer_to_handler
+#ifdef CONFIG_PPC_E500
+ andi. r12,r9,MSR_PR
+ bne 777f
+ bl prepare_transfer_to_handler
+777:
+#endif
+.endm
.macro SYSCALL_ENTRY trapno intno srr1
mfspr r10, SPRN_SPRG_THREAD
@@ -99,83 +127,21 @@ BEGIN_FTR_SECTION
mr r12, r13
lwz r13, THREAD_NORMSAVE(2)(r10)
FTR_SECTION_ELSE
-#endif
mfcr r12
-#ifdef CONFIG_KVM_BOOKE_HV
ALT_FTR_SECTION_END_IFSET(CPU_FTR_EMB_HV)
+#else
+ mfcr r12
#endif
+ mfspr r9, SPRN_SRR1
BOOKE_CLEAR_BTB(r11)
- lwz r11, TASK_STACK - THREAD(r10)
+ mr r11, r1
+ lwz r1, TASK_STACK - THREAD(r10)
rlwinm r12,r12,0,4,2 /* Clear SO bit in CR */
- ALLOC_STACK_FRAME(r11, THREAD_SIZE - INT_FRAME_SIZE)
- stw r12, _CCR(r11) /* save various registers */
- mflr r12
- stw r12,_LINK(r11)
+ ALLOC_STACK_FRAME(r1, THREAD_SIZE - INT_FRAME_SIZE)
+ stw r12, _CCR(r1)
mfspr r12,SPRN_SRR0
- stw r1, GPR1(r11)
- mfspr r9,SPRN_SRR1
- stw r1, 0(r11)
- mr r1, r11
- stw r12,_NIP(r11)
- rlwinm r9,r9,0,14,12 /* clear MSR_WE (necessary?) */
- lis r12, STACK_FRAME_REGS_MARKER@ha /* exception frame marker */
- stw r2,GPR2(r11)
- addi r12, r12, STACK_FRAME_REGS_MARKER@l
- stw r9,_MSR(r11)
- li r2, \trapno + 1
- stw r12, 8(r11)
- stw r2,_TRAP(r11)
- SAVE_GPR(0, r11)
- SAVE_4GPRS(3, r11)
- SAVE_2GPRS(7, r11)
-
- addi r11,r1,STACK_FRAME_OVERHEAD
- addi r2,r10,-THREAD
- stw r11,PT_REGS(r10)
- /* Check to see if the dbcr0 register is set up to debug. Use the
- internal debug mode bit to do this. */
- lwz r12,THREAD_DBCR0(r10)
- andis. r12,r12,DBCR0_IDM@h
- ACCOUNT_CPU_USER_ENTRY(r2, r11, r12)
- beq+ 3f
- /* From user and task is ptraced - load up global dbcr0 */
- li r12,-1 /* clear all pending debug events */
- mtspr SPRN_DBSR,r12
- lis r11,global_dbcr0@ha
- tophys(r11,r11)
- addi r11,r11,global_dbcr0@l
-#ifdef CONFIG_SMP
- lwz r10, TASK_CPU(r2)
- slwi r10, r10, 3
- add r11, r11, r10
-#endif
- lwz r12,0(r11)
- mtspr SPRN_DBCR0,r12
- lwz r12,4(r11)
- addi r12,r12,-1
- stw r12,4(r11)
-
-3:
- tovirt(r2, r2) /* set r2 to current */
- lis r11, transfer_to_syscall@h
- ori r11, r11, transfer_to_syscall@l
-#ifdef CONFIG_TRACE_IRQFLAGS
- /*
- * If MSR is changing we need to keep interrupts disabled at this point
- * otherwise we might risk taking an interrupt before we tell lockdep
- * they are enabled.
- */
- lis r10, MSR_KERNEL@h
- ori r10, r10, MSR_KERNEL@l
- rlwimi r10, r9, 0, MSR_EE
-#else
- lis r10, (MSR_KERNEL | MSR_EE)@h
- ori r10, r10, (MSR_KERNEL | MSR_EE)@l
-#endif
- mtspr SPRN_SRR1,r10
- mtspr SPRN_SRR0,r11
- SYNC
- RFI /* jump to handler, enable MMU */
+ stw r12,_NIP(r1)
+ b transfer_to_syscall /* jump to handler */
.endm
/* To handle the additional exception priority levels on 40x and Book-E
@@ -183,7 +149,6 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_EMB_HV)
*
* On 40x critical is the only additional level
* On 44x/e500 we have critical and machine check
- * On e200 we have critical and debug (machine check occurs via critical)
*
* Additionally we reserve a SPRG for each priority level so we can free up a
* GPR to use as the base for indirect access to the exception stacks. This
@@ -199,23 +164,21 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_EMB_HV)
#define MC_STACK_BASE mcheckirq_ctx
#define CRIT_STACK_BASE critirq_ctx
-/* only on e500mc/e200 */
+/* only on e500mc */
#define DBG_STACK_BASE dbgirq_ctx
-#define EXC_LVL_FRAME_OVERHEAD (THREAD_SIZE - INT_FRAME_SIZE - EXC_LVL_SIZE)
-
#ifdef CONFIG_SMP
#define BOOKE_LOAD_EXC_LEVEL_STACK(level) \
mfspr r8,SPRN_PIR; \
slwi r8,r8,2; \
addis r8,r8,level##_STACK_BASE@ha; \
lwz r8,level##_STACK_BASE@l(r8); \
- addi r8,r8,EXC_LVL_FRAME_OVERHEAD;
+ addi r8,r8,THREAD_SIZE - INT_FRAME_SIZE;
#else
#define BOOKE_LOAD_EXC_LEVEL_STACK(level) \
lis r8,level##_STACK_BASE@ha; \
lwz r8,level##_STACK_BASE@l(r8); \
- addi r8,r8,EXC_LVL_FRAME_OVERHEAD;
+ addi r8,r8,THREAD_SIZE - INT_FRAME_SIZE;
#endif
/*
@@ -226,7 +189,7 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_EMB_HV)
* registers as the normal prolog above. Instead we use a portion of the
* critical/machine check exception stack at low physical addresses.
*/
-#define EXC_LEVEL_EXCEPTION_PROLOG(exc_level, intno, exc_level_srr0, exc_level_srr1) \
+#define EXC_LEVEL_EXCEPTION_PROLOG(exc_level, trapno, intno, exc_level_srr0, exc_level_srr1) \
mtspr SPRN_SPRG_WSCRATCH_##exc_level,r8; \
BOOKE_LOAD_EXC_LEVEL_STACK(exc_level);/* r8 points to the exc_level stack*/ \
stw r9,GPR9(r8); /* save various registers */\
@@ -238,9 +201,11 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_EMB_HV)
DO_KVM BOOKE_INTERRUPT_##intno exc_level_srr1; \
BOOKE_CLEAR_BTB(r10) \
andi. r11,r11,MSR_PR; \
+ LOAD_REG_IMMEDIATE(r11, MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)); \
+ mtmsr r11; \
mfspr r11,SPRN_SPRG_THREAD; /* if from user, start at top of */\
lwz r11, TASK_STACK - THREAD(r11); /* this thread's kernel stack */\
- addi r11,r11,EXC_LVL_FRAME_OVERHEAD; /* allocate stack frame */\
+ addi r11,r11,THREAD_SIZE - INT_FRAME_SIZE; /* allocate stack frame */\
beq 1f; \
/* COMING FROM USER MODE */ \
stw r9,_CCR(r11); /* save CR */\
@@ -267,16 +232,44 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_EMB_HV)
stw r1,0(r11); \
mr r1,r11; \
rlwinm r9,r9,0,14,12; /* clear MSR_WE (necessary?) */\
- stw r0,GPR0(r11); \
- SAVE_4GPRS(3, r11); \
- SAVE_2GPRS(7, r11)
-
-#define CRITICAL_EXCEPTION_PROLOG(intno) \
- EXC_LEVEL_EXCEPTION_PROLOG(CRIT, intno, SPRN_CSRR0, SPRN_CSRR1)
-#define DEBUG_EXCEPTION_PROLOG \
- EXC_LEVEL_EXCEPTION_PROLOG(DBG, DEBUG, SPRN_DSRR0, SPRN_DSRR1)
-#define MCHECK_EXCEPTION_PROLOG \
- EXC_LEVEL_EXCEPTION_PROLOG(MC, MACHINE_CHECK, \
+ COMMON_EXCEPTION_PROLOG_END trapno
+
+#define SAVE_xSRR(xSRR) \
+ mfspr r0,SPRN_##xSRR##0; \
+ stw r0,_##xSRR##0(r1); \
+ mfspr r0,SPRN_##xSRR##1; \
+ stw r0,_##xSRR##1(r1)
+
+
+.macro SAVE_MMU_REGS
+#ifdef CONFIG_PPC_E500
+ mfspr r0,SPRN_MAS0
+ stw r0,MAS0(r1)
+ mfspr r0,SPRN_MAS1
+ stw r0,MAS1(r1)
+ mfspr r0,SPRN_MAS2
+ stw r0,MAS2(r1)
+ mfspr r0,SPRN_MAS3
+ stw r0,MAS3(r1)
+ mfspr r0,SPRN_MAS6
+ stw r0,MAS6(r1)
+#ifdef CONFIG_PHYS_64BIT
+ mfspr r0,SPRN_MAS7
+ stw r0,MAS7(r1)
+#endif /* CONFIG_PHYS_64BIT */
+#endif /* CONFIG_PPC_E500 */
+#ifdef CONFIG_44x
+ mfspr r0,SPRN_MMUCR
+ stw r0,MMUCR(r1)
+#endif
+.endm
+
+#define CRITICAL_EXCEPTION_PROLOG(trapno, intno) \
+ EXC_LEVEL_EXCEPTION_PROLOG(CRIT, trapno+2, intno, SPRN_CSRR0, SPRN_CSRR1)
+#define DEBUG_EXCEPTION_PROLOG(trapno) \
+ EXC_LEVEL_EXCEPTION_PROLOG(DBG, trapno+8, DEBUG, SPRN_DSRR0, SPRN_DSRR1)
+#define MCHECK_EXCEPTION_PROLOG(trapno) \
+ EXC_LEVEL_EXCEPTION_PROLOG(MC, trapno+4, MACHINE_CHECK, \
SPRN_MCSRR0, SPRN_MCSRR1)
/*
@@ -303,44 +296,34 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_EMB_HV)
.align 5; \
label:
-#define EXCEPTION(n, intno, label, hdlr, xfer) \
+#define EXCEPTION(n, intno, label, hdlr) \
START_EXCEPTION(label); \
- NORMAL_EXCEPTION_PROLOG(intno); \
- addi r3,r1,STACK_FRAME_OVERHEAD; \
- xfer(n, hdlr)
+ NORMAL_EXCEPTION_PROLOG(n, intno); \
+ prepare_transfer_to_handler; \
+ bl hdlr; \
+ b interrupt_return
#define CRITICAL_EXCEPTION(n, intno, label, hdlr) \
START_EXCEPTION(label); \
- CRITICAL_EXCEPTION_PROLOG(intno); \
- addi r3,r1,STACK_FRAME_OVERHEAD; \
- EXC_XFER_TEMPLATE(hdlr, n+2, (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), \
- crit_transfer_to_handler, ret_from_crit_exc)
+ CRITICAL_EXCEPTION_PROLOG(n, intno); \
+ SAVE_MMU_REGS; \
+ SAVE_xSRR(SRR); \
+ prepare_transfer_to_handler; \
+ bl hdlr; \
+ b ret_from_crit_exc
#define MCHECK_EXCEPTION(n, label, hdlr) \
START_EXCEPTION(label); \
- MCHECK_EXCEPTION_PROLOG; \
+ MCHECK_EXCEPTION_PROLOG(n); \
mfspr r5,SPRN_ESR; \
stw r5,_ESR(r11); \
- addi r3,r1,STACK_FRAME_OVERHEAD; \
- EXC_XFER_TEMPLATE(hdlr, n+4, (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), \
- mcheck_transfer_to_handler, ret_from_mcheck_exc)
-
-#define EXC_XFER_TEMPLATE(hdlr, trap, msr, tfer, ret) \
- li r10,trap; \
- stw r10,_TRAP(r11); \
- lis r10,msr@h; \
- ori r10,r10,msr@l; \
- bl tfer; \
- .long hdlr; \
- .long ret
-
-#define EXC_XFER_STD(n, hdlr) \
- EXC_XFER_TEMPLATE(hdlr, n, MSR_KERNEL, transfer_to_handler_full, \
- ret_from_except_full)
-
-#define EXC_XFER_LITE(n, hdlr) \
- EXC_XFER_TEMPLATE(hdlr, n+1, MSR_KERNEL, transfer_to_handler, \
- ret_from_except)
+ SAVE_xSRR(DSRR); \
+ SAVE_xSRR(CSRR); \
+ SAVE_MMU_REGS; \
+ SAVE_xSRR(SRR); \
+ prepare_transfer_to_handler; \
+ bl hdlr; \
+ b ret_from_mcheck_exc
/* Check for a single step debug exception while in an exception
* handler before state has been saved. This is to catch the case
@@ -357,7 +340,7 @@ label:
*/
#define DEBUG_DEBUG_EXCEPTION \
START_EXCEPTION(DebugDebug); \
- DEBUG_EXCEPTION_PROLOG; \
+ DEBUG_EXCEPTION_PROLOG(2000); \
\
/* \
* If there is a single step or branch-taken exception in an \
@@ -405,12 +388,17 @@ label:
\
/* continue normal handling for a debug exception... */ \
2: mfspr r4,SPRN_DBSR; \
- addi r3,r1,STACK_FRAME_OVERHEAD; \
- EXC_XFER_TEMPLATE(DebugException, 0x2008, (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), debug_transfer_to_handler, ret_from_debug_exc)
+ stw r4,_ESR(r11); /* DebugException takes DBSR in _ESR */\
+ SAVE_xSRR(CSRR); \
+ SAVE_MMU_REGS; \
+ SAVE_xSRR(SRR); \
+ prepare_transfer_to_handler; \
+ bl DebugException; \
+ b ret_from_debug_exc
#define DEBUG_CRIT_EXCEPTION \
START_EXCEPTION(DebugCrit); \
- CRITICAL_EXCEPTION_PROLOG(DEBUG); \
+ CRITICAL_EXCEPTION_PROLOG(2000,DEBUG); \
\
/* \
* If there is a single step or branch-taken exception in an \
@@ -458,80 +446,81 @@ label:
\
/* continue normal handling for a critical exception... */ \
2: mfspr r4,SPRN_DBSR; \
- addi r3,r1,STACK_FRAME_OVERHEAD; \
- EXC_XFER_TEMPLATE(DebugException, 0x2002, (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), crit_transfer_to_handler, ret_from_crit_exc)
+ stw r4,_ESR(r11); /* DebugException takes DBSR in _ESR */\
+ SAVE_MMU_REGS; \
+ SAVE_xSRR(SRR); \
+ prepare_transfer_to_handler; \
+ bl DebugException; \
+ b ret_from_crit_exc
#define DATA_STORAGE_EXCEPTION \
START_EXCEPTION(DataStorage) \
- NORMAL_EXCEPTION_PROLOG(DATA_STORAGE); \
+ NORMAL_EXCEPTION_PROLOG(0x300, DATA_STORAGE); \
mfspr r5,SPRN_ESR; /* Grab the ESR and save it */ \
stw r5,_ESR(r11); \
mfspr r4,SPRN_DEAR; /* Grab the DEAR */ \
stw r4, _DEAR(r11); \
- EXC_XFER_LITE(0x0300, handle_page_fault)
+ prepare_transfer_to_handler; \
+ bl do_page_fault; \
+ b interrupt_return
+/*
+ * Instruction TLB Error interrupt handlers may call InstructionStorage
+ * directly without clearing ESR, so the ESR at this point may be left over
+ * from a prior interrupt.
+ *
+ * In any case, do_page_fault for BOOK3E does not use ESR and always expects
+ * dsisr to be 0. ESR_DST from a prior store in particular would confuse fault
+ * handling.
+ */
#define INSTRUCTION_STORAGE_EXCEPTION \
START_EXCEPTION(InstructionStorage) \
- NORMAL_EXCEPTION_PROLOG(INST_STORAGE); \
- mfspr r5,SPRN_ESR; /* Grab the ESR and save it */ \
+ NORMAL_EXCEPTION_PROLOG(0x400, INST_STORAGE); \
+ li r5,0; /* Store 0 in regs->esr (dsisr) */ \
stw r5,_ESR(r11); \
- mr r4,r12; /* Pass SRR0 as arg2 */ \
- stw r4, _DEAR(r11); \
- li r5,0; /* Pass zero as arg3 */ \
- EXC_XFER_LITE(0x0400, handle_page_fault)
+ stw r12, _DEAR(r11); /* Set regs->dear (dar) to SRR0 */ \
+ prepare_transfer_to_handler; \
+ bl do_page_fault; \
+ b interrupt_return
#define ALIGNMENT_EXCEPTION \
START_EXCEPTION(Alignment) \
- NORMAL_EXCEPTION_PROLOG(ALIGNMENT); \
+ NORMAL_EXCEPTION_PROLOG(0x600, ALIGNMENT); \
mfspr r4,SPRN_DEAR; /* Grab the DEAR and save it */ \
stw r4,_DEAR(r11); \
- addi r3,r1,STACK_FRAME_OVERHEAD; \
- EXC_XFER_STD(0x0600, alignment_exception)
+ prepare_transfer_to_handler; \
+ bl alignment_exception; \
+ REST_NVGPRS(r1); \
+ b interrupt_return
#define PROGRAM_EXCEPTION \
START_EXCEPTION(Program) \
- NORMAL_EXCEPTION_PROLOG(PROGRAM); \
+ NORMAL_EXCEPTION_PROLOG(0x700, PROGRAM); \
mfspr r4,SPRN_ESR; /* Grab the ESR and save it */ \
stw r4,_ESR(r11); \
- addi r3,r1,STACK_FRAME_OVERHEAD; \
- EXC_XFER_STD(0x0700, program_check_exception)
+ prepare_transfer_to_handler; \
+ bl program_check_exception; \
+ REST_NVGPRS(r1); \
+ b interrupt_return
#define DECREMENTER_EXCEPTION \
START_EXCEPTION(Decrementer) \
- NORMAL_EXCEPTION_PROLOG(DECREMENTER); \
+ NORMAL_EXCEPTION_PROLOG(0x900, DECREMENTER); \
lis r0,TSR_DIS@h; /* Setup the DEC interrupt mask */ \
mtspr SPRN_TSR,r0; /* Clear the DEC interrupt */ \
- addi r3,r1,STACK_FRAME_OVERHEAD; \
- EXC_XFER_LITE(0x0900, timer_interrupt)
+ prepare_transfer_to_handler; \
+ bl timer_interrupt; \
+ b interrupt_return
#define FP_UNAVAILABLE_EXCEPTION \
START_EXCEPTION(FloatingPointUnavailable) \
- NORMAL_EXCEPTION_PROLOG(FP_UNAVAIL); \
+ NORMAL_EXCEPTION_PROLOG(0x800, FP_UNAVAIL); \
beq 1f; \
bl load_up_fpu; /* if from user, just load it up */ \
b fast_exception_return; \
-1: addi r3,r1,STACK_FRAME_OVERHEAD; \
- EXC_XFER_STD(0x800, kernel_fp_unavailable_exception)
-
-#else /* __ASSEMBLY__ */
-struct exception_regs {
- unsigned long mas0;
- unsigned long mas1;
- unsigned long mas2;
- unsigned long mas3;
- unsigned long mas6;
- unsigned long mas7;
- unsigned long srr0;
- unsigned long srr1;
- unsigned long csrr0;
- unsigned long csrr1;
- unsigned long dsrr0;
- unsigned long dsrr1;
- unsigned long saved_ksp_limit;
-};
-
-/* ensure this structure is always sized to a multiple of the stack alignment */
-#define STACK_EXC_LVL_FRAME_SIZE _ALIGN_UP(sizeof (struct exception_regs), 16)
+1: prepare_transfer_to_handler; \
+ bl kernel_fp_unavailable_exception; \
+ b interrupt_return
#endif /* __ASSEMBLY__ */
#endif /* __HEAD_BOOKE_H__ */
diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c
index d0854320bb50..8db1a15d7acb 100644
--- a/arch/powerpc/kernel/hw_breakpoint.c
+++ b/arch/powerpc/kernel/hw_breakpoint.c
@@ -15,6 +15,7 @@
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp.h>
+#include <linux/spinlock.h>
#include <linux/debugfs.h>
#include <linux/init.h>
@@ -22,15 +23,15 @@
#include <asm/processor.h>
#include <asm/sstep.h>
#include <asm/debug.h>
-#include <asm/debugfs.h>
#include <asm/hvcall.h>
+#include <asm/inst.h>
#include <linux/uaccess.h>
/*
* Stores the breakpoints currently in use on each breakpoint address
* register for every cpu
*/
-static DEFINE_PER_CPU(struct perf_event *, bp_per_reg);
+static DEFINE_PER_CPU(struct perf_event *, bp_per_reg[HBP_NUM_MAX]);
/*
* Returns total number of data or instruction breakpoints available.
@@ -38,10 +39,21 @@ static DEFINE_PER_CPU(struct perf_event *, bp_per_reg);
int hw_breakpoint_slots(int type)
{
if (type == TYPE_DATA)
- return HBP_NUM;
+ return nr_wp_slots();
return 0; /* no instruction breakpoints available */
}
+static bool single_step_pending(void)
+{
+ int i;
+
+ for (i = 0; i < nr_wp_slots(); i++) {
+ if (current->thread.last_hit_ubp[i])
+ return true;
+ }
+ return false;
+}
+
/*
* Install a perf counter breakpoint.
*
@@ -54,16 +66,26 @@ int hw_breakpoint_slots(int type)
int arch_install_hw_breakpoint(struct perf_event *bp)
{
struct arch_hw_breakpoint *info = counter_arch_bp(bp);
- struct perf_event **slot = this_cpu_ptr(&bp_per_reg);
+ struct perf_event **slot;
+ int i;
+
+ for (i = 0; i < nr_wp_slots(); i++) {
+ slot = this_cpu_ptr(&bp_per_reg[i]);
+ if (!*slot) {
+ *slot = bp;
+ break;
+ }
+ }
- *slot = bp;
+ if (WARN_ONCE(i == nr_wp_slots(), "Can't find any breakpoint slot"))
+ return -EBUSY;
/*
* Do not install DABR values if the instruction must be single-stepped.
* If so, DABR will be populated in single_step_dabr_instruction().
*/
- if (current->thread.last_hit_ubp != bp)
- __set_breakpoint(info);
+ if (!single_step_pending())
+ __set_breakpoint(i, info);
return 0;
}
@@ -79,15 +101,274 @@ int arch_install_hw_breakpoint(struct perf_event *bp)
*/
void arch_uninstall_hw_breakpoint(struct perf_event *bp)
{
- struct perf_event **slot = this_cpu_ptr(&bp_per_reg);
+ struct arch_hw_breakpoint null_brk = {0};
+ struct perf_event **slot;
+ int i;
+
+ for (i = 0; i < nr_wp_slots(); i++) {
+ slot = this_cpu_ptr(&bp_per_reg[i]);
+ if (*slot == bp) {
+ *slot = NULL;
+ break;
+ }
+ }
- if (*slot != bp) {
- WARN_ONCE(1, "Can't find the breakpoint");
+ if (WARN_ONCE(i == nr_wp_slots(), "Can't find any breakpoint slot"))
return;
+
+ __set_breakpoint(i, &null_brk);
+}
+
+static bool is_ptrace_bp(struct perf_event *bp)
+{
+ return bp->overflow_handler == ptrace_triggered;
+}
+
+struct breakpoint {
+ struct list_head list;
+ struct perf_event *bp;
+ bool ptrace_bp;
+};
+
+/*
+ * While kernel/events/hw_breakpoint.c does its own synchronization, we cannot
+ * rely on it safely synchronizing internals here; however, we can rely on it
+ * not requesting more breakpoints than available.
+ */
+static DEFINE_SPINLOCK(cpu_bps_lock);
+static DEFINE_PER_CPU(struct breakpoint *, cpu_bps[HBP_NUM_MAX]);
+static DEFINE_SPINLOCK(task_bps_lock);
+static LIST_HEAD(task_bps);
+
+static struct breakpoint *alloc_breakpoint(struct perf_event *bp)
+{
+ struct breakpoint *tmp;
+
+ tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
+ if (!tmp)
+ return ERR_PTR(-ENOMEM);
+ tmp->bp = bp;
+ tmp->ptrace_bp = is_ptrace_bp(bp);
+ return tmp;
+}
+
+static bool bp_addr_range_overlap(struct perf_event *bp1, struct perf_event *bp2)
+{
+ __u64 bp1_saddr, bp1_eaddr, bp2_saddr, bp2_eaddr;
+
+ bp1_saddr = ALIGN_DOWN(bp1->attr.bp_addr, HW_BREAKPOINT_SIZE);
+ bp1_eaddr = ALIGN(bp1->attr.bp_addr + bp1->attr.bp_len, HW_BREAKPOINT_SIZE);
+ bp2_saddr = ALIGN_DOWN(bp2->attr.bp_addr, HW_BREAKPOINT_SIZE);
+ bp2_eaddr = ALIGN(bp2->attr.bp_addr + bp2->attr.bp_len, HW_BREAKPOINT_SIZE);
+
+ return (bp1_saddr < bp2_eaddr && bp1_eaddr > bp2_saddr);
+}
+
+static bool alternate_infra_bp(struct breakpoint *b, struct perf_event *bp)
+{
+ return is_ptrace_bp(bp) ? !b->ptrace_bp : b->ptrace_bp;
+}
+
+static bool can_co_exist(struct breakpoint *b, struct perf_event *bp)
+{
+ return !(alternate_infra_bp(b, bp) && bp_addr_range_overlap(b->bp, bp));
+}
+
+static int task_bps_add(struct perf_event *bp)
+{
+ struct breakpoint *tmp;
+
+ tmp = alloc_breakpoint(bp);
+ if (IS_ERR(tmp))
+ return PTR_ERR(tmp);
+
+ spin_lock(&task_bps_lock);
+ list_add(&tmp->list, &task_bps);
+ spin_unlock(&task_bps_lock);
+ return 0;
+}
+
+static void task_bps_remove(struct perf_event *bp)
+{
+ struct list_head *pos, *q;
+
+ spin_lock(&task_bps_lock);
+ list_for_each_safe(pos, q, &task_bps) {
+ struct breakpoint *tmp = list_entry(pos, struct breakpoint, list);
+
+ if (tmp->bp == bp) {
+ list_del(&tmp->list);
+ kfree(tmp);
+ break;
+ }
}
+ spin_unlock(&task_bps_lock);
+}
- *slot = NULL;
- hw_breakpoint_disable();
+/*
+ * If any task has a breakpoint from the alternate infrastructure,
+ * return true. Otherwise return false.
+ */
+static bool all_task_bps_check(struct perf_event *bp)
+{
+ struct breakpoint *tmp;
+ bool ret = false;
+
+ spin_lock(&task_bps_lock);
+ list_for_each_entry(tmp, &task_bps, list) {
+ if (!can_co_exist(tmp, bp)) {
+ ret = true;
+ break;
+ }
+ }
+ spin_unlock(&task_bps_lock);
+ return ret;
+}
+
+/*
+ * If the same task has a breakpoint from the alternate infrastructure,
+ * return true. Otherwise return false.
+ */
+static bool same_task_bps_check(struct perf_event *bp)
+{
+ struct breakpoint *tmp;
+ bool ret = false;
+
+ spin_lock(&task_bps_lock);
+ list_for_each_entry(tmp, &task_bps, list) {
+ if (tmp->bp->hw.target == bp->hw.target &&
+ !can_co_exist(tmp, bp)) {
+ ret = true;
+ break;
+ }
+ }
+ spin_unlock(&task_bps_lock);
+ return ret;
+}
+
+static int cpu_bps_add(struct perf_event *bp)
+{
+ struct breakpoint **cpu_bp;
+ struct breakpoint *tmp;
+ int i = 0;
+
+ tmp = alloc_breakpoint(bp);
+ if (IS_ERR(tmp))
+ return PTR_ERR(tmp);
+
+ spin_lock(&cpu_bps_lock);
+ cpu_bp = per_cpu_ptr(cpu_bps, bp->cpu);
+ for (i = 0; i < nr_wp_slots(); i++) {
+ if (!cpu_bp[i]) {
+ cpu_bp[i] = tmp;
+ break;
+ }
+ }
+ spin_unlock(&cpu_bps_lock);
+ return 0;
+}
+
+static void cpu_bps_remove(struct perf_event *bp)
+{
+ struct breakpoint **cpu_bp;
+ int i = 0;
+
+ spin_lock(&cpu_bps_lock);
+ cpu_bp = per_cpu_ptr(cpu_bps, bp->cpu);
+ for (i = 0; i < nr_wp_slots(); i++) {
+ if (!cpu_bp[i])
+ continue;
+
+ if (cpu_bp[i]->bp == bp) {
+ kfree(cpu_bp[i]);
+ cpu_bp[i] = NULL;
+ break;
+ }
+ }
+ spin_unlock(&cpu_bps_lock);
+}
+
+static bool cpu_bps_check(int cpu, struct perf_event *bp)
+{
+ struct breakpoint **cpu_bp;
+ bool ret = false;
+ int i;
+
+ spin_lock(&cpu_bps_lock);
+ cpu_bp = per_cpu_ptr(cpu_bps, cpu);
+ for (i = 0; i < nr_wp_slots(); i++) {
+ if (cpu_bp[i] && !can_co_exist(cpu_bp[i], bp)) {
+ ret = true;
+ break;
+ }
+ }
+ spin_unlock(&cpu_bps_lock);
+ return ret;
+}
+
+static bool all_cpu_bps_check(struct perf_event *bp)
+{
+ int cpu;
+
+ for_each_online_cpu(cpu) {
+ if (cpu_bps_check(cpu, bp))
+ return true;
+ }
+ return false;
+}
+
+int arch_reserve_bp_slot(struct perf_event *bp)
+{
+ int ret;
+
+ /* ptrace breakpoint */
+ if (is_ptrace_bp(bp)) {
+ if (all_cpu_bps_check(bp))
+ return -ENOSPC;
+
+ if (same_task_bps_check(bp))
+ return -ENOSPC;
+
+ return task_bps_add(bp);
+ }
+
+ /* perf breakpoint */
+ if (is_kernel_addr(bp->attr.bp_addr))
+ return 0;
+
+ if (bp->hw.target && bp->cpu == -1) {
+ if (same_task_bps_check(bp))
+ return -ENOSPC;
+
+ return task_bps_add(bp);
+ } else if (!bp->hw.target && bp->cpu != -1) {
+ if (all_task_bps_check(bp))
+ return -ENOSPC;
+
+ return cpu_bps_add(bp);
+ }
+
+ if (same_task_bps_check(bp))
+ return -ENOSPC;
+
+ ret = cpu_bps_add(bp);
+ if (ret)
+ return ret;
+ ret = task_bps_add(bp);
+ if (ret)
+ cpu_bps_remove(bp);
+
+ return ret;
+}
+
+void arch_release_bp_slot(struct perf_event *bp)
+{
+ if (!is_kernel_addr(bp->attr.bp_addr)) {
+ if (bp->hw.target)
+ task_bps_remove(bp);
+ if (bp->cpu != -1)
+ cpu_bps_remove(bp);
+ }
}
/*
@@ -102,8 +383,14 @@ void arch_unregister_hw_breakpoint(struct perf_event *bp)
* restoration variables to prevent dangling pointers.
* FIXME, this should not be using bp->ctx at all! Sayeth peterz.
*/
- if (bp->ctx && bp->ctx->task && bp->ctx->task != ((void *)-1L))
- bp->ctx->task->thread.last_hit_ubp = NULL;
+ if (bp->ctx && bp->ctx->task && bp->ctx->task != ((void *)-1L)) {
+ int i;
+
+ for (i = 0; i < nr_wp_slots(); i++) {
+ if (bp->ctx->task->thread.last_hit_ubp[i] == bp)
+ bp->ctx->task->thread.last_hit_ubp[i] = NULL;
+ }
+ }
}
/*
@@ -140,10 +427,10 @@ int arch_bp_generic_fields(int type, int *gen_bp_type)
* <---8 bytes--->
*
* In this case, we should configure hw as:
- * start_addr = address & ~HW_BREAKPOINT_ALIGN
+ * start_addr = address & ~(HW_BREAKPOINT_SIZE - 1)
* len = 16 bytes
*
- * @start_addr and @end_addr are inclusive.
+ * @start_addr is inclusive but @end_addr is exclusive.
*/
static int hw_breakpoint_validate_len(struct arch_hw_breakpoint *hw)
{
@@ -151,14 +438,15 @@ static int hw_breakpoint_validate_len(struct arch_hw_breakpoint *hw)
u16 hw_len;
unsigned long start_addr, end_addr;
- start_addr = hw->address & ~HW_BREAKPOINT_ALIGN;
- end_addr = (hw->address + hw->len - 1) | HW_BREAKPOINT_ALIGN;
- hw_len = end_addr - start_addr + 1;
+ start_addr = ALIGN_DOWN(hw->address, HW_BREAKPOINT_SIZE);
+ end_addr = ALIGN(hw->address + hw->len, HW_BREAKPOINT_SIZE);
+ hw_len = end_addr - start_addr;
if (dawr_enabled()) {
max_len = DAWR_MAX_LEN;
- /* DAWR region can't cross 512 bytes boundary */
- if ((start_addr >> 9) != (end_addr >> 9))
+ /* DAWR region can't cross 512 bytes boundary on p10 predecessors */
+ if (!cpu_has_feature(CPU_FTR_ARCH_31) &&
+ (ALIGN_DOWN(start_addr, SZ_512) != ALIGN_DOWN(end_addr - 1, SZ_512)))
return -EINVAL;
} else if (IS_ENABLED(CONFIG_PPC_8xx)) {
/* 8xx can setup a range without limitation */
@@ -215,90 +503,150 @@ int hw_breakpoint_arch_parse(struct perf_event *bp,
void thread_change_pc(struct task_struct *tsk, struct pt_regs *regs)
{
struct arch_hw_breakpoint *info;
+ int i;
- if (likely(!tsk->thread.last_hit_ubp))
- return;
-
- info = counter_arch_bp(tsk->thread.last_hit_ubp);
- regs->msr &= ~MSR_SE;
- __set_breakpoint(info);
- tsk->thread.last_hit_ubp = NULL;
+ for (i = 0; i < nr_wp_slots(); i++) {
+ if (unlikely(tsk->thread.last_hit_ubp[i]))
+ goto reset;
+ }
+ return;
+
+reset:
+ regs_set_return_msr(regs, regs->msr & ~MSR_SE);
+ for (i = 0; i < nr_wp_slots(); i++) {
+ info = counter_arch_bp(__this_cpu_read(bp_per_reg[i]));
+ __set_breakpoint(i, info);
+ tsk->thread.last_hit_ubp[i] = NULL;
+ }
}
-static bool dar_within_range(unsigned long dar, struct arch_hw_breakpoint *info)
+static bool is_larx_stcx_instr(int type)
{
- return ((info->address <= dar) && (dar - info->address < info->len));
+ return type == LARX || type == STCX;
}
-static bool
-dar_range_overlaps(unsigned long dar, int size, struct arch_hw_breakpoint *info)
+static bool is_octword_vsx_instr(int type, int size)
{
- return ((dar <= info->address + info->len - 1) &&
- (dar + size - 1 >= info->address));
+ return ((type == LOAD_VSX || type == STORE_VSX) && size == 32);
}
/*
- * Handle debug exception notifications.
+ * We've failed in reliably handling the hw-breakpoint. Unregister
+ * it and throw a warning message to let the user know about it.
*/
-static bool stepping_handler(struct pt_regs *regs, struct perf_event *bp,
- struct arch_hw_breakpoint *info)
+static void handler_error(struct perf_event *bp, struct arch_hw_breakpoint *info)
{
- unsigned int instr = 0;
- int ret, type, size;
- struct instruction_op op;
- unsigned long addr = info->address;
-
- if (__get_user_inatomic(instr, (unsigned int *)regs->nip))
- goto fail;
-
- ret = analyse_instr(&op, regs, instr);
- type = GETTYPE(op.type);
- size = GETSIZE(op.type);
+ WARN(1, "Unable to handle hardware breakpoint. Breakpoint at 0x%lx will be disabled.",
+ info->address);
+ perf_event_disable_inatomic(bp);
+}
- if (!ret && (type == LARX || type == STCX)) {
- printk_ratelimited("Breakpoint hit on instruction that can't be emulated."
- " Breakpoint at 0x%lx will be disabled.\n", addr);
- goto disable;
- }
+static void larx_stcx_err(struct perf_event *bp, struct arch_hw_breakpoint *info)
+{
+ printk_ratelimited("Breakpoint hit on instruction that can't be emulated. Breakpoint at 0x%lx will be disabled.\n",
+ info->address);
+ perf_event_disable_inatomic(bp);
+}
- /*
- * If it's extraneous event, we still need to emulate/single-
- * step the instruction, but we don't generate an event.
- */
- if (size && !dar_range_overlaps(regs->dar, size, info))
- info->type |= HW_BRK_TYPE_EXTRANEOUS_IRQ;
+static bool stepping_handler(struct pt_regs *regs, struct perf_event **bp,
+ struct arch_hw_breakpoint **info, int *hit,
+ ppc_inst_t instr)
+{
+ int i;
+ int stepped;
/* Do not emulate user-space instructions, instead single-step them */
if (user_mode(regs)) {
- current->thread.last_hit_ubp = bp;
- regs->msr |= MSR_SE;
+ for (i = 0; i < nr_wp_slots(); i++) {
+ if (!hit[i])
+ continue;
+ current->thread.last_hit_ubp[i] = bp[i];
+ info[i] = NULL;
+ }
+ regs_set_return_msr(regs, regs->msr | MSR_SE);
return false;
}
- if (!emulate_step(regs, instr))
- goto fail;
-
+ stepped = emulate_step(regs, instr);
+ if (!stepped) {
+ for (i = 0; i < nr_wp_slots(); i++) {
+ if (!hit[i])
+ continue;
+ handler_error(bp[i], info[i]);
+ info[i] = NULL;
+ }
+ return false;
+ }
return true;
+}
+
+static void handle_p10dd1_spurious_exception(struct arch_hw_breakpoint **info,
+ int *hit, unsigned long ea)
+{
+ int i;
+ unsigned long hw_end_addr;
-fail:
/*
- * We've failed in reliably handling the hw-breakpoint. Unregister
- * it and throw a warning message to let the user know about it.
+ * Handle spurious exception only when any bp_per_reg is set.
+ * Otherwise this might be created by xmon and not actually a
+ * spurious exception.
*/
- WARN(1, "Unable to handle hardware breakpoint. Breakpoint at "
- "0x%lx will be disabled.", addr);
+ for (i = 0; i < nr_wp_slots(); i++) {
+ if (!info[i])
+ continue;
+
+ hw_end_addr = ALIGN(info[i]->address + info[i]->len, HW_BREAKPOINT_SIZE);
+
+ /*
+ * Ending address of DAWR range is less than starting
+ * address of op.
+ */
+ if ((hw_end_addr - 1) >= ea)
+ continue;
+
+ /*
+ * Those addresses need to be in the same or in two
+ * consecutive 512B blocks;
+ */
+ if (((hw_end_addr - 1) >> 10) != (ea >> 10))
+ continue;
+
+ /*
+ * 'op address + 64B' generates an address that has a
+ * carry into bit 52 (crosses 2K boundary).
+ */
+ if ((ea & 0x800) == ((ea + 64) & 0x800))
+ continue;
-disable:
- perf_event_disable_inatomic(bp);
- return false;
+ break;
+ }
+
+ if (i == nr_wp_slots())
+ return;
+
+ for (i = 0; i < nr_wp_slots(); i++) {
+ if (info[i]) {
+ hit[i] = 1;
+ info[i]->type |= HW_BRK_TYPE_EXTRANEOUS_IRQ;
+ }
+ }
}
int hw_breakpoint_handler(struct die_args *args)
{
+ bool err = false;
int rc = NOTIFY_STOP;
- struct perf_event *bp;
+ struct perf_event *bp[HBP_NUM_MAX] = { NULL };
struct pt_regs *regs = args->regs;
- struct arch_hw_breakpoint *info;
+ struct arch_hw_breakpoint *info[HBP_NUM_MAX] = { NULL };
+ int i;
+ int hit[HBP_NUM_MAX] = {0};
+ int nr_hit = 0;
+ bool ptrace_bp = false;
+ ppc_inst_t instr = ppc_inst(0);
+ int type = 0;
+ int size = 0;
+ unsigned long ea;
/* Disable breakpoints during exception handling */
hw_breakpoint_disable();
@@ -311,12 +659,46 @@ int hw_breakpoint_handler(struct die_args *args)
*/
rcu_read_lock();
- bp = __this_cpu_read(bp_per_reg);
- if (!bp) {
- rc = NOTIFY_DONE;
- goto out;
+ if (!IS_ENABLED(CONFIG_PPC_8xx))
+ wp_get_instr_detail(regs, &instr, &type, &size, &ea);
+
+ for (i = 0; i < nr_wp_slots(); i++) {
+ bp[i] = __this_cpu_read(bp_per_reg[i]);
+ if (!bp[i])
+ continue;
+
+ info[i] = counter_arch_bp(bp[i]);
+ info[i]->type &= ~HW_BRK_TYPE_EXTRANEOUS_IRQ;
+
+ if (wp_check_constraints(regs, instr, ea, type, size, info[i])) {
+ if (!IS_ENABLED(CONFIG_PPC_8xx) &&
+ ppc_inst_equal(instr, ppc_inst(0))) {
+ handler_error(bp[i], info[i]);
+ info[i] = NULL;
+ err = 1;
+ continue;
+ }
+
+ if (is_ptrace_bp(bp[i]))
+ ptrace_bp = true;
+ hit[i] = 1;
+ nr_hit++;
+ }
+ }
+
+ if (err)
+ goto reset;
+
+ if (!nr_hit) {
+ /* Workaround for Power10 DD1 */
+ if (!IS_ENABLED(CONFIG_PPC_8xx) && mfspr(SPRN_PVR) == 0x800100 &&
+ is_octword_vsx_instr(type, size)) {
+ handle_p10dd1_spurious_exception(info, hit, ea);
+ } else {
+ rc = NOTIFY_DONE;
+ goto out;
+ }
}
- info = counter_arch_bp(bp);
/*
* Return early after invoking user-callback function without restoring
@@ -324,29 +706,50 @@ int hw_breakpoint_handler(struct die_args *args)
* one-shot mode. The ptrace-ed process will receive the SIGTRAP signal
* generated in do_dabr().
*/
- if (bp->overflow_handler == ptrace_triggered) {
- perf_bp_event(bp, regs);
+ if (ptrace_bp) {
+ for (i = 0; i < nr_wp_slots(); i++) {
+ if (!hit[i])
+ continue;
+ perf_bp_event(bp[i], regs);
+ info[i] = NULL;
+ }
rc = NOTIFY_DONE;
- goto out;
+ goto reset;
}
- info->type &= ~HW_BRK_TYPE_EXTRANEOUS_IRQ;
- if (IS_ENABLED(CONFIG_PPC_8xx)) {
- if (!dar_within_range(regs->dar, info))
- info->type |= HW_BRK_TYPE_EXTRANEOUS_IRQ;
- } else {
- if (!stepping_handler(regs, bp, info))
- goto out;
+ if (!IS_ENABLED(CONFIG_PPC_8xx)) {
+ if (is_larx_stcx_instr(type)) {
+ for (i = 0; i < nr_wp_slots(); i++) {
+ if (!hit[i])
+ continue;
+ larx_stcx_err(bp[i], info[i]);
+ info[i] = NULL;
+ }
+ goto reset;
+ }
+
+ if (!stepping_handler(regs, bp, info, hit, instr))
+ goto reset;
}
/*
* As a policy, the callback is invoked in a 'trigger-after-execute'
* fashion
*/
- if (!(info->type & HW_BRK_TYPE_EXTRANEOUS_IRQ))
- perf_bp_event(bp, regs);
+ for (i = 0; i < nr_wp_slots(); i++) {
+ if (!hit[i])
+ continue;
+ if (!(info[i]->type & HW_BRK_TYPE_EXTRANEOUS_IRQ))
+ perf_bp_event(bp[i], regs);
+ }
+
+reset:
+ for (i = 0; i < nr_wp_slots(); i++) {
+ if (!info[i])
+ continue;
+ __set_breakpoint(i, info[i]);
+ }
- __set_breakpoint(info);
out:
rcu_read_unlock();
return rc;
@@ -361,26 +764,43 @@ static int single_step_dabr_instruction(struct die_args *args)
struct pt_regs *regs = args->regs;
struct perf_event *bp = NULL;
struct arch_hw_breakpoint *info;
+ int i;
+ bool found = false;
- bp = current->thread.last_hit_ubp;
/*
* Check if we are single-stepping as a result of a
* previous HW Breakpoint exception
*/
- if (!bp)
+ for (i = 0; i < nr_wp_slots(); i++) {
+ bp = current->thread.last_hit_ubp[i];
+
+ if (!bp)
+ continue;
+
+ found = true;
+ info = counter_arch_bp(bp);
+
+ /*
+ * We shall invoke the user-defined callback function in the
+ * single stepping handler to confirm to 'trigger-after-execute'
+ * semantics
+ */
+ if (!(info->type & HW_BRK_TYPE_EXTRANEOUS_IRQ))
+ perf_bp_event(bp, regs);
+ current->thread.last_hit_ubp[i] = NULL;
+ }
+
+ if (!found)
return NOTIFY_DONE;
- info = counter_arch_bp(bp);
+ for (i = 0; i < nr_wp_slots(); i++) {
+ bp = __this_cpu_read(bp_per_reg[i]);
+ if (!bp)
+ continue;
- /*
- * We shall invoke the user-defined callback function in the single
- * stepping handler to confirm to 'trigger-after-execute' semantics
- */
- if (!(info->type & HW_BRK_TYPE_EXTRANEOUS_IRQ))
- perf_bp_event(bp, regs);
-
- __set_breakpoint(info);
- current->thread.last_hit_ubp = NULL;
+ info = counter_arch_bp(bp);
+ __set_breakpoint(i, info);
+ }
/*
* If the process was being single-stepped by ptrace, let the
@@ -419,13 +839,32 @@ NOKPROBE_SYMBOL(hw_breakpoint_exceptions_notify);
*/
void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
{
+ int i;
struct thread_struct *t = &tsk->thread;
- unregister_hw_breakpoint(t->ptrace_bps[0]);
- t->ptrace_bps[0] = NULL;
+ for (i = 0; i < nr_wp_slots(); i++) {
+ unregister_hw_breakpoint(t->ptrace_bps[i]);
+ t->ptrace_bps[i] = NULL;
+ }
}
void hw_breakpoint_pmu_read(struct perf_event *bp)
{
/* TODO */
}
+
+void ptrace_triggered(struct perf_event *bp,
+ struct perf_sample_data *data, struct pt_regs *regs)
+{
+ struct perf_event_attr attr;
+
+ /*
+ * Disable the breakpoint request here since ptrace has defined a
+ * one-shot behaviour for breakpoint exceptions in PPC64.
+ * The SIGTRAP signal is generated automatically for us in do_dabr().
+ * We don't have to do anything about that here
+ */
+ attr = bp->attr;
+ attr.disabled = true;
+ modify_user_hw_breakpoint(bp, &attr);
+}
diff --git a/arch/powerpc/kernel/hw_breakpoint_constraints.c b/arch/powerpc/kernel/hw_breakpoint_constraints.c
new file mode 100644
index 000000000000..a74623025f3a
--- /dev/null
+++ b/arch/powerpc/kernel/hw_breakpoint_constraints.c
@@ -0,0 +1,153 @@
+// SPDX-License-Identifier: GPL-2.0+
+#include <linux/kernel.h>
+#include <linux/uaccess.h>
+#include <linux/sched.h>
+#include <asm/hw_breakpoint.h>
+#include <asm/sstep.h>
+#include <asm/cache.h>
+
+static bool dar_in_user_range(unsigned long dar, struct arch_hw_breakpoint *info)
+{
+ return ((info->address <= dar) && (dar - info->address < info->len));
+}
+
+static bool ea_user_range_overlaps(unsigned long ea, int size,
+ struct arch_hw_breakpoint *info)
+{
+ return ((ea < info->address + info->len) &&
+ (ea + size > info->address));
+}
+
+static bool dar_in_hw_range(unsigned long dar, struct arch_hw_breakpoint *info)
+{
+ unsigned long hw_start_addr, hw_end_addr;
+
+ hw_start_addr = ALIGN_DOWN(info->address, HW_BREAKPOINT_SIZE);
+ hw_end_addr = ALIGN(info->address + info->len, HW_BREAKPOINT_SIZE);
+
+ return ((hw_start_addr <= dar) && (hw_end_addr > dar));
+}
+
+static bool ea_hw_range_overlaps(unsigned long ea, int size,
+ struct arch_hw_breakpoint *info)
+{
+ unsigned long hw_start_addr, hw_end_addr;
+ unsigned long align_size = HW_BREAKPOINT_SIZE;
+
+ /*
+ * On p10 predecessors, quadword is handled differently than
+ * other instructions.
+ */
+ if (!cpu_has_feature(CPU_FTR_ARCH_31) && size == 16)
+ align_size = HW_BREAKPOINT_SIZE_QUADWORD;
+
+ hw_start_addr = ALIGN_DOWN(info->address, align_size);
+ hw_end_addr = ALIGN(info->address + info->len, align_size);
+
+ return ((ea < hw_end_addr) && (ea + size > hw_start_addr));
+}
+
+/*
+ * If hw has multiple DAWR registers, we also need to check all
+ * dawrx constraint bits to confirm this is _really_ a valid event.
+ * If type is UNKNOWN, but privilege level matches, consider it as
+ * a positive match.
+ */
+static bool check_dawrx_constraints(struct pt_regs *regs, int type,
+ struct arch_hw_breakpoint *info)
+{
+ if (OP_IS_LOAD(type) && !(info->type & HW_BRK_TYPE_READ))
+ return false;
+
+ /*
+ * The Cache Management instructions other than dcbz never
+ * cause a match. i.e. if type is CACHEOP, the instruction
+ * is dcbz, and dcbz is treated as Store.
+ */
+ if ((OP_IS_STORE(type) || type == CACHEOP) && !(info->type & HW_BRK_TYPE_WRITE))
+ return false;
+
+ if (is_kernel_addr(regs->nip) && !(info->type & HW_BRK_TYPE_KERNEL))
+ return false;
+
+ if (user_mode(regs) && !(info->type & HW_BRK_TYPE_USER))
+ return false;
+
+ return true;
+}
+
+/*
+ * Return true if the event is valid wrt dawr configuration,
+ * including extraneous exception. Otherwise return false.
+ */
+bool wp_check_constraints(struct pt_regs *regs, ppc_inst_t instr,
+ unsigned long ea, int type, int size,
+ struct arch_hw_breakpoint *info)
+{
+ bool in_user_range = dar_in_user_range(regs->dar, info);
+ bool dawrx_constraints;
+
+ /*
+ * 8xx supports only one breakpoint and thus we can
+ * unconditionally return true.
+ */
+ if (IS_ENABLED(CONFIG_PPC_8xx)) {
+ if (!in_user_range)
+ info->type |= HW_BRK_TYPE_EXTRANEOUS_IRQ;
+ return true;
+ }
+
+ if (unlikely(ppc_inst_equal(instr, ppc_inst(0)))) {
+ if (cpu_has_feature(CPU_FTR_ARCH_31) &&
+ !dar_in_hw_range(regs->dar, info))
+ return false;
+
+ return true;
+ }
+
+ dawrx_constraints = check_dawrx_constraints(regs, type, info);
+
+ if (type == UNKNOWN) {
+ if (cpu_has_feature(CPU_FTR_ARCH_31) &&
+ !dar_in_hw_range(regs->dar, info))
+ return false;
+
+ return dawrx_constraints;
+ }
+
+ if (ea_user_range_overlaps(ea, size, info))
+ return dawrx_constraints;
+
+ if (ea_hw_range_overlaps(ea, size, info)) {
+ if (dawrx_constraints) {
+ info->type |= HW_BRK_TYPE_EXTRANEOUS_IRQ;
+ return true;
+ }
+ }
+ return false;
+}
+
+void wp_get_instr_detail(struct pt_regs *regs, ppc_inst_t *instr,
+ int *type, int *size, unsigned long *ea)
+{
+ struct instruction_op op;
+
+ if (__get_user_instr(*instr, (void __user *)regs->nip))
+ return;
+
+ analyse_instr(&op, regs, *instr);
+ *type = GETTYPE(op.type);
+ *ea = op.ea;
+
+ if (!(regs->msr & MSR_64BIT))
+ *ea &= 0xffffffffUL;
+
+
+ *size = GETSIZE(op.type);
+ if (*type == CACHEOP) {
+ *size = l1_dcache_bytes();
+ *ea &= ~(*size - 1);
+ } else if (*type == LOAD_VMX || *type == STORE_VMX) {
+ *ea &= ~(*size - 1);
+ }
+}
diff --git a/arch/powerpc/kernel/idle.c b/arch/powerpc/kernel/idle.c
index 422e31d2f5a2..77cd4c5a2d63 100644
--- a/arch/powerpc/kernel/idle.c
+++ b/arch/powerpc/kernel/idle.c
@@ -37,18 +37,10 @@ static int __init powersave_off(char *arg)
{
ppc_md.power_save = NULL;
cpuidle_disable = IDLE_POWERSAVE_OFF;
- return 0;
+ return 1;
}
__setup("powersave=off", powersave_off);
-#ifdef CONFIG_HOTPLUG_CPU
-void arch_cpu_idle_dead(void)
-{
- sched_preempt_enable_no_resched();
- cpu_die();
-}
-#endif
-
void arch_cpu_idle(void)
{
ppc64_runlatch_off();
@@ -60,9 +52,9 @@ void arch_cpu_idle(void)
* interrupts enabled, some don't.
*/
if (irqs_disabled())
- local_irq_enable();
+ raw_local_irq_enable();
} else {
- local_irq_enable();
+ raw_local_irq_enable();
/*
* Go into low thread priority and possibly
* low power mode.
@@ -90,7 +82,7 @@ void power4_idle(void)
return;
if (cpu_has_feature(CPU_FTR_ALTIVEC))
- asm volatile("DSSALL ; sync" ::: "memory");
+ asm volatile(PPC_DSSALL " ; sync" ::: "memory");
power4_idle_nap();
diff --git a/arch/powerpc/kernel/idle_book3e.S b/arch/powerpc/kernel/idle_64e.S
index cc008de58b05..0fc680e03dee 100644
--- a/arch/powerpc/kernel/idle_book3e.S
+++ b/arch/powerpc/kernel/idle_64e.S
@@ -2,7 +2,7 @@
/*
* Copyright 2010 IBM Corp, Benjamin Herrenschmidt <benh@kernel.crashing.org>
*
- * Generic idle routine for Book3E processors
+ * Generic idle routine for 64 bits e500 processors
*/
#include <linux/threads.h>
@@ -16,8 +16,6 @@
#include <asm/hw_irq.h>
/* 64-bit version only for now */
-#ifdef CONFIG_PPC64
-
.macro BOOK3E_IDLE name loop
_GLOBAL(\name)
/* Save LR for later */
@@ -77,7 +75,7 @@ _GLOBAL(\name)
.macro BOOK3E_IDLE_LOOP
1:
- PPC_WAIT(0)
+ PPC_WAIT_v203
b 1b
.endm
@@ -98,6 +96,4 @@ epapr_ev_idle_start:
BOOK3E_IDLE epapr_ev_idle EPAPR_EV_IDLE_LOOP
-BOOK3E_IDLE book3e_idle BOOK3E_IDLE_LOOP
-
-#endif /* CONFIG_PPC64 */
+BOOK3E_IDLE e500_idle BOOK3E_IDLE_LOOP
diff --git a/arch/powerpc/kernel/idle_6xx.S b/arch/powerpc/kernel/idle_6xx.S
index 433d97bea1f3..3c097356366b 100644
--- a/arch/powerpc/kernel/idle_6xx.S
+++ b/arch/powerpc/kernel/idle_6xx.S
@@ -129,7 +129,7 @@ BEGIN_FTR_SECTION
END_FTR_SECTION_IFCLR(CPU_FTR_NO_DPM)
mtspr SPRN_HID0,r4
BEGIN_FTR_SECTION
- DSSALL
+ PPC_DSSALL
sync
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
lwz r8,TI_LOCAL_FLAGS(r2) /* set napping bit */
@@ -145,9 +145,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
/*
* Return from NAP/DOZE mode, restore some CPU specific registers,
- * we are called with DR/IR still off and r2 containing physical
- * address of current. R11 points to the exception frame (physical
- * address). We have to preserve r10.
+ * R11 points to the exception frame. We have to preserve r10.
*/
_GLOBAL(power_save_ppc32_restore)
lwz r9,_LINK(r11) /* interrupted in ppc6xx_idle: */
@@ -166,11 +164,7 @@ BEGIN_FTR_SECTION
mfspr r9,SPRN_HID0
andis. r9,r9,HID0_NAP@h
beq 1f
-#ifdef CONFIG_VMAP_STACK
addis r9, r11, nap_save_msscr0@ha
-#else
- addis r9,r11,(nap_save_msscr0-KERNELBASE)@ha
-#endif
lwz r9,nap_save_msscr0@l(r9)
mtspr SPRN_MSSCR0, r9
sync
@@ -178,15 +172,12 @@ BEGIN_FTR_SECTION
1:
END_FTR_SECTION_IFSET(CPU_FTR_NAP_DISABLE_L2_PR)
BEGIN_FTR_SECTION
-#ifdef CONFIG_VMAP_STACK
addis r9, r11, nap_save_hid1@ha
-#else
- addis r9,r11,(nap_save_hid1-KERNELBASE)@ha
-#endif
lwz r9,nap_save_hid1@l(r9)
mtspr SPRN_HID1, r9
END_FTR_SECTION_IFSET(CPU_FTR_DUAL_PLL_750FX)
- b transfer_to_handler_cont
+ blr
+_ASM_NOKPROBE_SYMBOL(power_save_ppc32_restore)
.data
diff --git a/arch/powerpc/kernel/idle_e500.S b/arch/powerpc/kernel/idle_85xx.S
index 308f499e146c..9e1bc4502c50 100644
--- a/arch/powerpc/kernel/idle_e500.S
+++ b/arch/powerpc/kernel/idle_85xx.S
@@ -74,19 +74,12 @@ END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
/*
* Return from NAP/DOZE mode, restore some CPU specific registers,
- * r2 containing physical address of current.
- * r11 points to the exception frame (physical address).
+ * r2 containing address of current.
+ * r11 points to the exception frame.
* We have to preserve r10.
*/
_GLOBAL(power_save_ppc32_restore)
lwz r9,_LINK(r11) /* interrupted in e500_idle */
stw r9,_NIP(r11) /* make it do a blr */
-
-#ifdef CONFIG_SMP
- lwz r11,TASK_CPU(r2) /* get cpu number * 4 */
- slwi r11,r11,2
-#else
- li r11,0
-#endif
-
- b transfer_to_handler_cont
+ blr
+_ASM_NOKPROBE_SYMBOL(power_save_ppc32_restore)
diff --git a/arch/powerpc/kernel/idle_book3s.S b/arch/powerpc/kernel/idle_book3s.S
index 22f249b6f58d..3d97fb833834 100644
--- a/arch/powerpc/kernel/idle_book3s.S
+++ b/arch/powerpc/kernel/idle_book3s.S
@@ -52,28 +52,32 @@ _GLOBAL(isa300_idle_stop_mayloss)
std r1,PACAR1(r13)
mflr r4
mfcr r5
- /* use stack red zone rather than a new frame for saving regs */
- std r2,-8*0(r1)
- std r14,-8*1(r1)
- std r15,-8*2(r1)
- std r16,-8*3(r1)
- std r17,-8*4(r1)
- std r18,-8*5(r1)
- std r19,-8*6(r1)
- std r20,-8*7(r1)
- std r21,-8*8(r1)
- std r22,-8*9(r1)
- std r23,-8*10(r1)
- std r24,-8*11(r1)
- std r25,-8*12(r1)
- std r26,-8*13(r1)
- std r27,-8*14(r1)
- std r28,-8*15(r1)
- std r29,-8*16(r1)
- std r30,-8*17(r1)
- std r31,-8*18(r1)
- std r4,-8*19(r1)
- std r5,-8*20(r1)
+ /*
+ * Use the stack red zone rather than a new frame for saving regs since
+ * in the case of no GPR loss the wakeup code branches directly back to
+ * the caller without deallocating the stack frame first.
+ */
+ std r2,-8*1(r1)
+ std r14,-8*2(r1)
+ std r15,-8*3(r1)
+ std r16,-8*4(r1)
+ std r17,-8*5(r1)
+ std r18,-8*6(r1)
+ std r19,-8*7(r1)
+ std r20,-8*8(r1)
+ std r21,-8*9(r1)
+ std r22,-8*10(r1)
+ std r23,-8*11(r1)
+ std r24,-8*12(r1)
+ std r25,-8*13(r1)
+ std r26,-8*14(r1)
+ std r27,-8*15(r1)
+ std r28,-8*16(r1)
+ std r29,-8*17(r1)
+ std r30,-8*18(r1)
+ std r31,-8*19(r1)
+ std r4,-8*20(r1)
+ std r5,-8*21(r1)
/* 168 bytes */
PPC_STOP
b . /* catch bugs */
@@ -89,8 +93,8 @@ _GLOBAL(isa300_idle_stop_mayloss)
*/
_GLOBAL(idle_return_gpr_loss)
ld r1,PACAR1(r13)
- ld r4,-8*19(r1)
- ld r5,-8*20(r1)
+ ld r4,-8*20(r1)
+ ld r5,-8*21(r1)
mtlr r4
mtcr r5
/*
@@ -98,38 +102,40 @@ _GLOBAL(idle_return_gpr_loss)
* from PACATOC. This could be avoided for that less common case
* if KVM saved its r2.
*/
- ld r2,-8*0(r1)
- ld r14,-8*1(r1)
- ld r15,-8*2(r1)
- ld r16,-8*3(r1)
- ld r17,-8*4(r1)
- ld r18,-8*5(r1)
- ld r19,-8*6(r1)
- ld r20,-8*7(r1)
- ld r21,-8*8(r1)
- ld r22,-8*9(r1)
- ld r23,-8*10(r1)
- ld r24,-8*11(r1)
- ld r25,-8*12(r1)
- ld r26,-8*13(r1)
- ld r27,-8*14(r1)
- ld r28,-8*15(r1)
- ld r29,-8*16(r1)
- ld r30,-8*17(r1)
- ld r31,-8*18(r1)
+ ld r2,-8*1(r1)
+ ld r14,-8*2(r1)
+ ld r15,-8*3(r1)
+ ld r16,-8*4(r1)
+ ld r17,-8*5(r1)
+ ld r18,-8*6(r1)
+ ld r19,-8*7(r1)
+ ld r20,-8*8(r1)
+ ld r21,-8*9(r1)
+ ld r22,-8*10(r1)
+ ld r23,-8*11(r1)
+ ld r24,-8*12(r1)
+ ld r25,-8*13(r1)
+ ld r26,-8*14(r1)
+ ld r27,-8*15(r1)
+ ld r28,-8*16(r1)
+ ld r29,-8*17(r1)
+ ld r30,-8*18(r1)
+ ld r31,-8*19(r1)
blr
/*
* This is the sequence required to execute idle instructions, as
* specified in ISA v2.07 (and earlier). MSR[IR] and MSR[DR] must be 0.
- *
- * The 0(r1) slot is used to save r2 in isa206, so use that here.
+ * We have to store a GPR somewhere, ptesync, then reload it, and create
+ * a false dependency on the result of the load. It doesn't matter which
+ * GPR we store, or where we store it. We have already stored r2 to the
+ * stack at -8(r1) in isa206_idle_insn_mayloss, so use that.
*/
#define IDLE_STATE_ENTER_SEQ_NORET(IDLE_INST) \
/* Magic NAP/SLEEP/WINKLE mode enter sequence */ \
- std r2,0(r1); \
+ std r2,-8(r1); \
ptesync; \
- ld r2,0(r1); \
+ ld r2,-8(r1); \
236: cmpd cr0,r2,r2; \
bne 236b; \
IDLE_INST; \
@@ -154,28 +160,32 @@ _GLOBAL(isa206_idle_insn_mayloss)
std r1,PACAR1(r13)
mflr r4
mfcr r5
- /* use stack red zone rather than a new frame for saving regs */
- std r2,-8*0(r1)
- std r14,-8*1(r1)
- std r15,-8*2(r1)
- std r16,-8*3(r1)
- std r17,-8*4(r1)
- std r18,-8*5(r1)
- std r19,-8*6(r1)
- std r20,-8*7(r1)
- std r21,-8*8(r1)
- std r22,-8*9(r1)
- std r23,-8*10(r1)
- std r24,-8*11(r1)
- std r25,-8*12(r1)
- std r26,-8*13(r1)
- std r27,-8*14(r1)
- std r28,-8*15(r1)
- std r29,-8*16(r1)
- std r30,-8*17(r1)
- std r31,-8*18(r1)
- std r4,-8*19(r1)
- std r5,-8*20(r1)
+ /*
+ * Use the stack red zone rather than a new frame for saving regs since
+ * in the case of no GPR loss the wakeup code branches directly back to
+ * the caller without deallocating the stack frame first.
+ */
+ std r2,-8*1(r1)
+ std r14,-8*2(r1)
+ std r15,-8*3(r1)
+ std r16,-8*4(r1)
+ std r17,-8*5(r1)
+ std r18,-8*6(r1)
+ std r19,-8*7(r1)
+ std r20,-8*8(r1)
+ std r21,-8*9(r1)
+ std r22,-8*10(r1)
+ std r23,-8*11(r1)
+ std r24,-8*12(r1)
+ std r25,-8*13(r1)
+ std r26,-8*14(r1)
+ std r27,-8*15(r1)
+ std r28,-8*16(r1)
+ std r29,-8*17(r1)
+ std r30,-8*18(r1)
+ std r31,-8*19(r1)
+ std r4,-8*20(r1)
+ std r5,-8*21(r1)
cmpwi r3,PNV_THREAD_NAP
bne 1f
IDLE_STATE_ENTER_SEQ_NORET(PPC_NAP)
@@ -201,4 +211,8 @@ _GLOBAL(power4_idle_nap)
mtmsrd r7
isync
b 1b
+
+ .globl power4_idle_nap_return
+power4_idle_nap_return:
+ blr
#endif
diff --git a/arch/powerpc/kernel/ima_arch.c b/arch/powerpc/kernel/ima_arch.c
index e34116255ced..957abd592075 100644
--- a/arch/powerpc/kernel/ima_arch.c
+++ b/arch/powerpc/kernel/ima_arch.c
@@ -19,12 +19,12 @@ bool arch_ima_get_secureboot(void)
* to be stored as an xattr or as an appended signature.
*
* To avoid duplicate signature verification as much as possible, the IMA
- * policy rule for module appraisal is added only if CONFIG_MODULE_SIG_FORCE
+ * policy rule for module appraisal is added only if CONFIG_MODULE_SIG
* is not enabled.
*/
static const char *const secure_rules[] = {
"appraise func=KEXEC_KERNEL_CHECK appraise_flag=check_blacklist appraise_type=imasig|modsig",
-#ifndef CONFIG_MODULE_SIG_FORCE
+#ifndef CONFIG_MODULE_SIG
"appraise func=MODULE_CHECK appraise_flag=check_blacklist appraise_type=imasig|modsig",
#endif
NULL
@@ -50,7 +50,7 @@ static const char *const secure_and_trusted_rules[] = {
"measure func=KEXEC_KERNEL_CHECK template=ima-modsig",
"measure func=MODULE_CHECK template=ima-modsig",
"appraise func=KEXEC_KERNEL_CHECK appraise_flag=check_blacklist appraise_type=imasig|modsig",
-#ifndef CONFIG_MODULE_SIG_FORCE
+#ifndef CONFIG_MODULE_SIG
"appraise func=MODULE_CHECK appraise_flag=check_blacklist appraise_type=imasig|modsig",
#endif
NULL
diff --git a/arch/powerpc/kernel/interrupt.c b/arch/powerpc/kernel/interrupt.c
new file mode 100644
index 000000000000..fc6631a80527
--- /dev/null
+++ b/arch/powerpc/kernel/interrupt.c
@@ -0,0 +1,505 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/context_tracking.h>
+#include <linux/err.h>
+#include <linux/compat.h>
+#include <linux/sched/debug.h> /* for show_regs */
+
+#include <asm/kup.h>
+#include <asm/cputime.h>
+#include <asm/hw_irq.h>
+#include <asm/interrupt.h>
+#include <asm/kprobes.h>
+#include <asm/paca.h>
+#include <asm/ptrace.h>
+#include <asm/reg.h>
+#include <asm/signal.h>
+#include <asm/switch_to.h>
+#include <asm/syscall.h>
+#include <asm/time.h>
+#include <asm/tm.h>
+#include <asm/unistd.h>
+
+#if defined(CONFIG_PPC_ADV_DEBUG_REGS) && defined(CONFIG_PPC32)
+unsigned long global_dbcr0[NR_CPUS];
+#endif
+
+#ifdef CONFIG_PPC_BOOK3S_64
+DEFINE_STATIC_KEY_FALSE(interrupt_exit_not_reentrant);
+static inline bool exit_must_hard_disable(void)
+{
+ return static_branch_unlikely(&interrupt_exit_not_reentrant);
+}
+#else
+static inline bool exit_must_hard_disable(void)
+{
+ return true;
+}
+#endif
+
+/*
+ * local irqs must be disabled. Returns false if the caller must re-enable
+ * them, check for new work, and try again.
+ *
+ * This should be called with local irqs disabled, but if they were previously
+ * enabled when the interrupt handler returns (indicating a process-context /
+ * synchronous interrupt) then irqs_enabled should be true.
+ *
+ * If restartable is true, EE/RI can be left on because interrupts are handled
+ * with a restart sequence.
+ */
+static notrace __always_inline bool prep_irq_for_enabled_exit(bool restartable)
+{
+ /* This must be done with RI=1 because tracing may touch vmaps */
+ trace_hardirqs_on();
+
+ if (exit_must_hard_disable() || !restartable)
+ __hard_EE_RI_disable();
+
+#ifdef CONFIG_PPC64
+ /* This pattern matches prep_irq_for_idle */
+ if (unlikely(lazy_irq_pending_nocheck())) {
+ if (exit_must_hard_disable() || !restartable) {
+ local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
+ __hard_RI_enable();
+ }
+ trace_hardirqs_off();
+
+ return false;
+ }
+#endif
+ return true;
+}
+
+static notrace void booke_load_dbcr0(void)
+{
+#ifdef CONFIG_PPC_ADV_DEBUG_REGS
+ unsigned long dbcr0 = current->thread.debug.dbcr0;
+
+ if (likely(!(dbcr0 & DBCR0_IDM)))
+ return;
+
+ /*
+ * Check to see if the dbcr0 register is set up to debug.
+ * Use the internal debug mode bit to do this.
+ */
+ mtmsr(mfmsr() & ~MSR_DE);
+ if (IS_ENABLED(CONFIG_PPC32)) {
+ isync();
+ global_dbcr0[smp_processor_id()] = mfspr(SPRN_DBCR0);
+ }
+ mtspr(SPRN_DBCR0, dbcr0);
+ mtspr(SPRN_DBSR, -1);
+#endif
+}
+
+static void check_return_regs_valid(struct pt_regs *regs)
+{
+#ifdef CONFIG_PPC_BOOK3S_64
+ unsigned long trap, srr0, srr1;
+ static bool warned;
+ u8 *validp;
+ char *h;
+
+ if (trap_is_scv(regs))
+ return;
+
+ trap = TRAP(regs);
+ // EE in HV mode sets HSRRs like 0xea0
+ if (cpu_has_feature(CPU_FTR_HVMODE) && trap == INTERRUPT_EXTERNAL)
+ trap = 0xea0;
+
+ switch (trap) {
+ case 0x980:
+ case INTERRUPT_H_DATA_STORAGE:
+ case 0xe20:
+ case 0xe40:
+ case INTERRUPT_HMI:
+ case 0xe80:
+ case 0xea0:
+ case INTERRUPT_H_FAC_UNAVAIL:
+ case 0x1200:
+ case 0x1500:
+ case 0x1600:
+ case 0x1800:
+ validp = &local_paca->hsrr_valid;
+ if (!*validp)
+ return;
+
+ srr0 = mfspr(SPRN_HSRR0);
+ srr1 = mfspr(SPRN_HSRR1);
+ h = "H";
+
+ break;
+ default:
+ validp = &local_paca->srr_valid;
+ if (!*validp)
+ return;
+
+ srr0 = mfspr(SPRN_SRR0);
+ srr1 = mfspr(SPRN_SRR1);
+ h = "";
+ break;
+ }
+
+ if (srr0 == regs->nip && srr1 == regs->msr)
+ return;
+
+ /*
+ * A NMI / soft-NMI interrupt may have come in after we found
+ * srr_valid and before the SRRs are loaded. The interrupt then
+ * comes in and clobbers SRRs and clears srr_valid. Then we load
+ * the SRRs here and test them above and find they don't match.
+ *
+ * Test validity again after that, to catch such false positives.
+ *
+ * This test in general will have some window for false negatives
+ * and may not catch and fix all such cases if an NMI comes in
+ * later and clobbers SRRs without clearing srr_valid, but hopefully
+ * such things will get caught most of the time, statistically
+ * enough to be able to get a warning out.
+ */
+ barrier();
+
+ if (!*validp)
+ return;
+
+ if (!warned) {
+ warned = true;
+ printk("%sSRR0 was: %lx should be: %lx\n", h, srr0, regs->nip);
+ printk("%sSRR1 was: %lx should be: %lx\n", h, srr1, regs->msr);
+ show_regs(regs);
+ }
+
+ *validp = 0; /* fixup */
+#endif
+}
+
+static notrace unsigned long
+interrupt_exit_user_prepare_main(unsigned long ret, struct pt_regs *regs)
+{
+ unsigned long ti_flags;
+
+again:
+ ti_flags = read_thread_flags();
+ while (unlikely(ti_flags & (_TIF_USER_WORK_MASK & ~_TIF_RESTORE_TM))) {
+ local_irq_enable();
+ if (ti_flags & _TIF_NEED_RESCHED) {
+ schedule();
+ } else {
+ /*
+ * SIGPENDING must restore signal handler function
+ * argument GPRs, and some non-volatiles (e.g., r1).
+ * Restore all for now. This could be made lighter.
+ */
+ if (ti_flags & _TIF_SIGPENDING)
+ ret |= _TIF_RESTOREALL;
+ do_notify_resume(regs, ti_flags);
+ }
+ local_irq_disable();
+ ti_flags = read_thread_flags();
+ }
+
+ if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && IS_ENABLED(CONFIG_PPC_FPU)) {
+ if (IS_ENABLED(CONFIG_PPC_TRANSACTIONAL_MEM) &&
+ unlikely((ti_flags & _TIF_RESTORE_TM))) {
+ restore_tm_state(regs);
+ } else {
+ unsigned long mathflags = MSR_FP;
+
+ if (cpu_has_feature(CPU_FTR_VSX))
+ mathflags |= MSR_VEC | MSR_VSX;
+ else if (cpu_has_feature(CPU_FTR_ALTIVEC))
+ mathflags |= MSR_VEC;
+
+ /*
+ * If userspace MSR has all available FP bits set,
+ * then they are live and no need to restore. If not,
+ * it means the regs were given up and restore_math
+ * may decide to restore them (to avoid taking an FP
+ * fault).
+ */
+ if ((regs->msr & mathflags) != mathflags)
+ restore_math(regs);
+ }
+ }
+
+ check_return_regs_valid(regs);
+
+ user_enter_irqoff();
+ if (!prep_irq_for_enabled_exit(true)) {
+ user_exit_irqoff();
+ local_irq_enable();
+ local_irq_disable();
+ goto again;
+ }
+
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ local_paca->tm_scratch = regs->msr;
+#endif
+
+ booke_load_dbcr0();
+
+ account_cpu_user_exit();
+
+ /* Restore user access locks last */
+ kuap_user_restore(regs);
+
+ return ret;
+}
+
+/*
+ * This should be called after a syscall returns, with r3 the return value
+ * from the syscall. If this function returns non-zero, the system call
+ * exit assembly should additionally load all GPR registers and CTR and XER
+ * from the interrupt frame.
+ *
+ * The function graph tracer can not trace the return side of this function,
+ * because RI=0 and soft mask state is "unreconciled", so it is marked notrace.
+ */
+notrace unsigned long syscall_exit_prepare(unsigned long r3,
+ struct pt_regs *regs,
+ long scv)
+{
+ unsigned long ti_flags;
+ unsigned long ret = 0;
+ bool is_not_scv = !IS_ENABLED(CONFIG_PPC_BOOK3S_64) || !scv;
+
+ CT_WARN_ON(ct_state() == CONTEXT_USER);
+
+ kuap_assert_locked();
+
+ regs->result = r3;
+
+ /* Check whether the syscall is issued inside a restartable sequence */
+ rseq_syscall(regs);
+
+ ti_flags = read_thread_flags();
+
+ if (unlikely(r3 >= (unsigned long)-MAX_ERRNO) && is_not_scv) {
+ if (likely(!(ti_flags & (_TIF_NOERROR | _TIF_RESTOREALL)))) {
+ r3 = -r3;
+ regs->ccr |= 0x10000000; /* Set SO bit in CR */
+ }
+ }
+
+ if (unlikely(ti_flags & _TIF_PERSYSCALL_MASK)) {
+ if (ti_flags & _TIF_RESTOREALL)
+ ret = _TIF_RESTOREALL;
+ else
+ regs->gpr[3] = r3;
+ clear_bits(_TIF_PERSYSCALL_MASK, &current_thread_info()->flags);
+ } else {
+ regs->gpr[3] = r3;
+ }
+
+ if (unlikely(ti_flags & _TIF_SYSCALL_DOTRACE)) {
+ do_syscall_trace_leave(regs);
+ ret |= _TIF_RESTOREALL;
+ }
+
+ local_irq_disable();
+ ret = interrupt_exit_user_prepare_main(ret, regs);
+
+#ifdef CONFIG_PPC64
+ regs->exit_result = ret;
+#endif
+
+ return ret;
+}
+
+#ifdef CONFIG_PPC64
+notrace unsigned long syscall_exit_restart(unsigned long r3, struct pt_regs *regs)
+{
+ /*
+ * This is called when detecting a soft-pending interrupt as well as
+ * an alternate-return interrupt. So we can't just have the alternate
+ * return path clear SRR1[MSR] and set PACA_IRQ_HARD_DIS (unless
+ * the soft-pending case were to fix things up as well). RI might be
+ * disabled, in which case it gets re-enabled by __hard_irq_disable().
+ */
+ __hard_irq_disable();
+ local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
+
+#ifdef CONFIG_PPC_BOOK3S_64
+ set_kuap(AMR_KUAP_BLOCKED);
+#endif
+
+ trace_hardirqs_off();
+ user_exit_irqoff();
+ account_cpu_user_entry();
+
+ BUG_ON(!user_mode(regs));
+
+ regs->exit_result = interrupt_exit_user_prepare_main(regs->exit_result, regs);
+
+ return regs->exit_result;
+}
+#endif
+
+notrace unsigned long interrupt_exit_user_prepare(struct pt_regs *regs)
+{
+ unsigned long ret;
+
+ BUG_ON(regs_is_unrecoverable(regs));
+ BUG_ON(arch_irq_disabled_regs(regs));
+ CT_WARN_ON(ct_state() == CONTEXT_USER);
+
+ /*
+ * We don't need to restore AMR on the way back to userspace for KUAP.
+ * AMR can only have been unlocked if we interrupted the kernel.
+ */
+ kuap_assert_locked();
+
+ local_irq_disable();
+
+ ret = interrupt_exit_user_prepare_main(0, regs);
+
+#ifdef CONFIG_PPC64
+ regs->exit_result = ret;
+#endif
+
+ return ret;
+}
+
+void preempt_schedule_irq(void);
+
+notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs)
+{
+ unsigned long flags;
+ unsigned long ret = 0;
+ unsigned long kuap;
+ bool stack_store = read_thread_flags() & _TIF_EMULATE_STACK_STORE;
+
+ if (regs_is_unrecoverable(regs))
+ unrecoverable_exception(regs);
+ /*
+ * CT_WARN_ON comes here via program_check_exception, so avoid
+ * recursion.
+ *
+ * Skip the assertion on PMIs on 64e to work around a problem caused
+ * by NMI PMIs incorrectly taking this interrupt return path; it's
+ * possible for this to hit after interrupt exit to user has switched
+ * context to user. See also the comment in the performance monitor
+ * handler in exceptions-64e.S
+ */
+ if (!IS_ENABLED(CONFIG_PPC_BOOK3E_64) &&
+ TRAP(regs) != INTERRUPT_PROGRAM &&
+ TRAP(regs) != INTERRUPT_PERFMON)
+ CT_WARN_ON(ct_state() == CONTEXT_USER);
+
+ kuap = kuap_get_and_assert_locked();
+
+ local_irq_save(flags);
+
+ if (!arch_irq_disabled_regs(regs)) {
+ /* Returning to a kernel context with local irqs enabled. */
+ WARN_ON_ONCE(!(regs->msr & MSR_EE));
+again:
+ if (IS_ENABLED(CONFIG_PREEMPT)) {
+ /* Return to preemptible kernel context */
+ if (unlikely(read_thread_flags() & _TIF_NEED_RESCHED)) {
+ if (preempt_count() == 0)
+ preempt_schedule_irq();
+ }
+ }
+
+ check_return_regs_valid(regs);
+
+ /*
+ * Stack store exit can't be restarted because the interrupt
+ * stack frame might have been clobbered.
+ */
+ if (!prep_irq_for_enabled_exit(unlikely(stack_store))) {
+ /*
+ * Replay pending soft-masked interrupts now. Don't
+ * just local_irq_enable(); local_irq_disable(); because
+ * if we are returning from an asynchronous interrupt
+ * here, another one might hit after irqs are enabled,
+ * and it would exit via this same path allowing
+ * another to fire, and so on unbounded.
+ */
+ hard_irq_disable();
+ replay_soft_interrupts();
+ /* Took an interrupt, may have more exit work to do. */
+ goto again;
+ }
+#ifdef CONFIG_PPC64
+ /*
+ * An interrupt may clear MSR[EE] and set this concurrently,
+ * but it will be marked pending and the exit will be retried.
+ * This leaves a racy window where MSR[EE]=0 and HARD_DIS is
+ * clear, until interrupt_exit_kernel_restart() calls
+ * hard_irq_disable(), which will set HARD_DIS again.
+ */
+ local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
+
+ } else {
+ check_return_regs_valid(regs);
+
+ if (unlikely(stack_store))
+ __hard_EE_RI_disable();
+#endif /* CONFIG_PPC64 */
+ }
+
+ if (unlikely(stack_store)) {
+ clear_bits(_TIF_EMULATE_STACK_STORE, &current_thread_info()->flags);
+ ret = 1;
+ }
+
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ local_paca->tm_scratch = regs->msr;
+#endif
+
+ /*
+ * 64s does not want to mfspr(SPRN_AMR) here, because this comes after
+ * mtmsr, which would cause Read-After-Write stalls. Hence, take the
+ * AMR value from the check above.
+ */
+ kuap_kernel_restore(regs, kuap);
+
+ return ret;
+}
+
+#ifdef CONFIG_PPC64
+notrace unsigned long interrupt_exit_user_restart(struct pt_regs *regs)
+{
+ __hard_irq_disable();
+ local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
+
+#ifdef CONFIG_PPC_BOOK3S_64
+ set_kuap(AMR_KUAP_BLOCKED);
+#endif
+
+ trace_hardirqs_off();
+ user_exit_irqoff();
+ account_cpu_user_entry();
+
+ BUG_ON(!user_mode(regs));
+
+ regs->exit_result |= interrupt_exit_user_prepare(regs);
+
+ return regs->exit_result;
+}
+
+/*
+ * No real need to return a value here because the stack store case does not
+ * get restarted.
+ */
+notrace unsigned long interrupt_exit_kernel_restart(struct pt_regs *regs)
+{
+ __hard_irq_disable();
+ local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
+
+#ifdef CONFIG_PPC_BOOK3S_64
+ set_kuap(AMR_KUAP_BLOCKED);
+#endif
+
+ if (regs->softe == IRQS_ENABLED)
+ trace_hardirqs_off();
+
+ BUG_ON(user_mode(regs));
+
+ return interrupt_exit_kernel_prepare(regs);
+}
+#endif
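The C exit paths above share one pattern: exit state is prepared with interrupts hard-disabled, then the code re-checks whether a soft-masked interrupt became pending in the meantime; if so, the pending work is replayed (kernel return) or the whole exit is restarted (user return) before actually leaving the interrupt. The standalone sketch below models only that control flow; pending_irq, replay_pending and prepare_exit are illustrative stand-ins, not kernel APIs.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for the exit/retry loop -- not kernel APIs. */
static bool pending_irq;          /* models PACA_IRQ_* bits other than HARD_DIS */

static void prepare_exit(void)    /* models the per-attempt exit bookkeeping */
{
	printf("preparing interrupt exit\n");
}

static void replay_pending(void)  /* models replay_soft_interrupts() */
{
	printf("replaying soft-masked interrupt\n");
	pending_irq = false;
}

/* Models the again: loop in interrupt_exit_kernel_prepare(). */
static void interrupt_exit_model(void)
{
	for (;;) {
		prepare_exit();
		if (!pending_irq)
			break;        /* nothing slipped in: really return */
		replay_pending();     /* took an interrupt, retry the exit */
	}
	printf("returning from interrupt\n");
}

int main(void)
{
	pending_irq = true;           /* pretend an IRQ arrived mid-exit */
	interrupt_exit_model();
	return 0;
}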
diff --git a/arch/powerpc/kernel/interrupt_64.S b/arch/powerpc/kernel/interrupt_64.S
new file mode 100644
index 000000000000..a019ed6fc839
--- /dev/null
+++ b/arch/powerpc/kernel/interrupt_64.S
@@ -0,0 +1,734 @@
+#include <asm/asm-offsets.h>
+#include <asm/bug.h>
+#ifdef CONFIG_PPC_BOOK3S
+#include <asm/exception-64s.h>
+#else
+#include <asm/exception-64e.h>
+#endif
+#include <asm/feature-fixups.h>
+#include <asm/head-64.h>
+#include <asm/hw_irq.h>
+#include <asm/kup.h>
+#include <asm/mmu.h>
+#include <asm/ppc_asm.h>
+#include <asm/ptrace.h>
+
+ .align 7
+
+.macro DEBUG_SRR_VALID srr
+#ifdef CONFIG_PPC_RFI_SRR_DEBUG
+ .ifc \srr,srr
+ mfspr r11,SPRN_SRR0
+ ld r12,_NIP(r1)
+ clrrdi r11,r11,2
+ clrrdi r12,r12,2
+100: tdne r11,r12
+ EMIT_WARN_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
+ mfspr r11,SPRN_SRR1
+ ld r12,_MSR(r1)
+100: tdne r11,r12
+ EMIT_WARN_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
+ .else
+ mfspr r11,SPRN_HSRR0
+ ld r12,_NIP(r1)
+ clrrdi r11,r11,2
+ clrrdi r12,r12,2
+100: tdne r11,r12
+ EMIT_WARN_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
+ mfspr r11,SPRN_HSRR1
+ ld r12,_MSR(r1)
+100: tdne r11,r12
+ EMIT_WARN_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
+ .endif
+#endif
+.endm
+
+#ifdef CONFIG_PPC_BOOK3S
+.macro system_call_vectored name trapnr
+ .globl system_call_vectored_\name
+system_call_vectored_\name:
+_ASM_NOKPROBE_SYMBOL(system_call_vectored_\name)
+ SCV_INTERRUPT_TO_KERNEL
+ mr r10,r1
+ ld r1,PACAKSAVE(r13)
+ std r10,0(r1)
+ std r11,_NIP(r1)
+ std r12,_MSR(r1)
+ std r0,GPR0(r1)
+ std r10,GPR1(r1)
+ std r2,GPR2(r1)
+ LOAD_PACA_TOC()
+ mfcr r12
+ li r11,0
+ /* Save syscall parameters in r3-r8 */
+ SAVE_GPRS(3, 8, r1)
+ /* Zero r9-r12, this should only be required when restoring all GPRs */
+ std r11,GPR9(r1)
+ std r11,GPR10(r1)
+ std r11,GPR11(r1)
+ std r11,GPR12(r1)
+ std r9,GPR13(r1)
+ SAVE_NVGPRS(r1)
+ std r11,_XER(r1)
+ std r11,_LINK(r1)
+ std r11,_CTR(r1)
+
+ li r11,\trapnr
+ std r11,_TRAP(r1)
+ std r12,_CCR(r1)
+ std r3,ORIG_GPR3(r1)
+ /* Calling convention has r3 = regs, r4 = orig r0 */
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ mr r4,r0
+ LOAD_REG_IMMEDIATE(r11, STACK_FRAME_REGS_MARKER)
+ std r11,-16(r3) /* "regshere" marker */
+
+BEGIN_FTR_SECTION
+ HMT_MEDIUM
+END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
+
+ /*
+ * scv enters with MSR[EE]=1 and is immediately considered soft-masked.
+ * The entry vector already sets PACAIRQSOFTMASK to IRQS_ALL_DISABLED,
+ * and interrupts may be masked and pending already.
+ * system_call_exception() will call trace_hardirqs_off() which means
+ * interrupts could already have been blocked before trace_hardirqs_off,
+ * but this is the best we can do.
+ */
+
+ bl system_call_exception
+
+.Lsyscall_vectored_\name\()_exit:
+ addi r4,r1,STACK_FRAME_OVERHEAD
+ li r5,1 /* scv */
+ bl syscall_exit_prepare
+ std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
+.Lsyscall_vectored_\name\()_rst_start:
+ lbz r11,PACAIRQHAPPENED(r13)
+ andi. r11,r11,(~PACA_IRQ_HARD_DIS)@l
+ bne- syscall_vectored_\name\()_restart
+ li r11,IRQS_ENABLED
+ stb r11,PACAIRQSOFTMASK(r13)
+ li r11,0
+ stb r11,PACAIRQHAPPENED(r13) # clear out possible HARD_DIS
+
+ ld r2,_CCR(r1)
+ ld r4,_NIP(r1)
+ ld r5,_MSR(r1)
+
+BEGIN_FTR_SECTION
+ stdcx. r0,0,r1 /* to clear the reservation */
+END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
+
+BEGIN_FTR_SECTION
+ HMT_MEDIUM_LOW
+END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
+
+ cmpdi r3,0
+ bne .Lsyscall_vectored_\name\()_restore_regs
+
+ /* rfscv returns with LR->NIA and CTR->MSR */
+ mtlr r4
+ mtctr r5
+
+ /* Could zero these as per ABI, but we may consider a stricter ABI
+ * which preserves these if libc implementations can benefit, so
+ * restore them for now until further measurement is done. */
+ REST_GPR(0, r1)
+ REST_GPRS(4, 8, r1)
+ /* Zero volatile regs that may contain sensitive kernel data */
+ ZEROIZE_GPRS(9, 12)
+ mtspr SPRN_XER,r0
+
+ /*
+ * We don't need to restore AMR on the way back to userspace for KUAP.
+ * The value of AMR only matters while we're in the kernel.
+ */
+ mtcr r2
+ REST_GPRS(2, 3, r1)
+ REST_GPR(13, r1)
+ REST_GPR(1, r1)
+ RFSCV_TO_USER
+ b . /* prevent speculative execution */
+
+.Lsyscall_vectored_\name\()_restore_regs:
+ mtspr SPRN_SRR0,r4
+ mtspr SPRN_SRR1,r5
+
+ ld r3,_CTR(r1)
+ ld r4,_LINK(r1)
+ ld r5,_XER(r1)
+
+ REST_NVGPRS(r1)
+ REST_GPR(0, r1)
+ mtcr r2
+ mtctr r3
+ mtlr r4
+ mtspr SPRN_XER,r5
+ REST_GPRS(2, 13, r1)
+ REST_GPR(1, r1)
+ RFI_TO_USER
+.Lsyscall_vectored_\name\()_rst_end:
+
+syscall_vectored_\name\()_restart:
+_ASM_NOKPROBE_SYMBOL(syscall_vectored_\name\()_restart)
+ GET_PACA(r13)
+ ld r1,PACA_EXIT_SAVE_R1(r13)
+ LOAD_PACA_TOC()
+ ld r3,RESULT(r1)
+ addi r4,r1,STACK_FRAME_OVERHEAD
+ li r11,IRQS_ALL_DISABLED
+ stb r11,PACAIRQSOFTMASK(r13)
+ bl syscall_exit_restart
+ std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
+ b .Lsyscall_vectored_\name\()_rst_start
+1:
+
+SOFT_MASK_TABLE(.Lsyscall_vectored_\name\()_rst_start, 1b)
+RESTART_TABLE(.Lsyscall_vectored_\name\()_rst_start, .Lsyscall_vectored_\name\()_rst_end, syscall_vectored_\name\()_restart)
+
+.endm
+
+system_call_vectored common 0x3000
+
+/*
+ * We instantiate another entry copy for the SIGILL variant, with TRAP=0x7ff0
+ * which is tested by system_call_exception when r0 is -1 (as set by vector
+ * entry code).
+ */
+system_call_vectored sigill 0x7ff0
+
+#endif /* CONFIG_PPC_BOOK3S */
+
+ .balign IFETCH_ALIGN_BYTES
+ .globl system_call_common_real
+system_call_common_real:
+_ASM_NOKPROBE_SYMBOL(system_call_common_real)
+ ld r10,PACAKMSR(r13) /* get MSR value for kernel */
+ mtmsrd r10
+
+ .balign IFETCH_ALIGN_BYTES
+ .globl system_call_common
+system_call_common:
+_ASM_NOKPROBE_SYMBOL(system_call_common)
+ mr r10,r1
+ ld r1,PACAKSAVE(r13)
+ std r10,0(r1)
+ std r11,_NIP(r1)
+ std r12,_MSR(r1)
+ std r0,GPR0(r1)
+ std r10,GPR1(r1)
+ std r2,GPR2(r1)
+#ifdef CONFIG_PPC_E500
+START_BTB_FLUSH_SECTION
+ BTB_FLUSH(r10)
+END_BTB_FLUSH_SECTION
+#endif
+ LOAD_PACA_TOC()
+ mfcr r12
+ li r11,0
+ /* Save syscall parameters in r3-r8 */
+ SAVE_GPRS(3, 8, r1)
+ /* Zero r9-r12, this should only be required when restoring all GPRs */
+ std r11,GPR9(r1)
+ std r11,GPR10(r1)
+ std r11,GPR11(r1)
+ std r11,GPR12(r1)
+ std r9,GPR13(r1)
+ SAVE_NVGPRS(r1)
+ std r11,_XER(r1)
+ std r11,_CTR(r1)
+ mflr r10
+
+ /*
+ * This clears CR0.SO (bit 28), which is the error indication on
+ * return from this system call.
+ */
+ rldimi r12,r11,28,(63-28)
+ li r11,0xc00
+ std r10,_LINK(r1)
+ std r11,_TRAP(r1)
+ std r12,_CCR(r1)
+ std r3,ORIG_GPR3(r1)
+ /* Calling convention has r3 = regs, r4 = orig r0 */
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ mr r4,r0
+ LOAD_REG_IMMEDIATE(r11, STACK_FRAME_REGS_MARKER)
+ std r11,-16(r3) /* "regshere" marker */
+
+#ifdef CONFIG_PPC_BOOK3S
+ li r11,1
+ stb r11,PACASRR_VALID(r13)
+#endif
+
+ /*
+ * We always enter kernel from userspace with irq soft-mask enabled and
+ * nothing pending. system_call_exception() will call
+ * trace_hardirqs_off().
+ */
+ li r11,IRQS_ALL_DISABLED
+ stb r11,PACAIRQSOFTMASK(r13)
+#ifdef CONFIG_PPC_BOOK3S
+ li r12,-1 /* Set MSR_EE and MSR_RI */
+ mtmsrd r12,1
+#else
+ wrteei 1
+#endif
+
+ bl system_call_exception
+
+.Lsyscall_exit:
+ addi r4,r1,STACK_FRAME_OVERHEAD
+ li r5,0 /* !scv */
+ bl syscall_exit_prepare
+ std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
+#ifdef CONFIG_PPC_BOOK3S
+.Lsyscall_rst_start:
+ lbz r11,PACAIRQHAPPENED(r13)
+ andi. r11,r11,(~PACA_IRQ_HARD_DIS)@l
+ bne- syscall_restart
+#endif
+ li r11,IRQS_ENABLED
+ stb r11,PACAIRQSOFTMASK(r13)
+ li r11,0
+ stb r11,PACAIRQHAPPENED(r13) # clear out possible HARD_DIS
+
+ ld r2,_CCR(r1)
+ ld r6,_LINK(r1)
+ mtlr r6
+
+#ifdef CONFIG_PPC_BOOK3S
+ lbz r4,PACASRR_VALID(r13)
+ cmpdi r4,0
+ bne 1f
+ li r4,0
+ stb r4,PACASRR_VALID(r13)
+#endif
+ ld r4,_NIP(r1)
+ ld r5,_MSR(r1)
+ mtspr SPRN_SRR0,r4
+ mtspr SPRN_SRR1,r5
+1:
+ DEBUG_SRR_VALID srr
+
+BEGIN_FTR_SECTION
+ stdcx. r0,0,r1 /* to clear the reservation */
+END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
+
+ cmpdi r3,0
+ bne .Lsyscall_restore_regs
+ /* Zero volatile regs that may contain sensitive kernel data */
+ ZEROIZE_GPR(0)
+ ZEROIZE_GPRS(4, 12)
+ mtctr r0
+ mtspr SPRN_XER,r0
+.Lsyscall_restore_regs_cont:
+
+BEGIN_FTR_SECTION
+ HMT_MEDIUM_LOW
+END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
+
+ /*
+ * We don't need to restore AMR on the way back to userspace for KUAP.
+ * The value of AMR only matters while we're in the kernel.
+ */
+ mtcr r2
+ REST_GPRS(2, 3, r1)
+ REST_GPR(13, r1)
+ REST_GPR(1, r1)
+ RFI_TO_USER
+ b . /* prevent speculative execution */
+
+.Lsyscall_restore_regs:
+ ld r3,_CTR(r1)
+ ld r4,_XER(r1)
+ REST_NVGPRS(r1)
+ mtctr r3
+ mtspr SPRN_XER,r4
+ REST_GPR(0, r1)
+ REST_GPRS(4, 12, r1)
+ b .Lsyscall_restore_regs_cont
+.Lsyscall_rst_end:
+
+#ifdef CONFIG_PPC_BOOK3S
+syscall_restart:
+_ASM_NOKPROBE_SYMBOL(syscall_restart)
+ GET_PACA(r13)
+ ld r1,PACA_EXIT_SAVE_R1(r13)
+ LOAD_PACA_TOC()
+ ld r3,RESULT(r1)
+ addi r4,r1,STACK_FRAME_OVERHEAD
+ li r11,IRQS_ALL_DISABLED
+ stb r11,PACAIRQSOFTMASK(r13)
+ bl syscall_exit_restart
+ std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
+ b .Lsyscall_rst_start
+1:
+
+SOFT_MASK_TABLE(.Lsyscall_rst_start, 1b)
+RESTART_TABLE(.Lsyscall_rst_start, .Lsyscall_rst_end, syscall_restart)
+#endif
+
+ /*
+ * If MSR EE/RI was never enabled, IRQs not reconciled, NVGPRs not
+ * touched, no exit work created, then this can be used.
+ */
+ .balign IFETCH_ALIGN_BYTES
+ .globl fast_interrupt_return_srr
+fast_interrupt_return_srr:
+_ASM_NOKPROBE_SYMBOL(fast_interrupt_return_srr)
+ kuap_check_amr r3, r4
+ ld r5,_MSR(r1)
+ andi. r0,r5,MSR_PR
+#ifdef CONFIG_PPC_BOOK3S
+ beq 1f
+ kuap_user_restore r3, r4
+ b .Lfast_user_interrupt_return_srr
+1: kuap_kernel_restore r3, r4
+ andi. r0,r5,MSR_RI
+ li r3,0 /* 0 return value, no EMULATE_STACK_STORE */
+ bne+ .Lfast_kernel_interrupt_return_srr
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ bl unrecoverable_exception
+ b . /* should not get here */
+#else
+ bne .Lfast_user_interrupt_return_srr
+ b .Lfast_kernel_interrupt_return_srr
+#endif
+
+.macro interrupt_return_macro srr
+ .balign IFETCH_ALIGN_BYTES
+ .globl interrupt_return_\srr
+interrupt_return_\srr\():
+_ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\())
+ ld r4,_MSR(r1)
+ andi. r0,r4,MSR_PR
+ beq interrupt_return_\srr\()_kernel
+interrupt_return_\srr\()_user: /* make backtraces match the _kernel variant */
+_ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_user)
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ bl interrupt_exit_user_prepare
+ cmpdi r3,0
+ bne- .Lrestore_nvgprs_\srr
+.Lrestore_nvgprs_\srr\()_cont:
+ std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
+#ifdef CONFIG_PPC_BOOK3S
+.Linterrupt_return_\srr\()_user_rst_start:
+ lbz r11,PACAIRQHAPPENED(r13)
+ andi. r11,r11,(~PACA_IRQ_HARD_DIS)@l
+ bne- interrupt_return_\srr\()_user_restart
+#endif
+ li r11,IRQS_ENABLED
+ stb r11,PACAIRQSOFTMASK(r13)
+ li r11,0
+ stb r11,PACAIRQHAPPENED(r13) # clear out possible HARD_DIS
+
+.Lfast_user_interrupt_return_\srr\():
+#ifdef CONFIG_PPC_BOOK3S
+ .ifc \srr,srr
+ lbz r4,PACASRR_VALID(r13)
+ .else
+ lbz r4,PACAHSRR_VALID(r13)
+ .endif
+ cmpdi r4,0
+ li r4,0
+ bne 1f
+#endif
+ ld r11,_NIP(r1)
+ ld r12,_MSR(r1)
+ .ifc \srr,srr
+ mtspr SPRN_SRR0,r11
+ mtspr SPRN_SRR1,r12
+1:
+#ifdef CONFIG_PPC_BOOK3S
+ stb r4,PACASRR_VALID(r13)
+#endif
+ .else
+ mtspr SPRN_HSRR0,r11
+ mtspr SPRN_HSRR1,r12
+1:
+#ifdef CONFIG_PPC_BOOK3S
+ stb r4,PACAHSRR_VALID(r13)
+#endif
+ .endif
+ DEBUG_SRR_VALID \srr
+
+#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
+ lbz r4,PACAIRQSOFTMASK(r13)
+ tdnei r4,IRQS_ENABLED
+#endif
+
+BEGIN_FTR_SECTION
+ ld r10,_PPR(r1)
+ mtspr SPRN_PPR,r10
+END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
+
+BEGIN_FTR_SECTION
+ stdcx. r0,0,r1 /* to clear the reservation */
+FTR_SECTION_ELSE
+ ldarx r0,0,r1
+ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
+
+ ld r3,_CCR(r1)
+ ld r4,_LINK(r1)
+ ld r5,_CTR(r1)
+ ld r6,_XER(r1)
+ li r0,0
+
+ REST_GPRS(7, 13, r1)
+
+ mtcr r3
+ mtlr r4
+ mtctr r5
+ mtspr SPRN_XER,r6
+
+ REST_GPRS(2, 6, r1)
+ REST_GPR(0, r1)
+ REST_GPR(1, r1)
+ .ifc \srr,srr
+ RFI_TO_USER
+ .else
+ HRFI_TO_USER
+ .endif
+ b . /* prevent speculative execution */
+.Linterrupt_return_\srr\()_user_rst_end:
+
+.Lrestore_nvgprs_\srr\():
+ REST_NVGPRS(r1)
+ b .Lrestore_nvgprs_\srr\()_cont
+
+#ifdef CONFIG_PPC_BOOK3S
+interrupt_return_\srr\()_user_restart:
+_ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_user_restart)
+ GET_PACA(r13)
+ ld r1,PACA_EXIT_SAVE_R1(r13)
+ LOAD_PACA_TOC()
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ li r11,IRQS_ALL_DISABLED
+ stb r11,PACAIRQSOFTMASK(r13)
+ bl interrupt_exit_user_restart
+ std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
+ b .Linterrupt_return_\srr\()_user_rst_start
+1:
+
+SOFT_MASK_TABLE(.Linterrupt_return_\srr\()_user_rst_start, 1b)
+RESTART_TABLE(.Linterrupt_return_\srr\()_user_rst_start, .Linterrupt_return_\srr\()_user_rst_end, interrupt_return_\srr\()_user_restart)
+#endif
+
+ .balign IFETCH_ALIGN_BYTES
+interrupt_return_\srr\()_kernel:
+_ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_kernel)
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ bl interrupt_exit_kernel_prepare
+
+ std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
+.Linterrupt_return_\srr\()_kernel_rst_start:
+ ld r11,SOFTE(r1)
+ cmpwi r11,IRQS_ENABLED
+ stb r11,PACAIRQSOFTMASK(r13)
+ beq .Linterrupt_return_\srr\()_soft_enabled
+
+ /*
+ * Returning to soft-disabled context.
+ * Check if a MUST_HARD_MASK interrupt has become pending, in which
+ * case we need to disable MSR[EE] in the return context.
+ *
+ * The MSR[EE] check catches among other things the short incoherency
+ * in hard_irq_disable() between clearing MSR[EE] and setting
+ * PACA_IRQ_HARD_DIS.
+ */
+ ld r12,_MSR(r1)
+ andi. r10,r12,MSR_EE
+ beq .Lfast_kernel_interrupt_return_\srr\() // EE already disabled
+ lbz r11,PACAIRQHAPPENED(r13)
+ andi. r10,r11,PACA_IRQ_MUST_HARD_MASK
+ bne 1f // HARD_MASK is pending
+ // No HARD_MASK pending, clear possible HARD_DIS set by interrupt
+ andi. r11,r11,(~PACA_IRQ_HARD_DIS)@l
+ stb r11,PACAIRQHAPPENED(r13)
+ b .Lfast_kernel_interrupt_return_\srr\()
+
+
+1: /* Must clear MSR_EE from _MSR */
+#ifdef CONFIG_PPC_BOOK3S
+ li r10,0
+ /* Clear valid before changing _MSR */
+ .ifc \srr,srr
+ stb r10,PACASRR_VALID(r13)
+ .else
+ stb r10,PACAHSRR_VALID(r13)
+ .endif
+#endif
+ xori r12,r12,MSR_EE
+ std r12,_MSR(r1)
+ b .Lfast_kernel_interrupt_return_\srr\()
+
+.Linterrupt_return_\srr\()_soft_enabled:
+ /*
+ * In the soft-enabled case, need to double-check that we have no
+ * pending interrupts that might have come in before we reached the
+ * restart section of code, and restart the exit so those can be
+ * handled.
+ *
+ * If there are none, it is possible that the interrupt still
+ * has PACA_IRQ_HARD_DIS set, which needs to be cleared for the
+ * interrupted context. This clear will not clobber a new pending
+ * interrupt coming in, because we're in the restart section, so
+ * such would return to the restart location.
+ */
+#ifdef CONFIG_PPC_BOOK3S
+ lbz r11,PACAIRQHAPPENED(r13)
+ andi. r11,r11,(~PACA_IRQ_HARD_DIS)@l
+ bne- interrupt_return_\srr\()_kernel_restart
+#endif
+ li r11,0
+ stb r11,PACAIRQHAPPENED(r13) // clear the possible HARD_DIS
+
+.Lfast_kernel_interrupt_return_\srr\():
+ cmpdi cr1,r3,0
+#ifdef CONFIG_PPC_BOOK3S
+ .ifc \srr,srr
+ lbz r4,PACASRR_VALID(r13)
+ .else
+ lbz r4,PACAHSRR_VALID(r13)
+ .endif
+ cmpdi r4,0
+ li r4,0
+ bne 1f
+#endif
+ ld r11,_NIP(r1)
+ ld r12,_MSR(r1)
+ .ifc \srr,srr
+ mtspr SPRN_SRR0,r11
+ mtspr SPRN_SRR1,r12
+1:
+#ifdef CONFIG_PPC_BOOK3S
+ stb r4,PACASRR_VALID(r13)
+#endif
+ .else
+ mtspr SPRN_HSRR0,r11
+ mtspr SPRN_HSRR1,r12
+1:
+#ifdef CONFIG_PPC_BOOK3S
+ stb r4,PACAHSRR_VALID(r13)
+#endif
+ .endif
+ DEBUG_SRR_VALID \srr
+
+BEGIN_FTR_SECTION
+ stdcx. r0,0,r1 /* to clear the reservation */
+FTR_SECTION_ELSE
+ ldarx r0,0,r1
+ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
+
+ ld r3,_LINK(r1)
+ ld r4,_CTR(r1)
+ ld r5,_XER(r1)
+ ld r6,_CCR(r1)
+ li r0,0
+
+ REST_GPRS(7, 12, r1)
+
+ mtlr r3
+ mtctr r4
+ mtspr SPRN_XER,r5
+
+ /*
+ * Leaving a stale STACK_FRAME_REGS_MARKER on the stack can confuse
+ * the reliable stack unwinder later on. Clear it.
+ */
+ std r0,STACK_FRAME_OVERHEAD-16(r1)
+
+ REST_GPRS(2, 5, r1)
+
+ bne- cr1,1f /* emulate stack store */
+ mtcr r6
+ REST_GPR(6, r1)
+ REST_GPR(0, r1)
+ REST_GPR(1, r1)
+ .ifc \srr,srr
+ RFI_TO_KERNEL
+ .else
+ HRFI_TO_KERNEL
+ .endif
+ b . /* prevent speculative execution */
+
+1: /*
+ * Emulate stack store with update. New r1 value was already calculated
+ * and updated in our interrupt regs by emulate_loadstore, but we can't
+ * store the previous value of r1 to the stack before re-loading our
+ * registers from it, otherwise they could be clobbered. Use
+ * PACA_EXGEN as temporary storage to hold the store data, as
+ * interrupts are disabled here so it won't be clobbered.
+ */
+ mtcr r6
+ std r9,PACA_EXGEN+0(r13)
+ addi r9,r1,INT_FRAME_SIZE /* get original r1 */
+ REST_GPR(6, r1)
+ REST_GPR(0, r1)
+ REST_GPR(1, r1)
+ std r9,0(r1) /* perform store component of stdu */
+ ld r9,PACA_EXGEN+0(r13)
+
+ .ifc \srr,srr
+ RFI_TO_KERNEL
+ .else
+ HRFI_TO_KERNEL
+ .endif
+ b . /* prevent speculative execution */
+.Linterrupt_return_\srr\()_kernel_rst_end:
+
+#ifdef CONFIG_PPC_BOOK3S
+interrupt_return_\srr\()_kernel_restart:
+_ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_kernel_restart)
+ GET_PACA(r13)
+ ld r1,PACA_EXIT_SAVE_R1(r13)
+ LOAD_PACA_TOC()
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ li r11,IRQS_ALL_DISABLED
+ stb r11,PACAIRQSOFTMASK(r13)
+ bl interrupt_exit_kernel_restart
+ std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
+ b .Linterrupt_return_\srr\()_kernel_rst_start
+1:
+
+SOFT_MASK_TABLE(.Linterrupt_return_\srr\()_kernel_rst_start, 1b)
+RESTART_TABLE(.Linterrupt_return_\srr\()_kernel_rst_start, .Linterrupt_return_\srr\()_kernel_rst_end, interrupt_return_\srr\()_kernel_restart)
+#endif
+
+.endm
+
+interrupt_return_macro srr
+#ifdef CONFIG_PPC_BOOK3S
+interrupt_return_macro hsrr
+
+ .globl __end_soft_masked
+__end_soft_masked:
+DEFINE_FIXED_SYMBOL(__end_soft_masked, text)
+#endif /* CONFIG_PPC_BOOK3S */
+
+#ifdef CONFIG_PPC_BOOK3S
+_GLOBAL(ret_from_fork_scv)
+ bl schedule_tail
+ REST_NVGPRS(r1)
+ li r3,0 /* fork() return value */
+ b .Lsyscall_vectored_common_exit
+#endif
+
+_GLOBAL(ret_from_fork)
+ bl schedule_tail
+ REST_NVGPRS(r1)
+ li r3,0 /* fork() return value */
+ b .Lsyscall_exit
+
+_GLOBAL(ret_from_kernel_thread)
+ bl schedule_tail
+ REST_NVGPRS(r1)
+ mtctr r14
+ mr r3,r15
+#ifdef CONFIG_PPC64_ELF_ABI_V2
+ mr r12,r14
+#endif
+ bctrl
+ li r3,0
+ b .Lsyscall_exit
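A detail that recurs throughout the assembly above is the SRR caching guarded by PACASRR_VALID/PACAHSRR_VALID: when the (H)SRR pair still holds the NIP/MSR that entry saved, the mtspr pair on the way out is skipped, and CONFIG_PPC_RFI_SRR_DEBUG re-verifies that assumption via DEBUG_SRR_VALID. The standalone C sketch below models that decision only; the struct and field names are illustrative, not kernel definitions, and the real code also covers the HSRR variant and NMI clobbering.

#include <stdbool.h>
#include <stdio.h>

/* Toy model of the cached-SRR return decision; not kernel code. */
struct cpu_state {
	unsigned long srr0, srr1;   /* models SPRN_SRR0 / SPRN_SRR1 */
	bool srr_valid;             /* models paca->srr_valid */
};

struct frame {
	unsigned long nip, msr;     /* models _NIP(r1) / _MSR(r1) */
};

static void return_from_interrupt(struct cpu_state *cpu, const struct frame *f)
{
	if (!cpu->srr_valid) {
		/* SRRs no longer hold the entry values: reload from the frame */
		cpu->srr0 = f->nip;
		cpu->srr1 = f->msr;
	}
	cpu->srr_valid = false;     /* about to be consumed by rfid */

	/* DEBUG_SRR_VALID-style sanity check against the interrupt frame */
	if (cpu->srr0 != f->nip || cpu->srr1 != f->msr)
		printf("WARN: SRR0/SRR1 do not match the interrupt frame\n");

	printf("rfid to 0x%lx with MSR 0x%lx\n", cpu->srr0, cpu->srr1);
}

int main(void)
{
	struct cpu_state cpu = { .srr0 = 0, .srr1 = 0, .srr_valid = false };
	struct frame f = { .nip = 0x10001234, .msr = 0x1033 };

	return_from_interrupt(&cpu, &f);
	return 0;
}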
diff --git a/arch/powerpc/kernel/io-workarounds.c b/arch/powerpc/kernel/io-workarounds.c
index 0276bc8c8969..c877f074d174 100644
--- a/arch/powerpc/kernel/io-workarounds.c
+++ b/arch/powerpc/kernel/io-workarounds.c
@@ -10,10 +10,10 @@
#include <linux/kernel.h>
#include <linux/sched/mm.h> /* for init_mm */
+#include <linux/pgtable.h>
#include <asm/io.h>
#include <asm/machdep.h>
-#include <asm/pgtable.h>
#include <asm/ppc-pci.h>
#include <asm/io-workarounds.h>
#include <asm/pte-walk.h>
@@ -55,7 +55,6 @@ static struct iowa_bus *iowa_pci_find(unsigned long vaddr, unsigned long paddr)
#ifdef CONFIG_PPC_INDIRECT_MMIO
struct iowa_bus *iowa_mem_find_bus(const PCI_IO_ADDR addr)
{
- unsigned hugepage_shift;
struct iowa_bus *bus;
int token;
@@ -65,22 +64,13 @@ struct iowa_bus *iowa_mem_find_bus(const PCI_IO_ADDR addr)
bus = &iowa_busses[token - 1];
else {
unsigned long vaddr, paddr;
- pte_t *ptep;
vaddr = (unsigned long)PCI_FIX_ADDR(addr);
if (vaddr < PHB_IO_BASE || vaddr >= PHB_IO_END)
return NULL;
- /*
- * We won't find huge pages here (iomem). Also can't hit
- * a page table free due to init_mm
- */
- ptep = find_init_mm_pte(vaddr, &hugepage_shift);
- if (ptep == NULL)
- paddr = 0;
- else {
- WARN_ON(hugepage_shift);
- paddr = pte_pfn(*ptep) << PAGE_SHIFT;
- }
+
+ paddr = ppc_find_vmap_phys(vaddr);
+
bus = iowa_pci_find(vaddr, paddr);
if (bus == NULL)
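The open-coded pte walk deleted by this hunk is folded into ppc_find_vmap_phys(), so iowa_mem_find_bus() no longer duplicates the init_mm lookup. For reference, here is a sketch of that lookup reconstructed from the removed lines; vmap_phys_lookup is a hypothetical name, and the shared ppc_find_vmap_phys() helper may treat huge mappings differently.

/* Sketch reconstructed from the removed code -- not the shared helper itself. */
static unsigned long vmap_phys_lookup(unsigned long vaddr)
{
	unsigned int hugepage_shift;
	pte_t *ptep;

	/*
	 * iomem mappings live in init_mm, so there are no huge pages here
	 * and no risk of a concurrent page table free during the walk.
	 */
	ptep = find_init_mm_pte(vaddr, &hugepage_shift);
	if (!ptep)
		return 0;

	WARN_ON(hugepage_shift);

	return pte_pfn(*ptep) << PAGE_SHIFT;
}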
diff --git a/arch/powerpc/kernel/iomap.c b/arch/powerpc/kernel/iomap.c
index 5ac84efc6ede..72862a4d3a5d 100644
--- a/arch/powerpc/kernel/iomap.c
+++ b/arch/powerpc/kernel/iomap.c
@@ -11,177 +11,11 @@
#include <asm/pci-bridge.h>
#include <asm/isa-bridge.h>
-/*
- * Here comes the ppc64 implementation of the IOMAP
- * interfaces.
- */
-unsigned int ioread8(void __iomem *addr)
-{
- return readb(addr);
-}
-unsigned int ioread16(void __iomem *addr)
-{
- return readw(addr);
-}
-unsigned int ioread16be(void __iomem *addr)
-{
- return readw_be(addr);
-}
-unsigned int ioread32(void __iomem *addr)
-{
- return readl(addr);
-}
-unsigned int ioread32be(void __iomem *addr)
-{
- return readl_be(addr);
-}
-EXPORT_SYMBOL(ioread8);
-EXPORT_SYMBOL(ioread16);
-EXPORT_SYMBOL(ioread16be);
-EXPORT_SYMBOL(ioread32);
-EXPORT_SYMBOL(ioread32be);
-#ifdef __powerpc64__
-u64 ioread64(void __iomem *addr)
-{
- return readq(addr);
-}
-u64 ioread64_lo_hi(void __iomem *addr)
-{
- return readq(addr);
-}
-u64 ioread64_hi_lo(void __iomem *addr)
-{
- return readq(addr);
-}
-u64 ioread64be(void __iomem *addr)
-{
- return readq_be(addr);
-}
-u64 ioread64be_lo_hi(void __iomem *addr)
-{
- return readq_be(addr);
-}
-u64 ioread64be_hi_lo(void __iomem *addr)
-{
- return readq_be(addr);
-}
-EXPORT_SYMBOL(ioread64);
-EXPORT_SYMBOL(ioread64_lo_hi);
-EXPORT_SYMBOL(ioread64_hi_lo);
-EXPORT_SYMBOL(ioread64be);
-EXPORT_SYMBOL(ioread64be_lo_hi);
-EXPORT_SYMBOL(ioread64be_hi_lo);
-#endif /* __powerpc64__ */
-
-void iowrite8(u8 val, void __iomem *addr)
-{
- writeb(val, addr);
-}
-void iowrite16(u16 val, void __iomem *addr)
-{
- writew(val, addr);
-}
-void iowrite16be(u16 val, void __iomem *addr)
-{
- writew_be(val, addr);
-}
-void iowrite32(u32 val, void __iomem *addr)
-{
- writel(val, addr);
-}
-void iowrite32be(u32 val, void __iomem *addr)
-{
- writel_be(val, addr);
-}
-EXPORT_SYMBOL(iowrite8);
-EXPORT_SYMBOL(iowrite16);
-EXPORT_SYMBOL(iowrite16be);
-EXPORT_SYMBOL(iowrite32);
-EXPORT_SYMBOL(iowrite32be);
-#ifdef __powerpc64__
-void iowrite64(u64 val, void __iomem *addr)
-{
- writeq(val, addr);
-}
-void iowrite64_lo_hi(u64 val, void __iomem *addr)
-{
- writeq(val, addr);
-}
-void iowrite64_hi_lo(u64 val, void __iomem *addr)
-{
- writeq(val, addr);
-}
-void iowrite64be(u64 val, void __iomem *addr)
-{
- writeq_be(val, addr);
-}
-void iowrite64be_lo_hi(u64 val, void __iomem *addr)
-{
- writeq_be(val, addr);
-}
-void iowrite64be_hi_lo(u64 val, void __iomem *addr)
-{
- writeq_be(val, addr);
-}
-EXPORT_SYMBOL(iowrite64);
-EXPORT_SYMBOL(iowrite64_lo_hi);
-EXPORT_SYMBOL(iowrite64_hi_lo);
-EXPORT_SYMBOL(iowrite64be);
-EXPORT_SYMBOL(iowrite64be_lo_hi);
-EXPORT_SYMBOL(iowrite64be_hi_lo);
-#endif /* __powerpc64__ */
-
-/*
- * These are the "repeat read/write" functions. Note the
- * non-CPU byte order. We do things in "IO byteorder"
- * here.
- *
- * FIXME! We could make these do EEH handling if we really
- * wanted. Not clear if we do.
- */
-void ioread8_rep(void __iomem *addr, void *dst, unsigned long count)
-{
- readsb(addr, dst, count);
-}
-void ioread16_rep(void __iomem *addr, void *dst, unsigned long count)
-{
- readsw(addr, dst, count);
-}
-void ioread32_rep(void __iomem *addr, void *dst, unsigned long count)
-{
- readsl(addr, dst, count);
-}
-EXPORT_SYMBOL(ioread8_rep);
-EXPORT_SYMBOL(ioread16_rep);
-EXPORT_SYMBOL(ioread32_rep);
-
-void iowrite8_rep(void __iomem *addr, const void *src, unsigned long count)
-{
- writesb(addr, src, count);
-}
-void iowrite16_rep(void __iomem *addr, const void *src, unsigned long count)
-{
- writesw(addr, src, count);
-}
-void iowrite32_rep(void __iomem *addr, const void *src, unsigned long count)
-{
- writesl(addr, src, count);
-}
-EXPORT_SYMBOL(iowrite8_rep);
-EXPORT_SYMBOL(iowrite16_rep);
-EXPORT_SYMBOL(iowrite32_rep);
-
void __iomem *ioport_map(unsigned long port, unsigned int len)
{
return (void __iomem *) (port + _IO_BASE);
}
-
-void ioport_unmap(void __iomem *addr)
-{
- /* Nothing to do */
-}
EXPORT_SYMBOL(ioport_map);
-EXPORT_SYMBOL(ioport_unmap);
#ifdef CONFIG_PCI
void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index 9704f3f76e63..caebe1431596 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -25,8 +25,8 @@
#include <linux/pci.h>
#include <linux/iommu.h>
#include <linux/sched.h>
+#include <linux/debugfs.h>
#include <asm/io.h>
-#include <asm/prom.h>
#include <asm/iommu.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
@@ -38,6 +38,46 @@
#define DBG(...)
+#ifdef CONFIG_IOMMU_DEBUGFS
+static int iommu_debugfs_weight_get(void *data, u64 *val)
+{
+ struct iommu_table *tbl = data;
+ *val = bitmap_weight(tbl->it_map, tbl->it_size);
+ return 0;
+}
+DEFINE_DEBUGFS_ATTRIBUTE(iommu_debugfs_fops_weight, iommu_debugfs_weight_get, NULL, "%llu\n");
+
+static void iommu_debugfs_add(struct iommu_table *tbl)
+{
+ char name[10];
+ struct dentry *liobn_entry;
+
+ sprintf(name, "%08lx", tbl->it_index);
+ liobn_entry = debugfs_create_dir(name, iommu_debugfs_dir);
+
+ debugfs_create_file_unsafe("weight", 0400, liobn_entry, tbl, &iommu_debugfs_fops_weight);
+ debugfs_create_ulong("it_size", 0400, liobn_entry, &tbl->it_size);
+ debugfs_create_ulong("it_page_shift", 0400, liobn_entry, &tbl->it_page_shift);
+ debugfs_create_ulong("it_reserved_start", 0400, liobn_entry, &tbl->it_reserved_start);
+ debugfs_create_ulong("it_reserved_end", 0400, liobn_entry, &tbl->it_reserved_end);
+ debugfs_create_ulong("it_indirect_levels", 0400, liobn_entry, &tbl->it_indirect_levels);
+ debugfs_create_ulong("it_level_size", 0400, liobn_entry, &tbl->it_level_size);
+}
+
+static void iommu_debugfs_del(struct iommu_table *tbl)
+{
+ char name[10];
+ struct dentry *liobn_entry;
+
+ sprintf(name, "%08lx", tbl->it_index);
+ liobn_entry = debugfs_lookup(name, iommu_debugfs_dir);
+ debugfs_remove(liobn_entry);
+}
+#else
+static void iommu_debugfs_add(struct iommu_table *tbl){}
+static void iommu_debugfs_del(struct iommu_table *tbl){}
+#endif
+
static int novmerge;
static void __iommu_free(struct iommu_table *, dma_addr_t, unsigned int);
@@ -172,7 +212,6 @@ static unsigned long iommu_range_alloc(struct device *dev,
int largealloc = npages > 15;
int pass = 0;
unsigned long align_mask;
- unsigned long boundary_size;
unsigned long flags;
unsigned int pool_nr;
struct iommu_pool *pool;
@@ -236,15 +275,9 @@ again:
}
}
- if (dev)
- boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
- 1 << tbl->it_page_shift);
- else
- boundary_size = ALIGN(1UL << 32, 1 << tbl->it_page_shift);
- /* 4GB boundary for iseries_hv_alloc and iseries_hv_map */
-
n = iommu_area_alloc(tbl->it_map, limit, start, npages, tbl->it_offset,
- boundary_size >> tbl->it_page_shift, align_mask);
+ dma_get_seg_boundary_nr_pages(dev, tbl->it_page_shift),
+ align_mask);
if (n == -1) {
if (likely(pass == 0)) {
/* First try the pool from the start */
@@ -262,6 +295,15 @@ again:
pass++;
goto again;
+ } else if (pass == tbl->nr_pools + 1) {
+ /* Last resort: try largepool */
+ spin_unlock(&pool->lock);
+ pool = &tbl->large_pool;
+ spin_lock(&pool->lock);
+ pool->hint = pool->start;
+ pass++;
+ goto again;
+
} else {
/* Give up */
spin_unlock_irqrestore(&(pool->lock), flags);
@@ -430,7 +472,7 @@ int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl,
BUG_ON(direction == DMA_NONE);
if ((nelems == 0) || !tbl)
- return 0;
+ return -EINVAL;
outs = s = segstart = &sglist[0];
outcount = 1;
@@ -532,7 +574,6 @@ int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl,
*/
if (outcount < incount) {
outs = sg_next(outs);
- outs->dma_address = DMA_MAPPING_ERROR;
outs->dma_length = 0;
}
@@ -550,13 +591,12 @@ int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl,
npages = iommu_num_pages(s->dma_address, s->dma_length,
IOMMU_PAGE_SIZE(tbl));
__iommu_free(tbl, vaddr, npages);
- s->dma_address = DMA_MAPPING_ERROR;
s->dma_length = 0;
}
if (s == outs)
break;
}
- return 0;
+ return -EIO;
}
@@ -647,32 +687,24 @@ static void iommu_table_reserve_pages(struct iommu_table *tbl,
if (tbl->it_offset == 0)
set_bit(0, tbl->it_map);
- tbl->it_reserved_start = res_start;
- tbl->it_reserved_end = res_end;
-
- /* Check if res_start..res_end isn't empty and overlaps the table */
- if (res_start && res_end &&
- (tbl->it_offset + tbl->it_size < res_start ||
- res_end < tbl->it_offset))
- return;
+ if (res_start < tbl->it_offset)
+ res_start = tbl->it_offset;
- for (i = tbl->it_reserved_start; i < tbl->it_reserved_end; ++i)
- set_bit(i - tbl->it_offset, tbl->it_map);
-}
+ if (res_end > (tbl->it_offset + tbl->it_size))
+ res_end = tbl->it_offset + tbl->it_size;
-static void iommu_table_release_pages(struct iommu_table *tbl)
-{
- int i;
+ /* Check if res_start..res_end is a valid range in the table */
+ if (res_start >= res_end) {
+ tbl->it_reserved_start = tbl->it_offset;
+ tbl->it_reserved_end = tbl->it_offset;
+ return;
+ }
- /*
- * In case we have reserved the first bit, we should not emit
- * the warning below.
- */
- if (tbl->it_offset == 0)
- clear_bit(0, tbl->it_map);
+ tbl->it_reserved_start = res_start;
+ tbl->it_reserved_end = res_end;
for (i = tbl->it_reserved_start; i < tbl->it_reserved_end; ++i)
- clear_bit(i - tbl->it_offset, tbl->it_map);
+ set_bit(i - tbl->it_offset, tbl->it_map);
}
/*
@@ -684,7 +716,6 @@ struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid,
{
unsigned long sz;
static int welcomed = 0;
- struct page *page;
unsigned int i;
struct iommu_pool *p;
@@ -693,11 +724,11 @@ struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid,
/* number of bytes needed for the bitmap */
sz = BITS_TO_LONGS(tbl->it_size) * sizeof(unsigned long);
- page = alloc_pages_node(nid, GFP_KERNEL, get_order(sz));
- if (!page)
- panic("iommu_init_table: Can't allocate %ld bytes\n", sz);
- tbl->it_map = page_address(page);
- memset(tbl->it_map, 0, sz);
+ tbl->it_map = vzalloc_node(sz, nid);
+ if (!tbl->it_map) {
+ pr_err("%s: Can't allocate %ld bytes\n", __func__, sz);
+ return NULL;
+ }
iommu_table_reserve_pages(tbl, res_start, res_end);
@@ -732,13 +763,34 @@ struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid,
welcomed = 1;
}
+ iommu_debugfs_add(tbl);
+
return tbl;
}
+bool iommu_table_in_use(struct iommu_table *tbl)
+{
+ unsigned long start = 0, end;
+
+ /* ignore reserved bit0 */
+ if (tbl->it_offset == 0)
+ start = 1;
+
+ /* Simple case with no reserved MMIO32 region */
+ if (!tbl->it_reserved_start && !tbl->it_reserved_end)
+ return find_next_bit(tbl->it_map, tbl->it_size, start) != tbl->it_size;
+
+ end = tbl->it_reserved_start - tbl->it_offset;
+ if (find_next_bit(tbl->it_map, end, start) != end)
+ return true;
+
+ start = tbl->it_reserved_end - tbl->it_offset;
+ end = tbl->it_size;
+ return find_next_bit(tbl->it_map, end, start) != end;
+}
+
static void iommu_table_free(struct kref *kref)
{
- unsigned long bitmap_sz;
- unsigned int order;
struct iommu_table *tbl;
tbl = container_of(kref, struct iommu_table, it_kref);
@@ -751,18 +803,14 @@ static void iommu_table_free(struct kref *kref)
return;
}
- iommu_table_release_pages(tbl);
+ iommu_debugfs_del(tbl);
/* verify that table contains no entries */
- if (!bitmap_empty(tbl->it_map, tbl->it_size))
+ if (iommu_table_in_use(tbl))
pr_warn("%s: Unexpected TCEs\n", __func__);
- /* calculate bitmap size in bytes */
- bitmap_sz = BITS_TO_LONGS(tbl->it_size) * sizeof(unsigned long);
-
/* free bitmap */
- order = get_order(bitmap_sz);
- free_pages((unsigned long) tbl->it_map, order);
+ vfree(tbl->it_map);
/* free table */
kfree(tbl);
@@ -1021,7 +1069,7 @@ extern long iommu_tce_xchg_no_kill(struct mm_struct *mm,
long ret;
unsigned long size = 0;
- ret = tbl->it_ops->xchg_no_kill(tbl, entry, hpa, direction, false);
+ ret = tbl->it_ops->xchg_no_kill(tbl, entry, hpa, direction);
if (!ret && ((*direction == DMA_FROM_DEVICE) ||
(*direction == DMA_BIDIRECTIONAL)) &&
!mm_iommu_is_devmem(mm, *hpa, tbl->it_page_shift,
@@ -1036,7 +1084,7 @@ void iommu_tce_kill(struct iommu_table *tbl,
unsigned long entry, unsigned long pages)
{
if (tbl->it_ops->tce_kill)
- tbl->it_ops->tce_kill(tbl, entry, pages, false);
+ tbl->it_ops->tce_kill(tbl, entry, pages);
}
EXPORT_SYMBOL_GPL(iommu_tce_kill);
@@ -1057,16 +1105,11 @@ int iommu_take_ownership(struct iommu_table *tbl)
spin_lock_irqsave(&tbl->large_pool.lock, flags);
for (i = 0; i < tbl->nr_pools; i++)
- spin_lock(&tbl->pools[i].lock);
-
- iommu_table_release_pages(tbl);
+ spin_lock_nest_lock(&tbl->pools[i].lock, &tbl->large_pool.lock);
- if (!bitmap_empty(tbl->it_map, tbl->it_size)) {
+ if (iommu_table_in_use(tbl)) {
pr_err("iommu_tce: it_map is not empty");
ret = -EBUSY;
- /* Undo iommu_table_release_pages, i.e. restore bit#0, etc */
- iommu_table_reserve_pages(tbl, tbl->it_reserved_start,
- tbl->it_reserved_end);
} else {
memset(tbl->it_map, 0xff, sz);
}
@@ -1085,7 +1128,7 @@ void iommu_release_ownership(struct iommu_table *tbl)
spin_lock_irqsave(&tbl->large_pool.lock, flags);
for (i = 0; i < tbl->nr_pools; i++)
- spin_lock(&tbl->pools[i].lock);
+ spin_lock_nest_lock(&tbl->pools[i].lock, &tbl->large_pool.lock);
memset(tbl->it_map, 0, sz);
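The new iommu_table_in_use() helper replaces bitmap_empty() so that the permanently reserved window (and the always-set bit 0 of a zero-offset table) no longer counts as an active mapping. The standalone C model below captures that occupancy check with a plain byte array instead of the kernel bitmap API; all names are illustrative.

#include <stdbool.h>
#include <stdio.h>

/* Toy model of the "is this IOMMU table in use" check; not kernel code. */
struct toy_table {
	unsigned long offset;        /* models it_offset            */
	unsigned long size;          /* models it_size (in entries) */
	unsigned long res_start;     /* models it_reserved_start    */
	unsigned long res_end;       /* models it_reserved_end      */
	unsigned char map[64];       /* one byte per table entry    */
};

static bool toy_table_in_use(const struct toy_table *tbl)
{
	unsigned long i, start = 0;

	if (tbl->offset == 0)
		start = 1;               /* entry 0 stays reserved forever */

	for (i = start; i < tbl->size; i++) {
		unsigned long entry = tbl->offset + i;

		/* entries inside the reserved window never count as in use */
		if (entry >= tbl->res_start && entry < tbl->res_end)
			continue;
		if (tbl->map[i])
			return true;
	}
	return false;
}

int main(void)
{
	struct toy_table tbl = { .offset = 0, .size = 64,
				 .res_start = 16, .res_end = 32 };

	tbl.map[20] = 1;   /* inside the reserved window: still unused */
	printf("in use: %d\n", toy_table_in_use(&tbl));
	tbl.map[40] = 1;   /* a real mapping outside the window */
	printf("in use: %d\n", toy_table_in_use(&tbl));
	return 0;
}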
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 5c9b11878555..9ede61a5a469 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -51,26 +51,22 @@
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/vmalloc.h>
+#include <linux/pgtable.h>
+#include <linux/static_call.h>
#include <linux/uaccess.h>
+#include <asm/interrupt.h>
#include <asm/io.h>
-#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/cache.h>
-#include <asm/prom.h>
#include <asm/ptrace.h>
#include <asm/machdep.h>
#include <asm/udbg.h>
#include <asm/smp.h>
-#include <asm/livepatch.h>
-#include <asm/asm-prototypes.h>
#include <asm/hw_irq.h>
+#include <asm/softirq_stack.h>
+#include <asm/ppc_asm.h>
-#ifdef CONFIG_PPC64
-#include <asm/paca.h>
-#include <asm/firmware.h>
-#include <asm/lv1call.h>
-#endif
#define CREATE_TRACE_POINTS
#include <asm/trace.h>
#include <asm/cpu_has_feature.h>
@@ -87,417 +83,6 @@ u32 tau_interrupts(unsigned long cpu);
#endif
#endif /* CONFIG_PPC32 */
-#ifdef CONFIG_PPC64
-
-int distribute_irqs = 1;
-
-static inline notrace unsigned long get_irq_happened(void)
-{
- unsigned long happened;
-
- __asm__ __volatile__("lbz %0,%1(13)"
- : "=r" (happened) : "i" (offsetof(struct paca_struct, irq_happened)));
-
- return happened;
-}
-
-static inline notrace int decrementer_check_overflow(void)
-{
- u64 now = get_tb_or_rtc();
- u64 *next_tb = this_cpu_ptr(&decrementers_next_tb);
-
- return now >= *next_tb;
-}
-
-/* This is called whenever we are re-enabling interrupts
- * and returns either 0 (nothing to do) or 500/900/280/a00/e80 if
- * there's an EE, DEC or DBELL to generate.
- *
- * This is called in two contexts: From arch_local_irq_restore()
- * before soft-enabling interrupts, and from the exception exit
- * path when returning from an interrupt from a soft-disabled to
- * a soft enabled context. In both case we have interrupts hard
- * disabled.
- *
- * We take care of only clearing the bits we handled in the
- * PACA irq_happened field since we can only re-emit one at a
- * time and we don't want to "lose" one.
- */
-notrace unsigned int __check_irq_replay(void)
-{
- /*
- * We use local_paca rather than get_paca() to avoid all
- * the debug_smp_processor_id() business in this low level
- * function
- */
- unsigned char happened = local_paca->irq_happened;
-
- /*
- * We are responding to the next interrupt, so interrupt-off
- * latencies should be reset here.
- */
- trace_hardirqs_on();
- trace_hardirqs_off();
-
- /*
- * We are always hard disabled here, but PACA_IRQ_HARD_DIS may
- * not be set, which means interrupts have only just been hard
- * disabled as part of the local_irq_restore or interrupt return
- * code. In that case, skip the decrementr check becaus it's
- * expensive to read the TB.
- *
- * HARD_DIS then gets cleared here, but it's reconciled later.
- * Either local_irq_disable will replay the interrupt and that
- * will reconcile state like other hard interrupts. Or interrupt
- * retur will replay the interrupt and in that case it sets
- * PACA_IRQ_HARD_DIS by hand (see comments in entry_64.S).
- */
- if (happened & PACA_IRQ_HARD_DIS) {
- local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
-
- /*
- * We may have missed a decrementer interrupt if hard disabled.
- * Check the decrementer register in case we had a rollover
- * while hard disabled.
- */
- if (!(happened & PACA_IRQ_DEC)) {
- if (decrementer_check_overflow()) {
- local_paca->irq_happened |= PACA_IRQ_DEC;
- happened |= PACA_IRQ_DEC;
- }
- }
- }
-
- /*
- * Force the delivery of pending soft-disabled interrupts on PS3.
- * Any HV call will have this side effect.
- */
- if (firmware_has_feature(FW_FEATURE_PS3_LV1)) {
- u64 tmp, tmp2;
- lv1_get_version_info(&tmp, &tmp2);
- }
-
- /*
- * Check if an hypervisor Maintenance interrupt happened.
- * This is a higher priority interrupt than the others, so
- * replay it first.
- */
- if (happened & PACA_IRQ_HMI) {
- local_paca->irq_happened &= ~PACA_IRQ_HMI;
- return 0xe60;
- }
-
- if (happened & PACA_IRQ_DEC) {
- local_paca->irq_happened &= ~PACA_IRQ_DEC;
- return 0x900;
- }
-
- if (happened & PACA_IRQ_PMI) {
- local_paca->irq_happened &= ~PACA_IRQ_PMI;
- return 0xf00;
- }
-
- if (happened & PACA_IRQ_EE) {
- local_paca->irq_happened &= ~PACA_IRQ_EE;
- return 0x500;
- }
-
-#ifdef CONFIG_PPC_BOOK3E
- /*
- * Check if an EPR external interrupt happened this bit is typically
- * set if we need to handle another "edge" interrupt from within the
- * MPIC "EPR" handler.
- */
- if (happened & PACA_IRQ_EE_EDGE) {
- local_paca->irq_happened &= ~PACA_IRQ_EE_EDGE;
- return 0x500;
- }
-
- if (happened & PACA_IRQ_DBELL) {
- local_paca->irq_happened &= ~PACA_IRQ_DBELL;
- return 0x280;
- }
-#else
- if (happened & PACA_IRQ_DBELL) {
- local_paca->irq_happened &= ~PACA_IRQ_DBELL;
- return 0xa00;
- }
-#endif /* CONFIG_PPC_BOOK3E */
-
- /* There should be nothing left ! */
- BUG_ON(local_paca->irq_happened != 0);
-
- return 0;
-}
-
-notrace void arch_local_irq_restore(unsigned long mask)
-{
- unsigned char irq_happened;
- unsigned int replay;
-
- /* Write the new soft-enabled value */
- irq_soft_mask_set(mask);
- if (mask)
- return;
-
- /*
- * From this point onward, we can take interrupts, preempt,
- * etc... unless we got hard-disabled. We check if an event
- * happened. If none happened, we know we can just return.
- *
- * We may have preempted before the check below, in which case
- * we are checking the "new" CPU instead of the old one. This
- * is only a problem if an event happened on the "old" CPU.
- *
- * External interrupt events will have caused interrupts to
- * be hard-disabled, so there is no problem, we
- * cannot have preempted.
- */
- irq_happened = get_irq_happened();
- if (!irq_happened) {
-#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
- WARN_ON_ONCE(!(mfmsr() & MSR_EE));
-#endif
- return;
- }
-
- /*
- * We need to hard disable to get a trusted value from
- * __check_irq_replay(). We also need to soft-disable
- * again to avoid warnings in there due to the use of
- * per-cpu variables.
- */
- if (!(irq_happened & PACA_IRQ_HARD_DIS)) {
-#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
- WARN_ON_ONCE(!(mfmsr() & MSR_EE));
-#endif
- __hard_irq_disable();
-#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
- } else {
- /*
- * We should already be hard disabled here. We had bugs
- * where that wasn't the case so let's dbl check it and
- * warn if we are wrong. Only do that when IRQ tracing
- * is enabled as mfmsr() can be costly.
- */
- if (WARN_ON_ONCE(mfmsr() & MSR_EE))
- __hard_irq_disable();
-#endif
- }
-
- irq_soft_mask_set(IRQS_ALL_DISABLED);
- trace_hardirqs_off();
-
- /*
- * Check if anything needs to be re-emitted. We haven't
- * soft-enabled yet to avoid warnings in decrementer_check_overflow
- * accessing per-cpu variables
- */
- replay = __check_irq_replay();
-
- /* We can soft-enable now */
- trace_hardirqs_on();
- irq_soft_mask_set(IRQS_ENABLED);
-
- /*
- * And replay if we have to. This will return with interrupts
- * hard-enabled.
- */
- if (replay) {
- __replay_interrupt(replay);
- return;
- }
-
- /* Finally, let's ensure we are hard enabled */
- __hard_irq_enable();
-}
-EXPORT_SYMBOL(arch_local_irq_restore);
-
-/*
- * This is specifically called by assembly code to re-enable interrupts
- * if they are currently disabled. This is typically called before
- * schedule() or do_signal() when returning to userspace. We do it
- * in C to avoid the burden of dealing with lockdep etc...
- *
- * NOTE: This is called with interrupts hard disabled but not marked
- * as such in paca->irq_happened, so we need to resync this.
- */
-void notrace restore_interrupts(void)
-{
- if (irqs_disabled()) {
- local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
- local_irq_enable();
- } else
- __hard_irq_enable();
-}
-
-/*
- * This is a helper to use when about to go into idle low-power
- * when the latter has the side effect of re-enabling interrupts
- * (such as calling H_CEDE under pHyp).
- *
- * You call this function with interrupts soft-disabled (this is
- * already the case when ppc_md.power_save is called). The function
- * will return whether to enter power save or just return.
- *
- * In the former case, it will have notified lockdep of interrupts
- * being re-enabled and generally sanitized the lazy irq state,
- * and in the latter case it will leave with interrupts hard
- * disabled and marked as such, so the local_irq_enable() call
- * in arch_cpu_idle() will properly re-enable everything.
- */
-bool prep_irq_for_idle(void)
-{
- /*
- * First we need to hard disable to ensure no interrupt
- * occurs before we effectively enter the low power state
- */
- __hard_irq_disable();
- local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
-
- /*
- * If anything happened while we were soft-disabled,
- * we return now and do not enter the low power state.
- */
- if (lazy_irq_pending())
- return false;
-
- /* Tell lockdep we are about to re-enable */
- trace_hardirqs_on();
-
- /*
- * Mark interrupts as soft-enabled and clear the
- * PACA_IRQ_HARD_DIS from the pending mask since we
- * are about to hard enable as well as a side effect
- * of entering the low power state.
- */
- local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
- irq_soft_mask_set(IRQS_ENABLED);
-
- /* Tell the caller to enter the low power state */
- return true;
-}
-
-#ifdef CONFIG_PPC_BOOK3S
-/*
- * This is for idle sequences that return with IRQs off, but the
- * idle state itself wakes on interrupt. Tell the irq tracer that
- * IRQs are enabled for the duration of idle so it does not get long
- * off times. Must be paired with fini_irq_for_idle_irqsoff.
- */
-bool prep_irq_for_idle_irqsoff(void)
-{
- WARN_ON(!irqs_disabled());
-
- /*
- * First we need to hard disable to ensure no interrupt
- * occurs before we effectively enter the low power state
- */
- __hard_irq_disable();
- local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
-
- /*
- * If anything happened while we were soft-disabled,
- * we return now and do not enter the low power state.
- */
- if (lazy_irq_pending())
- return false;
-
- /* Tell lockdep we are about to re-enable */
- trace_hardirqs_on();
-
- return true;
-}
-
-/*
- * Take the SRR1 wakeup reason, index into this table to find the
- * appropriate irq_happened bit.
- *
- * Sytem reset exceptions taken in idle state also come through here,
- * but they are NMI interrupts so do not need to wait for IRQs to be
- * restored, and should be taken as early as practical. These are marked
- * with 0xff in the table. The Power ISA specifies 0100b as the system
- * reset interrupt reason.
- */
-#define IRQ_SYSTEM_RESET 0xff
-
-static const u8 srr1_to_lazyirq[0x10] = {
- 0, 0, 0,
- PACA_IRQ_DBELL,
- IRQ_SYSTEM_RESET,
- PACA_IRQ_DBELL,
- PACA_IRQ_DEC,
- 0,
- PACA_IRQ_EE,
- PACA_IRQ_EE,
- PACA_IRQ_HMI,
- 0, 0, 0, 0, 0 };
-
-void replay_system_reset(void)
-{
- struct pt_regs regs;
-
- ppc_save_regs(&regs);
- regs.trap = 0x100;
- get_paca()->in_nmi = 1;
- system_reset_exception(&regs);
- get_paca()->in_nmi = 0;
-}
-EXPORT_SYMBOL_GPL(replay_system_reset);
-
-void irq_set_pending_from_srr1(unsigned long srr1)
-{
- unsigned int idx = (srr1 & SRR1_WAKEMASK_P8) >> 18;
- u8 reason = srr1_to_lazyirq[idx];
-
- /*
- * Take the system reset now, which is immediately after registers
- * are restored from idle. It's an NMI, so interrupts need not be
- * re-enabled before it is taken.
- */
- if (unlikely(reason == IRQ_SYSTEM_RESET)) {
- replay_system_reset();
- return;
- }
-
- /*
- * The 0 index (SRR1[42:45]=b0000) must always evaluate to 0,
- * so this can be called unconditionally with the SRR1 wake
- * reason as returned by the idle code, which uses 0 to mean no
- * interrupt.
- *
- * If a future CPU was to designate this as an interrupt reason,
- * then a new index for no interrupt must be assigned.
- */
- local_paca->irq_happened |= reason;
-}
-#endif /* CONFIG_PPC_BOOK3S */
-
-/*
- * Force a replay of the external interrupt handler on this CPU.
- */
-void force_external_irq_replay(void)
-{
- /*
- * This must only be called with interrupts soft-disabled,
- * the replay will happen when re-enabling.
- */
- WARN_ON(!arch_irqs_disabled());
-
- /*
- * Interrupts must always be hard disabled before irq_happened is
- * modified (to prevent lost update in case of interrupt between
- * load and store).
- */
- __hard_irq_disable();
- local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
-
- /* Indicate in the PACA that we have an interrupt to replay */
- local_paca->irq_happened |= PACA_IRQ_EE;
-}
-
-#endif /* CONFIG_PPC64 */
-
int arch_show_interrupts(struct seq_file *p, int prec)
{
int j;
@@ -541,13 +126,14 @@ int arch_show_interrupts(struct seq_file *p, int prec)
seq_printf(p, "%10u ", per_cpu(irq_stat, j).mce_exceptions);
seq_printf(p, " Machine check exceptions\n");
+#ifdef CONFIG_PPC_BOOK3S_64
if (cpu_has_feature(CPU_FTR_HVMODE)) {
seq_printf(p, "%*s: ", prec, "HMI");
for_each_online_cpu(j)
- seq_printf(p, "%10u ",
- per_cpu(irq_stat, j).hmi_exceptions);
+ seq_printf(p, "%10u ", paca_ptrs[j]->hmi_irqs);
seq_printf(p, " Hypervisor Maintenance Interrupts\n");
}
+#endif
seq_printf(p, "%*s: ", prec, "NMI");
for_each_online_cpu(j)
@@ -585,7 +171,9 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
sum += per_cpu(irq_stat, cpu).mce_exceptions;
sum += per_cpu(irq_stat, cpu).spurious_irqs;
sum += per_cpu(irq_stat, cpu).timer_irqs_others;
- sum += per_cpu(irq_stat, cpu).hmi_exceptions;
+#ifdef CONFIG_PPC_BOOK3S_64
+ sum += paca_ptrs[cpu]->hmi_irqs;
+#endif
sum += per_cpu(irq_stat, cpu).sreset_irqs;
#ifdef CONFIG_PPC_WATCHDOG
sum += per_cpu(irq_stat, cpu).soft_nmi_irqs;
@@ -597,38 +185,61 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
return sum;
}
-static inline void check_stack_overflow(void)
+static inline void check_stack_overflow(unsigned long sp)
{
-#ifdef CONFIG_DEBUG_STACKOVERFLOW
- long sp;
+ if (!IS_ENABLED(CONFIG_DEBUG_STACKOVERFLOW))
+ return;
- sp = current_stack_pointer() & (THREAD_SIZE-1);
+ sp &= THREAD_SIZE - 1;
- /* check for stack overflow: is there less than 2KB free? */
- if (unlikely(sp < 2048)) {
+ /* check for stack overflow: is there less than 1/4th free? */
+ if (unlikely(sp < THREAD_SIZE / 4)) {
pr_err("do_IRQ: stack overflow: %ld\n", sp);
dump_stack();
}
-#endif
}
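For example, assuming 16 KiB kernel stacks (THREAD_SIZE = 16384), the warning now fires once less than 4 KiB of stack remains, rather than at the old fixed 2 KiB mark, so the threshold scales with whatever stack size the kernel is built with.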
-void __do_irq(struct pt_regs *regs)
+#ifdef CONFIG_SOFTIRQ_ON_OWN_STACK
+static __always_inline void call_do_softirq(const void *sp)
{
- unsigned int irq;
+ /* Temporarily switch r1 to sp, call __do_softirq() then restore r1. */
+ asm volatile (
+ PPC_STLU " %%r1, %[offset](%[sp]) ;"
+ "mr %%r1, %[sp] ;"
+ "bl %[callee] ;"
+ PPC_LL " %%r1, 0(%%r1) ;"
+ : // Outputs
+ : // Inputs
+ [sp] "b" (sp), [offset] "i" (THREAD_SIZE - STACK_FRAME_OVERHEAD),
+ [callee] "i" (__do_softirq)
+ : // Clobbers
+ "lr", "xer", "ctr", "memory", "cr0", "cr1", "cr5", "cr6",
+ "cr7", "r0", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10",
+ "r11", "r12"
+ );
+}
+#endif
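A C-level sketch of the switch performed by the asm above (illustration only; old_r1 here is a stand-in for the live stack pointer, which is exactly what real C code cannot manipulate, hence the asm):

	void *frame = sp + THREAD_SIZE - STACK_FRAME_OVERHEAD;

	*(unsigned long *)frame = old_r1;   /* PPC_STLU: save old r1 as the back-chain */
	old_r1 = (unsigned long)frame;      /* mr %r1, sp: now on the softirq stack    */
	__do_softirq();                     /* bl __do_softirq                         */
	old_r1 = *(unsigned long *)old_r1;  /* PPC_LL: restore the old r1 from 0(r1)   */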
- irq_enter();
+DEFINE_STATIC_CALL_RET0(ppc_get_irq, *ppc_md.get_irq);
+
+static void __do_irq(struct pt_regs *regs, unsigned long oldsp)
+{
+ unsigned int irq;
trace_irq_entry(regs);
+ check_stack_overflow(oldsp);
+
/*
* Query the platform PIC for the interrupt & ack it.
*
* This will typically lower the interrupt line to the CPU
*/
- irq = ppc_md.get_irq();
+ irq = static_call(ppc_get_irq)();
/* We can hard enable interrupts now to allow perf interrupts */
- may_hard_irq_enable();
+ if (should_hard_irq_enable())
+ do_hard_irq_enable();
/* And finally process it */
if (unlikely(!irq))
@@ -637,39 +248,59 @@ void __do_irq(struct pt_regs *regs)
generic_handle_irq(irq);
trace_irq_exit(regs);
+}
- irq_exit();
+static __always_inline void call_do_irq(struct pt_regs *regs, void *sp)
+{
+ register unsigned long r3 asm("r3") = (unsigned long)regs;
+
+ /* Temporarily switch r1 to sp, call __do_irq() then restore r1. */
+ asm volatile (
+ PPC_STLU " %%r1, %[offset](%[sp]) ;"
+ "mr %%r4, %%r1 ;"
+ "mr %%r1, %[sp] ;"
+ "bl %[callee] ;"
+ PPC_LL " %%r1, 0(%%r1) ;"
+ : // Outputs
+ "+r" (r3)
+ : // Inputs
+ [sp] "b" (sp), [offset] "i" (THREAD_SIZE - STACK_FRAME_OVERHEAD),
+ [callee] "i" (__do_irq)
+ : // Clobbers
+ "lr", "xer", "ctr", "memory", "cr0", "cr1", "cr5", "cr6",
+ "cr7", "r0", "r4", "r5", "r6", "r7", "r8", "r9", "r10",
+ "r11", "r12"
+ );
}
-void do_IRQ(struct pt_regs *regs)
+void __do_IRQ(struct pt_regs *regs)
{
struct pt_regs *old_regs = set_irq_regs(regs);
void *cursp, *irqsp, *sirqsp;
/* Switch to the irq stack to handle this */
- cursp = (void *)(current_stack_pointer() & ~(THREAD_SIZE - 1));
+ cursp = (void *)(current_stack_pointer & ~(THREAD_SIZE - 1));
irqsp = hardirq_ctx[raw_smp_processor_id()];
sirqsp = softirq_ctx[raw_smp_processor_id()];
- check_stack_overflow();
-
- /* Already there ? */
- if (unlikely(cursp == irqsp || cursp == sirqsp)) {
- __do_irq(regs);
- set_irq_regs(old_regs);
- return;
- }
- /* Switch stack and call */
- call_do_irq(regs, irqsp);
+ /* Already there ? If not switch stack and call */
+ if (unlikely(cursp == irqsp || cursp == sirqsp))
+ __do_irq(regs, current_stack_pointer);
+ else
+ call_do_irq(regs, irqsp);
set_irq_regs(old_regs);
}
+DEFINE_INTERRUPT_HANDLER_ASYNC(do_IRQ)
+{
+ __do_IRQ(regs);
+}
+
static void *__init alloc_vm_stack(void)
{
- return __vmalloc_node_range(THREAD_SIZE, THREAD_ALIGN, VMALLOC_START,
- VMALLOC_END, THREADINFO_GFP, PAGE_KERNEL,
- 0, NUMA_NO_NODE, (void*)_RET_IP_);
+ return __vmalloc_node(THREAD_SIZE, THREAD_ALIGN, THREADINFO_GFP,
+ NUMA_NO_NODE, (void *)_RET_IP_);
}
static void __init vmap_irqstack_init(void)
@@ -690,9 +321,12 @@ void __init init_IRQ(void)
if (ppc_md.init_IRQ)
ppc_md.init_IRQ();
+
+ if (!WARN_ON(!ppc_md.get_irq))
+ static_call_update(ppc_get_irq, ppc_md.get_irq);
}
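Note that DEFINE_STATIC_CALL_RET0() gives ppc_get_irq a default target that simply returns 0 until static_call_update() installs the platform's ppc_md.get_irq here, and __do_irq() already treats an irq number of 0 as spurious, so the call site needs no separate NULL check for platforms that never register a handler.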
-#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
+#ifdef CONFIG_BOOKE_OR_40x
void *critirq_ctx[NR_CPUS] __read_mostly;
void *dbgirq_ctx[NR_CPUS] __read_mostly;
void *mcheckirq_ctx[NR_CPUS] __read_mostly;
@@ -701,10 +335,12 @@ void *mcheckirq_ctx[NR_CPUS] __read_mostly;
void *softirq_ctx[NR_CPUS] __read_mostly;
void *hardirq_ctx[NR_CPUS] __read_mostly;
+#ifdef CONFIG_SOFTIRQ_ON_OWN_STACK
void do_softirq_own_stack(void)
{
call_do_softirq(softirq_ctx[smp_processor_id()]);
}
+#endif
irq_hw_number_t virq_to_hw(unsigned int virq)
{
@@ -748,13 +384,3 @@ int irq_choose_cpu(const struct cpumask *mask)
return hard_smp_processor_id();
}
#endif
-
-#ifdef CONFIG_PPC64
-static int __init setup_noirqdistrib(char *str)
-{
- distribute_irqs = 0;
- return 1;
-}
-
-__setup("noirqdistrib", setup_noirqdistrib);
-#endif /* CONFIG_PPC64 */
diff --git a/arch/powerpc/kernel/irq_64.c b/arch/powerpc/kernel/irq_64.c
new file mode 100644
index 000000000000..eb2b380e52a0
--- /dev/null
+++ b/arch/powerpc/kernel/irq_64.c
@@ -0,0 +1,495 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Derived from arch/i386/kernel/irq.c
+ * Copyright (C) 1992 Linus Torvalds
+ * Adapted from arch/i386 by Gary Thomas
+ * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
+ * Updated and modified by Cort Dougan <cort@fsmlabs.com>
+ * Copyright (C) 1996-2001 Cort Dougan
+ * Adapted for Power Macintosh by Paul Mackerras
+ * Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au)
+ *
+ * This file contains the code used by various IRQ handling routines:
+ * asking for different IRQ's should be done through these routines
+ * instead of just grabbing them. Thus setups with different IRQ numbers
+ * shouldn't result in any weird surprises, and installing new handlers
+ * should be easier.
+ */
+
+#undef DEBUG
+
+#include <linux/export.h>
+#include <linux/threads.h>
+#include <linux/kernel_stat.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/ptrace.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/timex.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/irq.h>
+#include <linux/seq_file.h>
+#include <linux/cpumask.h>
+#include <linux/profile.h>
+#include <linux/bitops.h>
+#include <linux/list.h>
+#include <linux/radix-tree.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/debugfs.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/vmalloc.h>
+#include <linux/pgtable.h>
+#include <linux/static_call.h>
+
+#include <linux/uaccess.h>
+#include <asm/interrupt.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/cache.h>
+#include <asm/ptrace.h>
+#include <asm/machdep.h>
+#include <asm/udbg.h>
+#include <asm/smp.h>
+#include <asm/hw_irq.h>
+#include <asm/softirq_stack.h>
+#include <asm/ppc_asm.h>
+
+#include <asm/paca.h>
+#include <asm/firmware.h>
+#include <asm/lv1call.h>
+#include <asm/dbell.h>
+#include <asm/trace.h>
+#include <asm/cpu_has_feature.h>
+
+int distribute_irqs = 1;
+
+static inline void next_interrupt(struct pt_regs *regs)
+{
+ /*
+ * Softirq processing can enable/disable irqs, which will leave
+ * MSR[EE] enabled and the soft mask set to IRQS_DISABLED. Fix
+ * this up.
+ */
+ if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS))
+ hard_irq_disable();
+ else
+ irq_soft_mask_set(IRQS_ALL_DISABLED);
+
+ /*
+ * We are responding to the next interrupt, so interrupt-off
+ * latencies should be reset here.
+ */
+ trace_hardirqs_on();
+ trace_hardirqs_off();
+}
+
+static inline bool irq_happened_test_and_clear(u8 irq)
+{
+ if (local_paca->irq_happened & irq) {
+ local_paca->irq_happened &= ~irq;
+ return true;
+ }
+ return false;
+}
+
+void replay_soft_interrupts(void)
+{
+ struct pt_regs regs;
+
+ /*
+ * Be careful here: calling these interrupt handlers can cause
+ * softirqs to be raised, which may then run when irq_exit() is
+ * called, which will cause local_irq_enable() to be run, which can
+ * then recurse into this function. Don't keep any state across
+ * interrupt handler calls which may change underneath us.
+ *
+ * Softirqs cannot be disabled over replay to stop this recursion
+ * because interrupts taken in idle code may require RCU softirq
+ * to run in the irq RCU tracking context. This is a hard problem
+ * to fix without changes to the softirq or idle layer.
+ *
+ * We use local_paca rather than get_paca() to avoid all the
+ * debug_smp_processor_id() business in this low level function.
+ */
+
+ if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) {
+ WARN_ON_ONCE(mfmsr() & MSR_EE);
+ WARN_ON(!(local_paca->irq_happened & PACA_IRQ_HARD_DIS));
+ }
+
+ ppc_save_regs(&regs);
+ regs.softe = IRQS_ENABLED;
+ regs.msr |= MSR_EE;
+
+again:
+ /*
+ * Force the delivery of pending soft-disabled interrupts on PS3.
+ * Any HV call will have this side effect.
+ */
+ if (firmware_has_feature(FW_FEATURE_PS3_LV1)) {
+ u64 tmp, tmp2;
+ lv1_get_version_info(&tmp, &tmp2);
+ }
+
+ /*
+ * Check if a Hypervisor Maintenance Interrupt happened.
+ * This is a higher priority interrupt than the others, so
+ * replay it first.
+ */
+ if (IS_ENABLED(CONFIG_PPC_BOOK3S) &&
+ irq_happened_test_and_clear(PACA_IRQ_HMI)) {
+ regs.trap = INTERRUPT_HMI;
+ handle_hmi_exception(&regs);
+ next_interrupt(&regs);
+ }
+
+ if (irq_happened_test_and_clear(PACA_IRQ_DEC)) {
+ regs.trap = INTERRUPT_DECREMENTER;
+ timer_interrupt(&regs);
+ next_interrupt(&regs);
+ }
+
+ if (irq_happened_test_and_clear(PACA_IRQ_EE)) {
+ regs.trap = INTERRUPT_EXTERNAL;
+ do_IRQ(&regs);
+ next_interrupt(&regs);
+ }
+
+ if (IS_ENABLED(CONFIG_PPC_DOORBELL) &&
+ irq_happened_test_and_clear(PACA_IRQ_DBELL)) {
+ regs.trap = INTERRUPT_DOORBELL;
+ doorbell_exception(&regs);
+ next_interrupt(&regs);
+ }
+
+ /* Book3E does not support soft-masking PMI interrupts */
+ if (IS_ENABLED(CONFIG_PPC_BOOK3S) &&
+ irq_happened_test_and_clear(PACA_IRQ_PMI)) {
+ regs.trap = INTERRUPT_PERFMON;
+ performance_monitor_exception(&regs);
+ next_interrupt(&regs);
+ }
+
+ /*
+ * Softirq processing can enable and disable interrupts, which can
+ * result in new irqs becoming pending. Must keep looping until we
+ * have cleared out all pending interrupts.
+ */
+ if (local_paca->irq_happened & ~PACA_IRQ_HARD_DIS)
+ goto again;
+}
+
+#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_PPC_KUAP)
+static inline void replay_soft_interrupts_irqrestore(void)
+{
+ unsigned long kuap_state = get_kuap();
+
+ /*
+ * Check if anything calls local_irq_enable/restore() when KUAP is
+ * disabled (user access enabled). We handle that case here by saving
+ * and re-locking AMR but we shouldn't get here in the first place,
+ * hence the warning.
+ */
+ kuap_assert_locked();
+
+ if (kuap_state != AMR_KUAP_BLOCKED)
+ set_kuap(AMR_KUAP_BLOCKED);
+
+ replay_soft_interrupts();
+
+ if (kuap_state != AMR_KUAP_BLOCKED)
+ set_kuap(kuap_state);
+}
+#else
+#define replay_soft_interrupts_irqrestore() replay_soft_interrupts()
+#endif
+
+notrace void arch_local_irq_restore(unsigned long mask)
+{
+ unsigned char irq_happened;
+
+ /* Write the new soft-enabled value if it is a disable */
+ if (mask) {
+ irq_soft_mask_set(mask);
+ return;
+ }
+
+ if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
+ WARN_ON_ONCE(in_nmi() || in_hardirq());
+
+ /*
+ * After the stb, interrupts are unmasked and there are no interrupts
+ * pending replay. The restart sequence makes this atomic with
+ * respect to soft-masked interrupts. If this was just a simple code
+ * sequence, a soft-masked interrupt could become pending right after
+ * the comparison and before the stb.
+ *
+ * This allows interrupts to be unmasked without hard disabling, and
+ * also without new hard interrupts coming in ahead of pending ones.
+ */
+ asm_volatile_goto(
+"1: \n"
+" lbz 9,%0(13) \n"
+" cmpwi 9,0 \n"
+" bne %l[happened] \n"
+" stb 9,%1(13) \n"
+"2: \n"
+ RESTART_TABLE(1b, 2b, 1b)
+ : : "i" (offsetof(struct paca_struct, irq_happened)),
+ "i" (offsetof(struct paca_struct, irq_soft_mask))
+ : "cr0", "r9"
+ : happened);
+
+ if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
+ WARN_ON_ONCE(!(mfmsr() & MSR_EE));
+
+ return;
+
+happened:
+ irq_happened = READ_ONCE(local_paca->irq_happened);
+ if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
+ WARN_ON_ONCE(!irq_happened);
+
+ if (irq_happened == PACA_IRQ_HARD_DIS) {
+ if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
+ WARN_ON_ONCE(mfmsr() & MSR_EE);
+ irq_soft_mask_set(IRQS_ENABLED);
+ local_paca->irq_happened = 0;
+ __hard_irq_enable();
+ return;
+ }
+
+ /* Have interrupts to replay, need to hard disable first */
+ if (!(irq_happened & PACA_IRQ_HARD_DIS)) {
+ if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) {
+ if (!(mfmsr() & MSR_EE)) {
+ /*
+ * An interrupt could have come in and cleared
+ * MSR[EE] and set IRQ_HARD_DIS, so check
+ * IRQ_HARD_DIS again and warn if it is still
+ * clear.
+ */
+ irq_happened = READ_ONCE(local_paca->irq_happened);
+ WARN_ON_ONCE(!(irq_happened & PACA_IRQ_HARD_DIS));
+ }
+ }
+ __hard_irq_disable();
+ local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
+ } else {
+ if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) {
+ if (WARN_ON_ONCE(mfmsr() & MSR_EE))
+ __hard_irq_disable();
+ }
+ }
+
+ /*
+ * Disable preempt here, so that the below preempt_enable will
+ * perform resched if required (a replayed interrupt may set
+ * need_resched).
+ */
+ preempt_disable();
+ irq_soft_mask_set(IRQS_ALL_DISABLED);
+ trace_hardirqs_off();
+
+ replay_soft_interrupts_irqrestore();
+
+ trace_hardirqs_on();
+ irq_soft_mask_set(IRQS_ENABLED);
+ if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
+ WARN_ON(local_paca->irq_happened != PACA_IRQ_HARD_DIS);
+ local_paca->irq_happened = 0;
+ __hard_irq_enable();
+ preempt_enable();
+}
+EXPORT_SYMBOL(arch_local_irq_restore);
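For reference, a C-level sketch of what the restartable sequence above does (it cannot be written this way for real: without the restart table, a soft-masked interrupt could land between the load and the store):

	if (local_paca->irq_happened)			/* lbz + cmpwi */
		goto happened;				/* bne %l[happened] */
	local_paca->irq_soft_mask = IRQS_ENABLED;	/* stb; IRQS_ENABLED is 0, so r9 can be reused */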
+
+/*
+ * This is a helper to use when about to go into idle low-power
+ * when the latter has the side effect of re-enabling interrupts
+ * (such as calling H_CEDE under pHyp).
+ *
+ * You call this function with interrupts soft-disabled (this is
+ * already the case when ppc_md.power_save is called). The function
+ * will return whether to enter power save or just return.
+ *
+ * In the former case, it will have notified lockdep of interrupts
+ * being re-enabled and generally sanitized the lazy irq state,
+ * and in the latter case it will leave with interrupts hard
+ * disabled and marked as such, so the local_irq_enable() call
+ * in arch_cpu_idle() will properly re-enable everything.
+ */
+bool prep_irq_for_idle(void)
+{
+ /*
+ * First we need to hard disable to ensure no interrupt
+ * occurs before we effectively enter the low power state
+ */
+ __hard_irq_disable();
+ local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
+
+ /*
+ * If anything happened while we were soft-disabled,
+ * we return now and do not enter the low power state.
+ */
+ if (lazy_irq_pending())
+ return false;
+
+ /* Tell lockdep we are about to re-enable */
+ trace_hardirqs_on();
+
+ /*
+ * Mark interrupts as soft-enabled and clear the
+ * PACA_IRQ_HARD_DIS from the pending mask since we
+ * are about to hard enable as well as a side effect
+ * of entering the low power state.
+ */
+ local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
+ irq_soft_mask_set(IRQS_ENABLED);
+
+ /* Tell the caller to enter the low power state */
+ return true;
+}
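As a usage sketch only (example_power_save() and enter_low_power_state() are hypothetical names, not part of this patch), a ppc_md.power_save hook is expected to use the helper like this:

	static void example_power_save(void)
	{
		/* Interrupts are already soft-disabled when we get here. */
		if (!prep_irq_for_idle())
			return;			/* something is pending; do not sleep */

		/* Entering the low power state re-enables interrupts. */
		enter_low_power_state();	/* e.g. an H_CEDE wrapper on pHyp */
	}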
+
+#ifdef CONFIG_PPC_BOOK3S
+/*
+ * This is for idle sequences that return with IRQs off, but the
+ * idle state itself wakes on interrupt. Tell the irq tracer that
+ * IRQs are enabled for the duration of idle so it does not get long
+ * off times. Must be paired with fini_irq_for_idle_irqsoff.
+ */
+bool prep_irq_for_idle_irqsoff(void)
+{
+ WARN_ON(!irqs_disabled());
+
+ /*
+ * First we need to hard disable to ensure no interrupt
+ * occurs before we effectively enter the low power state
+ */
+ __hard_irq_disable();
+ local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
+
+ /*
+ * If anything happened while we were soft-disabled,
+ * we return now and do not enter the low power state.
+ */
+ if (lazy_irq_pending())
+ return false;
+
+ /* Tell lockdep we are about to re-enable */
+ trace_hardirqs_on();
+
+ return true;
+}
+
+/*
+ * Take the SRR1 wakeup reason, index into this table to find the
+ * appropriate irq_happened bit.
+ *
+ * System reset exceptions taken in idle state also come through here,
+ * but they are NMI interrupts so do not need to wait for IRQs to be
+ * restored, and should be taken as early as practical. These are marked
+ * with 0xff in the table. The Power ISA specifies 0100b as the system
+ * reset interrupt reason.
+ */
+#define IRQ_SYSTEM_RESET 0xff
+
+static const u8 srr1_to_lazyirq[0x10] = {
+ 0, 0, 0,
+ PACA_IRQ_DBELL,
+ IRQ_SYSTEM_RESET,
+ PACA_IRQ_DBELL,
+ PACA_IRQ_DEC,
+ 0,
+ PACA_IRQ_EE,
+ PACA_IRQ_EE,
+ PACA_IRQ_HMI,
+ 0, 0, 0, 0, 0 };
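As a worked example of the lookup in irq_set_pending_from_srr1() below: a decrementer wakeup reports SRR1[42:45] = 0b0110, so idx is 6 and PACA_IRQ_DEC is latched in irq_happened for later replay, while a system reset reports 0b0100 (idx 4), which maps to IRQ_SYSTEM_RESET and is taken immediately via replay_system_reset().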
+
+void replay_system_reset(void)
+{
+ struct pt_regs regs;
+
+ ppc_save_regs(&regs);
+ regs.trap = 0x100;
+ get_paca()->in_nmi = 1;
+ system_reset_exception(&regs);
+ get_paca()->in_nmi = 0;
+}
+EXPORT_SYMBOL_GPL(replay_system_reset);
+
+void irq_set_pending_from_srr1(unsigned long srr1)
+{
+ unsigned int idx = (srr1 & SRR1_WAKEMASK_P8) >> 18;
+ u8 reason = srr1_to_lazyirq[idx];
+
+ /*
+ * Take the system reset now, which is immediately after registers
+ * are restored from idle. It's an NMI, so interrupts need not be
+ * re-enabled before it is taken.
+ */
+ if (unlikely(reason == IRQ_SYSTEM_RESET)) {
+ replay_system_reset();
+ return;
+ }
+
+ if (reason == PACA_IRQ_DBELL) {
+ /*
+ * When a doorbell triggers a system reset wakeup, the message
+ * is not cleared, so if the doorbell interrupt is replayed
+ * and the IPI handled, the doorbell interrupt would still
+ * fire when EE is enabled.
+ *
+ * To avoid taking the superfluous doorbell interrupt,
+ * execute a msgclr here before the interrupt is replayed.
+ */
+ ppc_msgclr(PPC_DBELL_MSGTYPE);
+ }
+
+ /*
+ * The 0 index (SRR1[42:45]=b0000) must always evaluate to 0,
+ * so this can be called unconditionally with the SRR1 wake
+ * reason as returned by the idle code, which uses 0 to mean no
+ * interrupt.
+ *
+ * If a future CPU was to designate this as an interrupt reason,
+ * then a new index for no interrupt must be assigned.
+ */
+ local_paca->irq_happened |= reason;
+}
+#endif /* CONFIG_PPC_BOOK3S */
+
+/*
+ * Force a replay of the external interrupt handler on this CPU.
+ */
+void force_external_irq_replay(void)
+{
+ /*
+ * This must only be called with interrupts soft-disabled,
+ * the replay will happen when re-enabling.
+ */
+ WARN_ON(!arch_irqs_disabled());
+
+ /*
+ * Interrupts must always be hard disabled before irq_happened is
+ * modified (to prevent lost update in case of interrupt between
+ * load and store).
+ */
+ __hard_irq_disable();
+ local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
+
+ /* Indicate in the PACA that we have an interrupt to replay */
+ local_paca->irq_happened |= PACA_IRQ_EE;
+}
+
+static int __init setup_noirqdistrib(char *str)
+{
+ distribute_irqs = 0;
+ return 1;
+}
+
+__setup("noirqdistrib", setup_noirqdistrib);
diff --git a/arch/powerpc/kernel/isa-bridge.c b/arch/powerpc/kernel/isa-bridge.c
index 773671b512df..dc746611ebc0 100644
--- a/arch/powerpc/kernel/isa-bridge.c
+++ b/arch/powerpc/kernel/isa-bridge.c
@@ -18,10 +18,11 @@
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/notifier.h>
+#include <linux/of_address.h>
+#include <linux/vmalloc.h>
#include <asm/processor.h>
#include <asm/io.h>
-#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/ppc-pci.h>
@@ -38,6 +39,22 @@ EXPORT_SYMBOL_GPL(isa_bridge_pcidev);
#define ISA_SPACE_MASK 0x1
#define ISA_SPACE_IO 0x1
+static void remap_isa_base(phys_addr_t pa, unsigned long size)
+{
+ WARN_ON_ONCE(ISA_IO_BASE & ~PAGE_MASK);
+ WARN_ON_ONCE(pa & ~PAGE_MASK);
+ WARN_ON_ONCE(size & ~PAGE_MASK);
+
+ if (slab_is_available()) {
+ if (ioremap_page_range(ISA_IO_BASE, ISA_IO_BASE + size, pa,
+ pgprot_noncached(PAGE_KERNEL)))
+ vunmap_range(ISA_IO_BASE, ISA_IO_BASE + size);
+ } else {
+ early_ioremap_range(ISA_IO_BASE, pa, size,
+ pgprot_noncached(PAGE_KERNEL));
+ }
+}
+
static void pci_process_ISA_OF_ranges(struct device_node *isa_node,
unsigned long phb_io_base_phys)
{
@@ -105,15 +122,13 @@ static void pci_process_ISA_OF_ranges(struct device_node *isa_node,
if (size > 0x10000)
size = 0x10000;
- __ioremap_at(phb_io_base_phys, (void *)ISA_IO_BASE,
- size, pgprot_noncached(PAGE_KERNEL));
+ remap_isa_base(phb_io_base_phys, size);
return;
inval_range:
printk(KERN_ERR "no ISA IO ranges or unexpected isa range, "
"mapping 64k\n");
- __ioremap_at(phb_io_base_phys, (void *)ISA_IO_BASE,
- 0x10000, pgprot_noncached(PAGE_KERNEL));
+ remap_isa_base(phb_io_base_phys, 0x10000);
}
@@ -248,8 +263,7 @@ void __init isa_bridge_init_non_pci(struct device_node *np)
* and map it
*/
isa_io_base = ISA_IO_BASE;
- __ioremap_at(pbase, (void *)ISA_IO_BASE,
- size, pgprot_noncached(PAGE_KERNEL));
+ remap_isa_base(pbase, size);
pr_debug("ISA: Non-PCI bridge is %pOF\n", np);
}
@@ -297,7 +311,7 @@ static void isa_bridge_remove(void)
isa_bridge_pcidev = NULL;
/* Unmap the ISA area */
- __iounmap_at((void *)ISA_IO_BASE, 0x10000);
+ vunmap_range(ISA_IO_BASE, ISA_IO_BASE + 0x10000);
}
/**
diff --git a/arch/powerpc/kernel/jump_label.c b/arch/powerpc/kernel/jump_label.c
index ca37702bde97..5277cf582c16 100644
--- a/arch/powerpc/kernel/jump_label.c
+++ b/arch/powerpc/kernel/jump_label.c
@@ -6,14 +6,15 @@
#include <linux/kernel.h>
#include <linux/jump_label.h>
#include <asm/code-patching.h>
+#include <asm/inst.h>
void arch_jump_label_transform(struct jump_entry *entry,
enum jump_label_type type)
{
- u32 *addr = (u32 *)(unsigned long)entry->code;
+ u32 *addr = (u32 *)jump_entry_code(entry);
if (type == JUMP_LABEL_JMP)
- patch_branch(addr, entry->target, 0);
+ patch_branch(addr, jump_entry_target(entry), 0);
else
- patch_instruction(addr, PPC_INST_NOP);
+ patch_instruction(addr, ppc_inst(PPC_RAW_NOP()));
}
diff --git a/arch/powerpc/kernel/kdebugfs.c b/arch/powerpc/kernel/kdebugfs.c
new file mode 100644
index 000000000000..36d3124d5a8b
--- /dev/null
+++ b/arch/powerpc/kernel/kdebugfs.c
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/debugfs.h>
+#include <linux/export.h>
+#include <linux/init.h>
+
+struct dentry *arch_debugfs_dir;
+EXPORT_SYMBOL(arch_debugfs_dir);
+
+static int __init arch_kdebugfs_init(void)
+{
+ arch_debugfs_dir = debugfs_create_dir("powerpc", NULL);
+ return 0;
+}
+arch_initcall(arch_kdebugfs_init);
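Other powerpc code can then hang entries off this directory; a hypothetical user might look like the following ("example-value" and the variable name are made up for illustration):

	static u32 example_value;

	static int __init example_debugfs_init(void)
	{
		debugfs_create_u32("example-value", 0600, arch_debugfs_dir,
				   &example_value);
		return 0;
	}
	late_initcall(example_debugfs_init);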
diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c
index 7dd55eb1259d..1a1e9995dae3 100644
--- a/arch/powerpc/kernel/kgdb.c
+++ b/arch/powerpc/kernel/kgdb.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* PowerPC backend to the KGDB stub.
*
@@ -8,10 +9,6 @@
* PPC32 support restored by Vitaly Wool <vwool@ru.mvista.com> and
* Sergei Shtylyov <sshtylyov@ru.mvista.com>
* Copyright (C) 2007-2008 Wind River Systems, Inc.
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program as licensed "as is" without any warranty of any
- * kind, whether express or implied.
*/
#include <linux/kernel.h>
@@ -26,6 +23,7 @@
#include <asm/debug.h>
#include <asm/code-patching.h>
#include <linux/slab.h>
+#include <asm/inst.h>
/*
* This table contains the mapping between PowerPC hardware trap types, and
@@ -47,9 +45,9 @@ static struct hard_trap_info
{ 0x0800, 0x08 /* SIGFPE */ }, /* fp unavailable */
{ 0x0900, 0x0e /* SIGALRM */ }, /* decrementer */
{ 0x0c00, 0x14 /* SIGCHLD */ }, /* system call */
-#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
+#ifdef CONFIG_BOOKE_OR_40x
{ 0x2002, 0x05 /* SIGTRAP */ }, /* debug */
-#if defined(CONFIG_FSL_BOOKE)
+#if defined(CONFIG_PPC_85xx)
{ 0x2010, 0x08 /* SIGFPE */ }, /* spe unavailable */
{ 0x2020, 0x08 /* SIGFPE */ }, /* spe unavailable */
{ 0x2030, 0x08 /* SIGFPE */ }, /* spe fp data */
@@ -59,14 +57,14 @@ static struct hard_trap_info
{ 0x2900, 0x08 /* SIGFPE */ }, /* apu unavailable */
{ 0x3100, 0x0e /* SIGALRM */ }, /* fixed interval timer */
{ 0x3200, 0x02 /* SIGINT */ }, /* watchdog */
-#else /* ! CONFIG_FSL_BOOKE */
+#else /* ! CONFIG_PPC_85xx */
{ 0x1000, 0x0e /* SIGALRM */ }, /* prog interval timer */
{ 0x1010, 0x0e /* SIGALRM */ }, /* fixed interval timer */
{ 0x1020, 0x02 /* SIGINT */ }, /* watchdog */
{ 0x2010, 0x08 /* SIGFPE */ }, /* fp unavailable */
{ 0x2020, 0x08 /* SIGFPE */ }, /* ap unavailable */
#endif
-#else /* ! (defined(CONFIG_40x) || defined(CONFIG_BOOKE)) */
+#else /* !CONFIG_BOOKE_OR_40x */
{ 0x0d00, 0x05 /* SIGTRAP */ }, /* single-step */
#if defined(CONFIG_PPC_8xx)
{ 0x1000, 0x04 /* SIGILL */ }, /* software emulation */
@@ -146,7 +144,7 @@ static int kgdb_handle_breakpoint(struct pt_regs *regs)
return 0;
if (*(u32 *)regs->nip == BREAK_INSTR)
- regs->nip += BREAK_INSTR_SIZE;
+ regs_add_return_ip(regs, BREAK_INSTR_SIZE);
return 1;
}
@@ -210,7 +208,7 @@ void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
for (reg = 14; reg < 32; reg++)
PACK64(ptr, regs->gpr[reg]);
-#ifdef CONFIG_FSL_BOOKE
+#ifdef CONFIG_PPC_85xx
#ifdef CONFIG_SPE
for (reg = 0; reg < 32; reg++)
PACK64(ptr, p->thread.evr[reg]);
@@ -236,7 +234,7 @@ void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
#define GDB_SIZEOF_REG sizeof(unsigned long)
#define GDB_SIZEOF_REG_U32 sizeof(u32)
-#ifdef CONFIG_FSL_BOOKE
+#ifdef CONFIG_PPC_85xx
#define GDB_SIZEOF_FLOAT_REG sizeof(unsigned long)
#else
#define GDB_SIZEOF_FLOAT_REG sizeof(u64)
@@ -331,7 +329,7 @@ char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
if (regno >= 32 && regno < 64) {
/* FP registers 32 -> 63 */
-#if defined(CONFIG_FSL_BOOKE) && defined(CONFIG_SPE)
+#if defined(CONFIG_PPC_85xx) && defined(CONFIG_SPE)
if (current)
memcpy(mem, &current->thread.evr[regno-32],
dbg_reg_def[regno].size);
@@ -357,7 +355,7 @@ int dbg_set_reg(int regno, void *mem, struct pt_regs *regs)
if (regno >= 32 && regno < 64) {
/* FP registers 32 -> 63 */
-#if defined(CONFIG_FSL_BOOKE) && defined(CONFIG_SPE)
+#if defined(CONFIG_PPC_85xx) && defined(CONFIG_SPE)
memcpy(&current->thread.evr[regno-32], mem,
dbg_reg_def[regno].size);
#else
@@ -371,11 +369,11 @@ int dbg_set_reg(int regno, void *mem, struct pt_regs *regs)
void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
{
- regs->nip = pc;
+ regs_set_return_ip(regs, pc);
}
/*
- * This function does PowerPC specific procesing for interfacing to gdb.
+ * This function does PowerPC specific processing for interfacing to gdb.
*/
int kgdb_arch_handle_exception(int vector, int signo, int err_code,
char *remcom_in_buffer, char *remcom_out_buffer,
@@ -393,7 +391,7 @@ int kgdb_arch_handle_exception(int vector, int signo, int err_code,
case 'c':
/* handle the optional parameter */
if (kgdb_hex2long(&ptr, &addr))
- linux_regs->nip = addr;
+ regs_set_return_ip(linux_regs, addr);
atomic_set(&kgdb_cpu_doing_single_step, -1);
/* set the trace bit if we're stepping */
@@ -401,9 +399,9 @@ int kgdb_arch_handle_exception(int vector, int signo, int err_code,
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
mtspr(SPRN_DBCR0,
mfspr(SPRN_DBCR0) | DBCR0_IC | DBCR0_IDM);
- linux_regs->msr |= MSR_DE;
+ regs_set_return_msr(linux_regs, linux_regs->msr | MSR_DE);
#else
- linux_regs->msr |= MSR_SE;
+ regs_set_return_msr(linux_regs, linux_regs->msr | MSR_SE);
#endif
atomic_set(&kgdb_cpu_doing_single_step,
raw_smp_processor_id());
@@ -416,19 +414,18 @@ int kgdb_arch_handle_exception(int vector, int signo, int err_code,
int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
{
+ u32 instr, *addr = (u32 *)bpt->bpt_addr;
int err;
- unsigned int instr;
- unsigned int *addr = (unsigned int *)bpt->bpt_addr;
- err = probe_kernel_address(addr, instr);
+ err = get_kernel_nofault(instr, addr);
if (err)
return err;
- err = patch_instruction(addr, BREAK_INSTR);
+ err = patch_instruction(addr, ppc_inst(BREAK_INSTR));
if (err)
return -EFAULT;
- *(unsigned int *)bpt->saved_instr = instr;
+ *(u32 *)bpt->saved_instr = instr;
return 0;
}
@@ -437,9 +434,9 @@ int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
{
int err;
unsigned int instr = *(unsigned int *)bpt->saved_instr;
- unsigned int *addr = (unsigned int *)bpt->bpt_addr;
+ u32 *addr = (u32 *)bpt->bpt_addr;
- err = patch_instruction(addr, instr);
+ err = patch_instruction(addr, ppc_inst(instr));
if (err)
return -EFAULT;
diff --git a/arch/powerpc/kernel/kprobes-ftrace.c b/arch/powerpc/kernel/kprobes-ftrace.c
index 972cb28174b2..072ebe7f290b 100644
--- a/arch/powerpc/kernel/kprobes-ftrace.c
+++ b/arch/powerpc/kernel/kprobes-ftrace.c
@@ -14,14 +14,21 @@
/* Ftrace callback handler for kprobes */
void kprobe_ftrace_handler(unsigned long nip, unsigned long parent_nip,
- struct ftrace_ops *ops, struct pt_regs *regs)
+ struct ftrace_ops *ops, struct ftrace_regs *fregs)
{
struct kprobe *p;
struct kprobe_ctlblk *kcb;
+ struct pt_regs *regs;
+ int bit;
+ bit = ftrace_test_recursion_trylock(nip, parent_nip);
+ if (bit < 0)
+ return;
+
+ regs = ftrace_get_regs(fregs);
p = get_kprobe((kprobe_opcode_t *)nip);
if (unlikely(!p) || kprobe_disabled(p))
- return;
+ goto out;
kcb = get_kprobe_ctlblk();
if (kprobe_running()) {
@@ -31,7 +38,7 @@ void kprobe_ftrace_handler(unsigned long nip, unsigned long parent_nip,
* On powerpc, NIP is *before* this instruction for the
* pre handler
*/
- regs->nip -= MCOUNT_INSN_SIZE;
+ regs_add_return_ip(regs, -MCOUNT_INSN_SIZE);
__this_cpu_write(current_kprobe, p);
kcb->kprobe_status = KPROBE_HIT_ACTIVE;
@@ -40,7 +47,7 @@ void kprobe_ftrace_handler(unsigned long nip, unsigned long parent_nip,
* Emulate singlestep (and also recover regs->nip)
* as if there is a nop
*/
- regs->nip += MCOUNT_INSN_SIZE;
+ regs_add_return_ip(regs, MCOUNT_INSN_SIZE);
if (unlikely(p->post_handler)) {
kcb->kprobe_status = KPROBE_HIT_SSDONE;
p->post_handler(p, regs, 0);
@@ -52,6 +59,8 @@ void kprobe_ftrace_handler(unsigned long nip, unsigned long parent_nip,
*/
__this_cpu_write(current_kprobe, NULL);
}
+out:
+ ftrace_test_recursion_unlock(bit);
}
NOKPROBE_SYMBOL(kprobe_ftrace_handler);
diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
index 2d27ec4feee4..bd7b1a035459 100644
--- a/arch/powerpc/kernel/kprobes.c
+++ b/arch/powerpc/kernel/kprobes.c
@@ -19,10 +19,13 @@
#include <linux/extable.h>
#include <linux/kdebug.h>
#include <linux/slab.h>
+#include <linux/moduleloader.h>
#include <asm/code-patching.h>
#include <asm/cacheflush.h>
#include <asm/sstep.h>
#include <asm/sections.h>
+#include <asm/inst.h>
+#include <asm/set_memory.h>
#include <linux/uaccess.h>
DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
@@ -42,7 +45,7 @@ kprobe_opcode_t *kprobe_lookup_name(const char *name, unsigned int offset)
{
kprobe_opcode_t *addr = NULL;
-#ifdef PPC64_ELF_ABI_v2
+#ifdef CONFIG_PPC64_ELF_ABI_V2
/* PPC64 ABIv2 needs local entry point */
addr = (kprobe_opcode_t *)kallsyms_lookup_name(name);
if (addr && !offset) {
@@ -60,7 +63,7 @@ kprobe_opcode_t *kprobe_lookup_name(const char *name, unsigned int offset)
#endif
addr = (kprobe_opcode_t *)ppc_function_entry(addr);
}
-#elif defined(PPC64_ELF_ABI_v1)
+#elif defined(CONFIG_PPC64_ELF_ABI_V1)
/*
* 64bit powerpc ABIv1 uses function descriptors:
* - Check for the dot variant of the symbol first.
@@ -102,16 +105,70 @@ kprobe_opcode_t *kprobe_lookup_name(const char *name, unsigned int offset)
return addr;
}
+static bool arch_kprobe_on_func_entry(unsigned long offset)
+{
+#ifdef CONFIG_PPC64_ELF_ABI_V2
+#ifdef CONFIG_KPROBES_ON_FTRACE
+ return offset <= 16;
+#else
+ return offset <= 8;
+#endif
+#else
+ return !offset;
+#endif
+}
+
+/* XXX try and fold the magic of kprobe_lookup_name() in this */
+kprobe_opcode_t *arch_adjust_kprobe_addr(unsigned long addr, unsigned long offset,
+ bool *on_func_entry)
+{
+ *on_func_entry = arch_kprobe_on_func_entry(offset);
+ return (kprobe_opcode_t *)(addr + offset);
+}
+
+void *alloc_insn_page(void)
+{
+ void *page;
+
+ page = module_alloc(PAGE_SIZE);
+ if (!page)
+ return NULL;
+
+ if (strict_module_rwx_enabled()) {
+ set_memory_ro((unsigned long)page, 1);
+ set_memory_x((unsigned long)page, 1);
+ }
+ return page;
+}
+
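With strict module RWX in force the instruction slot page is mapped read-only and executable as soon as it is allocated, which is why arch_prepare_kprobe() below copies the probed instruction into the slot with patch_instruction() instead of the old memcpy()/flush_icache_range() pair.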
int arch_prepare_kprobe(struct kprobe *p)
{
int ret = 0;
- kprobe_opcode_t insn = *p->addr;
+ struct kprobe *prev;
+ ppc_inst_t insn = ppc_inst_read(p->addr);
if ((unsigned long)p->addr & 0x03) {
printk("Attempt to register kprobe at an unaligned address\n");
ret = -EINVAL;
- } else if (IS_MTMSRD(insn) || IS_RFID(insn) || IS_RFI(insn)) {
- printk("Cannot register a kprobe on rfi/rfid or mtmsr[d]\n");
+ } else if (!can_single_step(ppc_inst_val(insn))) {
+ printk("Cannot register a kprobe on instructions that can't be single stepped\n");
+ ret = -EINVAL;
+ } else if ((unsigned long)p->addr & ~PAGE_MASK &&
+ ppc_inst_prefixed(ppc_inst_read(p->addr - 1))) {
+ printk("Cannot register a kprobe on the second word of prefixed instruction\n");
+ ret = -EINVAL;
+ }
+ preempt_disable();
+ prev = get_kprobe(p->addr - 1);
+ preempt_enable_no_resched();
+
+ /*
+ * When prev is an ftrace-based kprobe, we don't have an insn, and it
+ * doesn't probe a prefixed instruction.
+ */
+ if (prev && !kprobe_ftrace(prev) &&
+ ppc_inst_prefixed(ppc_inst_read(prev->ainsn.insn))) {
+ printk("Cannot register a kprobe on the second word of prefixed instruction\n");
ret = -EINVAL;
}
@@ -124,11 +181,8 @@ int arch_prepare_kprobe(struct kprobe *p)
}
if (!ret) {
- memcpy(p->ainsn.insn, p->addr,
- MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
- p->opcode = *p->addr;
- flush_icache_range((unsigned long)p->ainsn.insn,
- (unsigned long)p->ainsn.insn + sizeof(kprobe_opcode_t));
+ patch_instruction(p->ainsn.insn, insn);
+ p->opcode = ppc_inst_val(insn);
}
p->ainsn.boostable = 0;
@@ -138,13 +192,13 @@ NOKPROBE_SYMBOL(arch_prepare_kprobe);
void arch_arm_kprobe(struct kprobe *p)
{
- patch_instruction(p->addr, BREAKPOINT_INSTRUCTION);
+ WARN_ON_ONCE(patch_instruction(p->addr, ppc_inst(BREAKPOINT_INSTRUCTION)));
}
NOKPROBE_SYMBOL(arch_arm_kprobe);
void arch_disarm_kprobe(struct kprobe *p)
{
- patch_instruction(p->addr, p->opcode);
+ WARN_ON_ONCE(patch_instruction(p->addr, ppc_inst(p->opcode)));
}
NOKPROBE_SYMBOL(arch_disarm_kprobe);
@@ -167,7 +221,7 @@ static nokprobe_inline void prepare_singlestep(struct kprobe *p, struct pt_regs
* variant as values in regs could play a part in
* if the trap is taken or not
*/
- regs->nip = (unsigned long)p->ainsn.insn;
+ regs_set_return_ip(regs, (unsigned long)p->ainsn.insn);
}
static nokprobe_inline void save_previous_kprobe(struct kprobe_ctlblk *kcb)
@@ -191,32 +245,20 @@ static nokprobe_inline void set_current_kprobe(struct kprobe *p, struct pt_regs
kcb->kprobe_saved_msr = regs->msr;
}
-bool arch_kprobe_on_func_entry(unsigned long offset)
-{
-#ifdef PPC64_ELF_ABI_v2
-#ifdef CONFIG_KPROBES_ON_FTRACE
- return offset <= 16;
-#else
- return offset <= 8;
-#endif
-#else
- return !offset;
-#endif
-}
-
void arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs)
{
ri->ret_addr = (kprobe_opcode_t *)regs->link;
+ ri->fp = NULL;
/* Replace the return addr with trampoline addr */
- regs->link = (unsigned long)kretprobe_trampoline;
+ regs->link = (unsigned long)__kretprobe_trampoline;
}
NOKPROBE_SYMBOL(arch_prepare_kretprobe);
static int try_to_emulate(struct kprobe *p, struct pt_regs *regs)
{
int ret;
- unsigned int insn = *p->ainsn.insn;
+ ppc_inst_t insn = ppc_inst_read(p->ainsn.insn);
/* regs->nip is also adjusted if emulate_step returns 1 */
ret = emulate_step(regs, insn);
@@ -233,7 +275,7 @@ static int try_to_emulate(struct kprobe *p, struct pt_regs *regs)
* So, we should never get here... but, its still
* good to catch them, just in case...
*/
- printk("Can't step on instruction %x\n", insn);
+ printk("Can't step on instruction %08lx\n", ppc_inst_as_ulong(insn));
BUG();
} else {
/*
@@ -264,6 +306,10 @@ int kprobe_handler(struct pt_regs *regs)
if (user_mode(regs))
return 0;
+ if (!IS_ENABLED(CONFIG_BOOKE) &&
+ (!(regs->msr & MSR_IR) || !(regs->msr & MSR_DR)))
+ return 0;
+
/*
* We don't want to be preempted for the entire
* duration of kprobe processing
@@ -271,64 +317,20 @@ int kprobe_handler(struct pt_regs *regs)
preempt_disable();
kcb = get_kprobe_ctlblk();
- /* Check we're not actually recursing */
- if (kprobe_running()) {
- p = get_kprobe(addr);
- if (p) {
- kprobe_opcode_t insn = *p->ainsn.insn;
- if (kcb->kprobe_status == KPROBE_HIT_SS &&
- is_trap(insn)) {
- /* Turn off 'trace' bits */
- regs->msr &= ~MSR_SINGLESTEP;
- regs->msr |= kcb->kprobe_saved_msr;
- goto no_kprobe;
- }
- /* We have reentered the kprobe_handler(), since
- * another probe was hit while within the handler.
- * We here save the original kprobes variables and
- * just single step on the instruction of the new probe
- * without calling any user handlers.
- */
- save_previous_kprobe(kcb);
- set_current_kprobe(p, regs, kcb);
- kprobes_inc_nmissed_count(p);
- kcb->kprobe_status = KPROBE_REENTER;
- if (p->ainsn.boostable >= 0) {
- ret = try_to_emulate(p, regs);
-
- if (ret > 0) {
- restore_previous_kprobe(kcb);
- preempt_enable_no_resched();
- return 1;
- }
- }
- prepare_singlestep(p, regs);
- return 1;
- } else if (*addr != BREAKPOINT_INSTRUCTION) {
- /* If trap variant, then it belongs not to us */
- kprobe_opcode_t cur_insn = *addr;
-
- if (is_trap(cur_insn))
- goto no_kprobe;
- /* The breakpoint instruction was removed by
- * another cpu right after we hit, no further
- * handling of this interrupt is appropriate
- */
- ret = 1;
- }
- goto no_kprobe;
- }
-
p = get_kprobe(addr);
if (!p) {
- if (*addr != BREAKPOINT_INSTRUCTION) {
+ unsigned int instr;
+
+ if (get_kernel_nofault(instr, addr))
+ goto no_kprobe;
+
+ if (instr != BREAKPOINT_INSTRUCTION) {
/*
* PowerPC has multiple variants of the "trap"
* instruction. If the current instruction is a
* trap variant, it could belong to someone else
*/
- kprobe_opcode_t cur_insn = *addr;
- if (is_trap(cur_insn))
+ if (is_trap(instr))
goto no_kprobe;
/*
* The breakpoint instruction was removed right
@@ -343,6 +345,40 @@ int kprobe_handler(struct pt_regs *regs)
goto no_kprobe;
}
+ /* Check we're not actually recursing */
+ if (kprobe_running()) {
+ kprobe_opcode_t insn = *p->ainsn.insn;
+ if (kcb->kprobe_status == KPROBE_HIT_SS && is_trap(insn)) {
+ /* Turn off 'trace' bits */
+ regs_set_return_msr(regs,
+ (regs->msr & ~MSR_SINGLESTEP) |
+ kcb->kprobe_saved_msr);
+ goto no_kprobe;
+ }
+
+ /*
+ * We have reentered the kprobe_handler(), since another probe
+ * was hit while within the handler. We here save the original
+ * kprobes variables and just single step on the instruction of
+ * the new probe without calling any user handlers.
+ */
+ save_previous_kprobe(kcb);
+ set_current_kprobe(p, regs, kcb);
+ kprobes_inc_nmissed_count(p);
+ kcb->kprobe_status = KPROBE_REENTER;
+ if (p->ainsn.boostable >= 0) {
+ ret = try_to_emulate(p, regs);
+
+ if (ret > 0) {
+ restore_previous_kprobe(kcb);
+ preempt_enable_no_resched();
+ return 1;
+ }
+ }
+ prepare_singlestep(p, regs);
+ return 1;
+ }
+
kcb->kprobe_status = KPROBE_HIT_ACTIVE;
set_current_kprobe(p, regs, kcb);
if (p->pre_handler && p->pre_handler(p, regs)) {
@@ -381,62 +417,21 @@ NOKPROBE_SYMBOL(kprobe_handler);
* - When the probed function returns, this probe
* causes the handlers to fire
*/
-asm(".global kretprobe_trampoline\n"
- ".type kretprobe_trampoline, @function\n"
- "kretprobe_trampoline:\n"
+asm(".global __kretprobe_trampoline\n"
+ ".type __kretprobe_trampoline, @function\n"
+ "__kretprobe_trampoline:\n"
"nop\n"
"blr\n"
- ".size kretprobe_trampoline, .-kretprobe_trampoline\n");
+ ".size __kretprobe_trampoline, .-__kretprobe_trampoline\n");
/*
* Called when the probe at kretprobe trampoline is hit
*/
static int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
{
- struct kretprobe_instance *ri = NULL;
- struct hlist_head *head, empty_rp;
- struct hlist_node *tmp;
- unsigned long flags, orig_ret_address = 0;
- unsigned long trampoline_address =(unsigned long)&kretprobe_trampoline;
-
- INIT_HLIST_HEAD(&empty_rp);
- kretprobe_hash_lock(current, &head, &flags);
-
- /*
- * It is possible to have multiple instances associated with a given
- * task either because an multiple functions in the call path
- * have a return probe installed on them, and/or more than one return
- * return probe was registered for a target function.
- *
- * We can handle this because:
- * - instances are always inserted at the head of the list
- * - when multiple return probes are registered for the same
- * function, the first instance's ret_addr will point to the
- * real return address, and all the rest will point to
- * kretprobe_trampoline
- */
- hlist_for_each_entry_safe(ri, tmp, head, hlist) {
- if (ri->task != current)
- /* another task is sharing our hash bucket */
- continue;
-
- if (ri->rp && ri->rp->handler)
- ri->rp->handler(ri, regs);
-
- orig_ret_address = (unsigned long)ri->ret_addr;
- recycle_rp_inst(ri, &empty_rp);
-
- if (orig_ret_address != trampoline_address)
- /*
- * This is the real return address. Any other
- * instances associated with this task are for
- * other calls deeper on the call stack
- */
- break;
- }
-
- kretprobe_assert(ri, orig_ret_address, trampoline_address);
+ unsigned long orig_ret_address;
+ orig_ret_address = __kretprobe_trampoline_handler(regs, NULL);
/*
* We get here through one of two paths:
* 1. by taking a trap -> kprobe_handler() -> here
@@ -446,22 +441,15 @@ static int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
* as it is used to determine the return address from the trap.
* For (2), since nip is not honoured with optprobes, we instead setup
* the link register properly so that the subsequent 'blr' in
- * kretprobe_trampoline jumps back to the right instruction.
+ * __kretprobe_trampoline jumps back to the right instruction.
*
* For nip, we should set the address to the previous instruction since
* we end up emulating it in kprobe_handler(), which increments the nip
* again.
*/
- regs->nip = orig_ret_address - 4;
+ regs_set_return_ip(regs, orig_ret_address - 4);
regs->link = orig_ret_address;
- kretprobe_hash_unlock(current, &flags);
-
- hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
- hlist_del(&ri->hlist);
- kfree(ri);
- }
-
return 0;
}
NOKPROBE_SYMBOL(trampoline_probe_handler);
@@ -476,14 +464,16 @@ NOKPROBE_SYMBOL(trampoline_probe_handler);
*/
int kprobe_post_handler(struct pt_regs *regs)
{
+ int len;
struct kprobe *cur = kprobe_running();
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
if (!cur || user_mode(regs))
return 0;
+ len = ppc_inst_len(ppc_inst_read(cur->ainsn.insn));
/* make sure we got here for instruction we have a kprobe on */
- if (((unsigned long)cur->ainsn.insn + 4) != regs->nip)
+ if (((unsigned long)cur->ainsn.insn + len) != regs->nip)
return 0;
if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
@@ -492,8 +482,8 @@ int kprobe_post_handler(struct pt_regs *regs)
}
/* Adjust nip to after the single-stepped instruction */
- regs->nip = (unsigned long)cur->addr + 4;
- regs->msr |= kcb->kprobe_saved_msr;
+ regs_set_return_ip(regs, (unsigned long)cur->addr + len);
+ regs_set_return_msr(regs, regs->msr | kcb->kprobe_saved_msr);
/*Restore back the original saved kprobes variables and continue. */
if (kcb->kprobe_status == KPROBE_REENTER) {
@@ -532,9 +522,11 @@ int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
* and allow the page fault handler to continue as a
* normal page fault.
*/
- regs->nip = (unsigned long)cur->addr;
- regs->msr &= ~MSR_SINGLESTEP; /* Turn off 'trace' bits */
- regs->msr |= kcb->kprobe_saved_msr;
+ regs_set_return_ip(regs, (unsigned long)cur->addr);
+ /* Turn off 'trace' bits */
+ regs_set_return_msr(regs,
+ (regs->msr & ~MSR_SINGLESTEP) |
+ kcb->kprobe_saved_msr);
if (kcb->kprobe_status == KPROBE_REENTER)
restore_previous_kprobe(kcb);
else
@@ -544,28 +536,11 @@ int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
case KPROBE_HIT_ACTIVE:
case KPROBE_HIT_SSDONE:
/*
- * We increment the nmissed count for accounting,
- * we can also use npre/npostfault count for accounting
- * these specific fault cases.
- */
- kprobes_inc_nmissed_count(cur);
-
- /*
- * We come here because instructions in the pre/post
- * handler caused the page_fault, this could happen
- * if handler tries to access user space by
- * copy_from_user(), get_user() etc. Let the
- * user-specified handler try to fix it first.
- */
- if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
- return 1;
-
- /*
* In case the user-specified fault handler returned
* zero, try to fix up.
*/
if ((entry = search_exception_tables(regs->nip)) != NULL) {
- regs->nip = extable_fixup(entry);
+ regs_set_return_ip(regs, extable_fixup(entry));
return 1;
}
@@ -581,19 +556,8 @@ int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
}
NOKPROBE_SYMBOL(kprobe_fault_handler);
-unsigned long arch_deref_entry_point(void *entry)
-{
-#ifdef PPC64_ELF_ABI_v1
- if (!kernel_text_address((unsigned long)entry))
- return ppc_global_function_entry(entry);
- else
-#endif
- return (unsigned long)entry;
-}
-NOKPROBE_SYMBOL(arch_deref_entry_point);
-
static struct kprobe trampoline_p = {
- .addr = (kprobe_opcode_t *) &kretprobe_trampoline,
+ .addr = (kprobe_opcode_t *) &__kretprobe_trampoline,
.pre_handler = trampoline_probe_handler
};
@@ -604,7 +568,7 @@ int __init arch_init_kprobes(void)
int arch_trampoline_kprobe(struct kprobe *p)
{
- if (p->addr == (kprobe_opcode_t *)&kretprobe_trampoline)
+ if (p->addr == (kprobe_opcode_t *)&__kretprobe_trampoline)
return 1;
return 0;
diff --git a/arch/powerpc/kernel/kvm.c b/arch/powerpc/kernel/kvm.c
index 617eba82531c..5b3c093611ba 100644
--- a/arch/powerpc/kernel/kvm.c
+++ b/arch/powerpc/kernel/kvm.c
@@ -455,7 +455,7 @@ static void __init kvm_check_ins(u32 *inst, u32 features)
kvm_patch_ins_lwz(inst, magic_var(dsisr), inst_rt);
break;
-#ifdef CONFIG_PPC_BOOK3E_MMU
+#ifdef CONFIG_PPC_E500
case KVM_INST_MFSPR(SPRN_MAS0):
if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
kvm_patch_ins_lwz(inst, magic_var(mas0), inst_rt);
@@ -484,7 +484,7 @@ static void __init kvm_check_ins(u32 *inst, u32 features)
if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
kvm_patch_ins_lwz(inst, magic_var(mas7_3), inst_rt);
break;
-#endif /* CONFIG_PPC_BOOK3E_MMU */
+#endif /* CONFIG_PPC_E500 */
case KVM_INST_MFSPR(SPRN_SPRG4):
#ifdef CONFIG_BOOKE
@@ -557,7 +557,7 @@ static void __init kvm_check_ins(u32 *inst, u32 features)
case KVM_INST_MTSPR(SPRN_DSISR):
kvm_patch_ins_stw(inst, magic_var(dsisr), inst_rt);
break;
-#ifdef CONFIG_PPC_BOOK3E_MMU
+#ifdef CONFIG_PPC_E500
case KVM_INST_MTSPR(SPRN_MAS0):
if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
kvm_patch_ins_stw(inst, magic_var(mas0), inst_rt);
@@ -586,7 +586,7 @@ static void __init kvm_check_ins(u32 *inst, u32 features)
if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
kvm_patch_ins_stw(inst, magic_var(mas7_3), inst_rt);
break;
-#endif /* CONFIG_PPC_BOOK3E_MMU */
+#endif /* CONFIG_PPC_E500 */
case KVM_INST_MTSPR(SPRN_SPRG4):
if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
@@ -669,7 +669,8 @@ static void __init kvm_use_magic_page(void)
on_each_cpu(kvm_map_magic_page, &features, 1);
/* Quick self-test to see if the mapping works */
- if (!fault_in_pages_readable((const char *)KVM_MAGIC_PAGE, sizeof(u32))) {
+ if (fault_in_readable((const char __user *)KVM_MAGIC_PAGE,
+ sizeof(u32))) {
kvm_patching_worked = false;
return;
}
diff --git a/arch/powerpc/kernel/l2cr_6xx.S b/arch/powerpc/kernel/l2cr_6xx.S
index 2020d255585f..f2e03ed423d0 100644
--- a/arch/powerpc/kernel/l2cr_6xx.S
+++ b/arch/powerpc/kernel/l2cr_6xx.S
@@ -96,7 +96,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_L2CR)
/* Stop DST streams */
BEGIN_FTR_SECTION
- DSSALL
+ PPC_DSSALL
sync
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
@@ -256,7 +256,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_SPEC7450)
sync
/* Restore MSR (restores EE and DR bits to original state) */
- SYNC
mtmsr r7
isync
@@ -293,7 +292,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_L3CR)
isync
/* Stop DST streams */
- DSSALL
+ PPC_DSSALL
sync
/* Get the current enable bit of the L3CR into r4 */
@@ -377,7 +376,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_L3CR)
1: bdnz 1b
/* Restore MSR (restores EE and DR bits to original state) */
-4: SYNC
+4:
mtmsr r7
isync
blr
@@ -402,7 +401,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_L3CR)
_GLOBAL(__flush_disable_L1)
/* Stop pending alitvec streams and memory accesses */
BEGIN_FTR_SECTION
- DSSALL
+ PPC_DSSALL
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
sync
@@ -455,5 +454,6 @@ _GLOBAL(__inval_enable_L1)
sync
blr
+_ASM_NOKPROBE_SYMBOL(__inval_enable_L1)
diff --git a/arch/powerpc/kernel/legacy_serial.c b/arch/powerpc/kernel/legacy_serial.c
index f061e06e9f51..f048c424c525 100644
--- a/arch/powerpc/kernel/legacy_serial.c
+++ b/arch/powerpc/kernel/legacy_serial.c
@@ -7,14 +7,15 @@
#include <linux/pci.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
+#include <linux/of_irq.h>
#include <linux/serial_reg.h>
#include <asm/io.h>
#include <asm/mmu.h>
-#include <asm/prom.h>
#include <asm/serial.h>
#include <asm/udbg.h>
#include <asm/pci-bridge.h>
#include <asm/ppc-pci.h>
+#include <asm/early_ioremap.h>
#undef DEBUG
@@ -34,6 +35,7 @@ static struct legacy_serial_info {
unsigned int clock;
int irq_check_parent;
phys_addr_t taddr;
+ void __iomem *early_addr;
} legacy_serial_infos[MAX_LEGACY_SERIAL_PORTS];
static const struct of_device_id legacy_serial_parents[] __initconst = {
@@ -325,17 +327,16 @@ static void __init setup_legacy_serial_console(int console)
{
struct legacy_serial_info *info = &legacy_serial_infos[console];
struct plat_serial8250_port *port = &legacy_serial_ports[console];
- void __iomem *addr;
unsigned int stride;
stride = 1 << port->regshift;
/* Check if a translated MMIO address has been found */
if (info->taddr) {
- addr = ioremap(info->taddr, 0x1000);
- if (addr == NULL)
+ info->early_addr = early_ioremap(info->taddr, 0x1000);
+ if (info->early_addr == NULL)
return;
- udbg_uart_init_mmio(addr, stride);
+ udbg_uart_init_mmio(info->early_addr, stride);
} else {
/* Check if it's PIO and we support untranslated PIO */
if (port->iotype == UPIO_PORT && isa_io_special)
@@ -353,6 +354,33 @@ static void __init setup_legacy_serial_console(int console)
udbg_uart_setup(info->speed, info->clock);
}
+static int __init ioremap_legacy_serial_console(void)
+{
+ struct plat_serial8250_port *port;
+ struct legacy_serial_info *info;
+ void __iomem *vaddr;
+
+ if (legacy_serial_console < 0)
+ return 0;
+
+ info = &legacy_serial_infos[legacy_serial_console];
+ port = &legacy_serial_ports[legacy_serial_console];
+
+ if (!info->early_addr)
+ return 0;
+
+ vaddr = ioremap(info->taddr, 0x1000);
+ if (WARN_ON(!vaddr))
+ return -ENOMEM;
+
+ udbg_uart_init_mmio(vaddr, 1 << port->regshift);
+ early_iounmap(info->early_addr, 0x1000);
+ info->early_addr = NULL;
+
+ return 0;
+}
+early_initcall(ioremap_legacy_serial_console);
+
/*
* This is called very early, as part of setup_system() or eventually
* setup_arch(), basically before anything else in this file. This function
@@ -443,6 +471,8 @@ void __init find_legacy_serial_ports(void)
}
#endif
+ of_node_put(stdout);
+
DBG("legacy_serial_console = %d\n", legacy_serial_console);
if (legacy_serial_console >= 0)
setup_legacy_serial_console(legacy_serial_console);
diff --git a/arch/powerpc/kernel/mce.c b/arch/powerpc/kernel/mce.c
index 34c1001e9e8b..6c5d30fba766 100644
--- a/arch/powerpc/kernel/mce.c
+++ b/arch/powerpc/kernel/mce.c
@@ -15,37 +15,36 @@
#include <linux/percpu.h>
#include <linux/export.h>
#include <linux/irq_work.h>
+#include <linux/extable.h>
+#include <linux/ftrace.h>
+#include <linux/memblock.h>
+#include <linux/of.h>
+#include <asm/interrupt.h>
#include <asm/machdep.h>
#include <asm/mce.h>
#include <asm/nmi.h>
-static DEFINE_PER_CPU(int, mce_nest_count);
-static DEFINE_PER_CPU(struct machine_check_event[MAX_MC_EVT], mce_event);
+#include "setup.h"
-/* Queue for delayed MCE events. */
-static DEFINE_PER_CPU(int, mce_queue_count);
-static DEFINE_PER_CPU(struct machine_check_event[MAX_MC_EVT], mce_event_queue);
-
-/* Queue for delayed MCE UE events. */
-static DEFINE_PER_CPU(int, mce_ue_count);
-static DEFINE_PER_CPU(struct machine_check_event[MAX_MC_EVT],
- mce_ue_event_queue);
-
-static void machine_check_process_queued_event(struct irq_work *work);
-static void machine_check_ue_irq_work(struct irq_work *work);
static void machine_check_ue_event(struct machine_check_event *evt);
static void machine_process_ue_event(struct work_struct *work);
-static struct irq_work mce_event_process_work = {
- .func = machine_check_process_queued_event,
-};
+static DECLARE_WORK(mce_ue_event_work, machine_process_ue_event);
-static struct irq_work mce_ue_event_irq_work = {
- .func = machine_check_ue_irq_work,
-};
+static BLOCKING_NOTIFIER_HEAD(mce_notifier_list);
+
+int mce_register_notifier(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_register(&mce_notifier_list, nb);
+}
+EXPORT_SYMBOL_GPL(mce_register_notifier);
-DECLARE_WORK(mce_ue_event_work, machine_process_ue_event);
+int mce_unregister_notifier(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_unregister(&mce_notifier_list, nb);
+}
+EXPORT_SYMBOL_GPL(mce_unregister_notifier);
static void mce_set_error_info(struct machine_check_event *mce,
struct mce_error_info *mce_err)
@@ -79,6 +78,13 @@ static void mce_set_error_info(struct machine_check_event *mce,
}
}
+void mce_irq_work_queue(void)
+{
+ /* Raise decrementer interrupt */
+ arch_irq_work_raise();
+ set_mce_pending_irq_work();
+}
+
/*
* Decode and save high level MCE information into per cpu buffer which
* is an array of machine_check_event structure.
@@ -87,9 +93,10 @@ void save_mce_event(struct pt_regs *regs, long handled,
struct mce_error_info *mce_err,
uint64_t nip, uint64_t addr, uint64_t phys_addr)
{
- int index = __this_cpu_inc_return(mce_nest_count) - 1;
- struct machine_check_event *mce = this_cpu_ptr(&mce_event[index]);
+ int index = local_paca->mce_info->mce_nest_count++;
+ struct machine_check_event *mce;
+ mce = &local_paca->mce_info->mce_event[index];
/*
* Return if we don't have enough space to log mce event.
* mce_nest_count may go beyond MAX_MC_EVT but that's ok,
@@ -121,6 +128,8 @@ void save_mce_event(struct pt_regs *regs, long handled,
* Populate the mce error_type and type-specific error_type.
*/
mce_set_error_info(mce, mce_err);
+ if (mce->error_type == MCE_ERROR_TYPE_UE)
+ mce->u.ue_error.ignore_event = mce_err->ignore_event;
if (!addr)
return;
@@ -149,7 +158,6 @@ void save_mce_event(struct pt_regs *regs, long handled,
if (phys_addr != ULONG_MAX) {
mce->u.ue_error.physical_address_provided = true;
mce->u.ue_error.physical_address = phys_addr;
- mce->u.ue_error.ignore_event = mce_err->ignore_event;
machine_check_ue_event(mce);
}
}
@@ -175,7 +183,7 @@ void save_mce_event(struct pt_regs *regs, long handled,
*/
int get_mce_event(struct machine_check_event *mce, bool release)
{
- int index = __this_cpu_read(mce_nest_count) - 1;
+ int index = local_paca->mce_info->mce_nest_count - 1;
struct machine_check_event *mc_evt;
int ret = 0;
@@ -185,7 +193,7 @@ int get_mce_event(struct machine_check_event *mce, bool release)
/* Check if we have MCE info to process. */
if (index < MAX_MC_EVT) {
- mc_evt = this_cpu_ptr(&mce_event[index]);
+ mc_evt = &local_paca->mce_info->mce_event[index];
/* Copy the event structure and release the original */
if (mce)
*mce = *mc_evt;
@@ -195,7 +203,7 @@ int get_mce_event(struct machine_check_event *mce, bool release)
}
/* Decrement the count to free the slot. */
if (release)
- __this_cpu_dec(mce_nest_count);
+ local_paca->mce_info->mce_nest_count--;
return ret;
}
@@ -205,7 +213,7 @@ void release_mce_event(void)
get_mce_event(NULL, true);
}
-static void machine_check_ue_irq_work(struct irq_work *work)
+static void machine_check_ue_work(void)
{
schedule_work(&mce_ue_event_work);
}
@@ -217,16 +225,17 @@ static void machine_check_ue_event(struct machine_check_event *evt)
{
int index;
- index = __this_cpu_inc_return(mce_ue_count) - 1;
+ index = local_paca->mce_info->mce_ue_count++;
/* If queue is full, just return for now. */
if (index >= MAX_MC_EVT) {
- __this_cpu_dec(mce_ue_count);
+ local_paca->mce_info->mce_ue_count--;
return;
}
- memcpy(this_cpu_ptr(&mce_ue_event_queue[index]), evt, sizeof(*evt));
+ memcpy(&local_paca->mce_info->mce_ue_event_queue[index],
+ evt, sizeof(*evt));
/* Queue work to process this event later. */
- irq_work_queue(&mce_ue_event_irq_work);
+ mce_irq_work_queue();
}
/*
@@ -240,17 +249,30 @@ void machine_check_queue_event(void)
if (!get_mce_event(&evt, MCE_EVENT_RELEASE))
return;
- index = __this_cpu_inc_return(mce_queue_count) - 1;
+ index = local_paca->mce_info->mce_queue_count++;
/* If queue is full, just return for now. */
if (index >= MAX_MC_EVT) {
- __this_cpu_dec(mce_queue_count);
+ local_paca->mce_info->mce_queue_count--;
return;
}
- memcpy(this_cpu_ptr(&mce_event_queue[index]), &evt, sizeof(evt));
+ memcpy(&local_paca->mce_info->mce_event_queue[index],
+ &evt, sizeof(evt));
+
+ mce_irq_work_queue();
+}
- /* Queue irq work to process this event later. */
- irq_work_queue(&mce_event_process_work);
+void mce_common_process_ue(struct pt_regs *regs,
+ struct mce_error_info *mce_err)
+{
+ const struct exception_table_entry *entry;
+
+ entry = search_kernel_exception_table(regs->nip);
+ if (entry) {
+ mce_err->ignore_event = true;
+ regs_set_return_ip(regs, extable_fixup(entry));
+ }
}
+
/*
* process pending MCE event from the mce event queue. This function will be
* called during syscall exit.
@@ -260,9 +282,10 @@ static void machine_process_ue_event(struct work_struct *work)
int index;
struct machine_check_event *evt;
- while (__this_cpu_read(mce_ue_count) > 0) {
- index = __this_cpu_read(mce_ue_count) - 1;
- evt = this_cpu_ptr(&mce_ue_event_queue[index]);
+ while (local_paca->mce_info->mce_ue_count > 0) {
+ index = local_paca->mce_info->mce_ue_count - 1;
+ evt = &local_paca->mce_info->mce_ue_event_queue[index];
+ blocking_notifier_call_chain(&mce_notifier_list, 0, evt);
#ifdef CONFIG_MEMORY_FAILURE
/*
 * This should probably be queued elsewhere, but
@@ -274,7 +297,7 @@ static void machine_process_ue_event(struct work_struct *work)
*/
if (evt->error_type == MCE_ERROR_TYPE_UE) {
if (evt->u.ue_error.ignore_event) {
- __this_cpu_dec(mce_ue_count);
+ local_paca->mce_info->mce_ue_count--;
continue;
}
@@ -290,14 +313,14 @@ static void machine_process_ue_event(struct work_struct *work)
"was generated\n");
}
#endif
- __this_cpu_dec(mce_ue_count);
+ local_paca->mce_info->mce_ue_count--;
}
}
/*
* process pending MCE event from the mce event queue. This function will be
* called during syscall exit.
*/
-static void machine_check_process_queued_event(struct irq_work *work)
+static void machine_check_process_queued_event(void)
{
int index;
struct machine_check_event *evt;
@@ -308,17 +331,38 @@ static void machine_check_process_queued_event(struct irq_work *work)
* For now just print it to console.
* TODO: log this error event to FSP or nvram.
*/
- while (__this_cpu_read(mce_queue_count) > 0) {
- index = __this_cpu_read(mce_queue_count) - 1;
- evt = this_cpu_ptr(&mce_event_queue[index]);
+ while (local_paca->mce_info->mce_queue_count > 0) {
+ index = local_paca->mce_info->mce_queue_count - 1;
+ evt = &local_paca->mce_info->mce_event_queue[index];
if (evt->error_type == MCE_ERROR_TYPE_UE &&
evt->u.ue_error.ignore_event) {
- __this_cpu_dec(mce_queue_count);
+ local_paca->mce_info->mce_queue_count--;
continue;
}
machine_check_print_event_info(evt, false, false);
- __this_cpu_dec(mce_queue_count);
+ local_paca->mce_info->mce_queue_count--;
+ }
+}
+
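+/*
+ * Flag in the paca that MCE work is pending; mce_run_irq_context_handlers()
+ * below checks the flag, runs the deferred handlers and clears it again.
+ */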
+void set_mce_pending_irq_work(void)
+{
+ local_paca->mce_pending_irq_work = 1;
+}
+
+void clear_mce_pending_irq_work(void)
+{
+ local_paca->mce_pending_irq_work = 0;
+}
+
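+/*
+ * Run the deferred MCE handlers once we are back in a normal interrupt
+ * context: let the platform log the error if it provides a hook, process the
+ * queued events and UE work, then clear the pending flag.
+ */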
+void mce_run_irq_context_handlers(void)
+{
+ if (unlikely(local_paca->mce_pending_irq_work)) {
+ if (ppc_md.machine_check_log_err)
+ ppc_md.machine_check_log_err();
+ machine_check_process_queued_event();
+ machine_check_ue_work();
+ clear_mce_pending_irq_work();
}
}
@@ -355,18 +399,19 @@ void machine_check_print_event_info(struct machine_check_event *evt,
static const char *mc_user_types[] = {
"Indeterminate",
"tlbie(l) invalid",
+ "scv invalid",
};
static const char *mc_ra_types[] = {
"Indeterminate",
"Instruction fetch (bad)",
- "Instruction fetch (foreign)",
+ "Instruction fetch (foreign/control memory)",
"Page table walk ifetch (bad)",
- "Page table walk ifetch (foreign)",
+ "Page table walk ifetch (foreign/control memory)",
"Load (bad)",
"Store (bad)",
"Page table walk Load/Store (bad)",
- "Page table walk Load/Store (foreign)",
- "Load/Store (foreign)",
+ "Page table walk Load/Store (foreign/control memory)",
+ "Load/Store (foreign/control memory)",
};
static const char *mc_link_types[] = {
"Indeterminate",
@@ -524,7 +569,7 @@ void machine_check_print_event_info(struct machine_check_event *evt,
}
printk("%sMCE: CPU%d: machine check (%s) %s %s %s %s[%s]\n",
- level, evt->cpu, sevstr, in_guest ? "Guest" : "Host",
+ level, evt->cpu, sevstr, in_guest ? "Guest" : "",
err_type, subtype, dar_str,
evt->disposition == MCE_DISPOSITION_RECOVERED ?
"Recovered" : "Not recovered");
@@ -544,9 +589,9 @@ void machine_check_print_event_info(struct machine_check_event *evt,
mc_error_class[evt->error_class] : "Unknown";
printk("%sMCE: CPU%d: %s\n", level, evt->cpu, subtype);
-#ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_PPC_64S_HASH_MMU
/* Display faulty slb contents for SLB errors. */
- if (evt->error_type == MCE_ERROR_TYPE_SLB)
+ if (evt->error_type == MCE_ERROR_TYPE_SLB && !in_guest)
slb_dump_contents(local_paca->mce_faulty_slbs);
#endif
}
@@ -557,7 +602,7 @@ EXPORT_SYMBOL_GPL(machine_check_print_event_info);
*
* regs->nip and regs->msr contains srr0 and ssr1.
*/
-long machine_check_early(struct pt_regs *regs)
+DEFINE_INTERRUPT_HANDLER_NMI(machine_check_early)
{
long handled = 0;
@@ -568,6 +613,7 @@ long machine_check_early(struct pt_regs *regs)
*/
if (ppc_md.machine_check_early)
handled = ppc_md.machine_check_early(regs);
+
return handled;
}
@@ -679,11 +725,11 @@ long hmi_handle_debugtrig(struct pt_regs *regs)
/*
* Return values:
*/
-long hmi_exception_realmode(struct pt_regs *regs)
+DEFINE_INTERRUPT_HANDLER_NMI(hmi_exception_realmode)
{
int ret;
- __this_cpu_inc(irq_stat.hmi_exceptions);
+ local_paca->hmi_irqs++;
ret = hmi_handle_debugtrig(regs);
if (ret >= 0)
@@ -698,3 +744,24 @@ long hmi_exception_realmode(struct pt_regs *regs)
return 1;
}
+
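+/*
+ * Allocate the per-CPU MCE event buffers early in boot and hang them off
+ * each CPU's paca. The allocations are kept below min(bolted size, RMA size)
+ * so they remain accessible from the real-mode machine check path.
+ */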
+void __init mce_init(void)
+{
+ struct mce_info *mce_info;
+ u64 limit;
+ int i;
+
+ limit = min(ppc64_bolted_size(), ppc64_rma_size);
+ for_each_possible_cpu(i) {
+ mce_info = memblock_alloc_try_nid(sizeof(*mce_info),
+ __alignof__(*mce_info),
+ MEMBLOCK_LOW_LIMIT,
+ limit, early_cpu_to_node(i));
+ if (!mce_info)
+ goto err;
+ paca_ptrs[i]->mce_info = mce_info;
+ }
+ return;
+err:
+ panic("Failed to allocate memory for MCE event data\n");
+}
diff --git a/arch/powerpc/kernel/mce_power.c b/arch/powerpc/kernel/mce_power.c
index 1cbf7f1a4e3d..71e8f2a92e36 100644
--- a/arch/powerpc/kernel/mce_power.c
+++ b/arch/powerpc/kernel/mce_power.c
@@ -12,14 +12,15 @@
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/extable.h>
+#include <linux/pgtable.h>
#include <asm/mmu.h>
#include <asm/mce.h>
#include <asm/machdep.h>
-#include <asm/pgtable.h>
#include <asm/pte-walk.h>
#include <asm/sstep.h>
#include <asm/exception-64s.h>
#include <asm/extable.h>
+#include <asm/inst.h>
/*
* Convert an address related to an mm to a PFN. NOTE: we are in real
@@ -27,7 +28,7 @@
*/
unsigned long addr_to_pfn(struct pt_regs *regs, unsigned long addr)
{
- pte_t *ptep;
+ pte_t *ptep, pte;
unsigned int shift;
unsigned long pfn, flags;
struct mm_struct *mm;
@@ -39,42 +40,52 @@ unsigned long addr_to_pfn(struct pt_regs *regs, unsigned long addr)
local_irq_save(flags);
ptep = __find_linux_pte(mm->pgd, addr, NULL, &shift);
+ if (!ptep) {
+ pfn = ULONG_MAX;
+ goto out;
+ }
+ pte = READ_ONCE(*ptep);
- if (!ptep || pte_special(*ptep)) {
+ if (!pte_present(pte) || pte_special(pte)) {
pfn = ULONG_MAX;
goto out;
}
if (shift <= PAGE_SHIFT)
- pfn = pte_pfn(*ptep);
+ pfn = pte_pfn(pte);
else {
unsigned long rpnmask = (1ul << shift) - PAGE_SIZE;
- pfn = pte_pfn(__pte(pte_val(*ptep) | (addr & rpnmask)));
+ pfn = pte_pfn(__pte(pte_val(pte) | (addr & rpnmask)));
}
-
out:
local_irq_restore(flags);
return pfn;
}
-/* flush SLBs and reload */
-#ifdef CONFIG_PPC_BOOK3S_64
-void flush_and_reload_slb(void)
+static bool mce_in_guest(void)
{
- /* Invalidate all SLBs */
- slb_flush_all_realmode();
-
#ifdef CONFIG_KVM_BOOK3S_HANDLER
/*
- * If machine check is hit when in guest or in transition, we will
- * only flush the SLBs and continue.
+ * If machine check is hit when in guest context or low level KVM
+ * code, avoid looking up any translations or making any attempts
+ * to recover, just record the event and pass to KVM.
*/
if (get_paca()->kvm_hstate.in_guest)
- return;
+ return true;
#endif
+ return false;
+}
+
+/* flush SLBs and reload */
+#ifdef CONFIG_PPC_64S_HASH_MMU
+void flush_and_reload_slb(void)
+{
if (early_radix_enabled())
return;
+ /* Invalidate all SLBs */
+ slb_flush_all_realmode();
+
/*
* This probably shouldn't happen, but it may be possible it's
* called in early boot before SLB shadows are allocated.
@@ -86,9 +97,9 @@ void flush_and_reload_slb(void)
}
#endif
-static void flush_erat(void)
+void flush_erat(void)
{
-#ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_PPC_64S_HASH_MMU
if (!early_cpu_has_feature(CPU_FTR_ARCH_300)) {
flush_and_reload_slb();
return;
@@ -103,7 +114,7 @@ static void flush_erat(void)
static int mce_flush(int what)
{
-#ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_PPC_64S_HASH_MMU
if (what == MCE_FLUSH_SLB) {
flush_and_reload_slb();
return 1;
@@ -238,6 +249,45 @@ static const struct mce_ierror_table mce_p9_ierror_table[] = {
MCE_INITIATOR_CPU, MCE_SEV_SEVERE, true },
{ 0, 0, 0, 0, 0, 0, 0 } };
+static const struct mce_ierror_table mce_p10_ierror_table[] = {
+{ 0x00000000081c0000, 0x0000000000040000, true,
+ MCE_ERROR_TYPE_UE, MCE_UE_ERROR_IFETCH, MCE_ECLASS_HARDWARE,
+ MCE_INITIATOR_CPU, MCE_SEV_SEVERE, true },
+{ 0x00000000081c0000, 0x0000000000080000, true,
+ MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_PARITY, MCE_ECLASS_HARD_INDETERMINATE,
+ MCE_INITIATOR_CPU, MCE_SEV_SEVERE, true },
+{ 0x00000000081c0000, 0x00000000000c0000, true,
+ MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_MULTIHIT, MCE_ECLASS_SOFT_INDETERMINATE,
+ MCE_INITIATOR_CPU, MCE_SEV_WARNING, true },
+{ 0x00000000081c0000, 0x0000000000100000, true,
+ MCE_ERROR_TYPE_ERAT, MCE_ERAT_ERROR_MULTIHIT, MCE_ECLASS_SOFT_INDETERMINATE,
+ MCE_INITIATOR_CPU, MCE_SEV_WARNING, true },
+{ 0x00000000081c0000, 0x0000000000140000, true,
+ MCE_ERROR_TYPE_TLB, MCE_TLB_ERROR_MULTIHIT, MCE_ECLASS_SOFT_INDETERMINATE,
+ MCE_INITIATOR_CPU, MCE_SEV_WARNING, true },
+{ 0x00000000081c0000, 0x0000000000180000, true,
+ MCE_ERROR_TYPE_UE, MCE_UE_ERROR_PAGE_TABLE_WALK_IFETCH, MCE_ECLASS_HARDWARE,
+ MCE_INITIATOR_CPU, MCE_SEV_SEVERE, true },
+{ 0x00000000081c0000, 0x00000000001c0000, true,
+ MCE_ERROR_TYPE_RA, MCE_RA_ERROR_IFETCH_FOREIGN, MCE_ECLASS_SOFTWARE,
+ MCE_INITIATOR_CPU, MCE_SEV_SEVERE, true },
+{ 0x00000000081c0000, 0x0000000008080000, true,
+ MCE_ERROR_TYPE_USER, MCE_USER_ERROR_SCV, MCE_ECLASS_SOFTWARE,
+ MCE_INITIATOR_CPU, MCE_SEV_WARNING, true },
+{ 0x00000000081c0000, 0x00000000080c0000, true,
+ MCE_ERROR_TYPE_RA, MCE_RA_ERROR_IFETCH, MCE_ECLASS_SOFTWARE,
+ MCE_INITIATOR_CPU, MCE_SEV_SEVERE, true },
+{ 0x00000000081c0000, 0x0000000008100000, true,
+ MCE_ERROR_TYPE_RA, MCE_RA_ERROR_PAGE_TABLE_WALK_IFETCH, MCE_ECLASS_SOFTWARE,
+ MCE_INITIATOR_CPU, MCE_SEV_SEVERE, true },
+{ 0x00000000081c0000, 0x0000000008140000, false,
+ MCE_ERROR_TYPE_RA, MCE_RA_ERROR_STORE, MCE_ECLASS_HARDWARE,
+ MCE_INITIATOR_CPU, MCE_SEV_FATAL, false }, /* ASYNC is fatal */
+{ 0x00000000081c0000, 0x00000000081c0000, true,
+ MCE_ERROR_TYPE_RA, MCE_RA_ERROR_PAGE_TABLE_WALK_IFETCH_FOREIGN, MCE_ECLASS_HARDWARE,
+ MCE_INITIATOR_CPU, MCE_SEV_SEVERE, true },
+{ 0, 0, 0, 0, 0, 0, 0 } };
+
struct mce_derror_table {
unsigned long dsisr_value;
bool dar_valid; /* dar is a valid indicator of faulting address */
@@ -356,6 +406,46 @@ static const struct mce_derror_table mce_p9_derror_table[] = {
MCE_INITIATOR_CPU, MCE_SEV_SEVERE, true },
{ 0, false, 0, 0, 0, 0, 0 } };
+static const struct mce_derror_table mce_p10_derror_table[] = {
+{ 0x00008000, false,
+ MCE_ERROR_TYPE_UE, MCE_UE_ERROR_LOAD_STORE, MCE_ECLASS_HARDWARE,
+ MCE_INITIATOR_CPU, MCE_SEV_SEVERE, true },
+{ 0x00004000, true,
+ MCE_ERROR_TYPE_UE, MCE_UE_ERROR_PAGE_TABLE_WALK_LOAD_STORE,
+ MCE_ECLASS_HARDWARE,
+ MCE_INITIATOR_CPU, MCE_SEV_SEVERE, true },
+{ 0x00000800, true,
+ MCE_ERROR_TYPE_ERAT, MCE_ERAT_ERROR_MULTIHIT, MCE_ECLASS_SOFT_INDETERMINATE,
+ MCE_INITIATOR_CPU, MCE_SEV_WARNING, true },
+{ 0x00000400, true,
+ MCE_ERROR_TYPE_TLB, MCE_TLB_ERROR_MULTIHIT, MCE_ECLASS_SOFT_INDETERMINATE,
+ MCE_INITIATOR_CPU, MCE_SEV_WARNING, true },
+{ 0x00000200, false,
+ MCE_ERROR_TYPE_USER, MCE_USER_ERROR_TLBIE, MCE_ECLASS_SOFTWARE,
+ MCE_INITIATOR_CPU, MCE_SEV_WARNING, true },
+{ 0x00000080, true,
+ MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_MULTIHIT, /* Before PARITY */
+ MCE_ECLASS_SOFT_INDETERMINATE,
+ MCE_INITIATOR_CPU, MCE_SEV_WARNING, true },
+{ 0x00000100, true,
+ MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_PARITY, MCE_ECLASS_HARD_INDETERMINATE,
+ MCE_INITIATOR_CPU, MCE_SEV_SEVERE, true },
+{ 0x00000040, true,
+ MCE_ERROR_TYPE_RA, MCE_RA_ERROR_LOAD, MCE_ECLASS_HARDWARE,
+ MCE_INITIATOR_CPU, MCE_SEV_SEVERE, true },
+{ 0x00000020, false,
+ MCE_ERROR_TYPE_RA, MCE_RA_ERROR_PAGE_TABLE_WALK_LOAD_STORE,
+ MCE_ECLASS_HARDWARE,
+ MCE_INITIATOR_CPU, MCE_SEV_SEVERE, true },
+{ 0x00000010, false,
+ MCE_ERROR_TYPE_RA, MCE_RA_ERROR_PAGE_TABLE_WALK_LOAD_STORE_FOREIGN,
+ MCE_ECLASS_HARDWARE,
+ MCE_INITIATOR_CPU, MCE_SEV_SEVERE, true },
+{ 0x00000008, false,
+ MCE_ERROR_TYPE_RA, MCE_RA_ERROR_LOAD_STORE_FOREIGN, MCE_ECLASS_HARDWARE,
+ MCE_INITIATOR_CPU, MCE_SEV_SEVERE, true },
+{ 0, false, 0, 0, 0, 0, 0 } };
+
static int mce_find_instr_ea_and_phys(struct pt_regs *regs, uint64_t *addr,
uint64_t *phys_addr)
{
@@ -365,7 +455,7 @@ static int mce_find_instr_ea_and_phys(struct pt_regs *regs, uint64_t *addr,
* in real-mode is tricky and can lead to recursive
* faults
*/
- int instr;
+ ppc_inst_t instr;
unsigned long pfn, instr_addr;
struct instruction_op op;
struct pt_regs tmp = *regs;
@@ -373,7 +463,7 @@ static int mce_find_instr_ea_and_phys(struct pt_regs *regs, uint64_t *addr,
pfn = addr_to_pfn(regs, regs->nip);
if (pfn != ULONG_MAX) {
instr_addr = (pfn << PAGE_SHIFT) + (regs->nip & ~PAGE_MASK);
- instr = *(unsigned int *)(instr_addr);
+ instr = ppc_inst_read((u32 *)instr_addr);
if (!analyse_instr(&op, &tmp, instr)) {
pfn = addr_to_pfn(regs, op.ea);
*addr = op.ea;
@@ -391,12 +481,11 @@ static int mce_find_instr_ea_and_phys(struct pt_regs *regs, uint64_t *addr,
return -1;
}
-static int mce_handle_ierror(struct pt_regs *regs,
+static int mce_handle_ierror(struct pt_regs *regs, unsigned long srr1,
const struct mce_ierror_table table[],
struct mce_error_info *mce_err, uint64_t *addr,
uint64_t *phys_addr)
{
- uint64_t srr1 = regs->msr;
int handled = 0;
int i;
@@ -406,19 +495,23 @@ static int mce_handle_ierror(struct pt_regs *regs,
if ((srr1 & table[i].srr1_mask) != table[i].srr1_value)
continue;
- /* attempt to correct the error */
- switch (table[i].error_type) {
- case MCE_ERROR_TYPE_SLB:
- if (local_paca->in_mce == 1)
- slb_save_contents(local_paca->mce_faulty_slbs);
- handled = mce_flush(MCE_FLUSH_SLB);
- break;
- case MCE_ERROR_TYPE_ERAT:
- handled = mce_flush(MCE_FLUSH_ERAT);
- break;
- case MCE_ERROR_TYPE_TLB:
- handled = mce_flush(MCE_FLUSH_TLB);
- break;
+ if (!mce_in_guest()) {
+ /* attempt to correct the error */
+ switch (table[i].error_type) {
+ case MCE_ERROR_TYPE_SLB:
+#ifdef CONFIG_PPC_64S_HASH_MMU
+ if (local_paca->in_mce == 1)
+ slb_save_contents(local_paca->mce_faulty_slbs);
+#endif
+ handled = mce_flush(MCE_FLUSH_SLB);
+ break;
+ case MCE_ERROR_TYPE_ERAT:
+ handled = mce_flush(MCE_FLUSH_ERAT);
+ break;
+ case MCE_ERROR_TYPE_TLB:
+ handled = mce_flush(MCE_FLUSH_TLB);
+ break;
+ }
}
/* now fill in mce_error_info */
@@ -450,7 +543,7 @@ static int mce_handle_ierror(struct pt_regs *regs,
mce_err->sync_error = table[i].sync_error;
mce_err->severity = table[i].severity;
mce_err->initiator = table[i].initiator;
- if (table[i].nip_valid) {
+ if (table[i].nip_valid && !mce_in_guest()) {
*addr = regs->nip;
if (mce_err->sync_error &&
table[i].error_type == MCE_ERROR_TYPE_UE) {
@@ -493,22 +586,26 @@ static int mce_handle_derror(struct pt_regs *regs,
if (!(dsisr & table[i].dsisr_value))
continue;
- /* attempt to correct the error */
- switch (table[i].error_type) {
- case MCE_ERROR_TYPE_SLB:
- if (local_paca->in_mce == 1)
- slb_save_contents(local_paca->mce_faulty_slbs);
- if (mce_flush(MCE_FLUSH_SLB))
- handled = 1;
- break;
- case MCE_ERROR_TYPE_ERAT:
- if (mce_flush(MCE_FLUSH_ERAT))
- handled = 1;
- break;
- case MCE_ERROR_TYPE_TLB:
- if (mce_flush(MCE_FLUSH_TLB))
- handled = 1;
- break;
+ if (!mce_in_guest()) {
+ /* attempt to correct the error */
+ switch (table[i].error_type) {
+ case MCE_ERROR_TYPE_SLB:
+#ifdef CONFIG_PPC_64S_HASH_MMU
+ if (local_paca->in_mce == 1)
+ slb_save_contents(local_paca->mce_faulty_slbs);
+#endif
+ if (mce_flush(MCE_FLUSH_SLB))
+ handled = 1;
+ break;
+ case MCE_ERROR_TYPE_ERAT:
+ if (mce_flush(MCE_FLUSH_ERAT))
+ handled = 1;
+ break;
+ case MCE_ERROR_TYPE_TLB:
+ if (mce_flush(MCE_FLUSH_TLB))
+ handled = 1;
+ break;
+ }
}
/*
@@ -550,7 +647,7 @@ static int mce_handle_derror(struct pt_regs *regs,
mce_err->initiator = table[i].initiator;
if (table[i].dar_valid)
*addr = regs->dar;
- else if (mce_err->sync_error &&
+ else if (mce_err->sync_error && !mce_in_guest() &&
table[i].error_type == MCE_ERROR_TYPE_UE) {
/*
* We do a maximum of 4 nested MCE calls, see
@@ -578,15 +675,12 @@ static int mce_handle_derror(struct pt_regs *regs,
static long mce_handle_ue_error(struct pt_regs *regs,
struct mce_error_info *mce_err)
{
- long handled = 0;
- const struct exception_table_entry *entry;
+ if (mce_in_guest())
+ return 0;
- entry = search_kernel_exception_table(regs->nip);
- if (entry) {
- mce_err->ignore_event = true;
- regs->nip = extable_fixup(entry);
+ mce_common_process_ue(regs, mce_err);
+ if (mce_err->ignore_event)
return 1;
- }
/*
* On specific SCOM read via MMIO we may get a machine check
@@ -597,25 +691,26 @@ static long mce_handle_ue_error(struct pt_regs *regs,
if (ppc_md.mce_check_early_recovery) {
if (ppc_md.mce_check_early_recovery(regs))
- handled = 1;
+ return 1;
}
- return handled;
+
+ return 0;
}
static long mce_handle_error(struct pt_regs *regs,
+ unsigned long srr1,
const struct mce_derror_table dtable[],
const struct mce_ierror_table itable[])
{
struct mce_error_info mce_err = { 0 };
uint64_t addr, phys_addr = ULONG_MAX;
- uint64_t srr1 = regs->msr;
long handled;
if (SRR1_MC_LOADSTORE(srr1))
handled = mce_handle_derror(regs, dtable, &mce_err, &addr,
&phys_addr);
else
- handled = mce_handle_ierror(regs, itable, &mce_err, &addr,
+ handled = mce_handle_ierror(regs, srr1, itable, &mce_err, &addr,
&phys_addr);
if (!handled && mce_err.error_type == MCE_ERROR_TYPE_UE)
@@ -631,16 +726,20 @@ long __machine_check_early_realmode_p7(struct pt_regs *regs)
/* P7 DD1 leaves top bits of DSISR undefined */
regs->dsisr &= 0x0000ffff;
- return mce_handle_error(regs, mce_p7_derror_table, mce_p7_ierror_table);
+ return mce_handle_error(regs, regs->msr,
+ mce_p7_derror_table, mce_p7_ierror_table);
}
long __machine_check_early_realmode_p8(struct pt_regs *regs)
{
- return mce_handle_error(regs, mce_p8_derror_table, mce_p8_ierror_table);
+ return mce_handle_error(regs, regs->msr,
+ mce_p8_derror_table, mce_p8_ierror_table);
}
long __machine_check_early_realmode_p9(struct pt_regs *regs)
{
+ unsigned long srr1 = regs->msr;
+
/*
* On POWER9 DD2.1 and below, it's possible to get a machine check
* caused by a paste instruction where only DSISR bit 25 is set. This
@@ -654,5 +753,39 @@ long __machine_check_early_realmode_p9(struct pt_regs *regs)
if (SRR1_MC_LOADSTORE(regs->msr) && regs->dsisr == 0x02000000)
return 1;
- return mce_handle_error(regs, mce_p9_derror_table, mce_p9_ierror_table);
+ /*
+ * Async machine check due to bad real address from store or foreign
+ * link time out comes with the load/store bit (PPC bit 42) set in
+ * SRR1, but the cause comes in SRR1 not DSISR. Clear bit 42 so we're
+ * directed to the ierror table so it will find the cause (which
+ * describes it correctly as a store error).
+ */
+ if (SRR1_MC_LOADSTORE(srr1) &&
+ ((srr1 & 0x081c0000) == 0x08140000 ||
+ (srr1 & 0x081c0000) == 0x08180000)) {
+ srr1 &= ~PPC_BIT(42);
+ }
+
+ return mce_handle_error(regs, srr1,
+ mce_p9_derror_table, mce_p9_ierror_table);
+}
+
+long __machine_check_early_realmode_p10(struct pt_regs *regs)
+{
+ unsigned long srr1 = regs->msr;
+
+ /*
+ * Async machine check due to bad real address from store comes with
+ * the load/store bit (PPC bit 42) set in SRR1, but the cause comes in
+ * SRR1 not DSISR. Clear bit 42 so we're directed to the ierror table
+ * so it will find the cause (which describes it correctly as a store
+ * error).
+ */
+ if (SRR1_MC_LOADSTORE(srr1) &&
+ (srr1 & 0x081c0000) == 0x08140000) {
+ srr1 &= ~PPC_BIT(42);
+ }
+
+ return mce_handle_error(regs, srr1,
+ mce_p10_derror_table, mce_p10_ierror_table);
}
diff --git a/arch/powerpc/kernel/misc.S b/arch/powerpc/kernel/misc.S
index 974f65f79a8e..fb7de3543c03 100644
--- a/arch/powerpc/kernel/misc.S
+++ b/arch/powerpc/kernel/misc.S
@@ -29,13 +29,15 @@ _GLOBAL(reloc_offset)
li r3, 0
_GLOBAL(add_reloc_offset)
mflr r0
- bl 1f
+ bcl 20,31,$+4
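+ /* bcl 20,31,$+4 sets LR like bl, but the return-address predictor does
+ * not treat it as a call, so the link stack stays balanced. */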
1: mflr r5
PPC_LL r4,(2f-1b)(r5)
subf r5,r4,r5
add r3,r3,r5
mtlr r0
blr
+_ASM_NOKPROBE_SYMBOL(reloc_offset)
+_ASM_NOKPROBE_SYMBOL(add_reloc_offset)
.align 3
2: PPC_LONG 1b
@@ -110,7 +112,7 @@ _GLOBAL(longjmp)
li r3, 1
blr
-_GLOBAL(current_stack_pointer)
+_GLOBAL(current_stack_frame)
PPC_LL r3,0(r1)
blr
-EXPORT_SYMBOL(current_stack_pointer)
+EXPORT_SYMBOL(current_stack_frame)
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index d80212be8698..e5127b19fec2 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -28,45 +28,6 @@
.text
/*
- * We store the saved ksp_limit in the unused part
- * of the STACK_FRAME_OVERHEAD
- */
-_GLOBAL(call_do_softirq)
- mflr r0
- stw r0,4(r1)
- lwz r10,THREAD+KSP_LIMIT(r2)
- stw r3, THREAD+KSP_LIMIT(r2)
- stwu r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r3)
- mr r1,r3
- stw r10,8(r1)
- bl __do_softirq
- lwz r10,8(r1)
- lwz r1,0(r1)
- lwz r0,4(r1)
- stw r10,THREAD+KSP_LIMIT(r2)
- mtlr r0
- blr
-
-/*
- * void call_do_irq(struct pt_regs *regs, void *sp);
- */
-_GLOBAL(call_do_irq)
- mflr r0
- stw r0,4(r1)
- lwz r10,THREAD+KSP_LIMIT(r2)
- stw r4, THREAD+KSP_LIMIT(r2)
- stwu r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r4)
- mr r1,r4
- stw r10,8(r1)
- bl __do_irq
- lwz r10,8(r1)
- lwz r1,0(r1)
- lwz r0,4(r1)
- stw r10,THREAD+KSP_LIMIT(r2)
- mtlr r0
- blr
-
-/*
* This returns the high 64 bits of the product of two 64-bit numbers.
*/
_GLOBAL(mulhdu)
@@ -106,7 +67,7 @@ _GLOBAL(reloc_got2)
srwi. r8,r8,2
beqlr
mtctr r8
- bl 1f
+ bcl 20,31,$+4
1: mflr r0
lis r4,1b@ha
addi r4,r4,1b@l
@@ -215,19 +176,6 @@ _GLOBAL(low_choose_7447a_dfs)
#endif /* CONFIG_CPU_FREQ_PMAC && CONFIG_PPC_BOOK3S_32 */
-/*
- * complement mask on the msr then "or" some values on.
- * _nmask_and_or_msr(nmask, value_to_or)
- */
-_GLOBAL(_nmask_and_or_msr)
- mfmsr r0 /* Get current msr */
- andc r0,r0,r3 /* And off the bits set in r3 (first parm) */
- or r0,r0,r4 /* Or on the bits in r4 (second parm) */
- SYNC /* Some chip revs have problems here... */
- mtmsr r0 /* Update machine state */
- isync
- blr /* Done */
-
#ifdef CONFIG_40x
/*
@@ -246,6 +194,7 @@ _GLOBAL(real_readb)
sync
isync
blr
+_ASM_NOKPROBE_SYMBOL(real_readb)
/*
* Do an IO access in real mode
@@ -263,53 +212,10 @@ _GLOBAL(real_writeb)
sync
isync
blr
+_ASM_NOKPROBE_SYMBOL(real_writeb)
#endif /* CONFIG_40x */
-
-/*
- * Flush instruction cache.
- * This is a no-op on the 601.
- */
-#ifndef CONFIG_PPC_8xx
-_GLOBAL(flush_instruction_cache)
-#if defined(CONFIG_4xx)
-#ifdef CONFIG_403GCX
- li r3, 512
- mtctr r3
- lis r4, KERNELBASE@h
-1: iccci 0, r4
- addi r4, r4, 16
- bdnz 1b
-#else
- lis r3, KERNELBASE@h
- iccci 0,r3
-#endif
-#elif defined(CONFIG_FSL_BOOKE)
-#ifdef CONFIG_E200
- mfspr r3,SPRN_L1CSR0
- ori r3,r3,L1CSR0_CFI|L1CSR0_CLFC
- /* msync; isync recommended here */
- mtspr SPRN_L1CSR0,r3
- isync
- blr
-#endif
- mfspr r3,SPRN_L1CSR1
- ori r3,r3,L1CSR1_ICFI|L1CSR1_ICLFR
- mtspr SPRN_L1CSR1,r3
-#elif defined(CONFIG_PPC_BOOK3S_601)
- blr /* for 601, do nothing */
-#else
- /* 603/604 processor - use invalidate-all bit in HID0 */
- mfspr r3,SPRN_HID0
- ori r3,r3,HID0_ICFI
- mtspr SPRN_HID0,r3
-#endif /* CONFIG_4xx */
- isync
- blr
-EXPORT_SYMBOL(flush_instruction_cache)
-#endif /* CONFIG_PPC_8xx */
-
/*
* Copy a whole page. We use the dcbz instruction on the destination
* to reduce memory traffic (it eliminates the unnecessary reads of
@@ -331,7 +237,7 @@ _GLOBAL(copy_page)
addi r3,r3,-4
0: twnei r5, 0 /* WARN if r3 is not cache aligned */
- EMIT_BUG_ENTRY 0b,__FILE__,__LINE__, BUGFLAG_WARNING
+ EMIT_WARN_ENTRY 0b,__FILE__,__LINE__, BUGFLAG_WARNING
addi r4,r4,-4
@@ -482,9 +388,3 @@ _GLOBAL(start_secondary_resume)
bl start_secondary
b .
#endif /* CONFIG_SMP */
-
-/*
- * This routine is just here to keep GCC happy - sigh...
- */
-_GLOBAL(__main)
- blr
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index 1864605eca29..36184cada00b 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -27,28 +27,6 @@
.text
-_GLOBAL(call_do_softirq)
- mflr r0
- std r0,16(r1)
- stdu r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r3)
- mr r1,r3
- bl __do_softirq
- ld r1,0(r1)
- ld r0,16(r1)
- mtlr r0
- blr
-
-_GLOBAL(call_do_irq)
- mflr r0
- std r0,16(r1)
- stdu r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r4)
- mr r1,r4
- bl __do_irq
- ld r1,0(r1)
- ld r0,16(r1)
- mtlr r0
- blr
-
_GLOBAL(__bswapdi2)
EXPORT_SYMBOL(__bswapdi2)
srdi r8,r3,32
@@ -277,7 +255,7 @@ _GLOBAL(scom970_write)
* Physical (hardware) cpu id should be in r3.
*/
_GLOBAL(kexec_wait)
- bl 1f
+ bcl 20,31,$+4
1: mflr r5
addi r5,r5,kexec_flag-1b
@@ -308,7 +286,7 @@ kexec_flag:
#ifdef CONFIG_KEXEC_CORE
-#ifdef CONFIG_PPC_BOOK3E
+#ifdef CONFIG_PPC_BOOK3E_64
/*
* BOOK3E has no real MMU mode, so we have to setup the initial TLB
* for a core to identity map v:0 to p:0. This current implementation
@@ -365,7 +343,6 @@ _GLOBAL(kexec_smp_wait)
li r4,KEXEC_STATE_REAL_MODE
stb r4,PACAKEXECSTATE(r13)
- SYNC
b kexec_wait
@@ -377,7 +354,7 @@ _GLOBAL(kexec_smp_wait)
* don't overwrite r3 here, it is live for kexec_wait above.
*/
real_mode: /* assume normal blr return */
-#ifdef CONFIG_PPC_BOOK3E
+#ifdef CONFIG_PPC_BOOK3E_64
/* Create an identity mapping. */
b kexec_create_tlb
#else
@@ -413,20 +390,6 @@ _GLOBAL(kexec_sequence)
li r0,0
std r0,16(r1)
-BEGIN_FTR_SECTION
- /*
- * This is the best time to turn AMR/IAMR off.
- * key 0 is used in radix for supervisor<->user
- * protection, but on hash key 0 is reserved
- * ideally we want to enter with a clean state.
- * NOTE, we rely on r0 being 0 from above.
- */
- mtspr SPRN_IAMR,r0
-BEGIN_FTR_SECTION_NESTED(42)
- mtspr SPRN_AMOR,r0
-END_FTR_SECTION_NESTED_IFSET(CPU_FTR_HVMODE, 42)
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
-
/* save regs for local vars on new stack.
* yes, we won't go back, but ...
*/
@@ -450,7 +413,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
lhz r25,PACAHWCPUID(r13) /* get our phys cpu from paca */
/* disable interrupts, we are overwriting kernel data next */
-#ifdef CONFIG_PPC_BOOK3E
+#ifdef CONFIG_PPC_BOOK3E_64
wrteei 0
#else
mfmsr r3
@@ -491,7 +454,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
beq 1f
/* clear out hardware hash page table and tlb */
-#ifdef PPC64_ELF_ABI_v1
+#ifdef CONFIG_PPC64_ELF_ABI_V1
ld r12,0(r27) /* deref function descriptor */
#else
mr r12,r27
diff --git a/arch/powerpc/kernel/module.c b/arch/powerpc/kernel/module.c
index df649acb5631..f6d6ae0a1692 100644
--- a/arch/powerpc/kernel/module.c
+++ b/arch/powerpc/kernel/module.c
@@ -8,12 +8,14 @@
#include <linux/moduleloader.h>
#include <linux/err.h>
#include <linux/vmalloc.h>
+#include <linux/mm.h>
#include <linux/bug.h>
#include <asm/module.h>
#include <linux/uaccess.h>
#include <asm/firmware.h>
#include <linux/sort.h>
#include <asm/setup.h>
+#include <asm/sections.h>
static LIST_HEAD(module_bug_list);
@@ -62,13 +64,13 @@ int module_finalize(const Elf_Ehdr *hdr,
(void *)sect->sh_addr + sect->sh_size);
#endif /* CONFIG_PPC64 */
-#ifdef PPC64_ELF_ABI_v1
+#ifdef CONFIG_PPC64_ELF_ABI_V1
sect = find_section(hdr, sechdrs, ".opd");
if (sect != NULL) {
me->arch.start_opd = sect->sh_addr;
me->arch.end_opd = sect->sh_addr + sect->sh_size;
}
-#endif /* PPC64_ELF_ABI_v1 */
+#endif /* CONFIG_PPC64_ELF_ABI_V1 */
#ifdef CONFIG_PPC_BARRIER_NOSPEC
sect = find_section(hdr, sechdrs, "__spec_barrier_fixup");
@@ -86,3 +88,40 @@ int module_finalize(const Elf_Ehdr *hdr,
return 0;
}
+
+static __always_inline void *
+__module_alloc(unsigned long size, unsigned long start, unsigned long end, bool nowarn)
+{
+ pgprot_t prot = strict_module_rwx_enabled() ? PAGE_KERNEL : PAGE_KERNEL_EXEC;
+ gfp_t gfp = GFP_KERNEL | (nowarn ? __GFP_NOWARN : 0);
+
+ /*
+ * Don't do huge page allocations for modules yet until more testing
+ * is done. STRICT_MODULE_RWX may require extra work to support this
+ * too.
+ */
+ return __vmalloc_node_range(size, 1, start, end, gfp, prot,
+ VM_FLUSH_RESET_PERMS,
+ NUMA_NO_NODE, __builtin_return_address(0));
+}
+
+void *module_alloc(unsigned long size)
+{
+#ifdef MODULES_VADDR
+ unsigned long limit = (unsigned long)_etext - SZ_32M;
+ void *ptr = NULL;
+
+ BUILD_BUG_ON(TASK_SIZE > MODULES_VADDR);
+
+ /* First try within 32M limit from _etext to avoid branch trampolines */
+ if (MODULES_VADDR < PAGE_OFFSET && MODULES_END > limit)
+ ptr = __module_alloc(size, limit, MODULES_END, true);
+
+ if (!ptr)
+ ptr = __module_alloc(size, MODULES_VADDR, MODULES_END, false);
+
+ return ptr;
+#else
+ return __module_alloc(size, VMALLOC_START, VMALLOC_END, false);
+#endif
+}
diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
index d7134c614c16..ea6536171778 100644
--- a/arch/powerpc/kernel/module_32.c
+++ b/arch/powerpc/kernel/module_32.c
@@ -18,6 +18,7 @@
#include <linux/bug.h>
#include <linux/sort.h>
#include <asm/setup.h>
+#include <asm/code-patching.h>
/* Count how many different relocations (different symbol, different
addend) */
@@ -67,21 +68,6 @@ static int relacmp(const void *_x, const void *_y)
return 0;
}
-static void relaswap(void *_x, void *_y, int size)
-{
- uint32_t *x, *y, tmp;
- int i;
-
- y = (uint32_t *)_x;
- x = (uint32_t *)_y;
-
- for (i = 0; i < sizeof(Elf32_Rela) / sizeof(uint32_t); i++) {
- tmp = x[i];
- x[i] = y[i];
- y[i] = tmp;
- }
-}
-
/* Get the potential trampolines size required of the init and
non-init sections */
static unsigned long get_plt_size(const Elf32_Ehdr *hdr,
@@ -113,12 +99,12 @@ static unsigned long get_plt_size(const Elf32_Ehdr *hdr,
/* Sort the relocation information based on a symbol and
* addend key. This is a stable O(n*log n) complexity
- * alogrithm but it will reduce the complexity of
+ * algorithm but it will reduce the complexity of
* count_relocs() to linear complexity O(n)
*/
sort((void *)hdr + sechdrs[i].sh_offset,
sechdrs[i].sh_size / sizeof(Elf32_Rela),
- sizeof(Elf32_Rela), relacmp, relaswap);
+ sizeof(Elf32_Rela), relacmp, NULL);
ret += count_relocs((void *)hdr
+ sechdrs[i].sh_offset,
@@ -160,10 +146,9 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
static inline int entry_matches(struct ppc_plt_entry *entry, Elf32_Addr val)
{
- if (entry->jump[0] != (PPC_INST_ADDIS | __PPC_RT(R12) | PPC_HA(val)))
+ if (entry->jump[0] != PPC_RAW_LIS(_R12, PPC_HA(val)))
return 0;
- if (entry->jump[1] != (PPC_INST_ADDI | __PPC_RT(R12) | __PPC_RA(R12) |
- PPC_LO(val)))
+ if (entry->jump[1] != PPC_RAW_ADDI(_R12, _R12, PPC_LO(val)))
return 0;
return 1;
}
@@ -190,21 +175,25 @@ static uint32_t do_plt_call(void *location,
entry++;
}
- /*
- * lis r12, sym@ha
- * addi r12, r12, sym@l
- * mtctr r12
- * bctr
- */
- entry->jump[0] = PPC_INST_ADDIS | __PPC_RT(R12) | PPC_HA(val);
- entry->jump[1] = PPC_INST_ADDI | __PPC_RT(R12) | __PPC_RA(R12) | PPC_LO(val);
- entry->jump[2] = PPC_INST_MTCTR | __PPC_RS(R12);
- entry->jump[3] = PPC_INST_BCTR;
+ if (patch_instruction(&entry->jump[0], ppc_inst(PPC_RAW_LIS(_R12, PPC_HA(val)))))
+ return 0;
+ if (patch_instruction(&entry->jump[1], ppc_inst(PPC_RAW_ADDI(_R12, _R12, PPC_LO(val)))))
+ return 0;
+ if (patch_instruction(&entry->jump[2], ppc_inst(PPC_RAW_MTCTR(_R12))))
+ return 0;
+ if (patch_instruction(&entry->jump[3], ppc_inst(PPC_RAW_BCTR())))
+ return 0;
pr_debug("Initialized plt for 0x%x at %p\n", val, entry);
return (uint32_t)entry;
}
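+/*
+ * R_PPC_ADDR16_* relocations target a 16-bit halfword: round the pointer
+ * down to the containing instruction word and rewrite its low 16 bits via
+ * patch_instruction() so the update goes through the text-patching path.
+ */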
+static int patch_location_16(uint32_t *loc, u16 value)
+{
+ loc = PTR_ALIGN_DOWN(loc, sizeof(u32));
+ return patch_instruction(loc, ppc_inst((*loc & 0xffff0000) | value));
+}
+
int apply_relocate_add(Elf32_Shdr *sechdrs,
const char *strtab,
unsigned int symindex,
@@ -238,44 +227,46 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
case R_PPC_ADDR16_LO:
/* Low half of the symbol */
- *(uint16_t *)location = value;
+ if (patch_location_16(location, PPC_LO(value)))
+ return -EFAULT;
break;
case R_PPC_ADDR16_HI:
/* Higher half of the symbol */
- *(uint16_t *)location = (value >> 16);
+ if (patch_location_16(location, PPC_HI(value)))
+ return -EFAULT;
break;
case R_PPC_ADDR16_HA:
- /* Sign-adjusted lower 16 bits: PPC ELF ABI says:
- (((x >> 16) + ((x & 0x8000) ? 1 : 0))) & 0xFFFF.
- This is the same, only sane.
- */
- *(uint16_t *)location = (value + 0x8000) >> 16;
+ if (patch_location_16(location, PPC_HA(value)))
+ return -EFAULT;
break;
case R_PPC_REL24:
if ((int)(value - (uint32_t)location) < -0x02000000
- || (int)(value - (uint32_t)location) >= 0x02000000)
+ || (int)(value - (uint32_t)location) >= 0x02000000) {
value = do_plt_call(location, value,
sechdrs, module);
+ if (!value)
+ return -EFAULT;
+ }
/* Only replace bits 2 through 26 */
pr_debug("REL24 value = %08X. location = %08X\n",
value, (uint32_t)location);
pr_debug("Location before: %08X.\n",
*(uint32_t *)location);
- *(uint32_t *)location
- = (*(uint32_t *)location & ~0x03fffffc)
- | ((value - (uint32_t)location)
- & 0x03fffffc);
+ value = (*(uint32_t *)location & ~PPC_LI_MASK) |
+ PPC_LI(value - (uint32_t)location);
+
+ if (patch_instruction(location, ppc_inst(value)))
+ return -EFAULT;
+
pr_debug("Location after: %08X.\n",
*(uint32_t *)location);
pr_debug("ie. jump to %08X+%08X = %08X\n",
- *(uint32_t *)location & 0x03fffffc,
- (uint32_t)location,
- (*(uint32_t *)location & 0x03fffffc)
- + (uint32_t)location);
+ *(uint32_t *)location & PPC_LI_MASK, (uint32_t)location,
+ (*(uint32_t *)location & PPC_LI_MASK) + (uint32_t)location);
break;
case R_PPC_REL32:
@@ -295,6 +286,40 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
}
#ifdef CONFIG_DYNAMIC_FTRACE
+notrace int module_trampoline_target(struct module *mod, unsigned long addr,
+ unsigned long *target)
+{
+ ppc_inst_t jmp[4];
+
+ /* Find where the trampoline jumps to */
+ if (copy_inst_from_kernel_nofault(jmp, (void *)addr))
+ return -EFAULT;
+ if (__copy_inst_from_kernel_nofault(jmp + 1, (void *)addr + 4))
+ return -EFAULT;
+ if (__copy_inst_from_kernel_nofault(jmp + 2, (void *)addr + 8))
+ return -EFAULT;
+ if (__copy_inst_from_kernel_nofault(jmp + 3, (void *)addr + 12))
+ return -EFAULT;
+
+ /* verify that this is what we expect it to be */
+ if ((ppc_inst_val(jmp[0]) & 0xffff0000) != PPC_RAW_LIS(_R12, 0))
+ return -EINVAL;
+ if ((ppc_inst_val(jmp[1]) & 0xffff0000) != PPC_RAW_ADDI(_R12, _R12, 0))
+ return -EINVAL;
+ if (ppc_inst_val(jmp[2]) != PPC_RAW_MTCTR(_R12))
+ return -EINVAL;
+ if (ppc_inst_val(jmp[3]) != PPC_RAW_BCTR())
+ return -EINVAL;
+
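+ /* The stub loads the target with lis (high half) and addi (low half);
+ * addi sign-extends its immediate, so undo the @ha rounding when bit 15
+ * of the low half is set. */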
+ addr = (ppc_inst_val(jmp[1]) & 0xffff) | ((ppc_inst_val(jmp[0]) & 0xffff) << 16);
+ if (addr & 0x8000)
+ addr -= 0x10000;
+
+ *target = addr;
+
+ return 0;
+}
+
int module_finalize_ftrace(struct module *module, const Elf_Shdr *sechdrs)
{
module->arch.tramp = do_plt_call(module->core_layout.base,
@@ -303,6 +328,14 @@ int module_finalize_ftrace(struct module *module, const Elf_Shdr *sechdrs)
if (!module->arch.tramp)
return -ENOENT;
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+ module->arch.tramp_regs = do_plt_call(module->core_layout.base,
+ (unsigned long)ftrace_regs_caller,
+ sechdrs, module);
+ if (!module->arch.tramp_regs)
+ return -ENOENT;
+#endif
+
return 0;
}
#endif
diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c
index 007606a48fd9..7e45dc98df8a 100644
--- a/arch/powerpc/kernel/module_64.c
+++ b/arch/powerpc/kernel/module_64.c
@@ -14,12 +14,14 @@
#include <linux/ftrace.h>
#include <linux/bug.h>
#include <linux/uaccess.h>
+#include <linux/kernel.h>
#include <asm/module.h>
#include <asm/firmware.h>
#include <asm/code-patching.h>
#include <linux/sort.h>
#include <asm/setup.h>
#include <asm/sections.h>
+#include <asm/inst.h>
/* FIXME: We don't do .init separately. To do this, we'd need to have
a separate r2 value in the init and core section, and stub between
@@ -29,22 +31,15 @@
this, and makes other things simpler. Anton?
--RR. */
-#ifdef PPC64_ELF_ABI_v2
-
-/* An address is simply the address of the function. */
-typedef unsigned long func_desc_t;
+#ifdef CONFIG_PPC64_ELF_ABI_V2
static func_desc_t func_desc(unsigned long addr)
{
- return addr;
-}
-static unsigned long func_addr(unsigned long addr)
-{
- return addr;
-}
-static unsigned long stub_func_addr(func_desc_t func)
-{
- return func;
+ func_desc_t desc = {
+ .addr = addr,
+ };
+
+ return desc;
}
/* PowerPC64 specific values for the Elf64_Sym st_other field. */
@@ -62,20 +57,9 @@ static unsigned int local_entry_offset(const Elf64_Sym *sym)
}
#else
-/* An address is address of the OPD entry, which contains address of fn. */
-typedef struct ppc64_opd_entry func_desc_t;
-
static func_desc_t func_desc(unsigned long addr)
{
- return *(struct ppc64_opd_entry *)addr;
-}
-static unsigned long func_addr(unsigned long addr)
-{
- return func_desc(addr).funcaddr;
-}
-static unsigned long stub_func_addr(func_desc_t func)
-{
- return func.funcaddr;
+ return *(struct func_desc *)addr;
}
static unsigned int local_entry_offset(const Elf64_Sym *sym)
{
@@ -92,6 +76,16 @@ void *dereference_module_function_descriptor(struct module *mod, void *ptr)
}
#endif
+static unsigned long func_addr(unsigned long addr)
+{
+ return func_desc(addr).addr;
+}
+
+static unsigned long stub_func_addr(func_desc_t func)
+{
+ return func.addr;
+}
+
#define STUB_MAGIC 0x73747562 /* stub */
/* Like PPC32, we need little trampolines to do > 24-bit jumps (into
@@ -121,65 +115,21 @@ struct ppc64_stub_entry
* the stub, but it's significantly shorter to put these values at the
* end of the stub code, and patch the stub address (32-bits relative
* to the TOC ptr, r2) into the stub.
- *
- * addis r11,r2, <high>
- * addi r11,r11, <low>
- * std r2,R2_STACK_OFFSET(r1)
- * ld r12,32(r11)
- * ld r2,40(r11)
- * mtctr r12
- * bctr
*/
static u32 ppc64_stub_insns[] = {
- PPC_INST_ADDIS | __PPC_RT(R11) | __PPC_RA(R2),
- PPC_INST_ADDI | __PPC_RT(R11) | __PPC_RA(R11),
+ PPC_RAW_ADDIS(_R11, _R2, 0),
+ PPC_RAW_ADDI(_R11, _R11, 0),
/* Save current r2 value in magic place on the stack. */
- PPC_INST_STD | __PPC_RS(R2) | __PPC_RA(R1) | R2_STACK_OFFSET,
- PPC_INST_LD | __PPC_RT(R12) | __PPC_RA(R11) | 32,
-#ifdef PPC64_ELF_ABI_v1
+ PPC_RAW_STD(_R2, _R1, R2_STACK_OFFSET),
+ PPC_RAW_LD(_R12, _R11, 32),
+#ifdef CONFIG_PPC64_ELF_ABI_V1
/* Set up new r2 from function descriptor */
- PPC_INST_LD | __PPC_RT(R2) | __PPC_RA(R11) | 40,
+ PPC_RAW_LD(_R2, _R11, 40),
#endif
- PPC_INST_MTCTR | __PPC_RS(R12),
- PPC_INST_BCTR,
+ PPC_RAW_MTCTR(_R12),
+ PPC_RAW_BCTR(),
};
-#ifdef CONFIG_DYNAMIC_FTRACE
-int module_trampoline_target(struct module *mod, unsigned long addr,
- unsigned long *target)
-{
- struct ppc64_stub_entry *stub;
- func_desc_t funcdata;
- u32 magic;
-
- if (!within_module_core(addr, mod)) {
- pr_err("%s: stub %lx not in module %s\n", __func__, addr, mod->name);
- return -EFAULT;
- }
-
- stub = (struct ppc64_stub_entry *)addr;
-
- if (probe_kernel_read(&magic, &stub->magic, sizeof(magic))) {
- pr_err("%s: fault reading magic for stub %lx for %s\n", __func__, addr, mod->name);
- return -EFAULT;
- }
-
- if (magic != STUB_MAGIC) {
- pr_err("%s: bad magic for stub %lx for %s\n", __func__, addr, mod->name);
- return -EFAULT;
- }
-
- if (probe_kernel_read(&funcdata, &stub->funcdata, sizeof(funcdata))) {
- pr_err("%s: fault reading funcdata for stub %lx for %s\n", __func__, addr, mod->name);
- return -EFAULT;
- }
-
- *target = stub_func_addr(funcdata);
-
- return 0;
-}
-#endif
-
/* Count how many different 24-bit relocations (different symbol,
different addend) */
static unsigned int count_relocs(const Elf64_Rela *rela, unsigned int num)
@@ -226,26 +176,11 @@ static int relacmp(const void *_x, const void *_y)
return 0;
}
-static void relaswap(void *_x, void *_y, int size)
-{
- uint64_t *x, *y, tmp;
- int i;
-
- y = (uint64_t *)_x;
- x = (uint64_t *)_y;
-
- for (i = 0; i < sizeof(Elf64_Rela) / sizeof(uint64_t); i++) {
- tmp = x[i];
- x[i] = y[i];
- y[i] = tmp;
- }
-}
-
/* Get size of potential trampolines required. */
static unsigned long get_stubs_size(const Elf64_Ehdr *hdr,
const Elf64_Shdr *sechdrs)
{
- /* One extra reloc so it's always 0-funcaddr terminated */
+ /* One extra reloc so it's always 0-addr terminated */
unsigned long relocs = 1;
unsigned i;
@@ -259,12 +194,12 @@ static unsigned long get_stubs_size(const Elf64_Ehdr *hdr,
/* Sort the relocation information based on a symbol and
* addend key. This is a stable O(n*log n) complexity
- * alogrithm but it will reduce the complexity of
+ * algorithm but it will reduce the complexity of
* count_relocs() to linear complexity O(n)
*/
sort((void *)sechdrs[i].sh_addr,
sechdrs[i].sh_size / sizeof(Elf64_Rela),
- sizeof(Elf64_Rela), relacmp, relaswap);
+ sizeof(Elf64_Rela), relacmp, NULL);
relocs += count_relocs((void *)sechdrs[i].sh_addr,
sechdrs[i].sh_size
@@ -335,6 +270,12 @@ static Elf64_Sym *find_dot_toc(Elf64_Shdr *sechdrs,
return NULL;
}
+bool module_init_section(const char *name)
+{
+ /* We don't handle .init for the moment: always return false. */
+ return false;
+}
+
int module_frob_arch_sections(Elf64_Ehdr *hdr,
Elf64_Shdr *sechdrs,
char *secstrings,
@@ -344,7 +285,6 @@ int module_frob_arch_sections(Elf64_Ehdr *hdr,
/* Find .toc and .stubs sections, symtab and strtab */
for (i = 1; i < hdr->e_shnum; i++) {
- char *p;
if (strcmp(secstrings + sechdrs[i].sh_name, ".stubs") == 0)
me->arch.stubs_section = i;
else if (strcmp(secstrings + sechdrs[i].sh_name, ".toc") == 0) {
@@ -356,10 +296,6 @@ int module_frob_arch_sections(Elf64_Ehdr *hdr,
dedotify_versions((void *)hdr + sechdrs[i].sh_offset,
sechdrs[i].sh_size);
- /* We don't handle .init for the moment: rename to _init */
- while ((p = strstr(secstrings + sechdrs[i].sh_name, ".init")))
- p[0] = '_';
-
if (sechdrs[i].sh_type == SHT_SYMTAB)
dedotify((void *)hdr + sechdrs[i].sh_offset,
sechdrs[i].sh_size / sizeof(Elf64_Sym),
@@ -384,6 +320,83 @@ int module_frob_arch_sections(Elf64_Ehdr *hdr,
return 0;
}
+#ifdef CONFIG_MPROFILE_KERNEL
+
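+/*
+ * The stub sequence, with <high>/<low> patched in by create_ftrace_stub():
+ * ld r12,offsetof(paca, kernel_toc)(r13)
+ * addis r12,r12,<high>
+ * addi r12,r12,<low>
+ * mtctr r12
+ * bctr
+ */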
+static u32 stub_insns[] = {
+ PPC_RAW_LD(_R12, _R13, offsetof(struct paca_struct, kernel_toc)),
+ PPC_RAW_ADDIS(_R12, _R12, 0),
+ PPC_RAW_ADDI(_R12, _R12, 0),
+ PPC_RAW_MTCTR(_R12),
+ PPC_RAW_BCTR(),
+};
+
+/*
+ * For mprofile-kernel we use a special stub for ftrace_caller() because we
+ * can't rely on r2 containing this module's TOC when we enter the stub.
+ *
+ * That can happen if the function calling us didn't need to use the toc. In
+ * that case it won't have set up r2, and the r2 value will be either the
+ * kernel's toc, or possibly another module's toc.
+ *
+ * To deal with that this stub uses the kernel toc, which is always accessible
+ * via the paca (in r13). The target (ftrace_caller()) is responsible for
+ * saving and restoring the toc before returning.
+ */
+static inline int create_ftrace_stub(struct ppc64_stub_entry *entry,
+ unsigned long addr,
+ struct module *me)
+{
+ long reladdr;
+
+ memcpy(entry->jump, stub_insns, sizeof(stub_insns));
+
+ /* Stub uses address relative to kernel toc (from the paca) */
+ reladdr = addr - kernel_toc_addr();
+ if (reladdr > 0x7FFFFFFF || reladdr < -(0x80000000L)) {
+ pr_err("%s: Address of %ps out of range of kernel_toc.\n",
+ me->name, (void *)addr);
+ return 0;
+ }
+
+ entry->jump[1] |= PPC_HA(reladdr);
+ entry->jump[2] |= PPC_LO(reladdr);
+
+ /* Even though we don't use funcdata in the stub, it's needed elsewhere. */
+ entry->funcdata = func_desc(addr);
+ entry->magic = STUB_MAGIC;
+
+ return 1;
+}
+
+static bool is_mprofile_ftrace_call(const char *name)
+{
+ if (!strcmp("_mcount", name))
+ return true;
+#ifdef CONFIG_DYNAMIC_FTRACE
+ if (!strcmp("ftrace_caller", name))
+ return true;
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+ if (!strcmp("ftrace_regs_caller", name))
+ return true;
+#endif
+#endif
+
+ return false;
+}
+#else
+static inline int create_ftrace_stub(struct ppc64_stub_entry *entry,
+ unsigned long addr,
+ struct module *me)
+{
+ return 0;
+}
+
+static bool is_mprofile_ftrace_call(const char *name)
+{
+ return false;
+}
+#endif
+
/*
* r2 is the TOC pointer: it actually points 0x8000 into the TOC (this gives the
* value maximum span in an instruction which uses a signed offset). Round down
@@ -399,11 +412,21 @@ static inline unsigned long my_r2(const Elf64_Shdr *sechdrs, struct module *me)
static inline int create_stub(const Elf64_Shdr *sechdrs,
struct ppc64_stub_entry *entry,
unsigned long addr,
- struct module *me)
+ struct module *me,
+ const char *name)
{
long reladdr;
+ func_desc_t desc;
+ int i;
- memcpy(entry->jump, ppc64_stub_insns, sizeof(ppc64_stub_insns));
+ if (is_mprofile_ftrace_call(name))
+ return create_ftrace_stub(entry, addr, me);
+
+ for (i = 0; i < ARRAY_SIZE(ppc64_stub_insns); i++) {
+ if (patch_instruction(&entry->jump[i],
+ ppc_inst(ppc64_stub_insns[i])))
+ return 0;
+ }
/* Stub uses address relative to r2. */
reladdr = (unsigned long)entry - my_r2(sechdrs, me);
@@ -414,10 +437,24 @@ static inline int create_stub(const Elf64_Shdr *sechdrs,
}
pr_debug("Stub %p get data from reladdr %li\n", entry, reladdr);
- entry->jump[0] |= PPC_HA(reladdr);
- entry->jump[1] |= PPC_LO(reladdr);
- entry->funcdata = func_desc(addr);
- entry->magic = STUB_MAGIC;
+ if (patch_instruction(&entry->jump[0],
+ ppc_inst(entry->jump[0] | PPC_HA(reladdr))))
+ return 0;
+
+ if (patch_instruction(&entry->jump[1],
+ ppc_inst(entry->jump[1] | PPC_LO(reladdr))))
+ return 0;
+
+ // func_desc_t is 8 bytes if ABIv2, else 16 bytes
+ desc = func_desc(addr);
+ for (i = 0; i < sizeof(func_desc_t) / sizeof(u32); i++) {
+ if (patch_instruction(((u32 *)&entry->funcdata) + i,
+ ppc_inst(((u32 *)(&desc))[i])))
+ return 0;
+ }
+
+ if (patch_instruction(&entry->magic, ppc_inst(STUB_MAGIC)))
+ return 0;
return 1;
}
@@ -426,7 +463,8 @@ static inline int create_stub(const Elf64_Shdr *sechdrs,
stub to set up the TOC ptr (r2) for the function. */
static unsigned long stub_for_addr(const Elf64_Shdr *sechdrs,
unsigned long addr,
- struct module *me)
+ struct module *me,
+ const char *name)
{
struct ppc64_stub_entry *stubs;
unsigned int i, num_stubs;
@@ -443,62 +481,19 @@ static unsigned long stub_for_addr(const Elf64_Shdr *sechdrs,
return (unsigned long)&stubs[i];
}
- if (!create_stub(sechdrs, &stubs[i], addr, me))
+ if (!create_stub(sechdrs, &stubs[i], addr, me, name))
return 0;
return (unsigned long)&stubs[i];
}
-#ifdef CONFIG_MPROFILE_KERNEL
-static bool is_mprofile_mcount_callsite(const char *name, u32 *instruction)
-{
- if (strcmp("_mcount", name))
- return false;
-
- /*
- * Check if this is one of the -mprofile-kernel sequences.
- */
- if (instruction[-1] == PPC_INST_STD_LR &&
- instruction[-2] == PPC_INST_MFLR)
- return true;
-
- if (instruction[-1] == PPC_INST_MFLR)
- return true;
-
- return false;
-}
-
-/*
- * In case of _mcount calls, do not save the current callee's TOC (in r2) into
- * the original caller's stack frame. If we did we would clobber the saved TOC
- * value of the original caller.
- */
-static void squash_toc_save_inst(const char *name, unsigned long addr)
-{
- struct ppc64_stub_entry *stub = (struct ppc64_stub_entry *)addr;
-
- /* Only for calls to _mcount */
- if (strcmp("_mcount", name) != 0)
- return;
-
- stub->jump[2] = PPC_INST_NOP;
-}
-#else
-static void squash_toc_save_inst(const char *name, unsigned long addr) { }
-
-static bool is_mprofile_mcount_callsite(const char *name, u32 *instruction)
-{
- return false;
-}
-#endif
-
/* We expect a noop next: if it is, replace it with instruction to
restore r2. */
static int restore_r2(const char *name, u32 *instruction, struct module *me)
{
u32 *prev_insn = instruction - 1;
- if (is_mprofile_mcount_callsite(name, prev_insn))
+ if (is_mprofile_ftrace_call(name))
return 1;
/*
@@ -506,16 +501,19 @@ static int restore_r2(const char *name, u32 *instruction, struct module *me)
* "link" branches and they don't return, so they don't need the r2
* restore afterwards.
*/
- if (!instr_is_relative_link_branch(*prev_insn))
+ if (!instr_is_relative_link_branch(ppc_inst(*prev_insn)))
return 1;
- if (*instruction != PPC_INST_NOP) {
+ if (*instruction != PPC_RAW_NOP()) {
pr_err("%s: Expected nop after call, got %08x at %pS\n",
me->name, *instruction, instruction);
return 0;
}
+
/* ld r2,R2_STACK_OFFSET(r1) */
- *instruction = PPC_INST_LD_TOC;
+ if (patch_instruction(instruction, ppc_inst(PPC_INST_LD_TOC)))
+ return 0;
+
return 1;
}
@@ -636,14 +634,13 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
if (sym->st_shndx == SHN_UNDEF ||
sym->st_shndx == SHN_LIVEPATCH) {
/* External: go via stub */
- value = stub_for_addr(sechdrs, value, me);
+ value = stub_for_addr(sechdrs, value, me,
+ strtab + sym->st_name);
if (!value)
return -ENOENT;
if (!restore_r2(strtab + sym->st_name,
(u32 *)location + 1, me))
return -ENOEXEC;
-
- squash_toc_save_inst(strtab + sym->st_name, value);
} else
value += local_entry_offset(sym);
@@ -656,9 +653,11 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
}
/* Only replace bits 2 through 26 */
- *(uint32_t *)location
- = (*(uint32_t *)location & ~0x03fffffc)
- | (value & 0x03fffffc);
+ value = (*(uint32_t *)location & ~PPC_LI_MASK) | PPC_LI(value);
+
+ if (patch_instruction((u32 *)location, ppc_inst(value)))
+ return -EFAULT;
+
break;
case R_PPC64_REL64:
@@ -699,21 +698,17 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
* ld r2, ...(r12)
* add r2, r2, r12
*/
- if ((((uint32_t *)location)[0] & ~0xfffc) !=
- (PPC_INST_LD | __PPC_RT(R2) | __PPC_RA(R12)))
+ if ((((uint32_t *)location)[0] & ~0xfffc) != PPC_RAW_LD(_R2, _R12, 0))
break;
- if (((uint32_t *)location)[1] !=
- (PPC_INST_ADD | __PPC_RT(R2) | __PPC_RA(R2) | __PPC_RB(R12)))
+ if (((uint32_t *)location)[1] != PPC_RAW_ADD(_R2, _R2, _R12))
break;
/*
* If found, replace it with:
* addis r2, r12, (.TOC.-func)@ha
* addi r2, r2, (.TOC.-func)@l
*/
- ((uint32_t *)location)[0] = PPC_INST_ADDIS | __PPC_RT(R2) |
- __PPC_RA(R12) | PPC_HA(value);
- ((uint32_t *)location)[1] = PPC_INST_ADDI | __PPC_RT(R2) |
- __PPC_RA(R2) | PPC_LO(value);
+ ((uint32_t *)location)[0] = PPC_RAW_ADDIS(_R2, _R12, PPC_HA(value));
+ ((uint32_t *)location)[1] = PPC_RAW_ADDI(_R2, _R2, PPC_LO(value));
break;
case R_PPC64_REL16_HA:
@@ -745,89 +740,53 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
}
#ifdef CONFIG_DYNAMIC_FTRACE
-
-#ifdef CONFIG_MPROFILE_KERNEL
-
-#define PACATOC offsetof(struct paca_struct, kernel_toc)
-
-/*
- * For mprofile-kernel we use a special stub for ftrace_caller() because we
- * can't rely on r2 containing this module's TOC when we enter the stub.
- *
- * That can happen if the function calling us didn't need to use the toc. In
- * that case it won't have setup r2, and the r2 value will be either the
- * kernel's toc, or possibly another modules toc.
- *
- * To deal with that this stub uses the kernel toc, which is always accessible
- * via the paca (in r13). The target (ftrace_caller()) is responsible for
- * saving and restoring the toc before returning.
- */
-static unsigned long create_ftrace_stub(const Elf64_Shdr *sechdrs,
- struct module *me, unsigned long addr)
+int module_trampoline_target(struct module *mod, unsigned long addr,
+ unsigned long *target)
{
- struct ppc64_stub_entry *entry;
- unsigned int i, num_stubs;
- /*
- * ld r12,PACATOC(r13)
- * addis r12,r12,<high>
- * addi r12,r12,<low>
- * mtctr r12
- * bctr
- */
- static u32 stub_insns[] = {
- PPC_INST_LD | __PPC_RT(R12) | __PPC_RA(R13) | PACATOC,
- PPC_INST_ADDIS | __PPC_RT(R12) | __PPC_RA(R12),
- PPC_INST_ADDI | __PPC_RT(R12) | __PPC_RA(R12),
- PPC_INST_MTCTR | __PPC_RS(R12),
- PPC_INST_BCTR,
- };
- long reladdr;
+ struct ppc64_stub_entry *stub;
+ func_desc_t funcdata;
+ u32 magic;
- num_stubs = sechdrs[me->arch.stubs_section].sh_size / sizeof(*entry);
+ if (!within_module_core(addr, mod)) {
+ pr_err("%s: stub %lx not in module %s\n", __func__, addr, mod->name);
+ return -EFAULT;
+ }
- /* Find the next available stub entry */
- entry = (void *)sechdrs[me->arch.stubs_section].sh_addr;
- for (i = 0; i < num_stubs && stub_func_addr(entry->funcdata); i++, entry++);
+ stub = (struct ppc64_stub_entry *)addr;
- if (i >= num_stubs) {
- pr_err("%s: Unable to find a free slot for ftrace stub.\n", me->name);
- return 0;
+ if (copy_from_kernel_nofault(&magic, &stub->magic,
+ sizeof(magic))) {
+ pr_err("%s: fault reading magic for stub %lx for %s\n", __func__, addr, mod->name);
+ return -EFAULT;
}
- memcpy(entry->jump, stub_insns, sizeof(stub_insns));
-
- /* Stub uses address relative to kernel toc (from the paca) */
- reladdr = addr - kernel_toc_addr();
- if (reladdr > 0x7FFFFFFF || reladdr < -(0x80000000L)) {
- pr_err("%s: Address of %ps out of range of kernel_toc.\n",
- me->name, (void *)addr);
- return 0;
+ if (magic != STUB_MAGIC) {
+ pr_err("%s: bad magic for stub %lx for %s\n", __func__, addr, mod->name);
+ return -EFAULT;
}
- entry->jump[1] |= PPC_HA(reladdr);
- entry->jump[2] |= PPC_LO(reladdr);
+ if (copy_from_kernel_nofault(&funcdata, &stub->funcdata,
+ sizeof(funcdata))) {
+ pr_err("%s: fault reading funcdata for stub %lx for %s\n", __func__, addr, mod->name);
+ return -EFAULT;
+ }
- /* Eventhough we don't use funcdata in the stub, it's needed elsewhere. */
- entry->funcdata = func_desc(addr);
- entry->magic = STUB_MAGIC;
+ *target = stub_func_addr(funcdata);
- return (unsigned long)entry;
-}
-#else
-static unsigned long create_ftrace_stub(const Elf64_Shdr *sechdrs,
- struct module *me, unsigned long addr)
-{
- return stub_for_addr(sechdrs, addr, me);
+ return 0;
}
-#endif
int module_finalize_ftrace(struct module *mod, const Elf_Shdr *sechdrs)
{
- mod->arch.tramp = create_ftrace_stub(sechdrs, mod,
- (unsigned long)ftrace_caller);
+ mod->arch.tramp = stub_for_addr(sechdrs,
+ (unsigned long)ftrace_caller,
+ mod,
+ "ftrace_caller");
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
- mod->arch.tramp_regs = create_ftrace_stub(sechdrs, mod,
- (unsigned long)ftrace_regs_caller);
+ mod->arch.tramp_regs = stub_for_addr(sechdrs,
+ (unsigned long)ftrace_regs_caller,
+ mod,
+ "ftrace_regs_caller");
if (!mod->arch.tramp_regs)
return -ENOENT;
#endif
diff --git a/arch/powerpc/kernel/nvram_64.c b/arch/powerpc/kernel/nvram_64.c
index fb4f61096613..e385d3164648 100644
--- a/arch/powerpc/kernel/nvram_64.c
+++ b/arch/powerpc/kernel/nvram_64.c
@@ -19,9 +19,9 @@
#include <linux/pstore.h>
#include <linux/zlib.h>
#include <linux/uaccess.h>
+#include <linux/of.h>
#include <asm/nvram.h>
#include <asm/rtas.h>
-#include <asm/prom.h>
#include <asm/machdep.h>
#undef DEBUG_NVRAM
@@ -540,7 +540,7 @@ static struct pstore_info nvram_pstore_info = {
.write = nvram_pstore_write,
};
-static int nvram_pstore_init(void)
+static int __init nvram_pstore_init(void)
{
int rc = 0;
@@ -562,7 +562,7 @@ static int nvram_pstore_init(void)
return rc;
}
#else
-static int nvram_pstore_init(void)
+static int __init nvram_pstore_init(void)
{
return -1;
}
@@ -647,6 +647,7 @@ static void oops_to_nvram(struct kmsg_dumper *dumper,
{
struct oops_log_info *oops_hdr = (struct oops_log_info *)oops_buf;
static unsigned int oops_count = 0;
+ static struct kmsg_dump_iter iter;
static bool panicking = false;
static DEFINE_SPINLOCK(lock);
unsigned long flags;
@@ -655,9 +656,7 @@ static void oops_to_nvram(struct kmsg_dumper *dumper,
int rc = -1;
switch (reason) {
- case KMSG_DUMP_RESTART:
- case KMSG_DUMP_HALT:
- case KMSG_DUMP_POWEROFF:
+ case KMSG_DUMP_SHUTDOWN:
/* These are almost always orderly shutdowns. */
return;
case KMSG_DUMP_OOPS:
@@ -683,13 +682,14 @@ static void oops_to_nvram(struct kmsg_dumper *dumper,
return;
if (big_oops_buf) {
- kmsg_dump_get_buffer(dumper, false,
+ kmsg_dump_rewind(&iter);
+ kmsg_dump_get_buffer(&iter, false,
big_oops_buf, big_oops_buf_sz, &text_len);
rc = zip_oops(text_len);
}
if (rc != 0) {
- kmsg_dump_rewind(dumper);
- kmsg_dump_get_buffer(dumper, false,
+ kmsg_dump_rewind(&iter);
+ kmsg_dump_get_buffer(&iter, false,
oops_data, oops_data_sz, &text_len);
err_type = ERR_TYPE_KERNEL_PANIC;
oops_hdr->version = cpu_to_be16(OOPS_HDR_VERSION);
@@ -755,7 +755,7 @@ static unsigned char __init nvram_checksum(struct nvram_header *p)
* Per the criteria passed via nvram_remove_partition(), should this
* partition be removed? 1=remove, 0=keep
*/
-static int nvram_can_remove_partition(struct nvram_partition *part,
+static int __init nvram_can_remove_partition(struct nvram_partition *part,
const char *name, int sig, const char *exceptions[])
{
if (part->header.signature != sig)
@@ -854,8 +854,8 @@ loff_t __init nvram_create_partition(const char *name, int sig,
BUILD_BUG_ON(NVRAM_BLOCK_LEN != 16);
/* Convert sizes from bytes to blocks */
- req_size = _ALIGN_UP(req_size, NVRAM_BLOCK_LEN) / NVRAM_BLOCK_LEN;
- min_size = _ALIGN_UP(min_size, NVRAM_BLOCK_LEN) / NVRAM_BLOCK_LEN;
+ req_size = ALIGN(req_size, NVRAM_BLOCK_LEN) / NVRAM_BLOCK_LEN;
+ min_size = ALIGN(min_size, NVRAM_BLOCK_LEN) / NVRAM_BLOCK_LEN;
/* If no minimum size specified, make it the same as the
* requested size
diff --git a/arch/powerpc/kernel/of_platform.c b/arch/powerpc/kernel/of_platform.c
index 427fc22f72b6..f89376ff633e 100644
--- a/arch/powerpc/kernel/of_platform.c
+++ b/arch/powerpc/kernel/of_platform.c
@@ -62,12 +62,8 @@ static int of_pci_phb_probe(struct platform_device *dev)
/* Init pci_dn data structures */
pci_devs_phb_init_dynamic(phb);
- /* Create EEH devices for the PHB */
- eeh_dev_phb_init_dynamic(phb);
-
- /* Register devices with EEH */
- if (dev->dev.of_node->child)
- eeh_add_device_tree_early(PCI_DN(dev->dev.of_node));
+ /* Create EEH PE for the PHB */
+ eeh_phb_pe_create(phb);
/* Scan the bus */
pcibios_scan_phb(phb);
@@ -80,15 +76,9 @@ static int of_pci_phb_probe(struct platform_device *dev)
*/
pcibios_claim_one_bus(phb->bus);
- /* Finish EEH setup */
- eeh_add_device_tree_late(phb->bus);
-
/* Add probed PCI devices to the device model */
pci_bus_add_devices(phb->bus);
- /* sysfs files should only be added after devices are added */
- eeh_add_sysfs_files(phb->bus);
-
return 0;
}
diff --git a/arch/powerpc/kernel/optprobes.c b/arch/powerpc/kernel/optprobes.c
index 024f7aad1952..3b1c2236cbee 100644
--- a/arch/powerpc/kernel/optprobes.c
+++ b/arch/powerpc/kernel/optprobes.c
@@ -16,25 +16,18 @@
#include <asm/code-patching.h>
#include <asm/sstep.h>
#include <asm/ppc-opcode.h>
+#include <asm/inst.h>
-#define TMPL_CALL_HDLR_IDX \
- (optprobe_template_call_handler - optprobe_template_entry)
-#define TMPL_EMULATE_IDX \
- (optprobe_template_call_emulate - optprobe_template_entry)
-#define TMPL_RET_IDX \
- (optprobe_template_ret - optprobe_template_entry)
-#define TMPL_OP_IDX \
- (optprobe_template_op_address - optprobe_template_entry)
-#define TMPL_INSN_IDX \
- (optprobe_template_insn - optprobe_template_entry)
-#define TMPL_END_IDX \
- (optprobe_template_end - optprobe_template_entry)
-
-DEFINE_INSN_CACHE_OPS(ppc_optinsn);
+#define TMPL_CALL_HDLR_IDX (optprobe_template_call_handler - optprobe_template_entry)
+#define TMPL_EMULATE_IDX (optprobe_template_call_emulate - optprobe_template_entry)
+#define TMPL_RET_IDX (optprobe_template_ret - optprobe_template_entry)
+#define TMPL_OP_IDX (optprobe_template_op_address - optprobe_template_entry)
+#define TMPL_INSN_IDX (optprobe_template_insn - optprobe_template_entry)
+#define TMPL_END_IDX (optprobe_template_end - optprobe_template_entry)
static bool insn_page_in_use;
-static void *__ppc_alloc_insn_page(void)
+void *alloc_optinsn_page(void)
{
if (insn_page_in_use)
return NULL;
@@ -42,20 +35,11 @@ static void *__ppc_alloc_insn_page(void)
return &optinsn_slot;
}
-static void __ppc_free_insn_page(void *page __maybe_unused)
+void free_optinsn_page(void *page)
{
insn_page_in_use = false;
}
-struct kprobe_insn_cache kprobe_ppc_optinsn_slots = {
- .mutex = __MUTEX_INITIALIZER(kprobe_ppc_optinsn_slots.mutex),
- .pages = LIST_HEAD_INIT(kprobe_ppc_optinsn_slots.pages),
- /* insn_size initialized later */
- .alloc = __ppc_alloc_insn_page,
- .free = __ppc_free_insn_page,
- .nr_garbage = 0,
-};
-
/*
* Check if we can optimize this probe. Returns NIP post-emulation if this can
* be optimized and 0 otherwise.
@@ -65,14 +49,15 @@ static unsigned long can_optimize(struct kprobe *p)
struct pt_regs regs;
struct instruction_op op;
unsigned long nip = 0;
+ unsigned long addr = (unsigned long)p->addr;
/*
* kprobe placed for kretprobe during boot time
* has a 'nop' instruction, which can be emulated.
* So further checks can be skipped.
*/
- if (p->addr == (kprobe_opcode_t *)&kretprobe_trampoline)
- return (unsigned long)p->addr + sizeof(kprobe_opcode_t);
+ if (p->addr == (kprobe_opcode_t *)&__kretprobe_trampoline)
+ return addr + sizeof(kprobe_opcode_t);
/*
* We only support optimizing kernel addresses, but not
@@ -80,11 +65,11 @@ static unsigned long can_optimize(struct kprobe *p)
*
* FIXME: Optimize kprobes placed in module addresses.
*/
- if (!is_kernel_addr((unsigned long)p->addr))
+ if (!is_kernel_addr(addr))
return 0;
memset(&regs, 0, sizeof(struct pt_regs));
- regs.nip = (unsigned long)p->addr;
+ regs.nip = addr;
regs.trap = 0x0;
regs.msr = MSR_KERNEL;
@@ -99,8 +84,8 @@ static unsigned long can_optimize(struct kprobe *p)
* Ensure that the instruction is not a conditional branch,
* and that can be emulated.
*/
- if (!is_conditional_branch(*p->ainsn.insn) &&
- analyse_instr(&op, &regs, *p->ainsn.insn) == 1) {
+ if (!is_conditional_branch(ppc_inst_read(p->ainsn.insn)) &&
+ analyse_instr(&op, &regs, ppc_inst_read(p->ainsn.insn)) == 1) {
emulate_update_regs(&regs, &op);
nip = regs.nip;
}
@@ -121,7 +106,7 @@ static void optimized_callback(struct optimized_kprobe *op,
kprobes_inc_nmissed_count(&op->kp);
} else {
__this_cpu_write(current_kprobe, &op->kp);
- regs->nip = (unsigned long)op->kp.addr;
+ regs_set_return_ip(regs, (unsigned long)op->kp.addr);
get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
opt_pre_handler(&op->kp, regs);
__this_cpu_write(current_kprobe, NULL);
@@ -134,75 +119,53 @@ NOKPROBE_SYMBOL(optimized_callback);
void arch_remove_optimized_kprobe(struct optimized_kprobe *op)
{
if (op->optinsn.insn) {
- free_ppc_optinsn_slot(op->optinsn.insn, 1);
+ free_optinsn_slot(op->optinsn.insn, 1);
op->optinsn.insn = NULL;
}
}
-/*
- * emulate_step() requires insn to be emulated as
- * second parameter. Load register 'r4' with the
- * instruction.
- */
-void patch_imm32_load_insns(unsigned int val, kprobe_opcode_t *addr)
+static void patch_imm32_load_insns(unsigned long val, int reg, kprobe_opcode_t *addr)
{
- /* addis r4,0,(insn)@h */
- patch_instruction(addr, PPC_INST_ADDIS | ___PPC_RT(4) |
- ((val >> 16) & 0xffff));
- addr++;
-
- /* ori r4,r4,(insn)@l */
- patch_instruction(addr, PPC_INST_ORI | ___PPC_RA(4) |
- ___PPC_RS(4) | (val & 0xffff));
+ patch_instruction(addr++, ppc_inst(PPC_RAW_LIS(reg, PPC_HI(val))));
+ patch_instruction(addr, ppc_inst(PPC_RAW_ORI(reg, reg, PPC_LO(val))));
}
/*
* Generate instructions to load provided immediate 64-bit value
- * to register 'r3' and patch these instructions at 'addr'.
+ * to register 'reg' and patch these instructions at 'addr'.
*/
-void patch_imm64_load_insns(unsigned long val, kprobe_opcode_t *addr)
+static void patch_imm64_load_insns(unsigned long long val, int reg, kprobe_opcode_t *addr)
{
- /* lis r3,(op)@highest */
- patch_instruction(addr, PPC_INST_ADDIS | ___PPC_RT(3) |
- ((val >> 48) & 0xffff));
- addr++;
-
- /* ori r3,r3,(op)@higher */
- patch_instruction(addr, PPC_INST_ORI | ___PPC_RA(3) |
- ___PPC_RS(3) | ((val >> 32) & 0xffff));
- addr++;
-
- /* rldicr r3,r3,32,31 */
- patch_instruction(addr, PPC_INST_RLDICR | ___PPC_RA(3) |
- ___PPC_RS(3) | __PPC_SH64(32) | __PPC_ME64(31));
- addr++;
-
- /* oris r3,r3,(op)@h */
- patch_instruction(addr, PPC_INST_ORIS | ___PPC_RA(3) |
- ___PPC_RS(3) | ((val >> 16) & 0xffff));
- addr++;
-
- /* ori r3,r3,(op)@l */
- patch_instruction(addr, PPC_INST_ORI | ___PPC_RA(3) |
- ___PPC_RS(3) | (val & 0xffff));
+ patch_instruction(addr++, ppc_inst(PPC_RAW_LIS(reg, PPC_HIGHEST(val))));
+ patch_instruction(addr++, ppc_inst(PPC_RAW_ORI(reg, reg, PPC_HIGHER(val))));
+ patch_instruction(addr++, ppc_inst(PPC_RAW_SLDI(reg, reg, 32)));
+ patch_instruction(addr++, ppc_inst(PPC_RAW_ORIS(reg, reg, PPC_HI(val))));
+ patch_instruction(addr, ppc_inst(PPC_RAW_ORI(reg, reg, PPC_LO(val))));
+}
+
+static void patch_imm_load_insns(unsigned long val, int reg, kprobe_opcode_t *addr)
+{
+ if (IS_ENABLED(CONFIG_PPC64))
+ patch_imm64_load_insns(val, reg, addr);
+ else
+ patch_imm32_load_insns(val, reg, addr);
}
int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p)
{
- kprobe_opcode_t *buff, branch_op_callback, branch_emulate_step;
- kprobe_opcode_t *op_callback_addr, *emulate_step_addr;
+ ppc_inst_t branch_op_callback, branch_emulate_step, temp;
+ unsigned long op_callback_addr, emulate_step_addr;
+ kprobe_opcode_t *buff;
long b_offset;
unsigned long nip, size;
int rc, i;
- kprobe_ppc_optinsn_slots.insn_size = MAX_OPTINSN_SIZE;
-
nip = can_optimize(p);
if (!nip)
return -EILSEQ;
/* Allocate instruction slot for detour buffer */
- buff = get_ppc_optinsn_slot();
+ buff = get_optinsn_slot();
if (!buff)
return -ENOMEM;
@@ -220,8 +183,7 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p)
goto error;
/* Check if the return address is also within 32MB range */
- b_offset = (unsigned long)(buff + TMPL_RET_IDX) -
- (unsigned long)nip;
+ b_offset = (unsigned long)(buff + TMPL_RET_IDX) - nip;
if (!is_offset_in_branch_range(b_offset))
goto error;
@@ -230,7 +192,7 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p)
size = (TMPL_END_IDX * sizeof(kprobe_opcode_t)) / sizeof(int);
pr_devel("Copying template to %p, size %lu\n", buff, size);
for (i = 0; i < size; i++) {
- rc = patch_instruction(buff + i, *(optprobe_template_entry + i));
+ rc = patch_instruction(buff + i, ppc_inst(*(optprobe_template_entry + i)));
if (rc < 0)
goto error;
}
@@ -239,27 +201,25 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p)
* Fixup the template with instructions to:
* 1. load the address of the actual probepoint
*/
- patch_imm64_load_insns((unsigned long)op, buff + TMPL_OP_IDX);
+ patch_imm_load_insns((unsigned long)op, 3, buff + TMPL_OP_IDX);
/*
* 2. branch to optimized_callback() and emulate_step()
*/
- op_callback_addr = (kprobe_opcode_t *)ppc_kallsyms_lookup_name("optimized_callback");
- emulate_step_addr = (kprobe_opcode_t *)ppc_kallsyms_lookup_name("emulate_step");
+ op_callback_addr = ppc_kallsyms_lookup_name("optimized_callback");
+ emulate_step_addr = ppc_kallsyms_lookup_name("emulate_step");
if (!op_callback_addr || !emulate_step_addr) {
WARN(1, "Unable to lookup optimized_callback()/emulate_step()\n");
goto error;
}
- branch_op_callback = create_branch((unsigned int *)buff + TMPL_CALL_HDLR_IDX,
- (unsigned long)op_callback_addr,
- BRANCH_SET_LINK);
+ rc = create_branch(&branch_op_callback, buff + TMPL_CALL_HDLR_IDX,
+ op_callback_addr, BRANCH_SET_LINK);
- branch_emulate_step = create_branch((unsigned int *)buff + TMPL_EMULATE_IDX,
- (unsigned long)emulate_step_addr,
- BRANCH_SET_LINK);
+ rc |= create_branch(&branch_emulate_step, buff + TMPL_EMULATE_IDX,
+ emulate_step_addr, BRANCH_SET_LINK);
- if (!branch_op_callback || !branch_emulate_step)
+ if (rc)
goto error;
patch_instruction(buff + TMPL_CALL_HDLR_IDX, branch_op_callback);
@@ -268,22 +228,22 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p)
/*
* 3. load instruction to be emulated into relevant register, and
*/
- patch_imm32_load_insns(*p->ainsn.insn, buff + TMPL_INSN_IDX);
+ temp = ppc_inst_read(p->ainsn.insn);
+ patch_imm_load_insns(ppc_inst_as_ulong(temp), 4, buff + TMPL_INSN_IDX);
/*
* 4. branch back from trampoline
*/
- patch_branch(buff + TMPL_RET_IDX, (unsigned long)nip, 0);
+ patch_branch(buff + TMPL_RET_IDX, nip, 0);
- flush_icache_range((unsigned long)buff,
- (unsigned long)(&buff[TMPL_END_IDX]));
+ flush_icache_range((unsigned long)buff, (unsigned long)(&buff[TMPL_END_IDX]));
op->optinsn.insn = buff;
return 0;
error:
- free_ppc_optinsn_slot(buff, 0);
+ free_optinsn_slot(buff, 0);
return -ERANGE;
}
@@ -305,6 +265,7 @@ int arch_check_optimized_kprobe(struct optimized_kprobe *op)
void arch_optimize_kprobes(struct list_head *oplist)
{
+ ppc_inst_t instr;
struct optimized_kprobe *op;
struct optimized_kprobe *tmp;
@@ -313,11 +274,9 @@ void arch_optimize_kprobes(struct list_head *oplist)
* Backup instructions which will be replaced
* by jump address
*/
- memcpy(op->optinsn.copied_insn, op->kp.addr,
- RELATIVEJUMP_SIZE);
- patch_instruction(op->kp.addr,
- create_branch((unsigned int *)op->kp.addr,
- (unsigned long)op->optinsn.insn, 0));
+ memcpy(op->optinsn.copied_insn, op->kp.addr, RELATIVEJUMP_SIZE);
+ create_branch(&instr, op->kp.addr, (unsigned long)op->optinsn.insn, 0);
+ patch_instruction(op->kp.addr, instr);
list_del_init(&op->list);
}
}
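Both optprobes call sites above follow the reworked create_branch() calling convention: the encoded branch comes back through an output pointer and the return value reports failure (for instance a target outside relative branch range) instead of signalling it with a zero instruction. A hedged sketch of a caller using that convention, assuming the create_branch()/patch_instruction() signatures shown in this patch; the helper name is made up, and in-tree code would normally just use patch_branch(), as the trampoline-return fixup earlier in this file does.

/* Sketch: patch a relative branch at 'site' that jumps to 'target'. */
static int patch_branch_site(u32 *site, unsigned long target, int flags)
{
	ppc_inst_t instr;
	int rc;

	rc = create_branch(&instr, site, target, flags);
	if (rc)
		return rc;		/* target not reachable from 'site' */

	return patch_instruction(site, instr);
}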
@@ -327,8 +286,7 @@ void arch_unoptimize_kprobe(struct optimized_kprobe *op)
arch_arm_kprobe(&op->kp);
}
-void arch_unoptimize_kprobes(struct list_head *oplist,
- struct list_head *done_list)
+void arch_unoptimize_kprobes(struct list_head *oplist, struct list_head *done_list)
{
struct optimized_kprobe *op;
struct optimized_kprobe *tmp;
@@ -339,9 +297,8 @@ void arch_unoptimize_kprobes(struct list_head *oplist,
}
}
-int arch_within_optimized_kprobe(struct optimized_kprobe *op,
- unsigned long addr)
+int arch_within_optimized_kprobe(struct optimized_kprobe *op, kprobe_opcode_t *addr)
{
- return ((unsigned long)op->kp.addr <= addr &&
- (unsigned long)op->kp.addr + RELATIVEJUMP_SIZE > addr);
+ return (op->kp.addr <= addr &&
+ op->kp.addr + (RELATIVEJUMP_SIZE / sizeof(kprobe_opcode_t)) > addr);
}
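patch_imm64_load_insns() above emits lis/ori/sldi/oris/ori to materialise a 64-bit immediate from four 16-bit slices (PPC_HIGHEST, PPC_HIGHER, PPC_HI, PPC_LO); any sign extension done by lis is harmless because the later sldi shifts the upper half into place and discards the high bits. A self-contained user-space check of that reconstruction, with the slice helpers re-defined locally for illustration:

#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* 16-bit slices of a 64-bit immediate, mirroring PPC_LO/PPC_HI/PPC_HIGHER/PPC_HIGHEST. */
#define IMM_LO(v)      ((v) & 0xffff)
#define IMM_HI(v)      (((v) >> 16) & 0xffff)
#define IMM_HIGHER(v)  (((v) >> 32) & 0xffff)
#define IMM_HIGHEST(v) (((v) >> 48) & 0xffff)

int main(void)
{
	uint64_t val = 0xc000000012345678ULL;
	uint64_t reg;

	reg = IMM_HIGHEST(val) << 16;	/* lis  reg, HIGHEST */
	reg |= IMM_HIGHER(val);		/* ori  reg, reg, HIGHER */
	reg <<= 32;			/* sldi reg, reg, 32 */
	reg |= IMM_HI(val) << 16;	/* oris reg, reg, HI */
	reg |= IMM_LO(val);		/* ori  reg, reg, LO */

	assert(reg == val);
	printf("rebuilt 0x%016" PRIx64 "\n", reg);
	return 0;
}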
diff --git a/arch/powerpc/kernel/optprobes_head.S b/arch/powerpc/kernel/optprobes_head.S
index cf383520843f..cd4e7bc32609 100644
--- a/arch/powerpc/kernel/optprobes_head.S
+++ b/arch/powerpc/kernel/optprobes_head.S
@@ -9,6 +9,16 @@
#include <asm/ptrace.h>
#include <asm/asm-offsets.h>
+#ifdef CONFIG_PPC64
+#define SAVE_30GPRS(base) SAVE_GPRS(2, 31, base)
+#define REST_30GPRS(base) REST_GPRS(2, 31, base)
+#define TEMPLATE_FOR_IMM_LOAD_INSNS nop; nop; nop; nop; nop
+#else
+#define SAVE_30GPRS(base) stmw r2, GPR2(base)
+#define REST_30GPRS(base) lmw r2, GPR2(base)
+#define TEMPLATE_FOR_IMM_LOAD_INSNS nop; nop; nop
+#endif
+
#define OPT_SLOT_SIZE 65536
.balign 4
@@ -30,39 +40,41 @@ optinsn_slot:
.global optprobe_template_entry
optprobe_template_entry:
/* Create an in-memory pt_regs */
- stdu r1,-INT_FRAME_SIZE(r1)
+ PPC_STLU r1,-INT_FRAME_SIZE(r1)
SAVE_GPR(0,r1)
/* Save the previous SP into stack */
addi r0,r1,INT_FRAME_SIZE
- std r0,GPR1(r1)
- SAVE_10GPRS(2,r1)
- SAVE_10GPRS(12,r1)
- SAVE_10GPRS(22,r1)
+ PPC_STL r0,GPR1(r1)
+ SAVE_30GPRS(r1)
/* Save SPRS */
mfmsr r5
- std r5,_MSR(r1)
+ PPC_STL r5,_MSR(r1)
li r5,0x700
- std r5,_TRAP(r1)
+ PPC_STL r5,_TRAP(r1)
li r5,0
- std r5,ORIG_GPR3(r1)
- std r5,RESULT(r1)
+ PPC_STL r5,ORIG_GPR3(r1)
+ PPC_STL r5,RESULT(r1)
mfctr r5
- std r5,_CTR(r1)
+ PPC_STL r5,_CTR(r1)
mflr r5
- std r5,_LINK(r1)
+ PPC_STL r5,_LINK(r1)
mfspr r5,SPRN_XER
- std r5,_XER(r1)
+ PPC_STL r5,_XER(r1)
mfcr r5
- std r5,_CCR(r1)
+ PPC_STL r5,_CCR(r1)
+#ifdef CONFIG_PPC64
lbz r5,PACAIRQSOFTMASK(r13)
std r5,SOFTE(r1)
+#endif
/*
* We may get here from a module, so load the kernel TOC in r2.
* The original TOC gets restored when pt_regs is restored
* further below.
*/
- ld r2,PACATOC(r13)
+#ifdef CONFIG_PPC64
+ LOAD_PACA_TOC()
+#endif
.global optprobe_template_op_address
optprobe_template_op_address:
@@ -70,11 +82,8 @@ optprobe_template_op_address:
* Parameters to optimized_callback():
* 1. optimized_kprobe structure in r3
*/
- nop
- nop
- nop
- nop
- nop
+ TEMPLATE_FOR_IMM_LOAD_INSNS
+
/* 2. pt_regs pointer in r4 */
addi r4,r1,STACK_FRAME_OVERHEAD
@@ -92,8 +101,7 @@ optprobe_template_call_handler:
.global optprobe_template_insn
optprobe_template_insn:
/* 2, Pass instruction to be emulated in r4 */
- nop
- nop
+ TEMPLATE_FOR_IMM_LOAD_INSNS
.global optprobe_template_call_emulate
optprobe_template_call_emulate:
@@ -104,20 +112,18 @@ optprobe_template_call_emulate:
* All done.
* Now, restore the registers...
*/
- ld r5,_MSR(r1)
+ PPC_LL r5,_MSR(r1)
mtmsr r5
- ld r5,_CTR(r1)
+ PPC_LL r5,_CTR(r1)
mtctr r5
- ld r5,_LINK(r1)
+ PPC_LL r5,_LINK(r1)
mtlr r5
- ld r5,_XER(r1)
+ PPC_LL r5,_XER(r1)
mtxer r5
- ld r5,_CCR(r1)
+ PPC_LL r5,_CCR(r1)
mtcr r5
REST_GPR(0,r1)
- REST_10GPRS(2,r1)
- REST_10GPRS(12,r1)
- REST_10GPRS(22,r1)
+ REST_30GPRS(r1)
/* Restore the previous SP */
addi r1,r1,INT_FRAME_SIZE
diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c
index 949eceb254d8..be8db402e963 100644
--- a/arch/powerpc/kernel/paca.c
+++ b/arch/powerpc/kernel/paca.c
@@ -8,11 +8,11 @@
#include <linux/memblock.h>
#include <linux/sched/task.h>
#include <linux/numa.h>
+#include <linux/pgtable.h>
#include <asm/lppaca.h>
#include <asm/paca.h>
#include <asm/sections.h>
-#include <asm/pgtable.h>
#include <asm/kexec.h>
#include <asm/svm.h>
#include <asm/ultravisor.h>
@@ -56,8 +56,8 @@ static void *__init alloc_paca_data(unsigned long size, unsigned long align,
#define LPPACA_SIZE 0x400
-static void *__init alloc_shared_lppaca(unsigned long size, unsigned long align,
- unsigned long limit, int cpu)
+static void *__init alloc_shared_lppaca(unsigned long size, unsigned long limit,
+ int cpu)
{
size_t shared_lppaca_total_size = PAGE_ALIGN(nr_cpu_ids * LPPACA_SIZE);
static unsigned long shared_lppaca_size;
@@ -67,6 +67,13 @@ static void *__init alloc_shared_lppaca(unsigned long size, unsigned long align,
if (!shared_lppaca) {
memblock_set_bottom_up(true);
+ /*
+ * See Documentation/powerpc/ultravisor.rst for more details.
+ *
+ * UV/HV data sharing is in PAGE_SIZE granularity. In order to
+ * minimize the number of pages shared, align the allocation to
+ * PAGE_SIZE.
+ */
shared_lppaca =
memblock_alloc_try_nid(shared_lppaca_total_size,
PAGE_SIZE, MEMBLOCK_LOW_LIMIT,
@@ -86,7 +93,7 @@ static void *__init alloc_shared_lppaca(unsigned long size, unsigned long align,
* This is very early in boot, so no harm done if the kernel crashes at
* this point.
*/
- BUG_ON(shared_lppaca_size >= shared_lppaca_total_size);
+ BUG_ON(shared_lppaca_size > shared_lppaca_total_size);
return ptr;
}
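The BUG_ON() change above matters because shared_lppaca_size is a simple bump pointer into a pool sized for nr_cpu_ids entries: an allocation that ends exactly at shared_lppaca_total_size is legitimate, so only going past the end is an error. A small user-space model of that allocator, with pool and object sizes invented for illustration:

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define POOL_SIZE (4 * 1024)	/* stand-in for shared_lppaca_total_size */
#define OBJ_SIZE  1024		/* stand-in for LPPACA_SIZE (already aligned) */

static char pool[POOL_SIZE];
static size_t pool_used;	/* stand-in for shared_lppaca_size */

static void *bump_alloc(size_t size)
{
	void *ptr = &pool[pool_used];

	pool_used += size;
	/* ">=" here would reject the allocation that consumes the final byte
	 * of the pool; ">" only fires when the pool is actually overrun. */
	assert(!(pool_used > POOL_SIZE));
	return ptr;
}

int main(void)
{
	for (int i = 0; i < POOL_SIZE / OBJ_SIZE; i++)
		printf("object %d at offset %td\n", i, (char *)bump_alloc(OBJ_SIZE) - pool);
	return 0;	/* one more allocation would trip the assert */
}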
@@ -121,7 +128,7 @@ static struct lppaca * __init new_lppaca(int cpu, unsigned long limit)
return NULL;
if (is_secure_guest())
- lp = alloc_shared_lppaca(LPPACA_SIZE, 0x400, limit, cpu);
+ lp = alloc_shared_lppaca(LPPACA_SIZE, limit, cpu);
else
lp = alloc_paca_data(LPPACA_SIZE, 0x400, limit, cpu);
@@ -131,8 +138,7 @@ static struct lppaca * __init new_lppaca(int cpu, unsigned long limit)
}
#endif /* CONFIG_PPC_PSERIES */
-#ifdef CONFIG_PPC_BOOK3S_64
-
+#ifdef CONFIG_PPC_64S_HASH_MMU
/*
* 3 persistent SLBs are allocated here. The buffer will be zero
* initially, hence will all be invaild until we actually write them.
@@ -161,8 +167,7 @@ static struct slb_shadow * __init new_slb_shadow(int cpu, unsigned long limit)
return s;
}
-
-#endif /* CONFIG_PPC_BOOK3S_64 */
+#endif /* CONFIG_PPC_64S_HASH_MMU */
/* The Paca is an array with one entry per processor. Each contains an
* lppaca, which contains the information shared between the
@@ -181,7 +186,7 @@ void __init initialise_paca(struct paca_struct *new_paca, int cpu)
#ifdef CONFIG_PPC_PSERIES
new_paca->lppaca_ptr = NULL;
#endif
-#ifdef CONFIG_PPC_BOOK3E
+#ifdef CONFIG_PPC_BOOK3E_64
new_paca->kernel_pgd = swapper_pg_dir;
#endif
new_paca->lock_token = 0x8000;
@@ -194,11 +199,11 @@ void __init initialise_paca(struct paca_struct *new_paca, int cpu)
new_paca->kexec_state = KEXEC_STATE_NONE;
new_paca->__current = &init_task;
new_paca->data_offset = 0xfeeeeeeeeeeeeeeeULL;
-#ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_PPC_64S_HASH_MMU
new_paca->slb_shadow_ptr = NULL;
#endif
-#ifdef CONFIG_PPC_BOOK3E
+#ifdef CONFIG_PPC_BOOK3E_64
/* For now -- if we have threads this will be adjusted later */
new_paca->tcd_ptr = &new_paca->tcd;
#endif
@@ -210,15 +215,19 @@ void setup_paca(struct paca_struct *new_paca)
/* Setup r13 */
local_paca = new_paca;
-#ifdef CONFIG_PPC_BOOK3E
+#ifdef CONFIG_PPC_BOOK3E_64
/* On Book3E, initialize the TLB miss exception frames */
mtspr(SPRN_SPRG_TLB_EXFRAME, local_paca->extlb);
#else
- /* In HV mode, we setup both HPACA and PACA to avoid problems
+ /*
+ * In HV mode, we setup both HPACA and PACA to avoid problems
* if we do a GET_PACA() before the feature fixups have been
- * applied
+ * applied.
+ *
+ * Normally you should test against CPU_FTR_HVMODE, but CPU features
+ * are not yet set up when we first reach here.
*/
- if (early_cpu_has_feature(CPU_FTR_HVMODE))
+ if (mfmsr() & MSR_HV)
mtspr(SPRN_SPRG_HPACA, local_paca);
#endif
mtspr(SPRN_SPRG_PACA, local_paca);
@@ -267,7 +276,7 @@ void __init allocate_paca(int cpu)
#ifdef CONFIG_PPC_PSERIES
paca->lppaca_ptr = new_lppaca(cpu, limit);
#endif
-#ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_PPC_64S_HASH_MMU
paca->slb_shadow_ptr = new_slb_shadow(cpu, limit);
#endif
paca_struct_size += sizeof(struct paca_struct);
@@ -279,17 +288,17 @@ void __init free_unused_pacas(void)
new_ptrs_size = sizeof(struct paca_struct *) * nr_cpu_ids;
if (new_ptrs_size < paca_ptrs_size)
- memblock_free(__pa(paca_ptrs) + new_ptrs_size,
- paca_ptrs_size - new_ptrs_size);
+ memblock_phys_free(__pa(paca_ptrs) + new_ptrs_size,
+ paca_ptrs_size - new_ptrs_size);
paca_nr_cpu_ids = nr_cpu_ids;
paca_ptrs_size = new_ptrs_size;
-#ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_PPC_64S_HASH_MMU
if (early_radix_enabled()) {
/* Ugly fixup, see new_slb_shadow() */
- memblock_free(__pa(paca_ptrs[boot_cpuid]->slb_shadow_ptr),
- sizeof(struct slb_shadow));
+ memblock_phys_free(__pa(paca_ptrs[boot_cpuid]->slb_shadow_ptr),
+ sizeof(struct slb_shadow));
paca_ptrs[boot_cpuid]->slb_shadow_ptr = NULL;
}
#endif
@@ -298,24 +307,15 @@ void __init free_unused_pacas(void)
paca_ptrs_size + paca_struct_size, nr_cpu_ids);
}
+#ifdef CONFIG_PPC_64S_HASH_MMU
void copy_mm_to_paca(struct mm_struct *mm)
{
-#ifdef CONFIG_PPC_BOOK3S
mm_context_t *context = &mm->context;
- get_paca()->mm_ctx_id = context->id;
-#ifdef CONFIG_PPC_MM_SLICES
VM_BUG_ON(!mm_ctx_slb_addr_limit(context));
- get_paca()->mm_ctx_slb_addr_limit = mm_ctx_slb_addr_limit(context);
memcpy(&get_paca()->mm_ctx_low_slices_psize, mm_ctx_low_slices(context),
LOW_SLICE_ARRAY_SZ);
memcpy(&get_paca()->mm_ctx_high_slices_psize, mm_ctx_high_slices(context),
TASK_SLICE_ARRAY_SZ(context));
-#else /* CONFIG_PPC_MM_SLICES */
- get_paca()->mm_ctx_user_psize = context->user_psize;
- get_paca()->mm_ctx_sllp = context->sllp;
-#endif
-#else /* !CONFIG_PPC_BOOK3S */
- return;
-#endif
}
+#endif /* CONFIG_PPC_64S_HASH_MMU */
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index c6c03416a151..d67cf79bf5d0 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -29,19 +29,21 @@
#include <linux/slab.h>
#include <linux/vgaarb.h>
#include <linux/numa.h>
+#include <linux/msi.h>
+#include <linux/irqdomain.h>
#include <asm/processor.h>
#include <asm/io.h>
-#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include <asm/byteorder.h>
#include <asm/machdep.h>
#include <asm/ppc-pci.h>
#include <asm/eeh.h>
+#include <asm/setup.h>
#include "../../../drivers/pci/pci.h"
-/* hose_spinlock protects accesses to the the phb_bitmap. */
+/* hose_spinlock protects accesses to the phb_bitmap. */
static DEFINE_SPINLOCK(hose_spinlock);
LIST_HEAD(hose_list);
@@ -61,28 +63,40 @@ EXPORT_SYMBOL(isa_mem_base);
static const struct dma_map_ops *pci_dma_ops;
-void set_pci_dma_ops(const struct dma_map_ops *dma_ops)
+void __init set_pci_dma_ops(const struct dma_map_ops *dma_ops)
{
pci_dma_ops = dma_ops;
}
-/*
- * This function should run under locking protection, specifically
- * hose_spinlock.
- */
static int get_phb_number(struct device_node *dn)
{
int ret, phb_id = -1;
- u32 prop_32;
u64 prop;
/*
* Try fixed PHB numbering first, by checking archs and reading
- * the respective device-tree properties. Firstly, try powernv by
- * reading "ibm,opal-phbid", only present in OPAL environment.
+ * the respective device-tree properties. Firstly, try reading
+ * standard "linux,pci-domain", then try reading "ibm,opal-phbid"
+ * (only present in powernv OPAL environment), then try device-tree
+ * alias and as the last try to use lower bits of "reg" property.
*/
- ret = of_property_read_u64(dn, "ibm,opal-phbid", &prop);
+ ret = of_get_pci_domain_nr(dn);
+ if (ret >= 0) {
+ prop = ret;
+ ret = 0;
+ }
+ if (ret)
+ ret = of_property_read_u64(dn, "ibm,opal-phbid", &prop);
+
if (ret) {
+ ret = of_alias_get_id(dn, "pci");
+ if (ret >= 0) {
+ prop = ret;
+ ret = 0;
+ }
+ }
+ if (ret) {
+ u32 prop_32;
ret = of_property_read_u32_index(dn, "reg", 1, &prop_32);
prop = prop_32;
}
@@ -90,18 +104,20 @@ static int get_phb_number(struct device_node *dn)
if (!ret)
phb_id = (int)(prop & (MAX_PHBS - 1));
+ spin_lock(&hose_spinlock);
+
/* We need to be sure to not use the same PHB number twice. */
if ((phb_id >= 0) && !test_and_set_bit(phb_id, phb_bitmap))
- return phb_id;
+ goto out_unlock;
- /*
- * If not pseries nor powernv, or if fixed PHB numbering tried to add
- * the same PHB number twice, then fallback to dynamic PHB numbering.
- */
+ /* If everything fails then fallback to dynamic PHB numbering. */
phb_id = find_first_zero_bit(phb_bitmap, MAX_PHBS);
BUG_ON(phb_id >= MAX_PHBS);
set_bit(phb_id, phb_bitmap);
+out_unlock:
+ spin_unlock(&hose_spinlock);
+
return phb_id;
}
@@ -112,11 +128,14 @@ struct pci_controller *pcibios_alloc_controller(struct device_node *dev)
phb = zalloc_maybe_bootmem(sizeof(struct pci_controller), GFP_KERNEL);
if (phb == NULL)
return NULL;
- spin_lock(&hose_spinlock);
+
phb->global_number = get_phb_number(dev);
+
+ spin_lock(&hose_spinlock);
list_add_tail(&phb->list_node, &hose_list);
spin_unlock(&hose_spinlock);
- phb->dn = dev;
+
+ phb->dn = of_node_get(dev);
phb->is_dynamic = slab_is_available();
#ifdef CONFIG_PPC64
if (dev) {
@@ -139,7 +158,7 @@ void pcibios_free_controller(struct pci_controller *phb)
/* Clear bit of phb_bitmap to allow reuse of this PHB number. */
if (phb->global_number < MAX_PHBS)
clear_bit(phb->global_number, phb_bitmap);
-
+ of_node_put(phb->dn);
list_del(&phb->list_node);
spin_unlock(&hose_spinlock);
@@ -353,6 +372,55 @@ struct pci_controller *pci_find_controller_for_domain(int domain_nr)
return NULL;
}
+struct pci_intx_virq {
+ int virq;
+ struct kref kref;
+ struct list_head list_node;
+};
+
+static LIST_HEAD(intx_list);
+static DEFINE_MUTEX(intx_mutex);
+
+static void ppc_pci_intx_release(struct kref *kref)
+{
+ struct pci_intx_virq *vi = container_of(kref, struct pci_intx_virq, kref);
+
+ list_del(&vi->list_node);
+ irq_dispose_mapping(vi->virq);
+ kfree(vi);
+}
+
+static int ppc_pci_unmap_irq_line(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct pci_dev *pdev = to_pci_dev(data);
+
+ if (action == BUS_NOTIFY_DEL_DEVICE) {
+ struct pci_intx_virq *vi;
+
+ mutex_lock(&intx_mutex);
+ list_for_each_entry(vi, &intx_list, list_node) {
+ if (vi->virq == pdev->irq) {
+ kref_put(&vi->kref, ppc_pci_intx_release);
+ break;
+ }
+ }
+ mutex_unlock(&intx_mutex);
+ }
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block ppc_pci_unmap_irq_notifier = {
+ .notifier_call = ppc_pci_unmap_irq_line,
+};
+
+static int ppc_pci_register_irq_notifier(void)
+{
+ return bus_register_notifier(&pci_bus_type, &ppc_pci_unmap_irq_notifier);
+}
+arch_initcall(ppc_pci_register_irq_notifier);
+
/*
* Reads the interrupt pin to determine if interrupt is use by card.
* If the interrupt is used, then gets the interrupt line from the
@@ -361,6 +429,12 @@ struct pci_controller *pci_find_controller_for_domain(int domain_nr)
static int pci_read_irq_line(struct pci_dev *pci_dev)
{
int virq;
+ struct pci_intx_virq *vi, *vitmp;
+
+ /* Preallocate vi as rewind is complex if this fails after mapping */
+ vi = kzalloc(sizeof(struct pci_intx_virq), GFP_KERNEL);
+ if (!vi)
+ return -1;
pr_debug("PCI: Try to map irq for %s...\n", pci_name(pci_dev));
@@ -377,12 +451,12 @@ static int pci_read_irq_line(struct pci_dev *pci_dev)
* function.
*/
if (pci_read_config_byte(pci_dev, PCI_INTERRUPT_PIN, &pin))
- return -1;
+ goto error_exit;
if (pin == 0)
- return -1;
+ goto error_exit;
if (pci_read_config_byte(pci_dev, PCI_INTERRUPT_LINE, &line) ||
line == 0xff || line == 0) {
- return -1;
+ goto error_exit;
}
pr_debug(" No map ! Using line %d (pin %d) from PCI config\n",
line, pin);
@@ -394,14 +468,33 @@ static int pci_read_irq_line(struct pci_dev *pci_dev)
if (!virq) {
pr_debug(" Failed to map !\n");
- return -1;
+ goto error_exit;
}
pr_debug(" Mapped to linux irq %d\n", virq);
pci_dev->irq = virq;
+ mutex_lock(&intx_mutex);
+ list_for_each_entry(vitmp, &intx_list, list_node) {
+ if (vitmp->virq == virq) {
+ kref_get(&vitmp->kref);
+ kfree(vi);
+ vi = NULL;
+ break;
+ }
+ }
+ if (vi) {
+ vi->virq = virq;
+ kref_init(&vi->kref);
+ list_add_tail(&vi->list_node, &intx_list);
+ }
+ mutex_unlock(&intx_mutex);
+
return 0;
+error_exit:
+ kfree(vi);
+ return -1;
}
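The pci_intx_virq bookkeeping added above uses a kref so that a legacy INTx virq shared by several PCI devices is only disposed of when its last user is removed. The refcounting pattern in isolation, as a hedged sketch; the structure and function names below are stand-ins, not code from this patch:

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct shared_mapping {			/* stand-in for struct pci_intx_virq */
	int virq;
	struct kref kref;
};

static void shared_mapping_release(struct kref *kref)
{
	struct shared_mapping *m = container_of(kref, struct shared_mapping, kref);

	/* Runs only when the last reference is dropped. */
	kfree(m);
}

static struct shared_mapping *shared_mapping_create(int virq)
{
	struct shared_mapping *m = kzalloc(sizeof(*m), GFP_KERNEL);

	if (!m)
		return NULL;
	m->virq = virq;
	kref_init(&m->kref);		/* first user: count starts at 1 */
	return m;
}

static void shared_mapping_get(struct shared_mapping *m)
{
	kref_get(&m->kref);		/* another device shares the mapping */
}

static void shared_mapping_put(struct shared_mapping *m)
{
	kref_put(&m->kref, shared_mapping_release);
}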
/*
@@ -728,7 +821,7 @@ void pci_process_bridge_OF_ranges(struct pci_controller *hose,
" MEM 0x%016llx..0x%016llx -> 0x%016llx %s\n",
range.cpu_addr, range.cpu_addr + range.size - 1,
range.pci_addr,
- (range.pci_space & 0x40000000) ?
+ (range.flags & IORESOURCE_PREFETCH) ?
"Prefetch" : "");
/* We support only 3 memory ranges */
@@ -984,13 +1077,18 @@ void pcibios_bus_add_device(struct pci_dev *dev)
ppc_md.pcibios_bus_add_device(dev);
}
-int pcibios_add_device(struct pci_dev *dev)
+int pcibios_device_add(struct pci_dev *dev)
{
+ struct irq_domain *d;
+
#ifdef CONFIG_PCI_IOV
if (ppc_md.pcibios_fixup_sriov)
ppc_md.pcibios_fixup_sriov(dev);
#endif /* CONFIG_PCI_IOV */
+ d = dev_get_msi_domain(&dev->bus->dev);
+ if (d)
+ dev_set_msi_domain(&dev->dev, d);
return 0;
}
@@ -1007,7 +1105,7 @@ void pcibios_fixup_bus(struct pci_bus *bus)
*/
pci_read_bridge_bases(bus);
- /* Now fixup the bus bus */
+ /* Now fixup the bus */
pcibios_setup_bus_self(bus);
}
EXPORT_SYMBOL(pcibios_fixup_bus);
@@ -1399,14 +1497,8 @@ void pcibios_finish_adding_to_bus(struct pci_bus *bus)
pci_assign_unassigned_bus_resources(bus);
}
- /* Fixup EEH */
- eeh_add_device_tree_late(bus);
-
/* Add new devices to global lists. Register in proc, sysfs. */
pci_bus_add_devices(bus);
-
- /* sysfs files should only be added after devices are added */
- eeh_add_sysfs_files(bus);
}
EXPORT_SYMBOL_GPL(pcibios_finish_adding_to_bus);
@@ -1614,7 +1706,7 @@ EXPORT_SYMBOL_GPL(pcibios_scan_phb);
static void fixup_hide_host_resource_fsl(struct pci_dev *dev)
{
int i, class = dev->class >> 8;
- /* When configured as agent, programing interface = 1 */
+ /* When configured as agent, programming interface = 1 */
int prog_if = dev->class & 0xf;
if ((class == PCI_CLASS_PROCESSOR_POWERPC ||
@@ -1631,3 +1723,13 @@ static void fixup_hide_host_resource_fsl(struct pci_dev *dev)
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MOTOROLA, PCI_ANY_ID, fixup_hide_host_resource_fsl);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_FREESCALE, PCI_ANY_ID, fixup_hide_host_resource_fsl);
+
+
+static int __init discover_phbs(void)
+{
+ if (ppc_md.discover_phbs)
+ ppc_md.discover_phbs();
+
+ return 0;
+}
+core_initcall(discover_phbs);
diff --git a/arch/powerpc/kernel/pci-hotplug.c b/arch/powerpc/kernel/pci-hotplug.c
index d6a67f814983..0fe251c6ac2c 100644
--- a/arch/powerpc/kernel/pci-hotplug.c
+++ b/arch/powerpc/kernel/pci-hotplug.c
@@ -12,6 +12,7 @@
#include <linux/pci.h>
#include <linux/export.h>
+#include <linux/of.h>
#include <asm/pci-bridge.h>
#include <asm/ppc-pci.h>
#include <asm/firmware.h>
@@ -57,8 +58,6 @@ void pcibios_release_device(struct pci_dev *dev)
struct pci_controller *phb = pci_bus_to_host(dev->bus);
struct pci_dn *pdn = pci_get_pdn(dev);
- eeh_remove_device(dev);
-
if (phb->controller_ops.release_device)
phb->controller_ops.release_device(dev);
@@ -112,8 +111,6 @@ void pci_hp_add_devices(struct pci_bus *bus)
struct pci_controller *phb;
struct device_node *dn = pci_bus_to_OF_node(bus);
- eeh_add_device_tree_early(PCI_DN(dn));
-
phb = pci_bus_to_host(bus);
mode = PCI_PROBE_NORMAL;
diff --git a/arch/powerpc/kernel/pci_32.c b/arch/powerpc/kernel/pci_32.c
index b49e1060a3bf..855b59892c5c 100644
--- a/arch/powerpc/kernel/pci_32.c
+++ b/arch/powerpc/kernel/pci_32.c
@@ -21,7 +21,6 @@
#include <asm/processor.h>
#include <asm/io.h>
-#include <asm/prom.h>
#include <asm/sections.h>
#include <asm/pci-bridge.h>
#include <asm/ppc-pci.h>
@@ -37,18 +36,13 @@ int pcibios_assign_bus_offset = 1;
EXPORT_SYMBOL(isa_io_base);
EXPORT_SYMBOL(pci_dram_offset);
-void pcibios_make_OF_bus_map(void);
-
static void fixup_cpc710_pci64(struct pci_dev* dev);
-static u8* pci_to_OF_bus_map;
/* By default, we don't re-assign bus numbers. We do this only on
* some pmacs
*/
static int pci_assign_all_buses;
-static int pci_bus_count;
-
/* This will remain NULL for now, until isa-bridge.c is made common
* to both 32-bit and 64-bit.
*/
@@ -68,6 +62,11 @@ fixup_cpc710_pci64(struct pci_dev* dev)
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CPC710_PCI64, fixup_cpc710_pci64);
+#if defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_CHRP)
+
+static u8* pci_to_OF_bus_map;
+static int pci_bus_count;
+
/*
* Functions below are used on OpenFirmware machines.
*/
@@ -109,7 +108,7 @@ make_one_node_map(struct device_node* node, u8 pci_bus)
}
}
-void
+static void __init
pcibios_make_OF_bus_map(void)
{
int i;
@@ -155,6 +154,7 @@ pcibios_make_OF_bus_map(void)
}
+#ifdef CONFIG_PPC_PMAC
/*
* Returns the PCI device matching a given OF node
*/
@@ -194,7 +194,9 @@ int pci_device_from_OF_node(struct device_node *node, u8 *bus, u8 *devfn)
return -ENODEV;
}
EXPORT_SYMBOL(pci_device_from_OF_node);
+#endif
+#ifdef CONFIG_PPC_CHRP
/* We create the "pci-OF-bus-map" property now so it appears in the
* /proc device tree
*/
@@ -219,6 +221,9 @@ pci_create_OF_bus_map(void)
of_node_put(dn);
}
}
+#endif
+
+#endif /* defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_CHRP) */
void pcibios_setup_phb_io_space(struct pci_controller *hose)
{
@@ -234,23 +239,40 @@ void pcibios_setup_phb_io_space(struct pci_controller *hose)
static int __init pcibios_init(void)
{
struct pci_controller *hose, *tmp;
+#ifndef CONFIG_PPC_PCI_BUS_NUM_DOMAIN_DEPENDENT
int next_busno = 0;
+#endif
printk(KERN_INFO "PCI: Probing PCI hardware\n");
+#ifdef CONFIG_PPC_PCI_BUS_NUM_DOMAIN_DEPENDENT
+ /*
+ * Enable PCI domains in /proc when PCI bus numbers are not unique
+ * across all PCI domains to prevent conflicts. And keep PCI domain 0
+ * backward compatible in /proc for video cards.
+ */
+ pci_add_flags(PCI_ENABLE_PROC_DOMAINS | PCI_COMPAT_DOMAIN_0);
+#endif
+
if (pci_has_flag(PCI_REASSIGN_ALL_BUS))
pci_assign_all_buses = 1;
/* Scan all of the recorded PCI controllers. */
list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
+#ifndef CONFIG_PPC_PCI_BUS_NUM_DOMAIN_DEPENDENT
if (pci_assign_all_buses)
hose->first_busno = next_busno;
+#endif
hose->last_busno = 0xff;
pcibios_scan_phb(hose);
pci_bus_add_devices(hose->bus);
+#ifndef CONFIG_PPC_PCI_BUS_NUM_DOMAIN_DEPENDENT
if (pci_assign_all_buses || next_busno <= hose->last_busno)
next_busno = hose->last_busno + pcibios_assign_bus_offset;
+#endif
}
+
+#if defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_CHRP)
pci_bus_count = next_busno;
/* OpenFirmware based machines need a map of OF bus
@@ -259,6 +281,7 @@ static int __init pcibios_init(void)
*/
if (pci_assign_all_buses)
pcibios_make_OF_bus_map();
+#endif
/* Call common code to handle resource allocation */
pcibios_resource_survey();
diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c
index f83d1f69b1dd..0c7cfb9fab04 100644
--- a/arch/powerpc/kernel/pci_64.c
+++ b/arch/powerpc/kernel/pci_64.c
@@ -19,10 +19,10 @@
#include <linux/syscalls.h>
#include <linux/irq.h>
#include <linux/vmalloc.h>
+#include <linux/of.h>
#include <asm/processor.h>
#include <asm/io.h>
-#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include <asm/byteorder.h>
#include <asm/machdep.h>
@@ -100,7 +100,7 @@ int pcibios_unmap_io_space(struct pci_bus *bus)
pci_name(bus->self));
#ifdef CONFIG_PPC_BOOK3S_64
- __flush_hash_table_range(&init_mm, res->start + _IO_BASE,
+ __flush_hash_table_range(res->start + _IO_BASE,
res->end + _IO_BASE + 1);
#endif
return 0;
@@ -109,29 +109,53 @@ int pcibios_unmap_io_space(struct pci_bus *bus)
/* Get the host bridge */
hose = pci_bus_to_host(bus);
- /* Check if we have IOs allocated */
- if (hose->io_base_alloc == NULL)
- return 0;
-
pr_debug("IO unmapping for PHB %pOF\n", hose->dn);
pr_debug(" alloc=0x%p\n", hose->io_base_alloc);
- /* This is a PHB, we fully unmap the IO area */
- vunmap(hose->io_base_alloc);
-
+ iounmap(hose->io_base_alloc);
return 0;
}
EXPORT_SYMBOL_GPL(pcibios_unmap_io_space);
-static int pcibios_map_phb_io_space(struct pci_controller *hose)
+void __iomem *ioremap_phb(phys_addr_t paddr, unsigned long size)
{
struct vm_struct *area;
+ unsigned long addr;
+
+ WARN_ON_ONCE(paddr & ~PAGE_MASK);
+ WARN_ON_ONCE(size & ~PAGE_MASK);
+
+ /*
+ * Let's allocate some IO space for that guy. We don't pass VM_IOREMAP
+ * because we don't care about alignment tricks that the core does in
+ * that case. Maybe we should due to stupid card with incomplete
+ * address decoding but I'd rather not deal with those outside of the
+ * reserved 64K legacy region.
+ */
+ area = __get_vm_area_caller(size, 0, PHB_IO_BASE, PHB_IO_END,
+ __builtin_return_address(0));
+ if (!area)
+ return NULL;
+
+ addr = (unsigned long)area->addr;
+ if (ioremap_page_range(addr, addr + size, paddr,
+ pgprot_noncached(PAGE_KERNEL))) {
+ vunmap_range(addr, addr + size);
+ return NULL;
+ }
+
+ return (void __iomem *)addr;
+}
+EXPORT_SYMBOL_GPL(ioremap_phb);
+
+static int pcibios_map_phb_io_space(struct pci_controller *hose)
+{
unsigned long phys_page;
unsigned long size_page;
unsigned long io_virt_offset;
- phys_page = _ALIGN_DOWN(hose->io_base_phys, PAGE_SIZE);
- size_page = _ALIGN_UP(hose->pci_io_size, PAGE_SIZE);
+ phys_page = ALIGN_DOWN(hose->io_base_phys, PAGE_SIZE);
+ size_page = ALIGN(hose->pci_io_size, PAGE_SIZE);
/* Make sure IO area address is clear */
hose->io_base_alloc = NULL;
@@ -146,12 +170,11 @@ static int pcibios_map_phb_io_space(struct pci_controller *hose)
* with incomplete address decoding but I'd rather not deal with
* those outside of the reserved 64K legacy region.
*/
- area = __get_vm_area(size_page, 0, PHB_IO_BASE, PHB_IO_END);
- if (area == NULL)
+ hose->io_base_alloc = ioremap_phb(phys_page, size_page);
+ if (!hose->io_base_alloc)
return -ENOMEM;
- hose->io_base_alloc = area->addr;
- hose->io_base_virt = (void __iomem *)(area->addr +
- hose->io_base_phys - phys_page);
+ hose->io_base_virt = hose->io_base_alloc +
+ hose->io_base_phys - phys_page;
pr_debug("IO mapping for PHB %pOF\n", hose->dn);
pr_debug(" phys=0x%016llx, virt=0x%p (alloc=0x%p)\n",
@@ -159,11 +182,6 @@ static int pcibios_map_phb_io_space(struct pci_controller *hose)
pr_debug(" size=0x%016llx (alloc=0x%016lx)\n",
hose->pci_io_size, size_page);
- /* Establish the mapping */
- if (__ioremap_at(phys_page, area->addr, size_page,
- pgprot_noncached(PAGE_KERNEL)) == NULL)
- return -ENOMEM;
-
/* Fixup hose IO resource */
io_virt_offset = pcibios_io_space_offset(hose);
hose->io_resource.start += io_virt_offset;
@@ -267,3 +285,14 @@ int pcibus_to_node(struct pci_bus *bus)
}
EXPORT_SYMBOL(pcibus_to_node);
#endif
+
+#ifdef CONFIG_PPC_PMAC
+int pci_device_from_OF_node(struct device_node *np, u8 *bus, u8 *devfn)
+{
+ if (!PCI_DN(np))
+ return -ENODEV;
+ *bus = PCI_DN(np)->busno;
+ *devfn = PCI_DN(np)->devfn;
+ return 0;
+}
+#endif
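In pcibios_map_phb_io_space() above, the window handed to ioremap_phb() is page aligned even when the PHB's physical IO base is not, so io_base_virt has to be the mapping base plus the sub-page offset. A self-contained check of that offset arithmetic, with the addresses and the 64K page size chosen purely for illustration:

#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 0x10000ULL			/* 64K pages, as on ppc64 */
#define ALIGN_DOWN(x, a) ((x) & ~((a) - 1))
#define ALIGN(x, a)      (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	uint64_t io_base_phys  = 0x3fe081000ULL;	/* not page aligned */
	uint64_t pci_io_size   = 0x10000ULL;
	uint64_t io_base_alloc = 0xd000080000000000ULL;	/* pretend vmalloc base */

	uint64_t phys_page    = ALIGN_DOWN(io_base_phys, PAGE_SIZE);
	uint64_t size_page    = ALIGN(pci_io_size, PAGE_SIZE);
	uint64_t io_base_virt = io_base_alloc + (io_base_phys - phys_page);

	/* The first IO port (at io_base_phys) lands at io_base_virt. */
	assert(phys_page % PAGE_SIZE == 0 && size_page % PAGE_SIZE == 0);
	assert(io_base_virt - io_base_alloc == io_base_phys - phys_page);
	printf("map 0x%" PRIx64 " (+0x%" PRIx64 "), ports start at alloc+0x%" PRIx64 "\n",
	       phys_page, size_page, io_base_virt - io_base_alloc);
	return 0;
}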
diff --git a/arch/powerpc/kernel/pci_dn.c b/arch/powerpc/kernel/pci_dn.c
index 4e654df55969..38561d6a2079 100644
--- a/arch/powerpc/kernel/pci_dn.c
+++ b/arch/powerpc/kernel/pci_dn.c
@@ -12,9 +12,9 @@
#include <linux/export.h>
#include <linux/init.h>
#include <linux/gfp.h>
+#include <linux/of.h>
#include <asm/io.h>
-#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include <asm/ppc-pci.h>
#include <asm/firmware.h>
@@ -124,9 +124,28 @@ struct pci_dn *pci_get_pdn(struct pci_dev *pdev)
return NULL;
}
+#ifdef CONFIG_EEH
+static struct eeh_dev *eeh_dev_init(struct pci_dn *pdn)
+{
+ struct eeh_dev *edev;
+
+ /* Allocate EEH device */
+ edev = kzalloc(sizeof(*edev), GFP_KERNEL);
+ if (!edev)
+ return NULL;
+
+ /* Associate EEH device with OF node */
+ pdn->edev = edev;
+ edev->pdn = pdn;
+ edev->bdfn = (pdn->busno << 8) | pdn->devfn;
+ edev->controller = pdn->phb;
+
+ return edev;
+}
+#endif /* CONFIG_EEH */
+
#ifdef CONFIG_PCI_IOV
static struct pci_dn *add_one_sriov_vf_pdn(struct pci_dn *parent,
- int vf_index,
int busno, int devfn)
{
struct pci_dn *pdn;
@@ -143,7 +162,6 @@ static struct pci_dn *add_one_sriov_vf_pdn(struct pci_dn *parent,
pdn->parent = parent;
pdn->busno = busno;
pdn->devfn = devfn;
- pdn->vf_index = vf_index;
pdn->pe_number = IODA_INVALID_PE;
INIT_LIST_HEAD(&pdn->child_list);
INIT_LIST_HEAD(&pdn->list);
@@ -174,7 +192,7 @@ struct pci_dn *add_sriov_vf_pdns(struct pci_dev *pdev)
for (i = 0; i < pci_sriov_get_totalvfs(pdev); i++) {
struct eeh_dev *edev __maybe_unused;
- pdn = add_one_sriov_vf_pdn(parent, i,
+ pdn = add_one_sriov_vf_pdn(parent,
pci_iov_virtfn_bus(pdev, i),
pci_iov_virtfn_devfn(pdev, i));
if (!pdn) {
@@ -187,7 +205,10 @@ struct pci_dn *add_sriov_vf_pdns(struct pci_dev *pdev)
/* Create the EEH device for the VF */
edev = eeh_dev_init(pdn);
BUG_ON(!edev);
+
+ /* FIXME: these should probably be populated by the EEH probe */
edev->physfn = pdev;
+ edev->vf_index = i;
#endif /* CONFIG_EEH */
}
return pci_get_pdn(pdev);
@@ -238,11 +259,11 @@ void remove_sriov_vf_pdns(struct pci_dev *pdev)
if (edev) {
/*
* We allocate pci_dn's for the totalvfs count,
- * but only only the vfs that were activated
+ * but only the vfs that were activated
* have a configured PE.
*/
if (edev->pe)
- eeh_rmv_from_parent_pe(edev);
+ eeh_pe_tree_remove(edev);
pdn->edev = NULL;
kfree(edev);
@@ -309,6 +330,7 @@ struct pci_dn *pci_add_device_node_info(struct pci_controller *hose,
INIT_LIST_HEAD(&pdn->list);
parent = of_get_parent(dn);
pdn->parent = parent ? PCI_DN(parent) : NULL;
+ of_node_put(parent);
if (pdn->parent)
list_add_tail(&pdn->list, &pdn->parent->child_list);
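The of_node_put() added above closes a device_node reference leak: of_get_parent() returns its result with a reference held, and pci_add_device_node_info() only needs the pointer long enough to record the parent pci_dn. A hedged sketch of the usual get/put pairing; the function below and the property it reads are invented for the example:

#include <linux/errno.h>
#include <linux/of.h>

/* Sketch: read a u32 property from a node's parent without leaking a reference. */
static int read_parent_prop(struct device_node *np, const char *name, u32 *val)
{
	struct device_node *parent;
	int ret;

	parent = of_get_parent(np);	/* takes a reference, or returns NULL */
	if (!parent)
		return -ENODEV;

	ret = of_property_read_u32(parent, name, val);

	of_node_put(parent);		/* drop the reference once we are done */
	return ret;
}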
@@ -422,46 +444,6 @@ void *pci_traverse_device_nodes(struct device_node *start,
}
EXPORT_SYMBOL_GPL(pci_traverse_device_nodes);
-static struct pci_dn *pci_dn_next_one(struct pci_dn *root,
- struct pci_dn *pdn)
-{
- struct list_head *next = pdn->child_list.next;
-
- if (next != &pdn->child_list)
- return list_entry(next, struct pci_dn, list);
-
- while (1) {
- if (pdn == root)
- return NULL;
-
- next = pdn->list.next;
- if (next != &pdn->parent->child_list)
- break;
-
- pdn = pdn->parent;
- }
-
- return list_entry(next, struct pci_dn, list);
-}
-
-void *traverse_pci_dn(struct pci_dn *root,
- void *(*fn)(struct pci_dn *, void *),
- void *data)
-{
- struct pci_dn *pdn = root;
- void *ret;
-
- /* Only scan the child nodes */
- for (pdn = pci_dn_next_one(root, pdn); pdn;
- pdn = pci_dn_next_one(root, pdn)) {
- ret = fn(pdn, data);
- if (ret)
- return ret;
- }
-
- return NULL;
-}
-
static void *add_pdn(struct device_node *dn, void *data)
{
struct pci_controller *hose = data;
@@ -500,28 +482,6 @@ void pci_devs_phb_init_dynamic(struct pci_controller *phb)
pci_traverse_device_nodes(dn, add_pdn, phb);
}
-/**
- * pci_devs_phb_init - Initialize phbs and pci devs under them.
- *
- * This routine walks over all phb's (pci-host bridges) on the
- * system, and sets up assorted pci-related structures
- * (including pci info in the device node structs) for each
- * pci device found underneath. This routine runs once,
- * early in the boot sequence.
- */
-static int __init pci_devs_phb_init(void)
-{
- struct pci_controller *phb, *tmp;
-
- /* This must be done first so the device nodes have valid pci info! */
- list_for_each_entry_safe(phb, tmp, &hose_list, list_node)
- pci_devs_phb_init_dynamic(phb);
-
- return 0;
-}
-
-core_initcall(pci_devs_phb_init);
-
static void pci_dev_pdn_setup(struct pci_dev *pdev)
{
struct pci_dn *pdn;
diff --git a/arch/powerpc/kernel/pci_of_scan.c b/arch/powerpc/kernel/pci_of_scan.c
index c3024f104765..756043dd06e9 100644
--- a/arch/powerpc/kernel/pci_of_scan.c
+++ b/arch/powerpc/kernel/pci_of_scan.c
@@ -13,8 +13,8 @@
#include <linux/pci.h>
#include <linux/export.h>
+#include <linux/of.h>
#include <asm/pci-bridge.h>
-#include <asm/prom.h>
/**
* get_int_prop - Decode a u32 from a device tree property
@@ -244,7 +244,7 @@ EXPORT_SYMBOL(of_create_pci_dev);
* @dev: pci_dev structure for the bridge
*
* of_scan_bus() calls this routine for each PCI bridge that it finds, and
- * this routine in turn call of_scan_bus() recusively to scan for more child
+ * this routine in turn call of_scan_bus() recursively to scan for more child
* devices.
*/
void of_scan_pci_bridge(struct pci_dev *dev)
diff --git a/arch/powerpc/kernel/ppc_save_regs.S b/arch/powerpc/kernel/ppc_save_regs.S
index f3bd0bbf2ae8..2d4d21bb46a9 100644
--- a/arch/powerpc/kernel/ppc_save_regs.S
+++ b/arch/powerpc/kernel/ppc_save_regs.S
@@ -55,14 +55,17 @@ _GLOBAL(ppc_save_regs)
PPC_STL r29,29*SZL(r3)
PPC_STL r30,30*SZL(r3)
PPC_STL r31,31*SZL(r3)
+ lbz r0,PACAIRQSOFTMASK(r13)
+ PPC_STL r0,SOFTE-STACK_FRAME_OVERHEAD(r3)
#endif
/* go up one stack frame for SP */
PPC_LL r4,0(r1)
PPC_STL r4,1*SZL(r3)
/* get caller's LR */
PPC_LL r0,LRSAVE(r4)
- PPC_STL r0,_NIP-STACK_FRAME_OVERHEAD(r3)
PPC_STL r0,_LINK-STACK_FRAME_OVERHEAD(r3)
+ mflr r0
+ PPC_STL r0,_NIP-STACK_FRAME_OVERHEAD(r3)
mfmsr r0
PPC_STL r0,_MSR-STACK_FRAME_OVERHEAD(r3)
mfctr r0
@@ -73,4 +76,5 @@ _GLOBAL(ppc_save_regs)
PPC_STL r0,_CCR-STACK_FRAME_OVERHEAD(r3)
li r0,0
PPC_STL r0,_TRAP-STACK_FRAME_OVERHEAD(r3)
+ PPC_STL r0,ORIG_GPR3-STACK_FRAME_OVERHEAD(r3)
blr
diff --git a/arch/powerpc/kernel/proc_powerpc.c b/arch/powerpc/kernel/proc_powerpc.c
index 877817471e3c..b109cd7b5d01 100644
--- a/arch/powerpc/kernel/proc_powerpc.c
+++ b/arch/powerpc/kernel/proc_powerpc.c
@@ -7,12 +7,12 @@
#include <linux/mm.h>
#include <linux/proc_fs.h>
#include <linux/kernel.h>
+#include <linux/of.h>
#include <asm/machdep.h>
#include <asm/vdso_datapage.h>
#include <asm/rtas.h>
#include <linux/uaccess.h>
-#include <asm/prom.h>
#ifdef CONFIG_PPC64
@@ -25,7 +25,7 @@ static ssize_t page_map_read( struct file *file, char __user *buf, size_t nbytes
loff_t *ppos)
{
return simple_read_from_buffer(buf, nbytes, ppos,
- PDE_DATA(file_inode(file)), PAGE_SIZE);
+ pde_data(file_inode(file)), PAGE_SIZE);
}
static int page_map_mmap( struct file *file, struct vm_area_struct *vma )
@@ -34,7 +34,7 @@ static int page_map_mmap( struct file *file, struct vm_area_struct *vma )
return -EINVAL;
remap_pfn_range(vma, vma->vm_start,
- __pa(PDE_DATA(file_inode(file))) >> PAGE_SHIFT,
+ __pa(pde_data(file_inode(file))) >> PAGE_SHIFT,
PAGE_SIZE, vma->vm_page_prot);
return 0;
}
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index fad50db9dcf2..67da147fe34d 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -34,18 +34,15 @@
#include <linux/ftrace.h>
#include <linux/kernel_stat.h>
#include <linux/personality.h>
-#include <linux/random.h>
#include <linux/hw_breakpoint.h>
#include <linux/uaccess.h>
-#include <linux/elf-randomize.h>
#include <linux/pkeys.h>
#include <linux/seq_buf.h>
-#include <asm/pgtable.h>
+#include <asm/interrupt.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/mmu.h>
-#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/time.h>
#include <asm/runlatch.h>
@@ -96,7 +93,8 @@ static void check_if_tm_restore_required(struct task_struct *tsk)
if (tsk == current && tsk->thread.regs &&
MSR_TM_ACTIVE(tsk->thread.regs->msr) &&
!test_thread_flag(TIF_RESTORE_TM)) {
- tsk->thread.ckpt_regs.msr = tsk->thread.regs->msr;
+ regs_set_return_msr(&tsk->thread.ckpt_regs,
+ tsk->thread.regs->msr);
set_thread_flag(TIF_RESTORE_TM);
}
}
@@ -125,13 +123,11 @@ unsigned long notrace msr_check_and_set(unsigned long bits)
newmsr = oldmsr | bits;
-#ifdef CONFIG_VSX
if (cpu_has_feature(CPU_FTR_VSX) && (bits & MSR_FP))
newmsr |= MSR_VSX;
-#endif
if (oldmsr != newmsr)
- mtmsr_isync(newmsr);
+ newmsr = mtmsr_isync_irqsafe(newmsr);
return newmsr;
}
@@ -145,13 +141,11 @@ void notrace __msr_check_and_clear(unsigned long bits)
newmsr = oldmsr & ~bits;
-#ifdef CONFIG_VSX
if (cpu_has_feature(CPU_FTR_VSX) && (bits & MSR_FP))
newmsr &= ~MSR_VSX;
-#endif
if (oldmsr != newmsr)
- mtmsr_isync(newmsr);
+ mtmsr_isync_irqsafe(newmsr);
}
EXPORT_SYMBOL(__msr_check_and_clear);
@@ -163,11 +157,9 @@ static void __giveup_fpu(struct task_struct *tsk)
save_fpu(tsk);
msr = tsk->thread.regs->msr;
msr &= ~(MSR_FP|MSR_FE0|MSR_FE1);
-#ifdef CONFIG_VSX
if (cpu_has_feature(CPU_FTR_VSX))
msr &= ~MSR_VSX;
-#endif
- tsk->thread.regs->msr = msr;
+ regs_set_return_msr(tsk->thread.regs, msr);
}
void giveup_fpu(struct task_struct *tsk)
@@ -236,23 +228,11 @@ void enable_kernel_fp(void)
}
}
EXPORT_SYMBOL(enable_kernel_fp);
-
-static int restore_fp(struct task_struct *tsk)
-{
- if (tsk->thread.load_fp) {
- load_fp_state(&current->thread.fp_state);
- current->thread.load_fp++;
- return 1;
- }
- return 0;
-}
#else
-static int restore_fp(struct task_struct *tsk) { return 0; }
+static inline void __giveup_fpu(struct task_struct *tsk) { }
#endif /* CONFIG_PPC_FPU */
#ifdef CONFIG_ALTIVEC
-#define loadvec(thr) ((thr).load_vec)
-
static void __giveup_altivec(struct task_struct *tsk)
{
unsigned long msr;
@@ -260,11 +240,9 @@ static void __giveup_altivec(struct task_struct *tsk)
save_altivec(tsk);
msr = tsk->thread.regs->msr;
msr &= ~MSR_VEC;
-#ifdef CONFIG_VSX
if (cpu_has_feature(CPU_FTR_VSX))
msr &= ~MSR_VSX;
-#endif
- tsk->thread.regs->msr = msr;
+ regs_set_return_msr(tsk->thread.regs, msr);
}
void giveup_altivec(struct task_struct *tsk)
@@ -318,21 +296,6 @@ void flush_altivec_to_thread(struct task_struct *tsk)
}
}
EXPORT_SYMBOL_GPL(flush_altivec_to_thread);
-
-static int restore_altivec(struct task_struct *tsk)
-{
- if (cpu_has_feature(CPU_FTR_ALTIVEC) && (tsk->thread.load_vec)) {
- load_vr_state(&tsk->thread.vr_state);
- tsk->thread.used_vr = 1;
- tsk->thread.load_vec++;
-
- return 1;
- }
- return 0;
-}
-#else
-#define loadvec(thr) 0
-static inline int restore_altivec(struct task_struct *tsk) { return 0; }
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
@@ -341,7 +304,7 @@ static void __giveup_vsx(struct task_struct *tsk)
unsigned long msr = tsk->thread.regs->msr;
/*
- * We should never be ssetting MSR_VSX without also setting
+ * We should never be setting MSR_VSX without also setting
* MSR_FP and MSR_VEC
*/
WARN_ON((msr & MSR_VSX) && !((msr & MSR_FP) && (msr & MSR_VEC)));
@@ -400,18 +363,6 @@ void flush_vsx_to_thread(struct task_struct *tsk)
}
}
EXPORT_SYMBOL_GPL(flush_vsx_to_thread);
-
-static int restore_vsx(struct task_struct *tsk)
-{
- if (cpu_has_feature(CPU_FTR_VSX)) {
- tsk->thread.used_vsr = 1;
- return 1;
- }
-
- return 0;
-}
-#else
-static inline int restore_vsx(struct task_struct *tsk) { return 0; }
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
@@ -456,21 +407,14 @@ static unsigned long msr_all_available;
static int __init init_msr_all_available(void)
{
-#ifdef CONFIG_PPC_FPU
- msr_all_available |= MSR_FP;
-#endif
-#ifdef CONFIG_ALTIVEC
+ if (IS_ENABLED(CONFIG_PPC_FPU))
+ msr_all_available |= MSR_FP;
if (cpu_has_feature(CPU_FTR_ALTIVEC))
msr_all_available |= MSR_VEC;
-#endif
-#ifdef CONFIG_VSX
if (cpu_has_feature(CPU_FTR_VSX))
msr_all_available |= MSR_VSX;
-#endif
-#ifdef CONFIG_SPE
if (cpu_has_feature(CPU_FTR_SPE))
msr_all_available |= MSR_SPE;
-#endif
return 0;
}
@@ -494,23 +438,72 @@ void giveup_all(struct task_struct *tsk)
WARN_ON((usermsr & MSR_VSX) && !((usermsr & MSR_FP) && (usermsr & MSR_VEC)));
-#ifdef CONFIG_PPC_FPU
if (usermsr & MSR_FP)
__giveup_fpu(tsk);
-#endif
-#ifdef CONFIG_ALTIVEC
if (usermsr & MSR_VEC)
__giveup_altivec(tsk);
-#endif
-#ifdef CONFIG_SPE
if (usermsr & MSR_SPE)
__giveup_spe(tsk);
-#endif
msr_check_and_clear(msr_all_available);
}
EXPORT_SYMBOL(giveup_all);
+#ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_PPC_FPU
+static bool should_restore_fp(void)
+{
+ if (current->thread.load_fp) {
+ current->thread.load_fp++;
+ return true;
+ }
+ return false;
+}
+
+static void do_restore_fp(void)
+{
+ load_fp_state(&current->thread.fp_state);
+}
+#else
+static bool should_restore_fp(void) { return false; }
+static void do_restore_fp(void) { }
+#endif /* CONFIG_PPC_FPU */
+
+#ifdef CONFIG_ALTIVEC
+static bool should_restore_altivec(void)
+{
+ if (cpu_has_feature(CPU_FTR_ALTIVEC) && (current->thread.load_vec)) {
+ current->thread.load_vec++;
+ return true;
+ }
+ return false;
+}
+
+static void do_restore_altivec(void)
+{
+ load_vr_state(&current->thread.vr_state);
+ current->thread.used_vr = 1;
+}
+#else
+static bool should_restore_altivec(void) { return false; }
+static void do_restore_altivec(void) { }
+#endif /* CONFIG_ALTIVEC */
+
+static bool should_restore_vsx(void)
+{
+ if (cpu_has_feature(CPU_FTR_VSX))
+ return true;
+ return false;
+}
+#ifdef CONFIG_VSX
+static void do_restore_vsx(void)
+{
+ current->thread.used_vsr = 1;
+}
+#else
+static void do_restore_vsx(void) { }
+#endif /* CONFIG_VSX */
+
/*
* The exception exit path calls restore_math() with interrupts hard disabled
* but the soft irq state not "reconciled". ftrace code that calls
@@ -524,33 +517,50 @@ EXPORT_SYMBOL(giveup_all);
void notrace restore_math(struct pt_regs *regs)
{
unsigned long msr;
-
- if (!MSR_TM_ACTIVE(regs->msr) &&
- !current->thread.load_fp && !loadvec(current->thread))
- return;
+ unsigned long new_msr = 0;
msr = regs->msr;
- msr_check_and_set(msr_all_available);
/*
- * Only reload if the bit is not set in the user MSR, the bit BEING set
- * indicates that the registers are hot
+ * new_msr tracks the facilities that are to be restored. Only reload
+ * if the bit is not set in the user MSR (if it is set, the registers
+ * are live for the user thread).
*/
- if ((!(msr & MSR_FP)) && restore_fp(current))
- msr |= MSR_FP | current->thread.fpexc_mode;
+ if ((!(msr & MSR_FP)) && should_restore_fp())
+ new_msr |= MSR_FP;
- if ((!(msr & MSR_VEC)) && restore_altivec(current))
- msr |= MSR_VEC;
+ if ((!(msr & MSR_VEC)) && should_restore_altivec())
+ new_msr |= MSR_VEC;
- if ((msr & (MSR_FP | MSR_VEC)) == (MSR_FP | MSR_VEC) &&
- restore_vsx(current)) {
- msr |= MSR_VSX;
+ if ((!(msr & MSR_VSX)) && should_restore_vsx()) {
+ if (((msr | new_msr) & (MSR_FP | MSR_VEC)) == (MSR_FP | MSR_VEC))
+ new_msr |= MSR_VSX;
}
- msr_check_and_clear(msr_all_available);
+ if (new_msr) {
+ unsigned long fpexc_mode = 0;
+
+ msr_check_and_set(new_msr);
+
+ if (new_msr & MSR_FP) {
+ do_restore_fp();
+
+ // This also covers VSX, because VSX implies FP
+ fpexc_mode = current->thread.fpexc_mode;
+ }
+
+ if (new_msr & MSR_VEC)
+ do_restore_altivec();
- regs->msr = msr;
+ if (new_msr & MSR_VSX)
+ do_restore_vsx();
+
+ msr_check_and_clear(new_msr);
+
+ regs_set_return_msr(regs, regs->msr | new_msr | fpexc_mode);
+ }
}
+#endif /* CONFIG_PPC_BOOK3S_64 */
static void save_all(struct task_struct *tsk)
{
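In the restore_math() rework above, MSR_VSX is only added to new_msr when MSR_FP and MSR_VEC will both be live after the restore, whether they were already set in the user MSR or are being restored in the same pass (and, via should_restore_vsx(), only when the CPU has VSX at all). A tiny self-contained check of that gating logic, using stand-in bit values rather than the real MSR layout:

#include <assert.h>

#define MSR_FP  0x1u	/* stand-in bit values, not the real MSR encoding */
#define MSR_VEC 0x2u
#define MSR_VSX 0x4u

static unsigned int vsx_to_restore(unsigned int msr, unsigned int new_msr)
{
	/* VSX may only come back when FP and VEC are both live afterwards. */
	if (!(msr & MSR_VSX) &&
	    ((msr | new_msr) & (MSR_FP | MSR_VEC)) == (MSR_FP | MSR_VEC))
		return MSR_VSX;
	return 0;
}

int main(void)
{
	/* FP already live, VEC being restored now: VSX can be restored too. */
	assert(vsx_to_restore(MSR_FP, MSR_VEC) == MSR_VSX);
	/* Only FP live and VEC not coming back: VSX stays off. */
	assert(vsx_to_restore(MSR_FP, 0) == 0);
	/* VSX already live in the user MSR: nothing to restore. */
	assert(vsx_to_restore(MSR_FP | MSR_VEC | MSR_VSX, 0) == 0);
	return 0;
}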
@@ -578,7 +588,6 @@ static void save_all(struct task_struct *tsk)
__giveup_spe(tsk);
msr_check_and_clear(msr_all_available);
- thread_pkey_regs_save(&tsk->thread);
}
void flush_all_to_thread(struct task_struct *tsk)
@@ -611,26 +620,70 @@ void do_send_trap(struct pt_regs *regs, unsigned long address,
(void __user *)address);
}
#else /* !CONFIG_PPC_ADV_DEBUG_REGS */
-void do_break (struct pt_regs *regs, unsigned long address,
- unsigned long error_code)
+
+static void do_break_handler(struct pt_regs *regs)
+{
+ struct arch_hw_breakpoint null_brk = {0};
+ struct arch_hw_breakpoint *info;
+ ppc_inst_t instr = ppc_inst(0);
+ int type = 0;
+ int size = 0;
+ unsigned long ea;
+ int i;
+
+ /*
+ * If underneath hw supports only one watchpoint, we know it
+ * caused exception. 8xx also falls into this category.
+ */
+ if (nr_wp_slots() == 1) {
+ __set_breakpoint(0, &null_brk);
+ current->thread.hw_brk[0] = null_brk;
+ current->thread.hw_brk[0].flags |= HW_BRK_FLAG_DISABLED;
+ return;
+ }
+
+ /* Otherwise find out which DAWR caused exception and disable it. */
+ wp_get_instr_detail(regs, &instr, &type, &size, &ea);
+
+ for (i = 0; i < nr_wp_slots(); i++) {
+ info = &current->thread.hw_brk[i];
+ if (!info->address)
+ continue;
+
+ if (wp_check_constraints(regs, instr, ea, type, size, info)) {
+ __set_breakpoint(i, &null_brk);
+ current->thread.hw_brk[i] = null_brk;
+ current->thread.hw_brk[i].flags |= HW_BRK_FLAG_DISABLED;
+ }
+ }
+}
+
+DEFINE_INTERRUPT_HANDLER(do_break)
{
current->thread.trap_nr = TRAP_HWBKPT;
- if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
+ if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, regs->dsisr,
11, SIGSEGV) == NOTIFY_STOP)
return;
if (debugger_break_match(regs))
return;
- /* Clear the breakpoint */
- hw_breakpoint_disable();
+ /*
+ * We reach here only when watchpoint exception is generated by ptrace
+ * event (or hw is buggy!). Now if CONFIG_HAVE_HW_BREAKPOINT is set,
+ * watchpoint is already handled by hw_breakpoint_handler() so we don't
+ * have to do anything. But when CONFIG_HAVE_HW_BREAKPOINT is not set,
+ * we need to manually handle the watchpoint here.
+ */
+ if (!IS_ENABLED(CONFIG_HAVE_HW_BREAKPOINT))
+ do_break_handler(regs);
/* Deliver the signal to userspace */
- force_sig_fault(SIGTRAP, TRAP_HWBKPT, (void __user *)address);
+ force_sig_fault(SIGTRAP, TRAP_HWBKPT, (void __user *)regs->dar);
}
#endif /* CONFIG_PPC_ADV_DEBUG_REGS */
-static DEFINE_PER_CPU(struct arch_hw_breakpoint, current_brk);
+static DEFINE_PER_CPU(struct arch_hw_breakpoint, current_brk[HBP_NUM_MAX]);
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
/*
@@ -704,48 +757,52 @@ void switch_booke_debug_regs(struct debug_reg *new_debug)
EXPORT_SYMBOL_GPL(switch_booke_debug_regs);
#else /* !CONFIG_PPC_ADV_DEBUG_REGS */
#ifndef CONFIG_HAVE_HW_BREAKPOINT
-static void set_breakpoint(struct arch_hw_breakpoint *brk)
+static void set_breakpoint(int i, struct arch_hw_breakpoint *brk)
{
preempt_disable();
- __set_breakpoint(brk);
+ __set_breakpoint(i, brk);
preempt_enable();
}
static void set_debug_reg_defaults(struct thread_struct *thread)
{
- thread->hw_brk.address = 0;
- thread->hw_brk.type = 0;
- thread->hw_brk.len = 0;
- thread->hw_brk.hw_len = 0;
- if (ppc_breakpoint_available())
- set_breakpoint(&thread->hw_brk);
-}
-#endif /* !CONFIG_HAVE_HW_BREAKPOINT */
-#endif /* CONFIG_PPC_ADV_DEBUG_REGS */
+ int i;
+ struct arch_hw_breakpoint null_brk = {0};
-#ifdef CONFIG_PPC_ADV_DEBUG_REGS
-static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
-{
- mtspr(SPRN_DAC1, dabr);
-#ifdef CONFIG_PPC_47x
- isync();
-#endif
- return 0;
+ for (i = 0; i < nr_wp_slots(); i++) {
+ thread->hw_brk[i] = null_brk;
+ if (ppc_breakpoint_available())
+ set_breakpoint(i, &thread->hw_brk[i]);
+ }
}
-#elif defined(CONFIG_PPC_BOOK3S)
-static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
+
+static inline bool hw_brk_match(struct arch_hw_breakpoint *a,
+ struct arch_hw_breakpoint *b)
{
- mtspr(SPRN_DABR, dabr);
- if (cpu_has_feature(CPU_FTR_DABRX))
- mtspr(SPRN_DABRX, dabrx);
- return 0;
+ if (a->address != b->address)
+ return false;
+ if (a->type != b->type)
+ return false;
+ if (a->len != b->len)
+ return false;
+	/* No need to check hw_len; it's calculated from address and len. */
+ return true;
}
-#else
-static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
+
+static void switch_hw_breakpoint(struct task_struct *new)
{
- return -EINVAL;
+ int i;
+
+ for (i = 0; i < nr_wp_slots(); i++) {
+ if (likely(hw_brk_match(this_cpu_ptr(&current_brk[i]),
+ &new->thread.hw_brk[i])))
+ continue;
+
+ __set_breakpoint(i, &new->thread.hw_brk[i]);
+ }
}
-#endif
+#endif /* !CONFIG_HAVE_HW_BREAKPOINT */
+#endif /* CONFIG_PPC_ADV_DEBUG_REGS */
static inline int set_dabr(struct arch_hw_breakpoint *brk)
{
@@ -757,7 +814,19 @@ static inline int set_dabr(struct arch_hw_breakpoint *brk)
if (ppc_md.set_dabr)
return ppc_md.set_dabr(dabr, dabrx);
- return __set_dabr(dabr, dabrx);
+ if (IS_ENABLED(CONFIG_PPC_ADV_DEBUG_REGS)) {
+ mtspr(SPRN_DAC1, dabr);
+ if (IS_ENABLED(CONFIG_PPC_47x))
+ isync();
+ return 0;
+ } else if (IS_ENABLED(CONFIG_PPC_BOOK3S)) {
+ mtspr(SPRN_DABR, dabr);
+ if (cpu_has_feature(CPU_FTR_DABRX))
+ mtspr(SPRN_DABRX, dabrx);
+ return 0;
+ } else {
+ return -EINVAL;
+ }
}
static inline int set_breakpoint_8xx(struct arch_hw_breakpoint *brk)
@@ -765,12 +834,12 @@ static inline int set_breakpoint_8xx(struct arch_hw_breakpoint *brk)
unsigned long lctrl1 = LCTRL1_CTE_GT | LCTRL1_CTF_LT | LCTRL1_CRWE_RW |
LCTRL1_CRWF_RW;
unsigned long lctrl2 = LCTRL2_LW0EN | LCTRL2_LW0LADC | LCTRL2_SLW0EN;
- unsigned long start_addr = brk->address & ~HW_BREAKPOINT_ALIGN;
- unsigned long end_addr = (brk->address + brk->len - 1) | HW_BREAKPOINT_ALIGN;
+ unsigned long start_addr = ALIGN_DOWN(brk->address, HW_BREAKPOINT_SIZE);
+ unsigned long end_addr = ALIGN(brk->address + brk->len, HW_BREAKPOINT_SIZE);
if (start_addr == 0)
lctrl2 |= LCTRL2_LW0LA_F;
- else if (end_addr == ~0U)
+ else if (end_addr == 0)
lctrl2 |= LCTRL2_LW0LA_E;
else
lctrl2 |= LCTRL2_LW0LA_EandF;
@@ -786,20 +855,20 @@ static inline int set_breakpoint_8xx(struct arch_hw_breakpoint *brk)
lctrl1 |= LCTRL1_CRWE_WO | LCTRL1_CRWF_WO;
mtspr(SPRN_CMPE, start_addr - 1);
- mtspr(SPRN_CMPF, end_addr + 1);
+ mtspr(SPRN_CMPF, end_addr);
mtspr(SPRN_LCTRL1, lctrl1);
mtspr(SPRN_LCTRL2, lctrl2);
return 0;
}
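
The hunk above switches the 8xx comparator maths to ALIGN_DOWN()/ALIGN() and programs CMPE/CMPF with start - 1 and end. A minimal userspace sketch of the resulting window, assuming HW_BREAKPOINT_SIZE is 8 bytes as in hw_breakpoint.h (the macro definitions below are stand-ins, not the kernel's):

#include <stdio.h>

#define HW_BREAKPOINT_SIZE 8UL
#define ALIGN_DOWN(x, a)   ((x) & ~((a) - 1))                /* round down */
#define ALIGN(x, a)        (((x) + (a) - 1) & ~((a) - 1))    /* round up */

int main(void)
{
	unsigned long address = 0x1005, len = 8;
	unsigned long start = ALIGN_DOWN(address, HW_BREAKPOINT_SIZE);
	unsigned long end = ALIGN(address + len, HW_BREAKPOINT_SIZE);

	/* CMPE gets start - 1 and CMPF gets end; with the GT/LT compare
	 * types set above, an access matches when its address lies
	 * strictly between the two registers. */
	printf("start=%#lx end=%#lx CMPE=%#lx CMPF=%#lx\n",
	       start, end, start - 1, end);
	return 0;
}
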
-void __set_breakpoint(struct arch_hw_breakpoint *brk)
+void __set_breakpoint(int nr, struct arch_hw_breakpoint *brk)
{
- memcpy(this_cpu_ptr(&current_brk), brk, sizeof(*brk));
+ memcpy(this_cpu_ptr(&current_brk[nr]), brk, sizeof(*brk));
if (dawr_enabled())
// Power8 or later
- set_dawr(brk);
+ set_dawr(nr, brk);
else if (IS_ENABLED(CONFIG_PPC_8xx))
set_breakpoint_8xx(brk);
else if (!cpu_has_feature(CPU_FTR_ARCH_207S))
@@ -822,19 +891,6 @@ bool ppc_breakpoint_available(void)
}
EXPORT_SYMBOL_GPL(ppc_breakpoint_available);
-static inline bool hw_brk_match(struct arch_hw_breakpoint *a,
- struct arch_hw_breakpoint *b)
-{
- if (a->address != b->address)
- return false;
- if (a->type != b->type)
- return false;
- if (a->len != b->len)
- return false;
- /* no need to check hw_len. it's calculated from address and len */
- return true;
-}
-
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
static inline bool tm_enabled(struct task_struct *tsk)
@@ -1056,12 +1112,13 @@ void restore_tm_state(struct pt_regs *regs)
#endif
restore_math(regs);
- regs->msr |= msr_diff;
+ regs_set_return_msr(regs, regs->msr | msr_diff);
}
-#else
+#else /* !CONFIG_PPC_TRANSACTIONAL_MEM */
#define tm_recheckpoint_new_task(new)
#define __switch_to_tm(prev, new)
+void tm_reclaim_current(uint8_t cause) {}
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
static inline void save_sprs(struct thread_struct *t)
@@ -1070,6 +1127,10 @@ static inline void save_sprs(struct thread_struct *t)
if (cpu_has_feature(CPU_FTR_ALTIVEC))
t->vrsave = mfspr(SPRN_VRSAVE);
#endif
+#ifdef CONFIG_SPE
+ if (cpu_has_feature(CPU_FTR_SPE))
+ t->spefscr = mfspr(SPRN_SPEFSCR);
+#endif
#ifdef CONFIG_PPC_BOOK3S_64
if (cpu_has_feature(CPU_FTR_DSCR))
t->dscr = mfspr(SPRN_DSCR);
@@ -1090,9 +1151,41 @@ static inline void save_sprs(struct thread_struct *t)
t->tar = mfspr(SPRN_TAR);
}
#endif
+}
+
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+void kvmppc_save_user_regs(void)
+{
+ unsigned long usermsr;
+
+ if (!current->thread.regs)
+ return;
+
+ usermsr = current->thread.regs->msr;
+
+ if (usermsr & MSR_FP)
+ save_fpu(current);
+
+ if (usermsr & MSR_VEC)
+ save_altivec(current);
+
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ if (usermsr & MSR_TM) {
+ current->thread.tm_tfhar = mfspr(SPRN_TFHAR);
+ current->thread.tm_tfiar = mfspr(SPRN_TFIAR);
+ current->thread.tm_texasr = mfspr(SPRN_TEXASR);
+ current->thread.regs->msr &= ~MSR_TM;
+ }
+#endif
+}
+EXPORT_SYMBOL_GPL(kvmppc_save_user_regs);
- thread_pkey_regs_save(t);
+void kvmppc_save_current_sprs(void)
+{
+ save_sprs(&current->thread);
}
+EXPORT_SYMBOL_GPL(kvmppc_save_current_sprs);
+#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
static inline void restore_sprs(struct thread_struct *old_thread,
struct thread_struct *new_thread)
@@ -1102,6 +1195,11 @@ static inline void restore_sprs(struct thread_struct *old_thread,
old_thread->vrsave != new_thread->vrsave)
mtspr(SPRN_VRSAVE, new_thread->vrsave);
#endif
+#ifdef CONFIG_SPE
+ if (cpu_has_feature(CPU_FTR_SPE) &&
+ old_thread->spefscr != new_thread->spefscr)
+ mtspr(SPRN_SPEFSCR, new_thread->spefscr);
+#endif
#ifdef CONFIG_PPC_BOOK3S_64
if (cpu_has_feature(CPU_FTR_DSCR)) {
u64 dscr = get_paca()->dscr_default;
@@ -1132,7 +1230,6 @@ static inline void restore_sprs(struct thread_struct *old_thread,
mtspr(SPRN_TIDR, new_thread->tidr);
#endif
- thread_pkey_regs_restore(new_thread, old_thread);
}
struct task_struct *__switch_to(struct task_struct *prev,
@@ -1140,7 +1237,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
{
struct thread_struct *new_thread, *old_thread;
struct task_struct *last;
-#ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_PPC_64S_HASH_MMU
struct ppc64_tlb_batch *batch;
#endif
@@ -1149,7 +1246,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
WARN_ON(!irqs_disabled());
-#ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_PPC_64S_HASH_MMU
batch = this_cpu_ptr(&ppc64_tlb_batch);
if (batch->active) {
current_thread_info()->local_flags |= _TLF_LAZY_MMU;
@@ -1157,6 +1254,19 @@ struct task_struct *__switch_to(struct task_struct *prev,
__flush_tlb_pending(batch);
batch->active = 0;
}
+
+ /*
+ * On POWER9 the copy-paste buffer can only paste into
+ * foreign real addresses, so unprivileged processes can not
+	 * foreign real addresses, so unprivileged processes cannot
+ * foreign real mappings. If the new process has the foreign
+ * real address mappings, we must issue a cp_abort to clear
+ * any state and prevent snooping, corruption or a covert
+ * channel. ISA v3.1 supports paste into local memory.
+ */
+ if (new->mm && (cpu_has_feature(CPU_FTR_ARCH_31) ||
+ atomic_read(&new->mm->context.vas_windows)))
+ asm volatile(PPC_CP_ABORT);
#endif /* CONFIG_PPC_BOOK3S_64 */
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
@@ -1167,8 +1277,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
* schedule DABR
*/
#ifndef CONFIG_HAVE_HW_BREAKPOINT
- if (unlikely(!hw_brk_match(this_cpu_ptr(&current_brk), &new->thread.hw_brk)))
- __set_breakpoint(&new->thread.hw_brk);
+ switch_hw_breakpoint(new);
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
#endif
@@ -1193,37 +1302,50 @@ struct task_struct *__switch_to(struct task_struct *prev,
}
/*
- * Call restore_sprs() before calling _switch(). If we move it after
- * _switch() then we miss out on calling it for new tasks. The reason
- * for this is we manually create a stack frame for new tasks that
- * directly returns through ret_from_fork() or
+ * Call restore_sprs() and set_return_regs_changed() before calling
+ * _switch(). If we move it after _switch() then we miss out on calling
+ * it for new tasks. The reason for this is we manually create a stack
+ * frame for new tasks that directly returns through ret_from_fork() or
* ret_from_kernel_thread(). See copy_thread() for details.
*/
restore_sprs(old_thread, new_thread);
+ set_return_regs_changed(); /* _switch changes stack (and regs) */
+
+ if (!IS_ENABLED(CONFIG_PPC_BOOK3S_64))
+ kuap_assert_locked();
+
last = _switch(old_thread, new_thread);
+ /*
+ * Nothing after _switch will be run for newly created tasks,
+ * because they switch directly to ret_from_fork/ret_from_kernel_thread
+ * etc. Code added here should have a comment explaining why that is
+ * okay.
+ */
+
#ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_PPC_64S_HASH_MMU
+ /*
+ * This applies to a process that was context switched while inside
+ * arch_enter_lazy_mmu_mode(), to re-activate the batch that was
+ * deactivated above, before _switch(). This will never be the case
+ * for new tasks.
+ */
if (current_thread_info()->local_flags & _TLF_LAZY_MMU) {
current_thread_info()->local_flags &= ~_TLF_LAZY_MMU;
batch = this_cpu_ptr(&ppc64_tlb_batch);
batch->active = 1;
}
+#endif
- if (current->thread.regs) {
+ /*
+ * Math facilities are masked out of the child MSR in copy_thread.
+	 * A new task does not need restore_math() because it will fault
+	 * them in on demand.
+ */
+ if (current->thread.regs)
restore_math(current->thread.regs);
-
- /*
- * The copy-paste buffer can only store into foreign real
- * addresses, so unprivileged processes can not see the
- * data or use it in any way unless they have foreign real
- * mappings. If the new process has the foreign real address
- * mappings, we must issue a cp_abort to clear any state and
- * prevent snooping, corruption or a covert channel.
- */
- if (current->thread.used_vas)
- asm volatile(PPC_CP_ABORT);
- }
#endif /* CONFIG_PPC_BOOK3S_64 */
return last;
@@ -1234,29 +1356,31 @@ struct task_struct *__switch_to(struct task_struct *prev,
static void show_instructions(struct pt_regs *regs)
{
int i;
+ unsigned long nip = regs->nip;
unsigned long pc = regs->nip - (NR_INSN_TO_PRINT * 3 / 4 * sizeof(int));
printk("Instruction dump:");
+ /*
+ * If we were executing with the MMU off for instructions, adjust pc
+ * rather than printing XXXXXXXX.
+ */
+ if (!IS_ENABLED(CONFIG_BOOKE) && !(regs->msr & MSR_IR)) {
+ pc = (unsigned long)phys_to_virt(pc);
+ nip = (unsigned long)phys_to_virt(regs->nip);
+ }
+
for (i = 0; i < NR_INSN_TO_PRINT; i++) {
int instr;
if (!(i % 8))
pr_cont("\n");
-#if !defined(CONFIG_BOOKE)
- /* If executing with the IMMU off, adjust pc rather
- * than print XXXXXXXX.
- */
- if (!(regs->msr & MSR_IR))
- pc = (unsigned long)phys_to_virt(pc);
-#endif
-
if (!__kernel_text_address(pc) ||
- probe_kernel_address((const void *)pc, instr)) {
+ get_kernel_nofault(instr, (const void *)pc)) {
pr_cont("XXXXXXXX ");
} else {
- if (regs->nip == pc)
+ if (nip == pc)
pr_cont("<%08x> ", instr);
else
pr_cont("%08x ", instr);
@@ -1287,7 +1411,8 @@ void show_user_instructions(struct pt_regs *regs)
for (i = 0; i < 8 && n; i++, n--, pc += sizeof(int)) {
int instr;
- if (probe_user_read(&instr, (void __user *)pc, sizeof(instr))) {
+ if (copy_from_user_nofault(&instr, (void __user *)pc,
+ sizeof(instr))) {
seq_buf_printf(&s, "XXXXXXXX ");
continue;
}
@@ -1384,19 +1509,15 @@ static void print_msr_bits(unsigned long val)
#ifdef CONFIG_PPC64
#define REG "%016lx"
#define REGS_PER_LINE 4
-#define LAST_VOLATILE 13
#else
#define REG "%08lx"
#define REGS_PER_LINE 8
-#define LAST_VOLATILE 12
#endif
-void show_regs(struct pt_regs * regs)
+static void __show_regs(struct pt_regs *regs)
{
int i, trap;
- show_regs_print_info(KERN_DEFAULT);
-
printk("NIP: "REG" LR: "REG" CTR: "REG"\n",
regs->nip, regs->link, regs->ctr);
printk("REGS: %px TRAP: %04lx %s (%s)\n",
@@ -1405,14 +1526,17 @@ void show_regs(struct pt_regs * regs)
print_msr_bits(regs->msr);
pr_cont(" CR: %08lx XER: %08lx\n", regs->ccr, regs->xer);
trap = TRAP(regs);
- if ((TRAP(regs) != 0xc00) && cpu_has_feature(CPU_FTR_CFAR))
+ if (!trap_is_syscall(regs) && cpu_has_feature(CPU_FTR_CFAR))
pr_cont("CFAR: "REG" ", regs->orig_gpr3);
- if (trap == 0x200 || trap == 0x300 || trap == 0x600)
-#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
- pr_cont("DEAR: "REG" ESR: "REG" ", regs->dar, regs->dsisr);
-#else
- pr_cont("DAR: "REG" DSISR: %08lx ", regs->dar, regs->dsisr);
-#endif
+ if (trap == INTERRUPT_MACHINE_CHECK ||
+ trap == INTERRUPT_DATA_STORAGE ||
+ trap == INTERRUPT_ALIGNMENT) {
+ if (IS_ENABLED(CONFIG_4xx) || IS_ENABLED(CONFIG_BOOKE))
+ pr_cont("DEAR: "REG" ESR: "REG" ", regs->dear, regs->esr);
+ else
+ pr_cont("DAR: "REG" DSISR: %08lx ", regs->dar, regs->dsisr);
+ }
+
#ifdef CONFIG_PPC64
pr_cont("IRQMASK: %lx ", regs->softe);
#endif
@@ -1425,19 +1549,23 @@ void show_regs(struct pt_regs * regs)
if ((i % REGS_PER_LINE) == 0)
pr_cont("\nGPR%02d: ", i);
pr_cont(REG " ", regs->gpr[i]);
- if (i == LAST_VOLATILE && !FULL_REGS(regs))
- break;
}
pr_cont("\n");
-#ifdef CONFIG_KALLSYMS
/*
	 * Lookup NIP late so we have the best chance of getting the
	 * above info out without failing.
*/
- printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
- printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
-#endif
- show_stack(current, (unsigned long *) regs->gpr[1]);
+ if (IS_ENABLED(CONFIG_KALLSYMS)) {
+ printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
+ printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
+ }
+}
+
+void show_regs(struct pt_regs *regs)
+{
+ show_regs_print_info(KERN_DEFAULT);
+ __show_regs(regs);
+ show_stack(current, (unsigned long *) regs->gpr[1], KERN_DEFAULT);
if (!user_mode(regs))
show_instructions(regs);
}
@@ -1451,34 +1579,26 @@ void flush_thread(void)
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
}
-#ifdef CONFIG_PPC_BOOK3S_64
void arch_setup_new_exec(void)
{
- if (radix_enabled())
- return;
- hash__setup_new_exec();
-}
-#endif
-int set_thread_uses_vas(void)
-{
#ifdef CONFIG_PPC_BOOK3S_64
- if (!cpu_has_feature(CPU_FTR_ARCH_300))
- return -EINVAL;
-
- current->thread.used_vas = 1;
-
+ if (!radix_enabled())
+ hash__setup_new_exec();
+#endif
/*
- * Even a process that has no foreign real address mapping can use
- * an unpaired COPY instruction (to no real effect). Issue CP_ABORT
- * to clear any pending COPY and prevent a covert channel.
- *
- * __switch_to() will issue CP_ABORT on future context switches.
+ * If we exec out of a kernel thread then thread.regs will not be
+ * set. Do it now.
*/
- asm volatile(PPC_CP_ABORT);
+ if (!current->thread.regs) {
+ struct pt_regs *regs = task_stack_page(current) + THREAD_SIZE;
+ current->thread.regs = regs - 1;
+ }
-#endif /* CONFIG_PPC_BOOK3S_64 */
- return 0;
+#ifdef CONFIG_PPC_MEM_KEYS
+ current->thread.regs->amr = default_amr;
+ current->thread.regs->iamr = default_iamr;
+#endif
}
#ifdef CONFIG_PPC64
@@ -1535,11 +1655,6 @@ EXPORT_SYMBOL_GPL(set_thread_tidr);
#endif /* CONFIG_PPC64 */
-void
-release_thread(struct task_struct *t)
-{
-}
-
/*
* this gets called so that we can store coprocessor state into memory and
* copy the current task into the new thread.
@@ -1568,7 +1683,7 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
static void setup_ksp_vsid(struct task_struct *p, unsigned long sp)
{
-#ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_PPC_64S_HASH_MMU
unsigned long sp_vsid;
unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp;
@@ -1593,56 +1708,63 @@ static void setup_ksp_vsid(struct task_struct *p, unsigned long sp)
/*
* Copy architecture-specific thread state
*/
-int copy_thread_tls(unsigned long clone_flags, unsigned long usp,
- unsigned long kthread_arg, struct task_struct *p,
- unsigned long tls)
+int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
{
+ unsigned long clone_flags = args->flags;
+ unsigned long usp = args->stack;
+ unsigned long tls = args->tls;
struct pt_regs *childregs, *kregs;
extern void ret_from_fork(void);
+ extern void ret_from_fork_scv(void);
extern void ret_from_kernel_thread(void);
void (*f)(void);
unsigned long sp = (unsigned long)task_stack_page(p) + THREAD_SIZE;
struct thread_info *ti = task_thread_info(p);
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+ int i;
+#endif
klp_init_thread_info(p);
/* Copy registers */
sp -= sizeof(struct pt_regs);
childregs = (struct pt_regs *) sp;
- if (unlikely(p->flags & PF_KTHREAD)) {
+ if (unlikely(args->fn)) {
/* kernel thread */
memset(childregs, 0, sizeof(struct pt_regs));
childregs->gpr[1] = sp + sizeof(struct pt_regs);
/* function */
- if (usp)
- childregs->gpr[14] = ppc_function_entry((void *)usp);
+ if (args->fn)
+ childregs->gpr[14] = ppc_function_entry((void *)args->fn);
#ifdef CONFIG_PPC64
clear_tsk_thread_flag(p, TIF_32BIT);
childregs->softe = IRQS_ENABLED;
#endif
- childregs->gpr[15] = kthread_arg;
+ childregs->gpr[15] = (unsigned long)args->fn_arg;
p->thread.regs = NULL; /* no user register state */
ti->flags |= _TIF_RESTOREALL;
f = ret_from_kernel_thread;
} else {
/* user thread */
struct pt_regs *regs = current_pt_regs();
- CHECK_FULL_REGS(regs);
*childregs = *regs;
if (usp)
childregs->gpr[1] = usp;
p->thread.regs = childregs;
- childregs->gpr[3] = 0; /* Result from fork() */
+ /* 64s sets this in ret_from_fork */
+ if (!IS_ENABLED(CONFIG_PPC_BOOK3S_64))
+ childregs->gpr[3] = 0; /* Result from fork() */
if (clone_flags & CLONE_SETTLS) {
-#ifdef CONFIG_PPC64
if (!is_32bit_task())
childregs->gpr[13] = tls;
else
-#endif
childregs->gpr[2] = tls;
}
- f = ret_from_fork;
+ if (trap_is_scv(regs))
+ f = ret_from_fork_scv;
+ else
+ f = ret_from_fork;
}
childregs->msr &= ~(MSR_FP|MSR_VEC|MSR_VSX);
sp -= STACK_FRAME_OVERHEAD;
@@ -1660,17 +1782,23 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long usp,
kregs = (struct pt_regs *) sp;
sp -= STACK_FRAME_OVERHEAD;
p->thread.ksp = sp;
-#ifdef CONFIG_PPC32
- p->thread.ksp_limit = (unsigned long)end_of_stack(p);
-#endif
#ifdef CONFIG_HAVE_HW_BREAKPOINT
- p->thread.ptrace_bps[0] = NULL;
+ for (i = 0; i < nr_wp_slots(); i++)
+ p->thread.ptrace_bps[i] = NULL;
#endif
+#ifdef CONFIG_PPC_FPU_REGS
p->thread.fp_save_area = NULL;
+#endif
#ifdef CONFIG_ALTIVEC
p->thread.vr_save_area = NULL;
#endif
+#if defined(CONFIG_PPC_BOOK3S_32) && defined(CONFIG_PPC_KUAP)
+ p->thread.kuap = KUAP_NONE;
+#endif
+#if defined(CONFIG_BOOKE_OR_40x) && defined(CONFIG_PPC_KUAP)
+ p->thread.pid = MMU_NO_CONTEXT;
+#endif
setup_ksp_vsid(p, sp);
@@ -1684,6 +1812,16 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long usp,
p->thread.tidr = 0;
#endif
+ /*
+ * Run with the current AMR value of the kernel
+ */
+#ifdef CONFIG_PPC_PKEY
+ if (mmu_has_feature(MMU_FTR_BOOK3S_KUAP))
+ kregs->amr = AMR_KUAP_BLOCKED;
+
+ if (mmu_has_feature(MMU_FTR_BOOK3S_KUEP))
+ kregs->iamr = AMR_KUEP_BLOCKED;
+#endif
kregs->nip = ppc_function_entry(f);
return 0;
}
@@ -1698,20 +1836,9 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
#ifdef CONFIG_PPC64
unsigned long load_addr = regs->gpr[2]; /* saved by ELF_PLAT_INIT */
-#ifdef CONFIG_PPC_BOOK3S_64
- if (!radix_enabled())
+ if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && !radix_enabled())
preload_new_slb_context(start, sp);
#endif
-#endif
-
- /*
- * If we exec out of a kernel thread then thread.regs will not be
- * set. Do it now.
- */
- if (!current->thread.regs) {
- struct pt_regs *regs = task_stack_page(current) + THREAD_SIZE;
- current->thread.regs = regs - 1;
- }
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
@@ -1723,20 +1850,13 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
tm_reclaim_current(0);
#endif
- memset(regs->gpr, 0, sizeof(regs->gpr));
+ memset(&regs->gpr[1], 0, sizeof(regs->gpr) - sizeof(regs->gpr[0]));
regs->ctr = 0;
regs->link = 0;
regs->xer = 0;
regs->ccr = 0;
regs->gpr[1] = sp;
- /*
- * We have just cleared all the nonvolatile GPRs, so make
- * FULL_REGS(regs) return true. This is necessary to allow
- * ptrace to examine the thread immediately after exec.
- */
- regs->trap &= ~1UL;
-
#ifdef CONFIG_PPC32
regs->mq = 0;
regs->nip = start;
@@ -1781,21 +1901,24 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
}
regs->gpr[2] = toc;
}
- regs->nip = entry;
- regs->msr = MSR_USER64;
+ regs_set_return_ip(regs, entry);
+ regs_set_return_msr(regs, MSR_USER64);
} else {
- regs->nip = start;
regs->gpr[2] = 0;
- regs->msr = MSR_USER32;
+ regs_set_return_ip(regs, start);
+ regs_set_return_msr(regs, MSR_USER32);
}
+
#endif
#ifdef CONFIG_VSX
current->thread.used_vsr = 0;
#endif
current->thread.load_slb = 0;
current->thread.load_fp = 0;
+#ifdef CONFIG_PPC_FPU_REGS
memset(&current->thread.fp_state, 0, sizeof(current->thread.fp_state));
current->thread.fp_save_area = NULL;
+#endif
#ifdef CONFIG_ALTIVEC
memset(&current->thread.vr_state, 0, sizeof(current->thread.vr_state));
current->thread.vr_state.vscr.u[3] = 0x00010000; /* Java mode disabled */
@@ -1816,8 +1939,6 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
current->thread.tm_tfiar = 0;
current->thread.load_tm = 0;
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
-
- thread_pkey_regs_init(&current->thread);
}
EXPORT_SYMBOL(start_thread);
@@ -1833,7 +1954,6 @@ int set_fpexc_mode(struct task_struct *tsk, unsigned int val)
* fpexc_mode. fpexc_mode is also used for setting FP exception
	 * mode (async, precise, disabled) for 'Classic' FP. */
if (val & PR_FP_EXC_SW_ENABLE) {
-#ifdef CONFIG_SPE
if (cpu_has_feature(CPU_FTR_SPE)) {
/*
* When the sticky exception bits are set
@@ -1847,16 +1967,15 @@ int set_fpexc_mode(struct task_struct *tsk, unsigned int val)
* anyway to restore the prctl settings from
* the saved environment.
*/
+#ifdef CONFIG_SPE
tsk->thread.spefscr_last = mfspr(SPRN_SPEFSCR);
tsk->thread.fpexc_mode = val &
(PR_FP_EXC_SW_ENABLE | PR_FP_ALL_EXCEPT);
+#endif
return 0;
} else {
return -EINVAL;
}
-#else
- return -EINVAL;
-#endif
}
/* on a CONFIG_SPE this does not hurt us. The bits that
@@ -1867,18 +1986,18 @@ int set_fpexc_mode(struct task_struct *tsk, unsigned int val)
if (val > PR_FP_EXC_PRECISE)
return -EINVAL;
tsk->thread.fpexc_mode = __pack_fe01(val);
- if (regs != NULL && (regs->msr & MSR_FP) != 0)
- regs->msr = (regs->msr & ~(MSR_FE0|MSR_FE1))
- | tsk->thread.fpexc_mode;
+ if (regs != NULL && (regs->msr & MSR_FP) != 0) {
+ regs_set_return_msr(regs, (regs->msr & ~(MSR_FE0|MSR_FE1))
+ | tsk->thread.fpexc_mode);
+ }
return 0;
}
int get_fpexc_mode(struct task_struct *tsk, unsigned long adr)
{
- unsigned int val;
+ unsigned int val = 0;
- if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE)
-#ifdef CONFIG_SPE
+ if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE) {
if (cpu_has_feature(CPU_FTR_SPE)) {
/*
* When the sticky exception bits are set
@@ -1892,15 +2011,15 @@ int get_fpexc_mode(struct task_struct *tsk, unsigned long adr)
* anyway to restore the prctl settings from
* the saved environment.
*/
+#ifdef CONFIG_SPE
tsk->thread.spefscr_last = mfspr(SPRN_SPEFSCR);
val = tsk->thread.fpexc_mode;
+#endif
} else
return -EINVAL;
-#else
- return -EINVAL;
-#endif
- else
+ } else {
val = __unpack_fe01(tsk->thread.fpexc_mode);
+ }
return put_user(val, (unsigned int __user *) adr);
}
@@ -1916,9 +2035,9 @@ int set_endian(struct task_struct *tsk, unsigned int val)
return -EINVAL;
if (val == PR_ENDIAN_BIG)
- regs->msr &= ~MSR_LE;
+ regs_set_return_msr(regs, regs->msr & ~MSR_LE);
else if (val == PR_ENDIAN_LITTLE || val == PR_ENDIAN_PPC_LITTLE)
- regs->msr |= MSR_LE;
+ regs_set_return_msr(regs, regs->msr | MSR_LE);
else
return -EINVAL;
@@ -1976,6 +2095,35 @@ static inline int valid_irq_stack(unsigned long sp, struct task_struct *p,
return 0;
}
+static inline int valid_emergency_stack(unsigned long sp, struct task_struct *p,
+ unsigned long nbytes)
+{
+#ifdef CONFIG_PPC64
+ unsigned long stack_page;
+ unsigned long cpu = task_cpu(p);
+
+ if (!paca_ptrs)
+ return 0;
+
+ stack_page = (unsigned long)paca_ptrs[cpu]->emergency_sp - THREAD_SIZE;
+ if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes)
+ return 1;
+
+# ifdef CONFIG_PPC_BOOK3S_64
+ stack_page = (unsigned long)paca_ptrs[cpu]->nmi_emergency_sp - THREAD_SIZE;
+ if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes)
+ return 1;
+
+ stack_page = (unsigned long)paca_ptrs[cpu]->mc_emergency_sp - THREAD_SIZE;
+ if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes)
+ return 1;
+# endif
+#endif
+
+ return 0;
+}
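
Each branch above repeats the same bounds check; a generic sketch of that predicate (the helper name is illustrative, not a kernel function):

/* sp must lie on the candidate stack page and leave at least nbytes of
 * frame below the top of that page. */
int sp_on_stack(unsigned long sp, unsigned long stack_page,
		unsigned long thread_size, unsigned long nbytes)
{
	return sp >= stack_page && sp <= stack_page + thread_size - nbytes;
}
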
+
+
int validate_sp(unsigned long sp, struct task_struct *p,
unsigned long nbytes)
{
@@ -1987,30 +2135,30 @@ int validate_sp(unsigned long sp, struct task_struct *p,
if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes)
return 1;
- return valid_irq_stack(sp, p, nbytes);
+ if (valid_irq_stack(sp, p, nbytes))
+ return 1;
+
+ return valid_emergency_stack(sp, p, nbytes);
}
EXPORT_SYMBOL(validate_sp);
-static unsigned long __get_wchan(struct task_struct *p)
+static unsigned long ___get_wchan(struct task_struct *p)
{
unsigned long ip, sp;
int count = 0;
- if (!p || p == current || p->state == TASK_RUNNING)
- return 0;
-
sp = p->thread.ksp;
if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD))
return 0;
do {
- sp = *(unsigned long *)sp;
+ sp = READ_ONCE_NOCHECK(*(unsigned long *)sp);
if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD) ||
- p->state == TASK_RUNNING)
+ task_is_running(p))
return 0;
if (count > 0) {
- ip = ((unsigned long *)sp)[STACK_FRAME_LR_SAVE];
+ ip = READ_ONCE_NOCHECK(((unsigned long *)sp)[STACK_FRAME_LR_SAVE]);
if (!in_sched_functions(ip))
return ip;
}
@@ -2018,14 +2166,14 @@ static unsigned long __get_wchan(struct task_struct *p)
return 0;
}
-unsigned long get_wchan(struct task_struct *p)
+unsigned long __get_wchan(struct task_struct *p)
{
unsigned long ret;
if (!try_get_task_stack(p))
return 0;
- ret = __get_wchan(p);
+ ret = ___get_wchan(p);
put_task_stack(p);
@@ -2034,15 +2182,15 @@ unsigned long get_wchan(struct task_struct *p)
static int kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH;
-void show_stack(struct task_struct *tsk, unsigned long *stack)
+void __no_sanitize_address show_stack(struct task_struct *tsk,
+ unsigned long *stack,
+ const char *loglvl)
{
unsigned long sp, ip, lr, newsp;
int count = 0;
int firstframe = 1;
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
unsigned long ret_addr;
int ftrace_idx = 0;
-#endif
if (tsk == NULL)
tsk = current;
@@ -2053,13 +2201,13 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
sp = (unsigned long) stack;
if (sp == 0) {
if (tsk == current)
- sp = current_stack_pointer();
+ sp = current_stack_frame();
else
sp = tsk->thread.ksp;
}
lr = 0;
- printk("Call Trace:\n");
+ printk("%sCall Trace:\n", loglvl);
do {
if (!validate_sp(sp, tsk, STACK_FRAME_OVERHEAD))
break;
@@ -2068,13 +2216,12 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
newsp = stack[0];
ip = stack[STACK_FRAME_LR_SAVE];
if (!firstframe || ip != lr) {
- printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ printk("%s["REG"] ["REG"] %pS",
+ loglvl, sp, ip, (void *)ip);
ret_addr = ftrace_graph_ret_addr(current,
&ftrace_idx, ip, stack);
if (ret_addr != ip)
pr_cont(" (%pS)", (void *)ret_addr);
-#endif
if (firstframe)
pr_cont(" (unreliable)");
pr_cont("\n");
@@ -2085,13 +2232,18 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
* See if this is an exception frame.
* We look for the "regshere" marker in the current frame.
*/
- if (validate_sp(sp, tsk, STACK_INT_FRAME_SIZE)
+ if (validate_sp(sp, tsk, STACK_FRAME_WITH_PT_REGS)
&& stack[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) {
struct pt_regs *regs = (struct pt_regs *)
(sp + STACK_FRAME_OVERHEAD);
+
lr = regs->link;
- printk("--- interrupt: %lx at %pS\n LR = %pS\n",
- regs->trap, (void *)regs->nip, (void *)lr);
+ printk("%s--- interrupt: %lx at %pS\n",
+ loglvl, regs->trap, (void *)regs->nip);
+ __show_regs(regs);
+ printk("%s--- interrupt: %lx\n",
+ loglvl, regs->trap);
+
firstframe = 1;
}
@@ -2151,46 +2303,6 @@ void notrace __ppc64_runlatch_off(void)
unsigned long arch_align_stack(unsigned long sp)
{
if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
- sp -= get_random_int() & ~PAGE_MASK;
+ sp -= prandom_u32_max(PAGE_SIZE);
return sp & ~0xf;
}
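
prandom_u32_max(PAGE_SIZE) above yields a uniform offset in [0, PAGE_SIZE), replacing the masked get_random_int(). A userspace sketch of the effect, assuming a 4 KiB page size (rand() stands in for the kernel RNG):

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096UL

unsigned long align_stack_demo(unsigned long sp)
{
	sp -= (unsigned long)rand() % PAGE_SIZE;  /* random sub-page offset */
	return sp & ~0xfUL;                       /* keep 16-byte alignment */
}

int main(void)
{
	printf("%#lx\n", align_stack_demo(0x7fffffffe000UL));
	return 0;
}
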
-
-static inline unsigned long brk_rnd(void)
-{
- unsigned long rnd = 0;
-
- /* 8MB for 32bit, 1GB for 64bit */
- if (is_32bit_task())
- rnd = (get_random_long() % (1UL<<(23-PAGE_SHIFT)));
- else
- rnd = (get_random_long() % (1UL<<(30-PAGE_SHIFT)));
-
- return rnd << PAGE_SHIFT;
-}
-
-unsigned long arch_randomize_brk(struct mm_struct *mm)
-{
- unsigned long base = mm->brk;
- unsigned long ret;
-
-#ifdef CONFIG_PPC_BOOK3S_64
- /*
- * If we are using 1TB segments and we are allowed to randomise
- * the heap, we can put it above 1TB so it is backed by a 1TB
- * segment. Otherwise the heap will be in the bottom 1TB
- * which always uses 256MB segments and this may result in a
- * performance penalty. We don't need to worry about radix. For
- * radix, mmu_highuser_ssize remains unchanged from 256MB.
- */
- if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
- base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
-#endif
-
- ret = PAGE_ALIGN(base + brk_rnd());
-
- if (ret < mm->brk)
- return mm->brk;
-
- return ret;
-}
-
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 6620f37abe73..1eed87d954ba 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -11,7 +11,6 @@
#undef DEBUG
-#include <stdarg.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/init.h>
@@ -30,8 +29,9 @@
#include <linux/of_fdt.h>
#include <linux/libfdt.h>
#include <linux/cpu.h>
+#include <linux/pgtable.h>
+#include <linux/seq_buf.h>
-#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/page.h>
#include <asm/processor.h>
@@ -41,12 +41,11 @@
#include <asm/smp.h>
#include <asm/mmu.h>
#include <asm/paca.h>
-#include <asm/pgtable.h>
#include <asm/powernv.h>
#include <asm/iommu.h>
#include <asm/btext.h>
#include <asm/sections.h>
-#include <asm/machdep.h>
+#include <asm/setup.h>
#include <asm/pci-bridge.h>
#include <asm/kexec.h>
#include <asm/opal.h>
@@ -56,6 +55,7 @@
#include <asm/dt_cpu_ftrs.h>
#include <asm/drmem.h>
#include <asm/ultravisor.h>
+#include <asm/prom.h>
#include <mm/mmu_decl.h>
@@ -65,6 +65,8 @@
#define DBG(fmt...)
#endif
+int *chip_id_lookup_table;
+
#ifdef CONFIG_PPC64
int __initdata iommu_is_off;
int __initdata iommu_force_on;
@@ -96,8 +98,8 @@ static inline int overlaps_initrd(unsigned long start, unsigned long size)
if (!initrd_start)
return 0;
- return (start + size) > _ALIGN_DOWN(initrd_start, PAGE_SIZE) &&
- start <= _ALIGN_UP(initrd_end, PAGE_SIZE);
+ return (start + size) > ALIGN_DOWN(initrd_start, PAGE_SIZE) &&
+ start <= ALIGN(initrd_end, PAGE_SIZE);
#else
return 0;
#endif
@@ -136,7 +138,7 @@ static void __init move_device_tree(void)
}
/*
- * ibm,pa-features is a per-cpu property that contains a string of
+ * ibm,pa/pi-features is a per-cpu property that contains a string of
* attribute descriptors, each of which has a 2 byte header plus up
* to 254 bytes worth of processor attribute bits. First header
* byte specifies the number of bytes following the header.
@@ -148,24 +150,25 @@ static void __init move_device_tree(void)
* is supported/not supported. Note that the bit numbers are
* big-endian to match the definition in PAPR.
*/
-static struct ibm_pa_feature {
+struct ibm_feature {
unsigned long cpu_features; /* CPU_FTR_xxx bit */
unsigned long mmu_features; /* MMU_FTR_xxx bit */
unsigned int cpu_user_ftrs; /* PPC_FEATURE_xxx bit */
unsigned int cpu_user_ftrs2; /* PPC_FEATURE2_xxx bit */
- unsigned char pabyte; /* byte number in ibm,pa-features */
+ unsigned char pabyte; /* byte number in ibm,pa/pi-features */
unsigned char pabit; /* bit number (big-endian) */
unsigned char invert; /* if 1, pa bit set => clear feature */
-} ibm_pa_features[] __initdata = {
+};
+
+static struct ibm_feature ibm_pa_features[] __initdata = {
{ .pabyte = 0, .pabit = 0, .cpu_user_ftrs = PPC_FEATURE_HAS_MMU },
{ .pabyte = 0, .pabit = 1, .cpu_user_ftrs = PPC_FEATURE_HAS_FPU },
{ .pabyte = 0, .pabit = 3, .cpu_features = CPU_FTR_CTRL },
{ .pabyte = 0, .pabit = 6, .cpu_features = CPU_FTR_NOEXECUTE },
{ .pabyte = 1, .pabit = 2, .mmu_features = MMU_FTR_CI_LARGE_PAGE },
#ifdef CONFIG_PPC_RADIX_MMU
- { .pabyte = 40, .pabit = 0, .mmu_features = MMU_FTR_TYPE_RADIX },
+ { .pabyte = 40, .pabit = 0, .mmu_features = MMU_FTR_TYPE_RADIX | MMU_FTR_GTSE },
#endif
- { .pabyte = 1, .pabit = 1, .invert = 1, .cpu_features = CPU_FTR_NODSISRALIGN },
{ .pabyte = 5, .pabit = 0, .cpu_features = CPU_FTR_REAL_LE,
.cpu_user_ftrs = PPC_FEATURE_TRUE_LE },
/*
@@ -175,11 +178,23 @@ static struct ibm_pa_feature {
*/
{ .pabyte = 22, .pabit = 0, .cpu_features = CPU_FTR_TM_COMP,
.cpu_user_ftrs2 = PPC_FEATURE2_HTM_COMP | PPC_FEATURE2_HTM_NOSC_COMP },
+
+ { .pabyte = 64, .pabit = 0, .cpu_features = CPU_FTR_DAWR1 },
+};
+
+/*
+ * The ibm,pi-features property describes processor-specific options not
+ * covered by ibm,pa-features. Right now only byte 0, bit 3 is used; it
+ * indicates that a DSI interrupt occurs when a paste is performed on a
+ * suspended NX window.
+ */
+static struct ibm_feature ibm_pi_features[] __initdata = {
+ { .pabyte = 0, .pabit = 3, .mmu_features = MMU_FTR_NX_DSI },
};
static void __init scan_features(unsigned long node, const unsigned char *ftrs,
unsigned long tablelen,
- struct ibm_pa_feature *fp,
+ struct ibm_feature *fp,
unsigned long ft_size)
{
unsigned long i, len, bit;
@@ -216,20 +231,21 @@ static void __init scan_features(unsigned long node, const unsigned char *ftrs,
}
}
-static void __init check_cpu_pa_features(unsigned long node)
+static void __init check_cpu_features(unsigned long node, char *name,
+ struct ibm_feature *fp,
+ unsigned long size)
{
const unsigned char *pa_ftrs;
int tablelen;
- pa_ftrs = of_get_flat_dt_prop(node, "ibm,pa-features", &tablelen);
+ pa_ftrs = of_get_flat_dt_prop(node, name, &tablelen);
if (pa_ftrs == NULL)
return;
- scan_features(node, pa_ftrs, tablelen,
- ibm_pa_features, ARRAY_SIZE(ibm_pa_features));
+ scan_features(node, pa_ftrs, tablelen, fp, size);
}
-#ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_PPC_64S_HASH_MMU
static void __init init_mmu_slb_size(unsigned long node)
{
const __be32 *slb_size_ptr;
@@ -266,7 +282,7 @@ static struct feature_property {
};
#if defined(CONFIG_44x) && defined(CONFIG_PPC_FPU)
-static inline void identical_pvr_fixup(unsigned long node)
+static __init void identical_pvr_fixup(unsigned long node)
{
unsigned int pvr;
const char *model = of_get_flat_dt_prop(node, "model", NULL);
@@ -350,6 +366,9 @@ static int __init early_init_dt_scan_cpus(unsigned long node,
be32_to_cpu(intserv[found_thread]));
boot_cpuid = found;
+ // Pass the boot CPU's hard CPU id back to our caller
+ *((u32 *)data) = be32_to_cpu(intserv[found_thread]);
+
/*
* PAPR defines "logical" PVR values for cpus that
* meet various levels of the architecture:
@@ -371,11 +390,16 @@ static int __init early_init_dt_scan_cpus(unsigned long node,
*/
if (!dt_cpu_ftrs_in_use()) {
prop = of_get_flat_dt_prop(node, "cpu-version", NULL);
- if (prop && (be32_to_cpup(prop) & 0xff000000) == 0x0f000000)
+ if (prop && (be32_to_cpup(prop) & 0xff000000) == 0x0f000000) {
identify_cpu(0, be32_to_cpup(prop));
+ seq_buf_printf(&ppc_hw_desc, "0x%04x ", be32_to_cpup(prop));
+ }
check_cpu_feature_properties(node);
- check_cpu_pa_features(node);
+ check_cpu_features(node, "ibm,pa-features", ibm_pa_features,
+ ARRAY_SIZE(ibm_pa_features));
+ check_cpu_features(node, "ibm,pi-features", ibm_pi_features,
+ ARRAY_SIZE(ibm_pi_features));
}
identical_pvr_fixup(node);
@@ -386,9 +410,7 @@ static int __init early_init_dt_scan_cpus(unsigned long node,
cur_cpu_spec->cpu_features &= ~CPU_FTR_SMT;
else if (!dt_cpu_ftrs_in_use())
cur_cpu_spec->cpu_features |= CPU_FTR_SMT;
- allocate_paca(boot_cpuid);
#endif
- set_hard_smp_processor_id(found, be32_to_cpu(intserv[found_thread]));
return 0;
}
@@ -400,7 +422,7 @@ static int __init early_init_dt_scan_chosen_ppc(unsigned long node,
const unsigned long *lprop; /* All these set by kernel, so no need to convert endian */
/* Use common scan routine to determine if this is the chosen node */
- if (early_init_dt_scan_chosen(node, uname, depth, data) == 0)
+ if (early_init_dt_scan_chosen(data) < 0)
return 0;
#ifdef CONFIG_PPC64
@@ -445,7 +467,7 @@ static int __init early_init_dt_scan_chosen_ppc(unsigned long node,
*/
#ifdef CONFIG_SPARSEMEM
-static bool validate_mem_limit(u64 base, u64 *size)
+static bool __init validate_mem_limit(u64 base, u64 *size)
{
u64 max_mem = 1UL << (MAX_PHYSMEM_BITS);
@@ -456,7 +478,7 @@ static bool validate_mem_limit(u64 base, u64 *size)
return true;
}
#else
-static bool validate_mem_limit(u64 base, u64 *size)
+static bool __init validate_mem_limit(u64 base, u64 *size)
{
return true;
}
@@ -468,8 +490,9 @@ static bool validate_mem_limit(u64 base, u64 *size)
* This contains a list of memory blocks along with NUMA affinity
* information.
*/
-static void __init early_init_drmem_lmb(struct drmem_lmb *lmb,
- const __be32 **usm)
+static int __init early_init_drmem_lmb(struct drmem_lmb *lmb,
+ const __be32 **usm,
+ void *data)
{
u64 base, size;
int is_kexec_kdump = 0, rngs;
@@ -484,7 +507,7 @@ static void __init early_init_drmem_lmb(struct drmem_lmb *lmb,
*/
if ((lmb->flags & DRCONF_MEM_RESERVED) ||
!(lmb->flags & DRCONF_MEM_ASSIGNED))
- return;
+ return 0;
if (*usm)
is_kexec_kdump = 1;
@@ -499,7 +522,7 @@ static void __init early_init_drmem_lmb(struct drmem_lmb *lmb,
*/
rngs = dt_mem_next_cell(dt_root_size_cells, usm);
		if (!rngs) /* there are no (base, size) pairs */
- return;
+ return 0;
}
do {
@@ -515,26 +538,32 @@ static void __init early_init_drmem_lmb(struct drmem_lmb *lmb,
size = 0x80000000ul - base;
}
+ if (!validate_mem_limit(base, &size))
+ continue;
+
DBG("Adding: %llx -> %llx\n", base, size);
- if (validate_mem_limit(base, &size))
- memblock_add(base, size);
+ memblock_add(base, size);
+
+ if (lmb->flags & DRCONF_MEM_HOTREMOVABLE)
+ memblock_mark_hotplug(base, size);
} while (--rngs);
+
+ return 0;
}
#endif /* CONFIG_PPC_PSERIES */
-static int __init early_init_dt_scan_memory_ppc(unsigned long node,
- const char *uname,
- int depth, void *data)
+static int __init early_init_dt_scan_memory_ppc(void)
{
#ifdef CONFIG_PPC_PSERIES
- if (depth == 1 &&
- strcmp(uname, "ibm,dynamic-reconfiguration-memory") == 0) {
- walk_drmem_lmbs_early(node, early_init_drmem_lmb);
- return 0;
- }
+ const void *fdt = initial_boot_params;
+ int node = fdt_path_offset(fdt, "/ibm,dynamic-reconfiguration-memory");
+
+ if (node > 0)
+ walk_drmem_lmbs_early(node, NULL, early_init_drmem_lmb);
+
#endif
-
- return early_init_dt_scan_memory(node, uname, depth, data);
+
+ return early_init_dt_scan_memory();
}
/*
@@ -623,13 +652,15 @@ static void __init early_reserve_mem(void)
#ifdef CONFIG_BLK_DEV_INITRD
/* Then reserve the initrd, if any */
if (initrd_start && (initrd_end > initrd_start)) {
- memblock_reserve(_ALIGN_DOWN(__pa(initrd_start), PAGE_SIZE),
- _ALIGN_UP(initrd_end, PAGE_SIZE) -
- _ALIGN_DOWN(initrd_start, PAGE_SIZE));
+ memblock_reserve(ALIGN_DOWN(__pa(initrd_start), PAGE_SIZE),
+ ALIGN(initrd_end, PAGE_SIZE) -
+ ALIGN_DOWN(initrd_start, PAGE_SIZE));
}
#endif /* CONFIG_BLK_DEV_INITRD */
-#ifdef CONFIG_PPC32
+ if (!IS_ENABLED(CONFIG_PPC32))
+ return;
+
/*
* Handle the case where we might be booting from an old kexec
* image that setup the mem_rsvmap as pairs of 32-bit values
@@ -650,7 +681,6 @@ static void __init early_reserve_mem(void)
}
return;
}
-#endif
}
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
@@ -685,8 +715,43 @@ static void __init tm_init(void)
static void tm_init(void) { }
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
+static int __init
+early_init_dt_scan_model(unsigned long node, const char *uname,
+ int depth, void *data)
+{
+ const char *prop;
+
+ if (depth != 0)
+ return 0;
+
+ prop = of_get_flat_dt_prop(node, "model", NULL);
+ if (prop)
+ seq_buf_printf(&ppc_hw_desc, "%s ", prop);
+
+ /* break now */
+ return 1;
+}
+
+#ifdef CONFIG_PPC64
+static void __init save_fscr_to_task(void)
+{
+ /*
+ * Ensure the init_task (pid 0, aka swapper) uses the value of FSCR we
+ * have configured via the device tree features or via __init_FSCR().
+ * That value will then be propagated to pid 1 (init) and all future
+ * processes.
+ */
+ if (early_cpu_has_feature(CPU_FTR_ARCH_207S))
+ init_task.thread.fscr = mfspr(SPRN_FSCR);
+}
+#else
+static inline void save_fscr_to_task(void) {}
+#endif
+
+
void __init early_init_devtree(void *params)
{
+ u32 boot_cpu_hwid;
phys_addr_t limit;
DBG(" -> early_init_devtree(%px)\n", params);
@@ -695,6 +760,8 @@ void __init early_init_devtree(void *params)
if (!early_init_dt_verify(params))
panic("BUG: Failed verifying flat device tree, bad version?");
+ of_scan_flat_dt(early_init_dt_scan_model, NULL);
+
#ifdef CONFIG_PPC_RTAS
/* Some machines might need RTAS info for debugging, grab it now. */
of_scan_flat_dt(early_init_dt_scan_rtas, NULL);
@@ -720,9 +787,16 @@ void __init early_init_devtree(void *params)
of_scan_flat_dt(early_init_dt_scan_chosen_ppc, boot_command_line);
/* Scan memory nodes and rebuild MEMBLOCKs */
- of_scan_flat_dt(early_init_dt_scan_root, NULL);
- of_scan_flat_dt(early_init_dt_scan_memory_ppc, NULL);
+ early_init_dt_scan_root();
+ early_init_dt_scan_memory_ppc();
+ /*
+ * As generic code authors expect to be able to use static keys
+ * in early_param() handlers, we initialize the static keys just
+ * before parsing early params (it's fine to call jump_label_init()
+ * more than once).
+ */
+ jump_label_init();
parse_early_param();
/* make sure we've parsed cmdline for mem= before this */
@@ -730,7 +804,7 @@ void __init early_init_devtree(void *params)
first_memblock_size = min_t(u64, first_memblock_size, memory_limit);
setup_initial_memory_limit(memstart_addr, first_memblock_size);
/* Reserve MEMBLOCK regions used by kernel, initrd, dt, etc... */
- memblock_reserve(PHYSICAL_START, __pa(klimit) - PHYSICAL_START);
+ memblock_reserve(PHYSICAL_START, __pa(_end) - PHYSICAL_START);
/* If relocatable, reserve first 32k for interrupt vectors etc. */
if (PHYSICAL_START > MEMORY_START)
memblock_reserve(MEMORY_START, 0x8000);
@@ -749,6 +823,11 @@ void __init early_init_devtree(void *params)
limit = ALIGN(memory_limit ?: memblock_phys_mem_size(), PAGE_SIZE);
memblock_enforce_memory_limit(limit);
+#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_PPC_4K_PAGES)
+ if (!early_radix_enabled())
+ memblock_cap_memory_range(0, 1UL << (H_MAX_PHYSMEM_BITS));
+#endif
+
memblock_allow_resize();
memblock_dump_all();
@@ -758,21 +837,24 @@ void __init early_init_devtree(void *params)
* FIXME .. and the initrd too? */
move_device_tree();
- allocate_paca_ptrs();
-
DBG("Scanning CPUs ...\n");
dt_cpu_ftrs_scan();
+ // We can now add the CPU name & PVR to the hardware description
+ seq_buf_printf(&ppc_hw_desc, "%s 0x%04lx ", cur_cpu_spec->cpu_name, mfspr(SPRN_PVR));
+
	/* Retrieve CPU related information from the flat tree
* (altivec support, boot CPU ID, ...)
*/
- of_scan_flat_dt(early_init_dt_scan_cpus, NULL);
+ of_scan_flat_dt(early_init_dt_scan_cpus, &boot_cpu_hwid);
if (boot_cpuid < 0) {
printk("Failed to identify boot CPU !\n");
BUG();
}
+ save_fscr_to_task();
+
#if defined(CONFIG_SMP) && defined(CONFIG_PPC64)
/* We'll later wait for secondaries to check in; there are
* NCPUS-1 non-boot CPUs :-)
@@ -782,6 +864,11 @@ void __init early_init_devtree(void *params)
mmu_early_init_devtree();
+ // NB. paca is not installed until later in early_setup()
+ allocate_paca_ptrs();
+ allocate_paca(boot_cpuid);
+ set_hard_smp_processor_id(boot_cpuid, boot_cpu_hwid);
+
#ifdef CONFIG_PPC_POWERNV
/* Scan and build the list of machine check recoverable ranges */
of_scan_flat_dt(early_init_dt_scan_recoverable_ranges, NULL);
@@ -791,6 +878,11 @@ void __init early_init_devtree(void *params)
/* Now try to figure out if we are running on LPAR and so on */
pseries_probe_fw_features();
+ /*
+ * Initialize pkey features and default AMR/IAMR values
+ */
+ pkey_early_init_devtree();
+
#ifdef CONFIG_PPC_PS3
/* Identify PS3 firmware */
if (of_flat_dt_is_compatible(of_get_flat_dt_root(), "sony,ps3"))
@@ -817,8 +909,8 @@ void __init early_get_first_memblock_info(void *params, phys_addr_t *size)
* mess the memblock.
*/
add_mem_to_memblock = 0;
- of_scan_flat_dt(early_init_dt_scan_root, NULL);
- of_scan_flat_dt(early_init_dt_scan_memory_ppc, NULL);
+ early_init_dt_scan_root();
+ early_init_dt_scan_memory_ppc();
add_mem_to_memblock = 1;
if (size)
@@ -876,13 +968,22 @@ EXPORT_SYMBOL(of_get_ibm_chip_id);
int cpu_to_chip_id(int cpu)
{
struct device_node *np;
+ int ret = -1, idx;
+
+ idx = cpu / threads_per_core;
+ if (chip_id_lookup_table && chip_id_lookup_table[idx] != -1)
+ return chip_id_lookup_table[idx];
np = of_get_cpu_node(cpu, NULL);
- if (!np)
- return -1;
+ if (np) {
+ ret = of_get_ibm_chip_id(np);
+ of_node_put(np);
+
+ if (chip_id_lookup_table)
+ chip_id_lookup_table[idx] = ret;
+ }
- of_node_put(np);
- return of_get_ibm_chip_id(np);
+ return ret;
}
EXPORT_SYMBOL(cpu_to_chip_id);
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 577345382b23..d464ba412084 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -14,7 +14,7 @@
/* we cannot use FORTIFY as it brings in new symbols */
#define __NO_FORTIFY
-#include <stdarg.h>
+#include <linux/stdarg.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/init.h>
@@ -26,26 +26,30 @@
#include <linux/delay.h>
#include <linux/initrd.h>
#include <linux/bitops.h>
+#include <linux/pgtable.h>
+#include <linux/printk.h>
+#include <linux/of.h>
+#include <linux/of_fdt.h>
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/page.h>
#include <asm/processor.h>
+#include <asm/interrupt.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/mmu.h>
-#include <asm/pgtable.h>
#include <asm/iommu.h>
#include <asm/btext.h>
#include <asm/sections.h>
-#include <asm/machdep.h>
+#include <asm/setup.h>
#include <asm/asm-prototypes.h>
#include <asm/ultravisor-api.h>
#include <linux/linux_logo.h>
/* All of prom_init bss lives here */
-#define __prombss __section(.bss.prominit)
+#define __prombss __section(".bss.prominit")
/*
* Eventually bump that one up
@@ -92,12 +96,6 @@ static int of_workarounds __prombss;
#define OF_WA_CLAIM 1 /* do phys/virt claim separately, then map */
#define OF_WA_LONGTRAIL 2 /* work around longtrail bugs */
-#define PROM_BUG() do { \
- prom_printf("kernel BUG at %s line 0x%x!\n", \
- __FILE__, __LINE__); \
- __builtin_trap(); \
-} while (0)
-
#ifdef DEBUG_PROM
#define prom_debug(x...) prom_printf(x)
#else
@@ -169,6 +167,7 @@ static unsigned long __prombss prom_tce_alloc_end;
#ifdef CONFIG_PPC_PSERIES
static bool __prombss prom_radix_disable;
+static bool __prombss prom_radix_gtse_disable;
static bool __prombss prom_xive_disable;
#endif
@@ -241,13 +240,31 @@ static int __init prom_strcmp(const char *cs, const char *ct)
return 0;
}
-static char __init *prom_strcpy(char *dest, const char *src)
+static ssize_t __init prom_strscpy_pad(char *dest, const char *src, size_t n)
{
- char *tmp = dest;
+ ssize_t rc;
+ size_t i;
- while ((*dest++ = *src++) != '\0')
- /* nothing */;
- return tmp;
+ if (n == 0 || n > INT_MAX)
+ return -E2BIG;
+
+ // Copy up to n bytes
+ for (i = 0; i < n && src[i] != '\0'; i++)
+ dest[i] = src[i];
+
+ rc = i;
+
+ // If we copied all n then we have run out of space for the nul
+ if (rc == n) {
+ // Rewind by one character to ensure nul termination
+ i--;
+ rc = -E2BIG;
+ }
+
+ for (; i < n; i++)
+ dest[i] = '\0';
+
+ return rc;
}
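
A userspace approximation of the copy-and-pad semantics above (assumed behaviour: copy at most n bytes, NUL-pad the remainder, return the copied length, or -E2BIG when the source does not fit):

#include <stdio.h>
#include <errno.h>

long strscpy_pad_demo(char *dest, const char *src, unsigned long n)
{
	long rc;
	unsigned long i;

	if (n == 0)
		return -E2BIG;

	for (i = 0; i < n && src[i] != '\0'; i++)
		dest[i] = src[i];

	rc = (long)i;
	if (i == n) {		/* no room left for the terminating NUL */
		i--;
		rc = -E2BIG;
	}
	for (; i < n; i++)	/* NUL-terminate and pad the tail */
		dest[i] = '\0';

	return rc;
}

int main(void)
{
	char buf[8];

	printf("%ld\n", strscpy_pad_demo(buf, "phandle", sizeof(buf)));     /* 7 */
	printf("%ld\n", strscpy_pad_demo(buf, "radix_hcall", sizeof(buf))); /* -E2BIG */
	return 0;
}
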
static int __init prom_strncmp(const char *cs, const char *ct, size_t count)
@@ -354,6 +371,7 @@ static int __init prom_strtobool(const char *s, bool *res)
default:
break;
}
+ break;
default:
break;
}
@@ -650,7 +668,7 @@ static inline int __init prom_getproplen(phandle node, const char *pname)
return call_prom("getproplen", 2, 1, node, ADDR(pname));
}
-static void add_string(char **str, const char *q)
+static void __init add_string(char **str, const char *q)
{
char *p = *str;
@@ -660,7 +678,7 @@ static void add_string(char **str, const char *q)
*str = p;
}
-static char *tohex(unsigned int x)
+static char *__init tohex(unsigned int x)
{
static const char digits[] __initconst = "0123456789abcdef";
static char result[9] __prombss;
@@ -699,29 +717,28 @@ static int __init prom_setprop(phandle node, const char *nodename,
}
/* We can't use the standard versions because of relocation headaches. */
-#define isxdigit(c) (('0' <= (c) && (c) <= '9') \
- || ('a' <= (c) && (c) <= 'f') \
- || ('A' <= (c) && (c) <= 'F'))
+#define prom_isxdigit(c) \
+ (('0' <= (c) && (c) <= '9') || ('a' <= (c) && (c) <= 'f') || ('A' <= (c) && (c) <= 'F'))
-#define isdigit(c) ('0' <= (c) && (c) <= '9')
-#define islower(c) ('a' <= (c) && (c) <= 'z')
-#define toupper(c) (islower(c) ? ((c) - 'a' + 'A') : (c))
+#define prom_isdigit(c) ('0' <= (c) && (c) <= '9')
+#define prom_islower(c) ('a' <= (c) && (c) <= 'z')
+#define prom_toupper(c) (prom_islower(c) ? ((c) - 'a' + 'A') : (c))
-static unsigned long prom_strtoul(const char *cp, const char **endp)
+static unsigned long __init prom_strtoul(const char *cp, const char **endp)
{
unsigned long result = 0, base = 10, value;
if (*cp == '0') {
base = 8;
cp++;
- if (toupper(*cp) == 'X') {
+ if (prom_toupper(*cp) == 'X') {
cp++;
base = 16;
}
}
- while (isxdigit(*cp) &&
- (value = isdigit(*cp) ? *cp - '0' : toupper(*cp) - 'A' + 10) < base) {
+ while (prom_isxdigit(*cp) &&
+ (value = prom_isdigit(*cp) ? *cp - '0' : prom_toupper(*cp) - 'A' + 10) < base) {
result = result * base + value;
cp++;
}
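
The renamed prom_is*/prom_toupper helpers keep the usual 0 (octal) and 0x (hex) prefix handling. For comparison, standard strtoul() with base 0 follows the same convention:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	printf("%lu %lu %lu\n",
	       strtoul("017", NULL, 0),   /* 15 (octal)       */
	       strtoul("0x1f", NULL, 0),  /* 31 (hexadecimal) */
	       strtoul("42", NULL, 0));   /* 42 (decimal)     */
	return 0;
}
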
@@ -732,7 +749,7 @@ static unsigned long prom_strtoul(const char *cp, const char **endp)
return result;
}
-static unsigned long prom_memparse(const char *ptr, const char **retptr)
+static unsigned long __init prom_memparse(const char *ptr, const char **retptr)
{
unsigned long ret = prom_strtoul(ptr, retptr);
int shift = 0;
@@ -823,6 +840,12 @@ static void __init early_cmdline_parse(void)
if (prom_radix_disable)
prom_debug("Radix disabled from cmdline\n");
+ opt = prom_strstr(prom_cmd_line, "radix_hcall_invalidate=on");
+ if (opt) {
+ prom_radix_gtse_disable = true;
+ prom_debug("Radix GTSE disabled from cmdline\n");
+ }
+
opt = prom_strstr(prom_cmd_line, "xive=off");
if (opt) {
prom_xive_disable = true;
@@ -919,8 +942,12 @@ struct option_vector6 {
u8 os_name;
} __packed;
+struct option_vector7 {
+ u8 os_id[256];
+} __packed;
+
struct ibm_arch_vec {
- struct { u32 mask, val; } pvrs[12];
+ struct { u32 mask, val; } pvrs[14];
u8 num_vectors;
@@ -941,6 +968,9 @@ struct ibm_arch_vec {
u8 vec6_len;
struct option_vector6 vec6;
+
+ u8 vec7_len;
+ struct option_vector7 vec7;
} __packed;
static const struct ibm_arch_vec ibm_architecture_vec_template __initconst = {
@@ -974,6 +1004,14 @@ static const struct ibm_arch_vec ibm_architecture_vec_template __initconst = {
.val = cpu_to_be32(0x004e0000),
},
{
+ .mask = cpu_to_be32(0xffff0000), /* POWER10 */
+ .val = cpu_to_be32(0x00800000),
+ },
+ {
+ .mask = cpu_to_be32(0xffffffff), /* all 3.1-compliant */
+ .val = cpu_to_be32(0x0f000006),
+ },
+ {
.mask = cpu_to_be32(0xffffffff), /* all 3.00-compliant */
.val = cpu_to_be32(0x0f000005),
},
@@ -1002,7 +1040,7 @@ static const struct ibm_arch_vec ibm_architecture_vec_template __initconst = {
.byte1 = 0,
.arch_versions = OV1_PPC_2_00 | OV1_PPC_2_01 | OV1_PPC_2_02 | OV1_PPC_2_03 |
OV1_PPC_2_04 | OV1_PPC_2_05 | OV1_PPC_2_06 | OV1_PPC_2_07,
- .arch_versions3 = OV1_PPC_3_00,
+ .arch_versions3 = OV1_PPC_3_00 | OV1_PPC_3_1,
},
.vec2_len = VECTOR_LENGTH(sizeof(struct option_vector2)),
@@ -1054,7 +1092,8 @@ static const struct ibm_arch_vec ibm_architecture_vec_template __initconst = {
#else
0,
#endif
- .associativity = OV5_FEAT(OV5_TYPE1_AFFINITY) | OV5_FEAT(OV5_PRRN),
+ .associativity = OV5_FEAT(OV5_FORM1_AFFINITY) | OV5_FEAT(OV5_PRRN) |
+ OV5_FEAT(OV5_FORM2_AFFINITY),
.bin_opts = OV5_FEAT(OV5_RESIZE_HPT) | OV5_FEAT(OV5_HP_EVT),
.micro_checkpoint = 0,
.reserved0 = 0,
@@ -1079,6 +1118,9 @@ static const struct ibm_arch_vec ibm_architecture_vec_template __initconst = {
.secondary_pteg = 0,
.os_name = OV6_LINUX,
},
+
+ /* option vector 7: OS Identification */
+ .vec7_len = VECTOR_LENGTH(sizeof(struct option_vector7)),
};
static struct ibm_arch_vec __prombss ibm_architecture_vec ____cacheline_aligned;
@@ -1277,10 +1319,8 @@ static void __init prom_parse_platform_support(u8 index, u8 val,
prom_parse_mmu_model(val & OV5_FEAT(OV5_MMU_SUPPORT), support);
break;
case OV5_INDX(OV5_RADIX_GTSE): /* Radix Extensions */
- if (val & OV5_FEAT(OV5_RADIX_GTSE)) {
- prom_debug("Radix - GTSE supported\n");
- support->radix_gtse = true;
- }
+ if (val & OV5_FEAT(OV5_RADIX_GTSE))
+ support->radix_gtse = !prom_radix_gtse_disable;
break;
case OV5_INDX(OV5_XIVE_SUPPORT): /* Interrupt mode */
prom_parse_xive_model(val & OV5_FEAT(OV5_XIVE_SUPPORT),
@@ -1309,6 +1349,8 @@ static void __init prom_check_platform_support(void)
memcpy(&ibm_architecture_vec, &ibm_architecture_vec_template,
sizeof(ibm_architecture_vec));
+ prom_strscpy_pad(ibm_architecture_vec.vec7.os_id, linux_banner, 256);
+
if (prop_len > 1) {
int i;
u8 vec[8];
@@ -1317,23 +1359,22 @@ static void __init prom_check_platform_support(void)
if (prop_len > sizeof(vec))
prom_printf("WARNING: ibm,arch-vec-5-platform-support longer than expected (len: %d)\n",
prop_len);
- prom_getprop(prom.chosen, "ibm,arch-vec-5-platform-support",
- &vec, sizeof(vec));
- for (i = 0; i < sizeof(vec); i += 2) {
- prom_debug("%d: index = 0x%x val = 0x%x\n", i / 2
- , vec[i]
- , vec[i + 1]);
- prom_parse_platform_support(vec[i], vec[i + 1],
- &supported);
+ prom_getprop(prom.chosen, "ibm,arch-vec-5-platform-support", &vec, sizeof(vec));
+ for (i = 0; i < prop_len; i += 2) {
+ prom_debug("%d: index = 0x%x val = 0x%x\n", i / 2, vec[i], vec[i + 1]);
+ prom_parse_platform_support(vec[i], vec[i + 1], &supported);
}
}
- if (supported.radix_mmu && supported.radix_gtse &&
- IS_ENABLED(CONFIG_PPC_RADIX_MMU)) {
- /* Radix preferred - but we require GTSE for now */
- prom_debug("Asking for radix with GTSE\n");
+ if (supported.radix_mmu && IS_ENABLED(CONFIG_PPC_RADIX_MMU)) {
+ /* Radix preferred - Check if GTSE is also supported */
+ prom_debug("Asking for radix\n");
ibm_architecture_vec.vec5.mmu = OV5_FEAT(OV5_MMU_RADIX);
- ibm_architecture_vec.vec5.radix_ext = OV5_FEAT(OV5_RADIX_GTSE);
+ if (supported.radix_gtse)
+ ibm_architecture_vec.vec5.radix_ext =
+ OV5_FEAT(OV5_RADIX_GTSE);
+ else
+ prom_debug("Radix GTSE isn't supported\n");
} else if (supported.hash_mmu) {
/* Default to hash mmu (if we can) */
prom_debug("Asking for hash\n");
@@ -1449,18 +1490,18 @@ static unsigned long __init alloc_up(unsigned long size, unsigned long align)
unsigned long addr = 0;
if (align)
- base = _ALIGN_UP(base, align);
+ base = ALIGN(base, align);
prom_debug("%s(%lx, %lx)\n", __func__, size, align);
if (ram_top == 0)
prom_panic("alloc_up() called with mem not initialized\n");
if (align)
- base = _ALIGN_UP(alloc_bottom, align);
+ base = ALIGN(alloc_bottom, align);
else
base = alloc_bottom;
for(; (base + size) <= alloc_top;
- base = _ALIGN_UP(base + 0x100000, align)) {
+ base = ALIGN(base + 0x100000, align)) {
prom_debug(" trying: 0x%lx\n\r", base);
addr = (unsigned long)prom_claim(base, size, 0);
if (addr != PROM_ERROR && addr != 0)
@@ -1500,7 +1541,7 @@ static unsigned long __init alloc_down(unsigned long size, unsigned long align,
if (highmem) {
/* Carve out storage for the TCE table. */
- addr = _ALIGN_DOWN(alloc_top_high - size, align);
+ addr = ALIGN_DOWN(alloc_top_high - size, align);
if (addr <= alloc_bottom)
return 0;
/* Will we bump into the RMO ? If yes, check out that we
@@ -1518,9 +1559,9 @@ static unsigned long __init alloc_down(unsigned long size, unsigned long align,
goto bail;
}
- base = _ALIGN_DOWN(alloc_top - size, align);
+ base = ALIGN_DOWN(alloc_top - size, align);
for (; base > alloc_bottom;
- base = _ALIGN_DOWN(base - 0x100000, align)) {
+ base = ALIGN_DOWN(base - 0x100000, align)) {
prom_debug(" trying: 0x%lx\n\r", base);
addr = (unsigned long)prom_claim(base, size, 0);
if (addr != PROM_ERROR && addr != 0)
@@ -1586,8 +1627,8 @@ static void __init reserve_mem(u64 base, u64 size)
* have our terminator with "size" set to 0 since we are
* dumb and just copy this entire array to the boot params
*/
- base = _ALIGN_DOWN(base, PAGE_SIZE);
- top = _ALIGN_UP(top, PAGE_SIZE);
+ base = ALIGN_DOWN(base, PAGE_SIZE);
+ top = ALIGN(top, PAGE_SIZE);
size = top - base;
if (cnt >= (MEM_RESERVE_MAP_SIZE - 1))
@@ -1741,7 +1782,7 @@ static void __init prom_close_stdin(void)
}
#ifdef CONFIG_PPC_SVM
-static int prom_rtas_hcall(uint64_t args)
+static int __init prom_rtas_hcall(uint64_t args)
{
register uint64_t arg1 asm("r3") = H_RTAS;
register uint64_t arg2 asm("r4") = args;
@@ -1749,6 +1790,8 @@ static int prom_rtas_hcall(uint64_t args)
asm volatile("sc 1\n" : "=r" (arg1) :
"r" (arg1),
"r" (arg2) :);
+ srr_regs_clobbered();
+
return arg1;
}
@@ -1773,6 +1816,9 @@ static void __init prom_rtas_os_term(char *str)
if (token == 0)
prom_panic("Could not get token for ibm,os-term\n");
os_term_args.token = cpu_to_be32(token);
+ os_term_args.nargs = cpu_to_be32(1);
+ os_term_args.nret = cpu_to_be32(1);
+ os_term_args.args[0] = cpu_to_be32(__pa(str));
prom_rtas_hcall((uint64_t)&os_term_args);
}
#endif /* CONFIG_PPC_SVM */
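
The prom_rtas_os_term() fix above fills in the whole call buffer, argument and return counts plus the message address, instead of only the token. A rough sketch of that shape, assuming a simplified structure (fake_rtas_args is made up, and the cpu_to_be32() byte-swapping the real code performs is omitted):

#include <stdint.h>

/* Hypothetical, simplified call buffer; not the kernel's struct rtas_args. */
struct fake_rtas_args {
        uint32_t token;         /* which RTAS service to invoke             */
        uint32_t nargs;         /* how many entries of args[] are inputs    */
        uint32_t nret;          /* how many return words follow the inputs  */
        uint32_t args[16];      /* inputs first, then space for returns     */
};

static void fill_os_term(struct fake_rtas_args *a, uint32_t token, uint32_t msg_pa)
{
        a->token   = token;     /* ibm,os-term token looked up earlier       */
        a->nargs   = 1;         /* one input argument ...                    */
        a->nret    = 1;         /* ... and one status word coming back       */
        a->args[0] = msg_pa;    /* physical address of the message string    */
}
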
@@ -2250,7 +2296,7 @@ static void __init prom_init_stdout(void)
static int __init prom_find_machine_type(void)
{
- char compat[256];
+ static char compat[256] __prombss;
int len, i = 0;
#ifdef CONFIG_PPC64
phandle rtas;
@@ -2403,10 +2449,19 @@ static void __init prom_check_displays(void)
u32 width, height, pitch, addr;
prom_printf("Setting btext !\n");
- prom_getprop(node, "width", &width, 4);
- prom_getprop(node, "height", &height, 4);
- prom_getprop(node, "linebytes", &pitch, 4);
- prom_getprop(node, "address", &addr, 4);
+
+ if (prom_getprop(node, "width", &width, 4) == PROM_ERROR)
+ return;
+
+ if (prom_getprop(node, "height", &height, 4) == PROM_ERROR)
+ return;
+
+ if (prom_getprop(node, "linebytes", &pitch, 4) == PROM_ERROR)
+ return;
+
+ if (prom_getprop(node, "address", &addr, 4) == PROM_ERROR)
+ return;
+
prom_printf("W=%d H=%d LB=%d addr=0x%x\n",
width, height, pitch, addr);
btext_setup_display(width, height, 8, pitch, addr);
@@ -2423,7 +2478,7 @@ static void __init *make_room(unsigned long *mem_start, unsigned long *mem_end,
{
void *ret;
- *mem_start = _ALIGN(*mem_start, align);
+ *mem_start = ALIGN(*mem_start, align);
while ((*mem_start + needed) > *mem_end) {
unsigned long room, chunk;
@@ -2559,7 +2614,7 @@ static void __init scan_dt_build_struct(phandle node, unsigned long *mem_start,
*lp++ = *p;
}
*lp = 0;
- *mem_start = _ALIGN((unsigned long)lp + 1, 4);
+ *mem_start = ALIGN((unsigned long)lp + 1, 4);
}
/* get it again for debugging */
@@ -2605,7 +2660,7 @@ static void __init scan_dt_build_struct(phandle node, unsigned long *mem_start,
/* push property content */
valp = make_room(mem_start, mem_end, l, 4);
call_prom("getprop", 4, 1, node, pname, valp, l);
- *mem_start = _ALIGN(*mem_start, 4);
+ *mem_start = ALIGN(*mem_start, 4);
if (!prom_strcmp(pname, "phandle"))
has_phandle = 1;
@@ -2664,7 +2719,7 @@ static void __init flatten_device_tree(void)
prom_panic ("couldn't get device tree root\n");
/* Build header and make room for mem rsv map */
- mem_start = _ALIGN(mem_start, 4);
+ mem_start = ALIGN(mem_start, 4);
hdr = make_room(&mem_start, &mem_end,
sizeof(struct boot_param_header), 4);
dt_header_start = (unsigned long)hdr;
@@ -2677,7 +2732,7 @@ static void __init flatten_device_tree(void)
/* Add "phandle" in there, we'll need it */
namep = make_room(&mem_start, &mem_end, 16, 1);
- prom_strcpy(namep, "phandle");
+ prom_strscpy_pad(namep, "phandle", sizeof("phandle"));
mem_start = (unsigned long)namep + prom_strlen(namep) + 1;
/* Build string array */
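
Here, as with the vec7.os_id copy earlier in the patch, an unbounded prom_strcpy() is replaced by a bounded, zero-padding copy. A rough user-space model of that behaviour (sketch_strscpy_pad is illustrative, not the kernel implementation; -1 stands in for -E2BIG):

#include <stddef.h>
#include <string.h>

static long sketch_strscpy_pad(char *dst, const char *src, size_t size)
{
        size_t len;

        if (size == 0)
                return -1;

        len = strnlen(src, size - 1);     /* how much fits, excluding the NUL */
        memcpy(dst, src, len);
        memset(dst + len, 0, size - len); /* terminator plus zero padding     */

        return src[len] ? -1 : (long)len; /* -1 here stands in for -E2BIG     */
}
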
@@ -2932,7 +2987,7 @@ static void __init fixup_device_tree_efika_add_phy(void)
/* Check if the phy-handle property exists - bail if it does */
rv = prom_getprop(node, "phy-handle", prop, sizeof(prop));
- if (!rv)
+ if (rv <= 0)
return;
/*
@@ -2958,7 +3013,7 @@ static void __init fixup_device_tree_efika_add_phy(void)
" 0x3 encode-int encode+"
" s\" interrupts\" property"
" finish-device");
- };
+ }
/* Check for a PHY device node - if missing then create one and
* give it's phandle to the ethernet node */
@@ -3185,59 +3240,11 @@ static void __init prom_check_initrd(unsigned long r3, unsigned long r4)
#endif /* CONFIG_BLK_DEV_INITRD */
}
-#ifdef CONFIG_PPC64
-#ifdef CONFIG_RELOCATABLE
-static void reloc_toc(void)
-{
-}
-
-static void unreloc_toc(void)
-{
-}
-#else
-static void __reloc_toc(unsigned long offset, unsigned long nr_entries)
-{
- unsigned long i;
- unsigned long *toc_entry;
-
- /* Get the start of the TOC by using r2 directly. */
- asm volatile("addi %0,2,-0x8000" : "=b" (toc_entry));
-
- for (i = 0; i < nr_entries; i++) {
- *toc_entry = *toc_entry + offset;
- toc_entry++;
- }
-}
-
-static void reloc_toc(void)
-{
- unsigned long offset = reloc_offset();
- unsigned long nr_entries =
- (__prom_init_toc_end - __prom_init_toc_start) / sizeof(long);
-
- __reloc_toc(offset, nr_entries);
-
- mb();
-}
-
-static void unreloc_toc(void)
-{
- unsigned long offset = reloc_offset();
- unsigned long nr_entries =
- (__prom_init_toc_end - __prom_init_toc_start) / sizeof(long);
-
- mb();
-
- __reloc_toc(-offset, nr_entries);
-}
-#endif
-#endif
-
#ifdef CONFIG_PPC_SVM
/*
* Perform the Enter Secure Mode ultracall.
*/
-static int enter_secure_mode(unsigned long kbase, unsigned long fdt)
+static int __init enter_secure_mode(unsigned long kbase, unsigned long fdt)
{
register unsigned long r3 asm("r3") = UV_ESM;
register unsigned long r4 asm("r4") = kbase;
@@ -3251,7 +3258,7 @@ static int enter_secure_mode(unsigned long kbase, unsigned long fdt)
/*
* Call the Ultravisor to transfer us to secure memory if we have an ESM blob.
*/
-static void setup_secure_guest(unsigned long kbase, unsigned long fdt)
+static void __init setup_secure_guest(unsigned long kbase, unsigned long fdt)
{
int ret;
@@ -3266,14 +3273,12 @@ static void setup_secure_guest(unsigned long kbase, unsigned long fdt)
* relocated it so the check will fail. Restore the original image by
* relocating it back to the kernel virtual base address.
*/
- if (IS_ENABLED(CONFIG_RELOCATABLE))
- relocate(KERNELBASE);
+ relocate(KERNELBASE);
ret = enter_secure_mode(kbase, fdt);
/* Relocate the kernel again. */
- if (IS_ENABLED(CONFIG_RELOCATABLE))
- relocate(kbase);
+ relocate(kbase);
if (ret != U_SUCCESS) {
prom_printf("Returned %d from switching to secure mode.\n", ret);
@@ -3281,7 +3286,7 @@ static void setup_secure_guest(unsigned long kbase, unsigned long fdt)
}
}
#else
-static void setup_secure_guest(unsigned long kbase, unsigned long fdt)
+static void __init setup_secure_guest(unsigned long kbase, unsigned long fdt)
{
}
#endif /* CONFIG_PPC_SVM */
@@ -3301,8 +3306,6 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
#ifdef CONFIG_PPC32
unsigned long offset = reloc_offset();
reloc_got2(offset);
-#else
- reloc_toc();
#endif
/*
@@ -3409,7 +3412,7 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
*
* PowerMacs use a different mechanism to spin CPUs
*
- * (This must be done after instanciating RTAS)
+ * (This must be done after instantiating RTAS)
*/
if (of_platform != PLATFORM_POWERMAC)
prom_hold_cpus();
@@ -3474,14 +3477,11 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
*/
hdr = dt_header_start;
- /* Don't print anything after quiesce under OPAL, it crashes OFW */
prom_printf("Booting Linux via __start() @ 0x%lx ...\n", kbase);
prom_debug("->dt_header_start=0x%lx\n", hdr);
#ifdef CONFIG_PPC32
reloc_got2(-offset);
-#else
- unreloc_toc();
#endif
/* Move to secure memory if we're supposed to be secure guests. */
diff --git a/arch/powerpc/kernel/prom_init_check.sh b/arch/powerpc/kernel/prom_init_check.sh
index b183ab9c5107..311890d71c4c 100644
--- a/arch/powerpc/kernel/prom_init_check.sh
+++ b/arch/powerpc/kernel/prom_init_check.sh
@@ -13,7 +13,7 @@
# If you really need to reference something from prom_init.o add
# it to the list below:
-grep "^CONFIG_KASAN=y$" .config >/dev/null
+grep "^CONFIG_KASAN=y$" ${KCONFIG_CONFIG} >/dev/null
if [ $? -eq 0 ]
then
MEM_FUNCS="__memcpy __memset"
@@ -26,8 +26,7 @@ _end enter_prom $MEM_FUNCS reloc_offset __secondary_hold
__secondary_hold_acknowledge __secondary_hold_spinloop __start
logo_linux_clut224 btext_prepare_BAT
reloc_got2 kernstart_addr memstart_addr linux_banner _stext
-__prom_init_toc_start __prom_init_toc_end btext_setup_display TOC.
-relocate"
+btext_setup_display TOC. relocate"
NM="$1"
OBJ="$2"
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
deleted file mode 100644
index 25c0424e8868..000000000000
--- a/arch/powerpc/kernel/ptrace.c
+++ /dev/null
@@ -1,3468 +0,0 @@
-/*
- * PowerPC version
- * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
- *
- * Derived from "arch/m68k/kernel/ptrace.c"
- * Copyright (C) 1994 by Hamish Macdonald
- * Taken from linux/kernel/ptrace.c and modified for M680x0.
- * linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds
- *
- * Modified by Cort Dougan (cort@hq.fsmlabs.com)
- * and Paul Mackerras (paulus@samba.org).
- *
- * This file is subject to the terms and conditions of the GNU General
- * Public License. See the file README.legal in the main directory of
- * this archive for more details.
- */
-
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/smp.h>
-#include <linux/errno.h>
-#include <linux/ptrace.h>
-#include <linux/regset.h>
-#include <linux/tracehook.h>
-#include <linux/elf.h>
-#include <linux/user.h>
-#include <linux/security.h>
-#include <linux/signal.h>
-#include <linux/seccomp.h>
-#include <linux/audit.h>
-#include <trace/syscall.h>
-#include <linux/hw_breakpoint.h>
-#include <linux/perf_event.h>
-#include <linux/context_tracking.h>
-#include <linux/nospec.h>
-
-#include <linux/uaccess.h>
-#include <linux/pkeys.h>
-#include <asm/page.h>
-#include <asm/pgtable.h>
-#include <asm/switch_to.h>
-#include <asm/tm.h>
-#include <asm/asm-prototypes.h>
-#include <asm/debug.h>
-#include <asm/hw_breakpoint.h>
-
-#define CREATE_TRACE_POINTS
-#include <trace/events/syscalls.h>
-
-/*
- * The parameter save area on the stack is used to store arguments being passed
- * to callee function and is located at fixed offset from stack pointer.
- */
-#ifdef CONFIG_PPC32
-#define PARAMETER_SAVE_AREA_OFFSET 24 /* bytes */
-#else /* CONFIG_PPC32 */
-#define PARAMETER_SAVE_AREA_OFFSET 48 /* bytes */
-#endif
-
-struct pt_regs_offset {
- const char *name;
- int offset;
-};
-
-#define STR(s) #s /* convert to string */
-#define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
-#define GPR_OFFSET_NAME(num) \
- {.name = STR(r##num), .offset = offsetof(struct pt_regs, gpr[num])}, \
- {.name = STR(gpr##num), .offset = offsetof(struct pt_regs, gpr[num])}
-#define REG_OFFSET_END {.name = NULL, .offset = 0}
-
-#define TVSO(f) (offsetof(struct thread_vr_state, f))
-#define TFSO(f) (offsetof(struct thread_fp_state, f))
-#define TSO(f) (offsetof(struct thread_struct, f))
-
-static const struct pt_regs_offset regoffset_table[] = {
- GPR_OFFSET_NAME(0),
- GPR_OFFSET_NAME(1),
- GPR_OFFSET_NAME(2),
- GPR_OFFSET_NAME(3),
- GPR_OFFSET_NAME(4),
- GPR_OFFSET_NAME(5),
- GPR_OFFSET_NAME(6),
- GPR_OFFSET_NAME(7),
- GPR_OFFSET_NAME(8),
- GPR_OFFSET_NAME(9),
- GPR_OFFSET_NAME(10),
- GPR_OFFSET_NAME(11),
- GPR_OFFSET_NAME(12),
- GPR_OFFSET_NAME(13),
- GPR_OFFSET_NAME(14),
- GPR_OFFSET_NAME(15),
- GPR_OFFSET_NAME(16),
- GPR_OFFSET_NAME(17),
- GPR_OFFSET_NAME(18),
- GPR_OFFSET_NAME(19),
- GPR_OFFSET_NAME(20),
- GPR_OFFSET_NAME(21),
- GPR_OFFSET_NAME(22),
- GPR_OFFSET_NAME(23),
- GPR_OFFSET_NAME(24),
- GPR_OFFSET_NAME(25),
- GPR_OFFSET_NAME(26),
- GPR_OFFSET_NAME(27),
- GPR_OFFSET_NAME(28),
- GPR_OFFSET_NAME(29),
- GPR_OFFSET_NAME(30),
- GPR_OFFSET_NAME(31),
- REG_OFFSET_NAME(nip),
- REG_OFFSET_NAME(msr),
- REG_OFFSET_NAME(ctr),
- REG_OFFSET_NAME(link),
- REG_OFFSET_NAME(xer),
- REG_OFFSET_NAME(ccr),
-#ifdef CONFIG_PPC64
- REG_OFFSET_NAME(softe),
-#else
- REG_OFFSET_NAME(mq),
-#endif
- REG_OFFSET_NAME(trap),
- REG_OFFSET_NAME(dar),
- REG_OFFSET_NAME(dsisr),
- REG_OFFSET_END,
-};
-
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-static void flush_tmregs_to_thread(struct task_struct *tsk)
-{
- /*
- * If task is not current, it will have been flushed already to
- * its thread_struct during __switch_to().
- *
- * A reclaim flushes ALL the state, or, if not in a transaction, saves
- * the TM SPRs from live into the appropriate thread structures.
- */
-
- if ((!cpu_has_feature(CPU_FTR_TM)) || (tsk != current))
- return;
-
- if (MSR_TM_SUSPENDED(mfmsr())) {
- tm_reclaim_current(TM_CAUSE_SIGNAL);
- } else {
- tm_enable();
- tm_save_sprs(&(tsk->thread));
- }
-}
-#else
-static inline void flush_tmregs_to_thread(struct task_struct *tsk) { }
-#endif
-
-/**
- * regs_query_register_offset() - query register offset from its name
- * @name: the name of a register
- *
- * regs_query_register_offset() returns the offset of a register in struct
- * pt_regs from its name. If the name is invalid, this returns -EINVAL;
- */
-int regs_query_register_offset(const char *name)
-{
- const struct pt_regs_offset *roff;
- for (roff = regoffset_table; roff->name != NULL; roff++)
- if (!strcmp(roff->name, name))
- return roff->offset;
- return -EINVAL;
-}
-
-/**
- * regs_query_register_name() - query register name from its offset
- * @offset: the offset of a register in struct pt_regs.
- *
- * regs_query_register_name() returns the name of a register from its
- * offset in struct pt_regs. If the @offset is invalid, this returns NULL;
- */
-const char *regs_query_register_name(unsigned int offset)
-{
- const struct pt_regs_offset *roff;
- for (roff = regoffset_table; roff->name != NULL; roff++)
- if (roff->offset == offset)
- return roff->name;
- return NULL;
-}
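
Both lookups above are driven by regoffset_table, a NULL-terminated array of {name, offset} pairs built with offsetof(). The same pattern in a stand-alone sketch (demo_regs and demo_query_offset are made up, not kernel symbols):

#include <stddef.h>
#include <string.h>

struct demo_regs { unsigned long nip, msr, ctr; };

static const struct { const char *name; int offset; } demo_table[] = {
        { "nip", offsetof(struct demo_regs, nip) },
        { "msr", offsetof(struct demo_regs, msr) },
        { "ctr", offsetof(struct demo_regs, ctr) },
        { NULL, 0 },
};

static int demo_query_offset(const char *name)
{
        int i;

        for (i = 0; demo_table[i].name; i++)
                if (!strcmp(demo_table[i].name, name))
                        return demo_table[i].offset;
        return -1;      /* stands in for -EINVAL */
}
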
-
-/*
- * Does not yet catch signals sent when the child dies
- * in exit.c or in signal.c.
- */
-
-/*
- * Set of msr bits that gdb can change on behalf of a process.
- */
-#ifdef CONFIG_PPC_ADV_DEBUG_REGS
-#define MSR_DEBUGCHANGE 0
-#else
-#define MSR_DEBUGCHANGE (MSR_SE | MSR_BE)
-#endif
-
-/*
- * Max register writeable via put_reg
- */
-#ifdef CONFIG_PPC32
-#define PT_MAX_PUT_REG PT_MQ
-#else
-#define PT_MAX_PUT_REG PT_CCR
-#endif
-
-static unsigned long get_user_msr(struct task_struct *task)
-{
- return task->thread.regs->msr | task->thread.fpexc_mode;
-}
-
-static int set_user_msr(struct task_struct *task, unsigned long msr)
-{
- task->thread.regs->msr &= ~MSR_DEBUGCHANGE;
- task->thread.regs->msr |= msr & MSR_DEBUGCHANGE;
- return 0;
-}
-
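
set_user_msr() above lets the tracer change only the MSR bits in MSR_DEBUGCHANGE and preserves everything else. The masked read-modify-write in isolation (WRITABLE_MASK and masked_update are illustrative; the mask value is invented, not the real MSR_SE | MSR_BE encoding):

/* Hypothetical mask; not the real MSR bit layout. */
#define WRITABLE_MASK 0x0600UL

static unsigned long masked_update(unsigned long cur, unsigned long requested)
{
        cur &= ~WRITABLE_MASK;                  /* drop the caller-writable bits    */
        cur |= requested & WRITABLE_MASK;       /* take only those bits from caller */
        return cur;
}
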
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-static unsigned long get_user_ckpt_msr(struct task_struct *task)
-{
- return task->thread.ckpt_regs.msr | task->thread.fpexc_mode;
-}
-
-static int set_user_ckpt_msr(struct task_struct *task, unsigned long msr)
-{
- task->thread.ckpt_regs.msr &= ~MSR_DEBUGCHANGE;
- task->thread.ckpt_regs.msr |= msr & MSR_DEBUGCHANGE;
- return 0;
-}
-
-static int set_user_ckpt_trap(struct task_struct *task, unsigned long trap)
-{
- task->thread.ckpt_regs.trap = trap & 0xfff0;
- return 0;
-}
-#endif
-
-#ifdef CONFIG_PPC64
-static int get_user_dscr(struct task_struct *task, unsigned long *data)
-{
- *data = task->thread.dscr;
- return 0;
-}
-
-static int set_user_dscr(struct task_struct *task, unsigned long dscr)
-{
- task->thread.dscr = dscr;
- task->thread.dscr_inherit = 1;
- return 0;
-}
-#else
-static int get_user_dscr(struct task_struct *task, unsigned long *data)
-{
- return -EIO;
-}
-
-static int set_user_dscr(struct task_struct *task, unsigned long dscr)
-{
- return -EIO;
-}
-#endif
-
-/*
- * We prevent mucking around with the reserved area of trap,
- * which is used internally by the kernel.
- */
-static int set_user_trap(struct task_struct *task, unsigned long trap)
-{
- task->thread.regs->trap = trap & 0xfff0;
- return 0;
-}
-
-/*
- * Get contents of register REGNO in task TASK.
- */
-int ptrace_get_reg(struct task_struct *task, int regno, unsigned long *data)
-{
- unsigned int regs_max;
-
- if ((task->thread.regs == NULL) || !data)
- return -EIO;
-
- if (regno == PT_MSR) {
- *data = get_user_msr(task);
- return 0;
- }
-
- if (regno == PT_DSCR)
- return get_user_dscr(task, data);
-
-#ifdef CONFIG_PPC64
- /*
- * softe copies the paca->irq_soft_mask variable state. Since irq_soft_mask is
- * no longer used as a flag, let's force userspace to always see the softe
- * value as 1, which means interrupts are not soft disabled.
- */
- if (regno == PT_SOFTE) {
- *data = 1;
- return 0;
- }
-#endif
-
- regs_max = sizeof(struct user_pt_regs) / sizeof(unsigned long);
- if (regno < regs_max) {
- regno = array_index_nospec(regno, regs_max);
- *data = ((unsigned long *)task->thread.regs)[regno];
- return 0;
- }
-
- return -EIO;
-}
-
-/*
- * Write contents of register REGNO in task TASK.
- */
-int ptrace_put_reg(struct task_struct *task, int regno, unsigned long data)
-{
- if (task->thread.regs == NULL)
- return -EIO;
-
- if (regno == PT_MSR)
- return set_user_msr(task, data);
- if (regno == PT_TRAP)
- return set_user_trap(task, data);
- if (regno == PT_DSCR)
- return set_user_dscr(task, data);
-
- if (regno <= PT_MAX_PUT_REG) {
- regno = array_index_nospec(regno, PT_MAX_PUT_REG + 1);
- ((unsigned long *)task->thread.regs)[regno] = data;
- return 0;
- }
- return -EIO;
-}
-
-static int gpr_get(struct task_struct *target, const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- void *kbuf, void __user *ubuf)
-{
- int i, ret;
-
- if (target->thread.regs == NULL)
- return -EIO;
-
- if (!FULL_REGS(target->thread.regs)) {
- /* We have a partial register set. Fill 14-31 with bogus values */
- for (i = 14; i < 32; i++)
- target->thread.regs->gpr[i] = NV_REG_POISON;
- }
-
- ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
- target->thread.regs,
- 0, offsetof(struct pt_regs, msr));
- if (!ret) {
- unsigned long msr = get_user_msr(target);
- ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &msr,
- offsetof(struct pt_regs, msr),
- offsetof(struct pt_regs, msr) +
- sizeof(msr));
- }
-
- BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
- offsetof(struct pt_regs, msr) + sizeof(long));
-
- if (!ret)
- ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
- &target->thread.regs->orig_gpr3,
- offsetof(struct pt_regs, orig_gpr3),
- sizeof(struct user_pt_regs));
- if (!ret)
- ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
- sizeof(struct user_pt_regs), -1);
-
- return ret;
-}
-
-static int gpr_set(struct task_struct *target, const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- const void *kbuf, const void __user *ubuf)
-{
- unsigned long reg;
- int ret;
-
- if (target->thread.regs == NULL)
- return -EIO;
-
- CHECK_FULL_REGS(target->thread.regs);
-
- ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
- target->thread.regs,
- 0, PT_MSR * sizeof(reg));
-
- if (!ret && count > 0) {
- ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
- PT_MSR * sizeof(reg),
- (PT_MSR + 1) * sizeof(reg));
- if (!ret)
- ret = set_user_msr(target, reg);
- }
-
- BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
- offsetof(struct pt_regs, msr) + sizeof(long));
-
- if (!ret)
- ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
- &target->thread.regs->orig_gpr3,
- PT_ORIG_R3 * sizeof(reg),
- (PT_MAX_PUT_REG + 1) * sizeof(reg));
-
- if (PT_MAX_PUT_REG + 1 < PT_TRAP && !ret)
- ret = user_regset_copyin_ignore(
- &pos, &count, &kbuf, &ubuf,
- (PT_MAX_PUT_REG + 1) * sizeof(reg),
- PT_TRAP * sizeof(reg));
-
- if (!ret && count > 0) {
- ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
- PT_TRAP * sizeof(reg),
- (PT_TRAP + 1) * sizeof(reg));
- if (!ret)
- ret = set_user_trap(target, reg);
- }
-
- if (!ret)
- ret = user_regset_copyin_ignore(
- &pos, &count, &kbuf, &ubuf,
- (PT_TRAP + 1) * sizeof(reg), -1);
-
- return ret;
-}
-
-/*
- * Regardless of transactions, 'fp_state' holds the current running
- * value of all FPR registers and 'ckfp_state' holds the last checkpointed
- * value of all FPR registers for the current transaction.
- *
- * Userspace interface buffer layout:
- *
- * struct data {
- * u64 fpr[32];
- * u64 fpscr;
- * };
- */
-static int fpr_get(struct task_struct *target, const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- void *kbuf, void __user *ubuf)
-{
-#ifdef CONFIG_VSX
- u64 buf[33];
- int i;
-
- flush_fp_to_thread(target);
-
- /* copy to local buffer then write that out */
- for (i = 0; i < 32 ; i++)
- buf[i] = target->thread.TS_FPR(i);
- buf[32] = target->thread.fp_state.fpscr;
- return user_regset_copyout(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
-#else
- BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) !=
- offsetof(struct thread_fp_state, fpr[32]));
-
- flush_fp_to_thread(target);
-
- return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
- &target->thread.fp_state, 0, -1);
-#endif
-}
-
-/*
- * Regardless of transactions, 'fp_state' holds the current running
- * value of all FPR registers and 'ckfp_state' holds the last checkpointed
- * value of all FPR registers for the current transaction.
- *
- * Userspace interface buffer layout:
- *
- * struct data {
- * u64 fpr[32];
- * u64 fpscr;
- * };
- *
- */
-static int fpr_set(struct task_struct *target, const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- const void *kbuf, const void __user *ubuf)
-{
-#ifdef CONFIG_VSX
- u64 buf[33];
- int i;
-
- flush_fp_to_thread(target);
-
- for (i = 0; i < 32 ; i++)
- buf[i] = target->thread.TS_FPR(i);
- buf[32] = target->thread.fp_state.fpscr;
-
- /* copy to local buffer then write that out */
- i = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
- if (i)
- return i;
-
- for (i = 0; i < 32 ; i++)
- target->thread.TS_FPR(i) = buf[i];
- target->thread.fp_state.fpscr = buf[32];
- return 0;
-#else
- BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) !=
- offsetof(struct thread_fp_state, fpr[32]));
-
- flush_fp_to_thread(target);
-
- return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
- &target->thread.fp_state, 0, -1);
-#endif
-}
-
-#ifdef CONFIG_ALTIVEC
-/*
- * Get/set all the altivec registers vr0..vr31, vscr, vrsave, in one go.
- * The transfer totals 34 quadwords. Quadwords 0-31 contain the
- * corresponding vector registers. Quadword 32 contains the vscr as the
- * last word (offset 12) within that quadword. Quadword 33 contains the
- * vrsave as the first word (offset 0) within the quadword.
- *
- * This definition of the VMX state is compatible with the current PPC32
- * ptrace interface. This allows signal handling and ptrace to use the
- * same structures. This also simplifies the implementation of a bi-arch
- * (combined 32- and 64-bit) gdb.
- */
-
-static int vr_active(struct task_struct *target,
- const struct user_regset *regset)
-{
- flush_altivec_to_thread(target);
- return target->thread.used_vr ? regset->n : 0;
-}
-
-/*
- * Regardless of transactions, 'vr_state' holds the current running
- * value of all the VMX registers and 'ckvr_state' holds the last
- * checkpointed value of all the VMX registers for the current
- * transaction to fall back on in case it aborts.
- *
- * Userspace interface buffer layout:
- *
- * struct data {
- * vector128 vr[32];
- * vector128 vscr;
- * vector128 vrsave;
- * };
- */
-static int vr_get(struct task_struct *target, const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- void *kbuf, void __user *ubuf)
-{
- int ret;
-
- flush_altivec_to_thread(target);
-
- BUILD_BUG_ON(offsetof(struct thread_vr_state, vscr) !=
- offsetof(struct thread_vr_state, vr[32]));
-
- ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
- &target->thread.vr_state, 0,
- 33 * sizeof(vector128));
- if (!ret) {
- /*
- * Copy out only the low-order word of vrsave.
- */
- int start, end;
- union {
- elf_vrreg_t reg;
- u32 word;
- } vrsave;
- memset(&vrsave, 0, sizeof(vrsave));
-
- vrsave.word = target->thread.vrsave;
-
- start = 33 * sizeof(vector128);
- end = start + sizeof(vrsave);
- ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &vrsave,
- start, end);
- }
-
- return ret;
-}
-
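
The union in vr_get()/vr_set() exists so that the 32-bit VRSAVE value can be staged at the start of a zero-filled 128-bit slot before it is copied to or from user space. A stand-alone sketch of that staging trick (demo_v128 and demo_pack_vrsave are invented names, not kernel types):

#include <stdint.h>
#include <string.h>

/* 128-bit stand-in for vector128 / elf_vrreg_t. */
typedef struct { uint32_t u[4]; } demo_v128;

/* Expose a 32-bit SPR (like VRSAVE) as the first word of a zeroed 128-bit slot. */
static demo_v128 demo_pack_vrsave(uint32_t vrsave)
{
        union {
                demo_v128 reg;
                uint32_t  word;
        } tmp;

        memset(&tmp, 0, sizeof(tmp));
        tmp.word = vrsave;
        return tmp.reg;
}
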
-/*
- * Regardless of transactions, 'vr_state' holds the current running
- * value of all the VMX registers and 'ckvr_state' holds the last
- * checkpointed value of all the VMX registers for the current
- * transaction to fall back on in case it aborts.
- *
- * Userspace interface buffer layout:
- *
- * struct data {
- * vector128 vr[32];
- * vector128 vscr;
- * vector128 vrsave;
- * };
- */
-static int vr_set(struct task_struct *target, const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- const void *kbuf, const void __user *ubuf)
-{
- int ret;
-
- flush_altivec_to_thread(target);
-
- BUILD_BUG_ON(offsetof(struct thread_vr_state, vscr) !=
- offsetof(struct thread_vr_state, vr[32]));
-
- ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
- &target->thread.vr_state, 0,
- 33 * sizeof(vector128));
- if (!ret && count > 0) {
- /*
- * We use only the first word of vrsave.
- */
- int start, end;
- union {
- elf_vrreg_t reg;
- u32 word;
- } vrsave;
- memset(&vrsave, 0, sizeof(vrsave));
-
- vrsave.word = target->thread.vrsave;
-
- start = 33 * sizeof(vector128);
- end = start + sizeof(vrsave);
- ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &vrsave,
- start, end);
- if (!ret)
- target->thread.vrsave = vrsave.word;
- }
-
- return ret;
-}
-#endif /* CONFIG_ALTIVEC */
-
-#ifdef CONFIG_VSX
-/*
- * Currently, to set and get all the VSX state, you need to call
- * the FP and VMX calls as well. This only gets/sets the lower 32
- * 128-bit VSX registers.
- */
-
-static int vsr_active(struct task_struct *target,
- const struct user_regset *regset)
-{
- flush_vsx_to_thread(target);
- return target->thread.used_vsr ? regset->n : 0;
-}
-
-/*
- * Regardless of transactions, 'fp_state' holds the current running
- * value of all FPR registers and 'ckfp_state' holds the last
- * checkpointed value of all FPR registers for the current
- * transaction.
- *
- * Userspace interface buffer layout:
- *
- * struct data {
- * u64 vsx[32];
- * };
- */
-static int vsr_get(struct task_struct *target, const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- void *kbuf, void __user *ubuf)
-{
- u64 buf[32];
- int ret, i;
-
- flush_tmregs_to_thread(target);
- flush_fp_to_thread(target);
- flush_altivec_to_thread(target);
- flush_vsx_to_thread(target);
-
- for (i = 0; i < 32 ; i++)
- buf[i] = target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET];
-
- ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
- buf, 0, 32 * sizeof(double));
-
- return ret;
-}
-
-/*
- * Regardless of transactions, 'fp_state' holds the current running
- * value of all FPR registers and 'ckfp_state' holds the last
- * checkpointed value of all FPR registers for the current
- * transaction.
- *
- * Userspace interface buffer layout:
- *
- * struct data {
- * u64 vsx[32];
- * };
- */
-static int vsr_set(struct task_struct *target, const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- const void *kbuf, const void __user *ubuf)
-{
- u64 buf[32];
- int ret,i;
-
- flush_tmregs_to_thread(target);
- flush_fp_to_thread(target);
- flush_altivec_to_thread(target);
- flush_vsx_to_thread(target);
-
- for (i = 0; i < 32 ; i++)
- buf[i] = target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET];
-
- ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
- buf, 0, 32 * sizeof(double));
- if (!ret)
- for (i = 0; i < 32 ; i++)
- target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];
-
- return ret;
-}
-#endif /* CONFIG_VSX */
-
-#ifdef CONFIG_SPE
-
-/*
- * For get_evrregs/set_evrregs functions 'data' has the following layout:
- *
- * struct {
- * u32 evr[32];
- * u64 acc;
- * u32 spefscr;
- * }
- */
-
-static int evr_active(struct task_struct *target,
- const struct user_regset *regset)
-{
- flush_spe_to_thread(target);
- return target->thread.used_spe ? regset->n : 0;
-}
-
-static int evr_get(struct task_struct *target, const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- void *kbuf, void __user *ubuf)
-{
- int ret;
-
- flush_spe_to_thread(target);
-
- ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
- &target->thread.evr,
- 0, sizeof(target->thread.evr));
-
- BUILD_BUG_ON(offsetof(struct thread_struct, acc) + sizeof(u64) !=
- offsetof(struct thread_struct, spefscr));
-
- if (!ret)
- ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
- &target->thread.acc,
- sizeof(target->thread.evr), -1);
-
- return ret;
-}
-
-static int evr_set(struct task_struct *target, const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- const void *kbuf, const void __user *ubuf)
-{
- int ret;
-
- flush_spe_to_thread(target);
-
- ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
- &target->thread.evr,
- 0, sizeof(target->thread.evr));
-
- BUILD_BUG_ON(offsetof(struct thread_struct, acc) + sizeof(u64) !=
- offsetof(struct thread_struct, spefscr));
-
- if (!ret)
- ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
- &target->thread.acc,
- sizeof(target->thread.evr), -1);
-
- return ret;
-}
-#endif /* CONFIG_SPE */
-
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-/**
- * tm_cgpr_active - get active number of registers in CGPR
- * @target: The target task.
- * @regset: The user regset structure.
- *
- * This function checks for the active number of available
- * registers in the transaction checkpointed GPR category.
- */
-static int tm_cgpr_active(struct task_struct *target,
- const struct user_regset *regset)
-{
- if (!cpu_has_feature(CPU_FTR_TM))
- return -ENODEV;
-
- if (!MSR_TM_ACTIVE(target->thread.regs->msr))
- return 0;
-
- return regset->n;
-}
-
-/**
- * tm_cgpr_get - get CGPR registers
- * @target: The target task.
- * @regset: The user regset structure.
- * @pos: The buffer position.
- * @count: Number of bytes to copy.
- * @kbuf: Kernel buffer to copy from.
- * @ubuf: User buffer to copy into.
- *
- * This function gets transaction checkpointed GPR registers.
- *
- * When the transaction is active, 'ckpt_regs' holds all the checkpointed
- * GPR register values for the current transaction to fall back on if it
- * aborts in between. This function gets those checkpointed GPR registers.
- * The userspace interface buffer layout is as follows.
- *
- * struct data {
- * struct pt_regs ckpt_regs;
- * };
- */
-static int tm_cgpr_get(struct task_struct *target,
- const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- void *kbuf, void __user *ubuf)
-{
- int ret;
-
- if (!cpu_has_feature(CPU_FTR_TM))
- return -ENODEV;
-
- if (!MSR_TM_ACTIVE(target->thread.regs->msr))
- return -ENODATA;
-
- flush_tmregs_to_thread(target);
- flush_fp_to_thread(target);
- flush_altivec_to_thread(target);
-
- ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
- &target->thread.ckpt_regs,
- 0, offsetof(struct pt_regs, msr));
- if (!ret) {
- unsigned long msr = get_user_ckpt_msr(target);
-
- ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &msr,
- offsetof(struct pt_regs, msr),
- offsetof(struct pt_regs, msr) +
- sizeof(msr));
- }
-
- BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
- offsetof(struct pt_regs, msr) + sizeof(long));
-
- if (!ret)
- ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
- &target->thread.ckpt_regs.orig_gpr3,
- offsetof(struct pt_regs, orig_gpr3),
- sizeof(struct user_pt_regs));
- if (!ret)
- ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
- sizeof(struct user_pt_regs), -1);
-
- return ret;
-}
-
-/**
- * tm_cgpr_set - set the CGPR registers
- * @target: The target task.
- * @regset: The user regset structure.
- * @pos: The buffer position.
- * @count: Number of bytes to copy.
- * @kbuf: Kernel buffer to copy into.
- * @ubuf: User buffer to copy from.
- *
- * This function sets in transaction checkpointed GPR registers.
- *
- * When the transaction is active, 'ckpt_regs' holds the checkpointed
- * GPR register values for the current transaction to fall back on if it
- * aborts in between. This function sets those checkpointed GPR registers.
- * The userspace interface buffer layout is as follows.
- *
- * struct data {
- * struct pt_regs ckpt_regs;
- * };
- */
-static int tm_cgpr_set(struct task_struct *target,
- const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- const void *kbuf, const void __user *ubuf)
-{
- unsigned long reg;
- int ret;
-
- if (!cpu_has_feature(CPU_FTR_TM))
- return -ENODEV;
-
- if (!MSR_TM_ACTIVE(target->thread.regs->msr))
- return -ENODATA;
-
- flush_tmregs_to_thread(target);
- flush_fp_to_thread(target);
- flush_altivec_to_thread(target);
-
- ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
- &target->thread.ckpt_regs,
- 0, PT_MSR * sizeof(reg));
-
- if (!ret && count > 0) {
- ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
- PT_MSR * sizeof(reg),
- (PT_MSR + 1) * sizeof(reg));
- if (!ret)
- ret = set_user_ckpt_msr(target, reg);
- }
-
- BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
- offsetof(struct pt_regs, msr) + sizeof(long));
-
- if (!ret)
- ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
- &target->thread.ckpt_regs.orig_gpr3,
- PT_ORIG_R3 * sizeof(reg),
- (PT_MAX_PUT_REG + 1) * sizeof(reg));
-
- if (PT_MAX_PUT_REG + 1 < PT_TRAP && !ret)
- ret = user_regset_copyin_ignore(
- &pos, &count, &kbuf, &ubuf,
- (PT_MAX_PUT_REG + 1) * sizeof(reg),
- PT_TRAP * sizeof(reg));
-
- if (!ret && count > 0) {
- ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
- PT_TRAP * sizeof(reg),
- (PT_TRAP + 1) * sizeof(reg));
- if (!ret)
- ret = set_user_ckpt_trap(target, reg);
- }
-
- if (!ret)
- ret = user_regset_copyin_ignore(
- &pos, &count, &kbuf, &ubuf,
- (PT_TRAP + 1) * sizeof(reg), -1);
-
- return ret;
-}
-
-/**
- * tm_cfpr_active - get active number of registers in CFPR
- * @target: The target task.
- * @regset: The user regset structure.
- *
- * This function checks for the active number of available
- * registers in the transaction checkpointed FPR category.
- */
-static int tm_cfpr_active(struct task_struct *target,
- const struct user_regset *regset)
-{
- if (!cpu_has_feature(CPU_FTR_TM))
- return -ENODEV;
-
- if (!MSR_TM_ACTIVE(target->thread.regs->msr))
- return 0;
-
- return regset->n;
-}
-
-/**
- * tm_cfpr_get - get CFPR registers
- * @target: The target task.
- * @regset: The user regset structure.
- * @pos: The buffer position.
- * @count: Number of bytes to copy.
- * @kbuf: Kernel buffer to copy from.
- * @ubuf: User buffer to copy into.
- *
- * This function gets in transaction checkpointed FPR registers.
- *
- * When the transaction is active 'ckfp_state' holds the checkpointed
- * values for the current transaction to fall back on if it aborts
- * in between. This function gets those checkpointed FPR registers.
- * The userspace interface buffer layout is as follows.
- *
- * struct data {
- * u64 fpr[32];
- * u64 fpscr;
- * };
- */
-static int tm_cfpr_get(struct task_struct *target,
- const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- void *kbuf, void __user *ubuf)
-{
- u64 buf[33];
- int i;
-
- if (!cpu_has_feature(CPU_FTR_TM))
- return -ENODEV;
-
- if (!MSR_TM_ACTIVE(target->thread.regs->msr))
- return -ENODATA;
-
- flush_tmregs_to_thread(target);
- flush_fp_to_thread(target);
- flush_altivec_to_thread(target);
-
- /* copy to local buffer then write that out */
- for (i = 0; i < 32 ; i++)
- buf[i] = target->thread.TS_CKFPR(i);
- buf[32] = target->thread.ckfp_state.fpscr;
- return user_regset_copyout(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
-}
-
-/**
- * tm_cfpr_set - set CFPR registers
- * @target: The target task.
- * @regset: The user regset structure.
- * @pos: The buffer position.
- * @count: Number of bytes to copy.
- * @kbuf: Kernel buffer to copy into.
- * @ubuf: User buffer to copy from.
- *
- * This function sets in transaction checkpointed FPR registers.
- *
- * When the transaction is active 'ckfp_state' holds the checkpointed
- * FPR register values for the current transaction to fall back on
- * if it aborts in between. This function sets these checkpointed
- * FPR registers. The userspace interface buffer layout is as follows.
- *
- * struct data {
- * u64 fpr[32];
- * u64 fpscr;
- * };
- */
-static int tm_cfpr_set(struct task_struct *target,
- const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- const void *kbuf, const void __user *ubuf)
-{
- u64 buf[33];
- int i;
-
- if (!cpu_has_feature(CPU_FTR_TM))
- return -ENODEV;
-
- if (!MSR_TM_ACTIVE(target->thread.regs->msr))
- return -ENODATA;
-
- flush_tmregs_to_thread(target);
- flush_fp_to_thread(target);
- flush_altivec_to_thread(target);
-
- for (i = 0; i < 32; i++)
- buf[i] = target->thread.TS_CKFPR(i);
- buf[32] = target->thread.ckfp_state.fpscr;
-
- /* copy to local buffer then write that out */
- i = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
- if (i)
- return i;
- for (i = 0; i < 32 ; i++)
- target->thread.TS_CKFPR(i) = buf[i];
- target->thread.ckfp_state.fpscr = buf[32];
- return 0;
-}
-
-/**
- * tm_cvmx_active - get active number of registers in CVMX
- * @target: The target task.
- * @regset: The user regset structure.
- *
- * This function checks for the active number of available
- * registers in the checkpointed VMX category.
- */
-static int tm_cvmx_active(struct task_struct *target,
- const struct user_regset *regset)
-{
- if (!cpu_has_feature(CPU_FTR_TM))
- return -ENODEV;
-
- if (!MSR_TM_ACTIVE(target->thread.regs->msr))
- return 0;
-
- return regset->n;
-}
-
-/**
- * tm_cvmx_get - get CVMX registers
- * @target: The target task.
- * @regset: The user regset structure.
- * @pos: The buffer position.
- * @count: Number of bytes to copy.
- * @kbuf: Kernel buffer to copy from.
- * @ubuf: User buffer to copy into.
- *
- * This function gets in transaction checkpointed VMX registers.
- *
- * When the transaction is active 'ckvr_state' and 'ckvrsave' hold
- * the checkpointed values for the current transaction to fall
- * back on if it aborts in between. The userspace interface buffer
- * layout is as follows.
- *
- * struct data {
- * vector128 vr[32];
- * vector128 vscr;
- * vector128 vrsave;
- * };
- */
-static int tm_cvmx_get(struct task_struct *target,
- const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- void *kbuf, void __user *ubuf)
-{
- int ret;
-
- BUILD_BUG_ON(TVSO(vscr) != TVSO(vr[32]));
-
- if (!cpu_has_feature(CPU_FTR_TM))
- return -ENODEV;
-
- if (!MSR_TM_ACTIVE(target->thread.regs->msr))
- return -ENODATA;
-
- /* Flush the state */
- flush_tmregs_to_thread(target);
- flush_fp_to_thread(target);
- flush_altivec_to_thread(target);
-
- ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
- &target->thread.ckvr_state, 0,
- 33 * sizeof(vector128));
- if (!ret) {
- /*
- * Copy out only the low-order word of vrsave.
- */
- union {
- elf_vrreg_t reg;
- u32 word;
- } vrsave;
- memset(&vrsave, 0, sizeof(vrsave));
- vrsave.word = target->thread.ckvrsave;
- ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &vrsave,
- 33 * sizeof(vector128), -1);
- }
-
- return ret;
-}
-
-/**
- * tm_cvmx_set - set CVMX registers
- * @target: The target task.
- * @regset: The user regset structure.
- * @pos: The buffer position.
- * @count: Number of bytes to copy.
- * @kbuf: Kernel buffer to copy into.
- * @ubuf: User buffer to copy from.
- *
- * This function sets in transaction checkpointed VMX registers.
- *
- * When the transaction is active 'ckvr_state' and 'ckvrsave' hold
- * the checkpointed values for the current transaction to fall
- * back on if it aborts in between. The userspace interface buffer
- * layout is as follows.
- *
- * struct data {
- * vector128 vr[32];
- * vector128 vscr;
- * vector128 vrsave;
- * };
- */
-static int tm_cvmx_set(struct task_struct *target,
- const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- const void *kbuf, const void __user *ubuf)
-{
- int ret;
-
- BUILD_BUG_ON(TVSO(vscr) != TVSO(vr[32]));
-
- if (!cpu_has_feature(CPU_FTR_TM))
- return -ENODEV;
-
- if (!MSR_TM_ACTIVE(target->thread.regs->msr))
- return -ENODATA;
-
- flush_tmregs_to_thread(target);
- flush_fp_to_thread(target);
- flush_altivec_to_thread(target);
-
- ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
- &target->thread.ckvr_state, 0,
- 33 * sizeof(vector128));
- if (!ret && count > 0) {
- /*
- * We use only the low-order word of vrsave.
- */
- union {
- elf_vrreg_t reg;
- u32 word;
- } vrsave;
- memset(&vrsave, 0, sizeof(vrsave));
- vrsave.word = target->thread.ckvrsave;
- ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &vrsave,
- 33 * sizeof(vector128), -1);
- if (!ret)
- target->thread.ckvrsave = vrsave.word;
- }
-
- return ret;
-}
-
-/**
- * tm_cvsx_active - get active number of registers in CVSX
- * @target: The target task.
- * @regset: The user regset structure.
- *
- * This function checks for the active number of available
- * registers in the transaction checkpointed VSX category.
- */
-static int tm_cvsx_active(struct task_struct *target,
- const struct user_regset *regset)
-{
- if (!cpu_has_feature(CPU_FTR_TM))
- return -ENODEV;
-
- if (!MSR_TM_ACTIVE(target->thread.regs->msr))
- return 0;
-
- flush_vsx_to_thread(target);
- return target->thread.used_vsr ? regset->n : 0;
-}
-
-/**
- * tm_cvsx_get - get CVSX registers
- * @target: The target task.
- * @regset: The user regset structure.
- * @pos: The buffer position.
- * @count: Number of bytes to copy.
- * @kbuf: Kernel buffer to copy from.
- * @ubuf: User buffer to copy into.
- *
- * This function gets in transaction checkpointed VSX registers.
- *
- * When the transaction is active 'ckfp_state' holds the checkpointed
- * values for the current transaction to fall back on if it aborts
- * in between. This function gets those checkpointed VSX registers.
- * The userspace interface buffer layout is as follows.
- *
- * struct data {
- * u64 vsx[32];
- * };
- */
-static int tm_cvsx_get(struct task_struct *target,
- const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- void *kbuf, void __user *ubuf)
-{
- u64 buf[32];
- int ret, i;
-
- if (!cpu_has_feature(CPU_FTR_TM))
- return -ENODEV;
-
- if (!MSR_TM_ACTIVE(target->thread.regs->msr))
- return -ENODATA;
-
- /* Flush the state */
- flush_tmregs_to_thread(target);
- flush_fp_to_thread(target);
- flush_altivec_to_thread(target);
- flush_vsx_to_thread(target);
-
- for (i = 0; i < 32 ; i++)
- buf[i] = target->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET];
- ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
- buf, 0, 32 * sizeof(double));
-
- return ret;
-}
-
-/**
- * tm_cvsx_set - set CVSX registers
- * @target: The target task.
- * @regset: The user regset structure.
- * @pos: The buffer position.
- * @count: Number of bytes to copy.
- * @kbuf: Kernel buffer to copy into.
- * @ubuf: User buffer to copy from.
- *
- * This function sets in transaction checkpointed VSX registers.
- *
- * When the transaction is active 'ckfp_state' holds the checkpointed
- * VSX register values for the current transaction to fall back on
- * if it aborts in between. This function sets these checkpointed
- * VSX registers. The userspace interface buffer layout is as follows.
- *
- * struct data {
- * u64 vsx[32];
- * };
- */
-static int tm_cvsx_set(struct task_struct *target,
- const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- const void *kbuf, const void __user *ubuf)
-{
- u64 buf[32];
- int ret, i;
-
- if (!cpu_has_feature(CPU_FTR_TM))
- return -ENODEV;
-
- if (!MSR_TM_ACTIVE(target->thread.regs->msr))
- return -ENODATA;
-
- /* Flush the state */
- flush_tmregs_to_thread(target);
- flush_fp_to_thread(target);
- flush_altivec_to_thread(target);
- flush_vsx_to_thread(target);
-
- for (i = 0; i < 32 ; i++)
- buf[i] = target->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET];
-
- ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
- buf, 0, 32 * sizeof(double));
- if (!ret)
- for (i = 0; i < 32 ; i++)
- target->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];
-
- return ret;
-}
-
-/**
- * tm_spr_active - get active number of registers in TM SPR
- * @target: The target task.
- * @regset: The user regset structure.
- *
- * This function checks the active number of available
- * registers in the transactional memory SPR category.
- */
-static int tm_spr_active(struct task_struct *target,
- const struct user_regset *regset)
-{
- if (!cpu_has_feature(CPU_FTR_TM))
- return -ENODEV;
-
- return regset->n;
-}
-
-/**
- * tm_spr_get - get the TM related SPR registers
- * @target: The target task.
- * @regset: The user regset structure.
- * @pos: The buffer position.
- * @count: Number of bytes to copy.
- * @kbuf: Kernel buffer to copy from.
- * @ubuf: User buffer to copy into.
- *
- * This function gets transactional memory related SPR registers.
- * The userspace interface buffer layout is as follows.
- *
- * struct {
- * u64 tm_tfhar;
- * u64 tm_texasr;
- * u64 tm_tfiar;
- * };
- */
-static int tm_spr_get(struct task_struct *target,
- const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- void *kbuf, void __user *ubuf)
-{
- int ret;
-
- /* Build tests */
- BUILD_BUG_ON(TSO(tm_tfhar) + sizeof(u64) != TSO(tm_texasr));
- BUILD_BUG_ON(TSO(tm_texasr) + sizeof(u64) != TSO(tm_tfiar));
- BUILD_BUG_ON(TSO(tm_tfiar) + sizeof(u64) != TSO(ckpt_regs));
-
- if (!cpu_has_feature(CPU_FTR_TM))
- return -ENODEV;
-
- /* Flush the states */
- flush_tmregs_to_thread(target);
- flush_fp_to_thread(target);
- flush_altivec_to_thread(target);
-
- /* TFHAR register */
- ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
- &target->thread.tm_tfhar, 0, sizeof(u64));
-
- /* TEXASR register */
- if (!ret)
- ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
- &target->thread.tm_texasr, sizeof(u64),
- 2 * sizeof(u64));
-
- /* TFIAR register */
- if (!ret)
- ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
- &target->thread.tm_tfiar,
- 2 * sizeof(u64), 3 * sizeof(u64));
- return ret;
-}
-
-/**
- * tm_spr_set - set the TM related SPR registers
- * @target: The target task.
- * @regset: The user regset structure.
- * @pos: The buffer position.
- * @count: Number of bytes to copy.
- * @kbuf: Kernel buffer to copy into.
- * @ubuf: User buffer to copy from.
- *
- * This function sets transactional memory related SPR registers.
- * The userspace interface buffer layout is as follows.
- *
- * struct {
- * u64 tm_tfhar;
- * u64 tm_texasr;
- * u64 tm_tfiar;
- * };
- */
-static int tm_spr_set(struct task_struct *target,
- const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- const void *kbuf, const void __user *ubuf)
-{
- int ret;
-
- /* Build tests */
- BUILD_BUG_ON(TSO(tm_tfhar) + sizeof(u64) != TSO(tm_texasr));
- BUILD_BUG_ON(TSO(tm_texasr) + sizeof(u64) != TSO(tm_tfiar));
- BUILD_BUG_ON(TSO(tm_tfiar) + sizeof(u64) != TSO(ckpt_regs));
-
- if (!cpu_has_feature(CPU_FTR_TM))
- return -ENODEV;
-
- /* Flush the states */
- flush_tmregs_to_thread(target);
- flush_fp_to_thread(target);
- flush_altivec_to_thread(target);
-
- /* TFHAR register */
- ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
- &target->thread.tm_tfhar, 0, sizeof(u64));
-
- /* TEXASR register */
- if (!ret)
- ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
- &target->thread.tm_texasr, sizeof(u64),
- 2 * sizeof(u64));
-
- /* TFIAR register */
- if (!ret)
- ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
- &target->thread.tm_tfiar,
- 2 * sizeof(u64), 3 * sizeof(u64));
- return ret;
-}
-
-static int tm_tar_active(struct task_struct *target,
- const struct user_regset *regset)
-{
- if (!cpu_has_feature(CPU_FTR_TM))
- return -ENODEV;
-
- if (MSR_TM_ACTIVE(target->thread.regs->msr))
- return regset->n;
-
- return 0;
-}
-
-static int tm_tar_get(struct task_struct *target,
- const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- void *kbuf, void __user *ubuf)
-{
- int ret;
-
- if (!cpu_has_feature(CPU_FTR_TM))
- return -ENODEV;
-
- if (!MSR_TM_ACTIVE(target->thread.regs->msr))
- return -ENODATA;
-
- ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
- &target->thread.tm_tar, 0, sizeof(u64));
- return ret;
-}
-
-static int tm_tar_set(struct task_struct *target,
- const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- const void *kbuf, const void __user *ubuf)
-{
- int ret;
-
- if (!cpu_has_feature(CPU_FTR_TM))
- return -ENODEV;
-
- if (!MSR_TM_ACTIVE(target->thread.regs->msr))
- return -ENODATA;
-
- ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
- &target->thread.tm_tar, 0, sizeof(u64));
- return ret;
-}
-
-static int tm_ppr_active(struct task_struct *target,
- const struct user_regset *regset)
-{
- if (!cpu_has_feature(CPU_FTR_TM))
- return -ENODEV;
-
- if (MSR_TM_ACTIVE(target->thread.regs->msr))
- return regset->n;
-
- return 0;
-}
-
-static int tm_ppr_get(struct task_struct *target,
- const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- void *kbuf, void __user *ubuf)
-{
- int ret;
-
- if (!cpu_has_feature(CPU_FTR_TM))
- return -ENODEV;
-
- if (!MSR_TM_ACTIVE(target->thread.regs->msr))
- return -ENODATA;
-
- ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
- &target->thread.tm_ppr, 0, sizeof(u64));
- return ret;
-}
-
-static int tm_ppr_set(struct task_struct *target,
- const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- const void *kbuf, const void __user *ubuf)
-{
- int ret;
-
- if (!cpu_has_feature(CPU_FTR_TM))
- return -ENODEV;
-
- if (!MSR_TM_ACTIVE(target->thread.regs->msr))
- return -ENODATA;
-
- ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
- &target->thread.tm_ppr, 0, sizeof(u64));
- return ret;
-}
-
-static int tm_dscr_active(struct task_struct *target,
- const struct user_regset *regset)
-{
- if (!cpu_has_feature(CPU_FTR_TM))
- return -ENODEV;
-
- if (MSR_TM_ACTIVE(target->thread.regs->msr))
- return regset->n;
-
- return 0;
-}
-
-static int tm_dscr_get(struct task_struct *target,
- const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- void *kbuf, void __user *ubuf)
-{
- int ret;
-
- if (!cpu_has_feature(CPU_FTR_TM))
- return -ENODEV;
-
- if (!MSR_TM_ACTIVE(target->thread.regs->msr))
- return -ENODATA;
-
- ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
- &target->thread.tm_dscr, 0, sizeof(u64));
- return ret;
-}
-
-static int tm_dscr_set(struct task_struct *target,
- const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- const void *kbuf, const void __user *ubuf)
-{
- int ret;
-
- if (!cpu_has_feature(CPU_FTR_TM))
- return -ENODEV;
-
- if (!MSR_TM_ACTIVE(target->thread.regs->msr))
- return -ENODATA;
-
- ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
- &target->thread.tm_dscr, 0, sizeof(u64));
- return ret;
-}
-#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
-
-#ifdef CONFIG_PPC64
-static int ppr_get(struct task_struct *target,
- const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- void *kbuf, void __user *ubuf)
-{
- return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
- &target->thread.regs->ppr, 0, sizeof(u64));
-}
-
-static int ppr_set(struct task_struct *target,
- const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- const void *kbuf, const void __user *ubuf)
-{
- return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
- &target->thread.regs->ppr, 0, sizeof(u64));
-}
-
-static int dscr_get(struct task_struct *target,
- const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- void *kbuf, void __user *ubuf)
-{
- return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
- &target->thread.dscr, 0, sizeof(u64));
-}
-static int dscr_set(struct task_struct *target,
- const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- const void *kbuf, const void __user *ubuf)
-{
- return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
- &target->thread.dscr, 0, sizeof(u64));
-}
-#endif
-#ifdef CONFIG_PPC_BOOK3S_64
-static int tar_get(struct task_struct *target,
- const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- void *kbuf, void __user *ubuf)
-{
- return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
- &target->thread.tar, 0, sizeof(u64));
-}
-static int tar_set(struct task_struct *target,
- const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- const void *kbuf, const void __user *ubuf)
-{
- return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
- &target->thread.tar, 0, sizeof(u64));
-}
-
-static int ebb_active(struct task_struct *target,
- const struct user_regset *regset)
-{
- if (!cpu_has_feature(CPU_FTR_ARCH_207S))
- return -ENODEV;
-
- if (target->thread.used_ebb)
- return regset->n;
-
- return 0;
-}
-
-static int ebb_get(struct task_struct *target,
- const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- void *kbuf, void __user *ubuf)
-{
- /* Build tests */
- BUILD_BUG_ON(TSO(ebbrr) + sizeof(unsigned long) != TSO(ebbhr));
- BUILD_BUG_ON(TSO(ebbhr) + sizeof(unsigned long) != TSO(bescr));
-
- if (!cpu_has_feature(CPU_FTR_ARCH_207S))
- return -ENODEV;
-
- if (!target->thread.used_ebb)
- return -ENODATA;
-
- return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
- &target->thread.ebbrr, 0, 3 * sizeof(unsigned long));
-}
-
-static int ebb_set(struct task_struct *target,
- const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- const void *kbuf, const void __user *ubuf)
-{
- int ret = 0;
-
- /* Build tests */
- BUILD_BUG_ON(TSO(ebbrr) + sizeof(unsigned long) != TSO(ebbhr));
- BUILD_BUG_ON(TSO(ebbhr) + sizeof(unsigned long) != TSO(bescr));
-
- if (!cpu_has_feature(CPU_FTR_ARCH_207S))
- return -ENODEV;
-
- if (target->thread.used_ebb)
- return -ENODATA;
-
- ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
- &target->thread.ebbrr, 0, sizeof(unsigned long));
-
- if (!ret)
- ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
- &target->thread.ebbhr, sizeof(unsigned long),
- 2 * sizeof(unsigned long));
-
- if (!ret)
- ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
- &target->thread.bescr,
- 2 * sizeof(unsigned long), 3 * sizeof(unsigned long));
-
- return ret;
-}
-static int pmu_active(struct task_struct *target,
- const struct user_regset *regset)
-{
- if (!cpu_has_feature(CPU_FTR_ARCH_207S))
- return -ENODEV;
-
- return regset->n;
-}
-
-static int pmu_get(struct task_struct *target,
- const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- void *kbuf, void __user *ubuf)
-{
- /* Build tests */
- BUILD_BUG_ON(TSO(siar) + sizeof(unsigned long) != TSO(sdar));
- BUILD_BUG_ON(TSO(sdar) + sizeof(unsigned long) != TSO(sier));
- BUILD_BUG_ON(TSO(sier) + sizeof(unsigned long) != TSO(mmcr2));
- BUILD_BUG_ON(TSO(mmcr2) + sizeof(unsigned long) != TSO(mmcr0));
-
- if (!cpu_has_feature(CPU_FTR_ARCH_207S))
- return -ENODEV;
-
- return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
- &target->thread.siar, 0,
- 5 * sizeof(unsigned long));
-}
-
-static int pmu_set(struct task_struct *target,
- const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- const void *kbuf, const void __user *ubuf)
-{
- int ret = 0;
-
- /* Build tests */
- BUILD_BUG_ON(TSO(siar) + sizeof(unsigned long) != TSO(sdar));
- BUILD_BUG_ON(TSO(sdar) + sizeof(unsigned long) != TSO(sier));
- BUILD_BUG_ON(TSO(sier) + sizeof(unsigned long) != TSO(mmcr2));
- BUILD_BUG_ON(TSO(mmcr2) + sizeof(unsigned long) != TSO(mmcr0));
-
- if (!cpu_has_feature(CPU_FTR_ARCH_207S))
- return -ENODEV;
-
- ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
- &target->thread.siar, 0,
- sizeof(unsigned long));
-
- if (!ret)
- ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
- &target->thread.sdar, sizeof(unsigned long),
- 2 * sizeof(unsigned long));
-
- if (!ret)
- ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
- &target->thread.sier, 2 * sizeof(unsigned long),
- 3 * sizeof(unsigned long));
-
- if (!ret)
- ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
- &target->thread.mmcr2, 3 * sizeof(unsigned long),
- 4 * sizeof(unsigned long));
-
- if (!ret)
- ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
- &target->thread.mmcr0, 4 * sizeof(unsigned long),
- 5 * sizeof(unsigned long));
- return ret;
-}
-#endif
-
-#ifdef CONFIG_PPC_MEM_KEYS
-static int pkey_active(struct task_struct *target,
- const struct user_regset *regset)
-{
- if (!arch_pkeys_enabled())
- return -ENODEV;
-
- return regset->n;
-}
-
-static int pkey_get(struct task_struct *target,
- const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- void *kbuf, void __user *ubuf)
-{
- BUILD_BUG_ON(TSO(amr) + sizeof(unsigned long) != TSO(iamr));
- BUILD_BUG_ON(TSO(iamr) + sizeof(unsigned long) != TSO(uamor));
-
- if (!arch_pkeys_enabled())
- return -ENODEV;
-
- return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
- &target->thread.amr, 0,
- ELF_NPKEY * sizeof(unsigned long));
-}
-
-static int pkey_set(struct task_struct *target,
- const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- const void *kbuf, const void __user *ubuf)
-{
- u64 new_amr;
- int ret;
-
- if (!arch_pkeys_enabled())
- return -ENODEV;
-
- /* Only the AMR can be set from userspace */
- if (pos != 0 || count != sizeof(new_amr))
- return -EINVAL;
-
- ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
- &new_amr, 0, sizeof(new_amr));
- if (ret)
- return ret;
-
- /* UAMOR determines which bits of the AMR can be set from userspace. */
- target->thread.amr = (new_amr & target->thread.uamor) |
- (target->thread.amr & ~target->thread.uamor);
-
- return 0;
-}
-#endif /* CONFIG_PPC_MEM_KEYS */
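The UAMOR masking above is plain bit arithmetic: bits of the requested AMR value only take effect where the corresponding UAMOR bit is set, everything else keeps its old value. A minimal standalone sketch with made-up register values (illustration only, not kernel code):

    #include <inttypes.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t amr     = 0x0c00000000000000ULL; /* current AMR (example value)      */
        uint64_t uamor   = 0x0f00000000000000ULL; /* bits userspace is allowed to set */
        uint64_t new_amr = 0xffffffffffffffffULL; /* value written via the PKEY regset */

        /* Same masking as pkey_set(): writable bits from new_amr, the rest preserved. */
        amr = (new_amr & uamor) | (amr & ~uamor);

        printf("resulting AMR = 0x%016" PRIx64 "\n", amr);
        return 0;
    }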
-
-/*
- * These are our native regset flavors.
- */
-enum powerpc_regset {
- REGSET_GPR,
- REGSET_FPR,
-#ifdef CONFIG_ALTIVEC
- REGSET_VMX,
-#endif
-#ifdef CONFIG_VSX
- REGSET_VSX,
-#endif
-#ifdef CONFIG_SPE
- REGSET_SPE,
-#endif
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
- REGSET_TM_CGPR, /* TM checkpointed GPR registers */
- REGSET_TM_CFPR, /* TM checkpointed FPR registers */
- REGSET_TM_CVMX, /* TM checkpointed VMX registers */
- REGSET_TM_CVSX, /* TM checkpointed VSX registers */
- REGSET_TM_SPR, /* TM specific SPR registers */
- REGSET_TM_CTAR, /* TM checkpointed TAR register */
- REGSET_TM_CPPR, /* TM checkpointed PPR register */
- REGSET_TM_CDSCR, /* TM checkpointed DSCR register */
-#endif
-#ifdef CONFIG_PPC64
- REGSET_PPR, /* PPR register */
- REGSET_DSCR, /* DSCR register */
-#endif
-#ifdef CONFIG_PPC_BOOK3S_64
- REGSET_TAR, /* TAR register */
- REGSET_EBB, /* EBB registers */
- REGSET_PMR, /* Performance Monitor Registers */
-#endif
-#ifdef CONFIG_PPC_MEM_KEYS
- REGSET_PKEY, /* AMR register */
-#endif
-};
-
-static const struct user_regset native_regsets[] = {
- [REGSET_GPR] = {
- .core_note_type = NT_PRSTATUS, .n = ELF_NGREG,
- .size = sizeof(long), .align = sizeof(long),
- .get = gpr_get, .set = gpr_set
- },
- [REGSET_FPR] = {
- .core_note_type = NT_PRFPREG, .n = ELF_NFPREG,
- .size = sizeof(double), .align = sizeof(double),
- .get = fpr_get, .set = fpr_set
- },
-#ifdef CONFIG_ALTIVEC
- [REGSET_VMX] = {
- .core_note_type = NT_PPC_VMX, .n = 34,
- .size = sizeof(vector128), .align = sizeof(vector128),
- .active = vr_active, .get = vr_get, .set = vr_set
- },
-#endif
-#ifdef CONFIG_VSX
- [REGSET_VSX] = {
- .core_note_type = NT_PPC_VSX, .n = 32,
- .size = sizeof(double), .align = sizeof(double),
- .active = vsr_active, .get = vsr_get, .set = vsr_set
- },
-#endif
-#ifdef CONFIG_SPE
- [REGSET_SPE] = {
- .core_note_type = NT_PPC_SPE, .n = 35,
- .size = sizeof(u32), .align = sizeof(u32),
- .active = evr_active, .get = evr_get, .set = evr_set
- },
-#endif
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
- [REGSET_TM_CGPR] = {
- .core_note_type = NT_PPC_TM_CGPR, .n = ELF_NGREG,
- .size = sizeof(long), .align = sizeof(long),
- .active = tm_cgpr_active, .get = tm_cgpr_get, .set = tm_cgpr_set
- },
- [REGSET_TM_CFPR] = {
- .core_note_type = NT_PPC_TM_CFPR, .n = ELF_NFPREG,
- .size = sizeof(double), .align = sizeof(double),
- .active = tm_cfpr_active, .get = tm_cfpr_get, .set = tm_cfpr_set
- },
- [REGSET_TM_CVMX] = {
- .core_note_type = NT_PPC_TM_CVMX, .n = ELF_NVMX,
- .size = sizeof(vector128), .align = sizeof(vector128),
- .active = tm_cvmx_active, .get = tm_cvmx_get, .set = tm_cvmx_set
- },
- [REGSET_TM_CVSX] = {
- .core_note_type = NT_PPC_TM_CVSX, .n = ELF_NVSX,
- .size = sizeof(double), .align = sizeof(double),
- .active = tm_cvsx_active, .get = tm_cvsx_get, .set = tm_cvsx_set
- },
- [REGSET_TM_SPR] = {
- .core_note_type = NT_PPC_TM_SPR, .n = ELF_NTMSPRREG,
- .size = sizeof(u64), .align = sizeof(u64),
- .active = tm_spr_active, .get = tm_spr_get, .set = tm_spr_set
- },
- [REGSET_TM_CTAR] = {
- .core_note_type = NT_PPC_TM_CTAR, .n = 1,
- .size = sizeof(u64), .align = sizeof(u64),
- .active = tm_tar_active, .get = tm_tar_get, .set = tm_tar_set
- },
- [REGSET_TM_CPPR] = {
- .core_note_type = NT_PPC_TM_CPPR, .n = 1,
- .size = sizeof(u64), .align = sizeof(u64),
- .active = tm_ppr_active, .get = tm_ppr_get, .set = tm_ppr_set
- },
- [REGSET_TM_CDSCR] = {
- .core_note_type = NT_PPC_TM_CDSCR, .n = 1,
- .size = sizeof(u64), .align = sizeof(u64),
- .active = tm_dscr_active, .get = tm_dscr_get, .set = tm_dscr_set
- },
-#endif
-#ifdef CONFIG_PPC64
- [REGSET_PPR] = {
- .core_note_type = NT_PPC_PPR, .n = 1,
- .size = sizeof(u64), .align = sizeof(u64),
- .get = ppr_get, .set = ppr_set
- },
- [REGSET_DSCR] = {
- .core_note_type = NT_PPC_DSCR, .n = 1,
- .size = sizeof(u64), .align = sizeof(u64),
- .get = dscr_get, .set = dscr_set
- },
-#endif
-#ifdef CONFIG_PPC_BOOK3S_64
- [REGSET_TAR] = {
- .core_note_type = NT_PPC_TAR, .n = 1,
- .size = sizeof(u64), .align = sizeof(u64),
- .get = tar_get, .set = tar_set
- },
- [REGSET_EBB] = {
- .core_note_type = NT_PPC_EBB, .n = ELF_NEBB,
- .size = sizeof(u64), .align = sizeof(u64),
- .active = ebb_active, .get = ebb_get, .set = ebb_set
- },
- [REGSET_PMR] = {
- .core_note_type = NT_PPC_PMU, .n = ELF_NPMU,
- .size = sizeof(u64), .align = sizeof(u64),
- .active = pmu_active, .get = pmu_get, .set = pmu_set
- },
-#endif
-#ifdef CONFIG_PPC_MEM_KEYS
- [REGSET_PKEY] = {
- .core_note_type = NT_PPC_PKEY, .n = ELF_NPKEY,
- .size = sizeof(u64), .align = sizeof(u64),
- .active = pkey_active, .get = pkey_get, .set = pkey_set
- },
-#endif
-};
-
-static const struct user_regset_view user_ppc_native_view = {
- .name = UTS_MACHINE, .e_machine = ELF_ARCH, .ei_osabi = ELF_OSABI,
- .regsets = native_regsets, .n = ARRAY_SIZE(native_regsets)
-};
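Each entry in the table above is reachable from userspace through PTRACE_GETREGSET / PTRACE_SETREGSET with the matching NT_* note type. A minimal tracer-side sketch (the tracee is assumed to already be ptrace-stopped) reading the GPR set:

    #include <elf.h>            /* NT_PRSTATUS */
    #include <sys/ptrace.h>
    #include <sys/types.h>
    #include <sys/uio.h>
    #include <asm/ptrace.h>     /* userspace view of struct pt_regs */

    /* Fetch the tracee's general-purpose register set. */
    static long read_gprs(pid_t pid, struct pt_regs *regs)
    {
        struct iovec iov = { .iov_base = regs, .iov_len = sizeof(*regs) };

        return ptrace(PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov);
    }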
-
-#ifdef CONFIG_PPC64
-#include <linux/compat.h>
-
-static int gpr32_get_common(struct task_struct *target,
- const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- void *kbuf, void __user *ubuf,
- unsigned long *regs)
-{
- compat_ulong_t *k = kbuf;
- compat_ulong_t __user *u = ubuf;
- compat_ulong_t reg;
-
- pos /= sizeof(reg);
- count /= sizeof(reg);
-
- if (kbuf)
- for (; count > 0 && pos < PT_MSR; --count)
- *k++ = regs[pos++];
- else
- for (; count > 0 && pos < PT_MSR; --count)
- if (__put_user((compat_ulong_t) regs[pos++], u++))
- return -EFAULT;
-
- if (count > 0 && pos == PT_MSR) {
- reg = get_user_msr(target);
- if (kbuf)
- *k++ = reg;
- else if (__put_user(reg, u++))
- return -EFAULT;
- ++pos;
- --count;
- }
-
- if (kbuf)
- for (; count > 0 && pos < PT_REGS_COUNT; --count)
- *k++ = regs[pos++];
- else
- for (; count > 0 && pos < PT_REGS_COUNT; --count)
- if (__put_user((compat_ulong_t) regs[pos++], u++))
- return -EFAULT;
-
- kbuf = k;
- ubuf = u;
- pos *= sizeof(reg);
- count *= sizeof(reg);
- return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
- PT_REGS_COUNT * sizeof(reg), -1);
-}
-
-static int gpr32_set_common(struct task_struct *target,
- const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- const void *kbuf, const void __user *ubuf,
- unsigned long *regs)
-{
- const compat_ulong_t *k = kbuf;
- const compat_ulong_t __user *u = ubuf;
- compat_ulong_t reg;
-
- pos /= sizeof(reg);
- count /= sizeof(reg);
-
- if (kbuf)
- for (; count > 0 && pos < PT_MSR; --count)
- regs[pos++] = *k++;
- else
- for (; count > 0 && pos < PT_MSR; --count) {
- if (__get_user(reg, u++))
- return -EFAULT;
- regs[pos++] = reg;
- }
-
-
- if (count > 0 && pos == PT_MSR) {
- if (kbuf)
- reg = *k++;
- else if (__get_user(reg, u++))
- return -EFAULT;
- set_user_msr(target, reg);
- ++pos;
- --count;
- }
-
- if (kbuf) {
- for (; count > 0 && pos <= PT_MAX_PUT_REG; --count)
- regs[pos++] = *k++;
- for (; count > 0 && pos < PT_TRAP; --count, ++pos)
- ++k;
- } else {
- for (; count > 0 && pos <= PT_MAX_PUT_REG; --count) {
- if (__get_user(reg, u++))
- return -EFAULT;
- regs[pos++] = reg;
- }
- for (; count > 0 && pos < PT_TRAP; --count, ++pos)
- if (__get_user(reg, u++))
- return -EFAULT;
- }
-
- if (count > 0 && pos == PT_TRAP) {
- if (kbuf)
- reg = *k++;
- else if (__get_user(reg, u++))
- return -EFAULT;
- set_user_trap(target, reg);
- ++pos;
- --count;
- }
-
- kbuf = k;
- ubuf = u;
- pos *= sizeof(reg);
- count *= sizeof(reg);
- return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
- (PT_TRAP + 1) * sizeof(reg), -1);
-}
-
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-static int tm_cgpr32_get(struct task_struct *target,
- const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- void *kbuf, void __user *ubuf)
-{
- return gpr32_get_common(target, regset, pos, count, kbuf, ubuf,
- &target->thread.ckpt_regs.gpr[0]);
-}
-
-static int tm_cgpr32_set(struct task_struct *target,
- const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- const void *kbuf, const void __user *ubuf)
-{
- return gpr32_set_common(target, regset, pos, count, kbuf, ubuf,
- &target->thread.ckpt_regs.gpr[0]);
-}
-#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
-
-static int gpr32_get(struct task_struct *target,
- const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- void *kbuf, void __user *ubuf)
-{
- int i;
-
- if (target->thread.regs == NULL)
- return -EIO;
-
- if (!FULL_REGS(target->thread.regs)) {
- /*
- * We have a partial register set.
- * Fill 14-31 with bogus values.
- */
- for (i = 14; i < 32; i++)
- target->thread.regs->gpr[i] = NV_REG_POISON;
- }
- return gpr32_get_common(target, regset, pos, count, kbuf, ubuf,
- &target->thread.regs->gpr[0]);
-}
-
-static int gpr32_set(struct task_struct *target,
- const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- const void *kbuf, const void __user *ubuf)
-{
- if (target->thread.regs == NULL)
- return -EIO;
-
- CHECK_FULL_REGS(target->thread.regs);
- return gpr32_set_common(target, regset, pos, count, kbuf, ubuf,
- &target->thread.regs->gpr[0]);
-}
-
-/*
- * These are the regset flavors matching the CONFIG_PPC32 native set.
- */
-static const struct user_regset compat_regsets[] = {
- [REGSET_GPR] = {
- .core_note_type = NT_PRSTATUS, .n = ELF_NGREG,
- .size = sizeof(compat_long_t), .align = sizeof(compat_long_t),
- .get = gpr32_get, .set = gpr32_set
- },
- [REGSET_FPR] = {
- .core_note_type = NT_PRFPREG, .n = ELF_NFPREG,
- .size = sizeof(double), .align = sizeof(double),
- .get = fpr_get, .set = fpr_set
- },
-#ifdef CONFIG_ALTIVEC
- [REGSET_VMX] = {
- .core_note_type = NT_PPC_VMX, .n = 34,
- .size = sizeof(vector128), .align = sizeof(vector128),
- .active = vr_active, .get = vr_get, .set = vr_set
- },
-#endif
-#ifdef CONFIG_SPE
- [REGSET_SPE] = {
- .core_note_type = NT_PPC_SPE, .n = 35,
- .size = sizeof(u32), .align = sizeof(u32),
- .active = evr_active, .get = evr_get, .set = evr_set
- },
-#endif
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
- [REGSET_TM_CGPR] = {
- .core_note_type = NT_PPC_TM_CGPR, .n = ELF_NGREG,
- .size = sizeof(long), .align = sizeof(long),
- .active = tm_cgpr_active,
- .get = tm_cgpr32_get, .set = tm_cgpr32_set
- },
- [REGSET_TM_CFPR] = {
- .core_note_type = NT_PPC_TM_CFPR, .n = ELF_NFPREG,
- .size = sizeof(double), .align = sizeof(double),
- .active = tm_cfpr_active, .get = tm_cfpr_get, .set = tm_cfpr_set
- },
- [REGSET_TM_CVMX] = {
- .core_note_type = NT_PPC_TM_CVMX, .n = ELF_NVMX,
- .size = sizeof(vector128), .align = sizeof(vector128),
- .active = tm_cvmx_active, .get = tm_cvmx_get, .set = tm_cvmx_set
- },
- [REGSET_TM_CVSX] = {
- .core_note_type = NT_PPC_TM_CVSX, .n = ELF_NVSX,
- .size = sizeof(double), .align = sizeof(double),
- .active = tm_cvsx_active, .get = tm_cvsx_get, .set = tm_cvsx_set
- },
- [REGSET_TM_SPR] = {
- .core_note_type = NT_PPC_TM_SPR, .n = ELF_NTMSPRREG,
- .size = sizeof(u64), .align = sizeof(u64),
- .active = tm_spr_active, .get = tm_spr_get, .set = tm_spr_set
- },
- [REGSET_TM_CTAR] = {
- .core_note_type = NT_PPC_TM_CTAR, .n = 1,
- .size = sizeof(u64), .align = sizeof(u64),
- .active = tm_tar_active, .get = tm_tar_get, .set = tm_tar_set
- },
- [REGSET_TM_CPPR] = {
- .core_note_type = NT_PPC_TM_CPPR, .n = 1,
- .size = sizeof(u64), .align = sizeof(u64),
- .active = tm_ppr_active, .get = tm_ppr_get, .set = tm_ppr_set
- },
- [REGSET_TM_CDSCR] = {
- .core_note_type = NT_PPC_TM_CDSCR, .n = 1,
- .size = sizeof(u64), .align = sizeof(u64),
- .active = tm_dscr_active, .get = tm_dscr_get, .set = tm_dscr_set
- },
-#endif
-#ifdef CONFIG_PPC64
- [REGSET_PPR] = {
- .core_note_type = NT_PPC_PPR, .n = 1,
- .size = sizeof(u64), .align = sizeof(u64),
- .get = ppr_get, .set = ppr_set
- },
- [REGSET_DSCR] = {
- .core_note_type = NT_PPC_DSCR, .n = 1,
- .size = sizeof(u64), .align = sizeof(u64),
- .get = dscr_get, .set = dscr_set
- },
-#endif
-#ifdef CONFIG_PPC_BOOK3S_64
- [REGSET_TAR] = {
- .core_note_type = NT_PPC_TAR, .n = 1,
- .size = sizeof(u64), .align = sizeof(u64),
- .get = tar_get, .set = tar_set
- },
- [REGSET_EBB] = {
- .core_note_type = NT_PPC_EBB, .n = ELF_NEBB,
- .size = sizeof(u64), .align = sizeof(u64),
- .active = ebb_active, .get = ebb_get, .set = ebb_set
- },
-#endif
-};
-
-static const struct user_regset_view user_ppc_compat_view = {
- .name = "ppc", .e_machine = EM_PPC, .ei_osabi = ELF_OSABI,
- .regsets = compat_regsets, .n = ARRAY_SIZE(compat_regsets)
-};
-#endif /* CONFIG_PPC64 */
-
-const struct user_regset_view *task_user_regset_view(struct task_struct *task)
-{
-#ifdef CONFIG_PPC64
- if (test_tsk_thread_flag(task, TIF_32BIT))
- return &user_ppc_compat_view;
-#endif
- return &user_ppc_native_view;
-}
-
-
-void user_enable_single_step(struct task_struct *task)
-{
- struct pt_regs *regs = task->thread.regs;
-
- if (regs != NULL) {
-#ifdef CONFIG_PPC_ADV_DEBUG_REGS
- task->thread.debug.dbcr0 &= ~DBCR0_BT;
- task->thread.debug.dbcr0 |= DBCR0_IDM | DBCR0_IC;
- regs->msr |= MSR_DE;
-#else
- regs->msr &= ~MSR_BE;
- regs->msr |= MSR_SE;
-#endif
- }
- set_tsk_thread_flag(task, TIF_SINGLESTEP);
-}
-
-void user_enable_block_step(struct task_struct *task)
-{
- struct pt_regs *regs = task->thread.regs;
-
- if (regs != NULL) {
-#ifdef CONFIG_PPC_ADV_DEBUG_REGS
- task->thread.debug.dbcr0 &= ~DBCR0_IC;
- task->thread.debug.dbcr0 = DBCR0_IDM | DBCR0_BT;
- regs->msr |= MSR_DE;
-#else
- regs->msr &= ~MSR_SE;
- regs->msr |= MSR_BE;
-#endif
- }
- set_tsk_thread_flag(task, TIF_SINGLESTEP);
-}
-
-void user_disable_single_step(struct task_struct *task)
-{
- struct pt_regs *regs = task->thread.regs;
-
- if (regs != NULL) {
-#ifdef CONFIG_PPC_ADV_DEBUG_REGS
- /*
- * The logic to disable single stepping should be as
- * simple as turning off the Instruction Complete flag.
- * And, after doing so, if all debug flags are off, turn
- * off DBCR0(IDM) and MSR(DE) .... Torez
- */
- task->thread.debug.dbcr0 &= ~(DBCR0_IC|DBCR0_BT);
- /*
- * Test to see if any of the DBCR_ACTIVE_EVENTS bits are set.
- */
- if (!DBCR_ACTIVE_EVENTS(task->thread.debug.dbcr0,
- task->thread.debug.dbcr1)) {
- /*
- * All debug events were off.....
- */
- task->thread.debug.dbcr0 &= ~DBCR0_IDM;
- regs->msr &= ~MSR_DE;
- }
-#else
- regs->msr &= ~(MSR_SE | MSR_BE);
-#endif
- }
- clear_tsk_thread_flag(task, TIF_SINGLESTEP);
-}
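These hooks are what service PTRACE_SINGLESTEP. A minimal tracer sketch (error handling omitted) that steps a child a few instructions before letting it run:

    #include <sys/ptrace.h>
    #include <sys/types.h>
    #include <sys/wait.h>
    #include <unistd.h>

    int main(void)
    {
        pid_t pid = fork();

        if (pid == 0) {
            ptrace(PTRACE_TRACEME, 0, NULL, NULL);     /* child: request tracing */
            execlp("true", "true", (char *)NULL);
            _exit(1);
        }

        waitpid(pid, NULL, 0);                         /* child stopped at exec */
        for (int i = 0; i < 5; i++) {
            /* each request lands in user_enable_single_step() on the kernel side */
            ptrace(PTRACE_SINGLESTEP, pid, NULL, NULL);
            waitpid(pid, NULL, 0);
        }
        ptrace(PTRACE_CONT, pid, NULL, NULL);
        waitpid(pid, NULL, 0);
        return 0;
    }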
-
-#ifdef CONFIG_HAVE_HW_BREAKPOINT
-void ptrace_triggered(struct perf_event *bp,
- struct perf_sample_data *data, struct pt_regs *regs)
-{
- struct perf_event_attr attr;
-
- /*
- * Disable the breakpoint request here since ptrace has defined a
- * one-shot behaviour for breakpoint exceptions in PPC64.
- * The SIGTRAP signal is generated automatically for us in do_dabr().
- * We don't have to do anything about that here
- */
- attr = bp->attr;
- attr.disabled = true;
- modify_user_hw_breakpoint(bp, &attr);
-}
-#endif /* CONFIG_HAVE_HW_BREAKPOINT */
-
-static int ptrace_set_debugreg(struct task_struct *task, unsigned long addr,
- unsigned long data)
-{
-#ifdef CONFIG_HAVE_HW_BREAKPOINT
- int ret;
- struct thread_struct *thread = &(task->thread);
- struct perf_event *bp;
- struct perf_event_attr attr;
-#endif /* CONFIG_HAVE_HW_BREAKPOINT */
-#ifndef CONFIG_PPC_ADV_DEBUG_REGS
- bool set_bp = true;
- struct arch_hw_breakpoint hw_brk;
-#endif
-
- /* For ppc64 we support one DABR and no IABR's at the moment (ppc64).
- * For embedded processors we support one DAC and no IAC's at the
- * moment.
- */
- if (addr > 0)
- return -EINVAL;
-
- /* The bottom 3 bits in dabr are flags */
- if ((data & ~0x7UL) >= TASK_SIZE)
- return -EIO;
-
-#ifndef CONFIG_PPC_ADV_DEBUG_REGS
- /* For processors using DABR (i.e. 970), the bottom 3 bits are flags.
- * It was assumed, on previous implementations, that 3 bits were
- * passed together with the data address, fitting the design of the
- * DABR register, as follows:
- *
- * bit 0: Read flag
- * bit 1: Write flag
- * bit 2: Breakpoint translation
- *
- * Thus, we use them here as so.
- */
-
- /* Ensure breakpoint translation bit is set */
- if (data && !(data & HW_BRK_TYPE_TRANSLATE))
- return -EIO;
- hw_brk.address = data & (~HW_BRK_TYPE_DABR);
- hw_brk.type = (data & HW_BRK_TYPE_DABR) | HW_BRK_TYPE_PRIV_ALL;
- hw_brk.len = DABR_MAX_LEN;
- hw_brk.hw_len = DABR_MAX_LEN;
- set_bp = (data) && (hw_brk.type & HW_BRK_TYPE_RDWR);
-#ifdef CONFIG_HAVE_HW_BREAKPOINT
- bp = thread->ptrace_bps[0];
- if (!set_bp) {
- if (bp) {
- unregister_hw_breakpoint(bp);
- thread->ptrace_bps[0] = NULL;
- }
- return 0;
- }
- if (bp) {
- attr = bp->attr;
- attr.bp_addr = hw_brk.address;
- attr.bp_len = DABR_MAX_LEN;
- arch_bp_generic_fields(hw_brk.type, &attr.bp_type);
-
- /* Enable breakpoint */
- attr.disabled = false;
-
- ret = modify_user_hw_breakpoint(bp, &attr);
- if (ret) {
- return ret;
- }
- thread->ptrace_bps[0] = bp;
- thread->hw_brk = hw_brk;
- return 0;
- }
-
- /* Create a new breakpoint request if one doesn't exist already */
- hw_breakpoint_init(&attr);
- attr.bp_addr = hw_brk.address;
- attr.bp_len = DABR_MAX_LEN;
- arch_bp_generic_fields(hw_brk.type,
- &attr.bp_type);
-
- thread->ptrace_bps[0] = bp = register_user_hw_breakpoint(&attr,
- ptrace_triggered, NULL, task);
- if (IS_ERR(bp)) {
- thread->ptrace_bps[0] = NULL;
- return PTR_ERR(bp);
- }
-
-#else /* !CONFIG_HAVE_HW_BREAKPOINT */
- if (set_bp && (!ppc_breakpoint_available()))
- return -ENODEV;
-#endif /* CONFIG_HAVE_HW_BREAKPOINT */
- task->thread.hw_brk = hw_brk;
-#else /* CONFIG_PPC_ADV_DEBUG_REGS */
- /* As described above, it was assumed 3 bits were passed with the data
- * address, but we will assume only the mode bits will be passed
- * as to not cause alignment restrictions for DAC-based processors.
- */
-
- /* DAC's hold the whole address without any mode flags */
- task->thread.debug.dac1 = data & ~0x3UL;
-
- if (task->thread.debug.dac1 == 0) {
- dbcr_dac(task) &= ~(DBCR_DAC1R | DBCR_DAC1W);
- if (!DBCR_ACTIVE_EVENTS(task->thread.debug.dbcr0,
- task->thread.debug.dbcr1)) {
- task->thread.regs->msr &= ~MSR_DE;
- task->thread.debug.dbcr0 &= ~DBCR0_IDM;
- }
- return 0;
- }
-
- /* Read or Write bits must be set */
-
- if (!(data & 0x3UL))
- return -EINVAL;
-
- /* Set the Internal Debugging flag (IDM bit 1) for the DBCR0
- register */
- task->thread.debug.dbcr0 |= DBCR0_IDM;
-
- /* Check for write and read flags and set DBCR0
- accordingly */
- dbcr_dac(task) &= ~(DBCR_DAC1R|DBCR_DAC1W);
- if (data & 0x1UL)
- dbcr_dac(task) |= DBCR_DAC1R;
- if (data & 0x2UL)
- dbcr_dac(task) |= DBCR_DAC1W;
- task->thread.regs->msr |= MSR_DE;
-#endif /* CONFIG_PPC_ADV_DEBUG_REGS */
- return 0;
-}
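From userspace this path is reached with PTRACE_SET_DEBUGREG, passing the address together with the flag bits described in the comment above (bit 0 read, bit 1 write, bit 2 translation). A hedged sketch; the flag macros below are illustrative names for those bits, not part of the uapi:

    #include <sys/ptrace.h>
    #include <sys/types.h>
    #include <asm/ptrace.h>     /* PTRACE_SET_DEBUGREG */

    #define DABR_FLAG_READ      0x1UL   /* illustrative: bit 0, trap on read  */
    #define DABR_FLAG_WRITE     0x2UL   /* illustrative: bit 1, trap on write */
    #define DABR_FLAG_TRANSLATE 0x4UL   /* illustrative: bit 2, must be set   */

    /* Arm a write watchpoint on 'addr' in the stopped tracee 'pid'. */
    static long set_write_watchpoint(pid_t pid, unsigned long addr)
    {
        unsigned long dabr = (addr & ~0x7UL) | DABR_FLAG_WRITE | DABR_FLAG_TRANSLATE;

        return ptrace(PTRACE_SET_DEBUGREG, pid, 0, dabr);
    }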
-
-/*
- * Called by kernel/ptrace.c when detaching..
- *
- * Make sure single step bits etc are not set.
- */
-void ptrace_disable(struct task_struct *child)
-{
- /* make sure the single step bit is not set. */
- user_disable_single_step(child);
-}
-
-#ifdef CONFIG_PPC_ADV_DEBUG_REGS
-static long set_instruction_bp(struct task_struct *child,
- struct ppc_hw_breakpoint *bp_info)
-{
- int slot;
- int slot1_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC1) != 0);
- int slot2_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC2) != 0);
- int slot3_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC3) != 0);
- int slot4_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC4) != 0);
-
- if (dbcr_iac_range(child) & DBCR_IAC12MODE)
- slot2_in_use = 1;
- if (dbcr_iac_range(child) & DBCR_IAC34MODE)
- slot4_in_use = 1;
-
- if (bp_info->addr >= TASK_SIZE)
- return -EIO;
-
- if (bp_info->addr_mode != PPC_BREAKPOINT_MODE_EXACT) {
-
- /* Make sure range is valid. */
- if (bp_info->addr2 >= TASK_SIZE)
- return -EIO;
-
-	/* We need a pair of IAC registers */
- if ((!slot1_in_use) && (!slot2_in_use)) {
- slot = 1;
- child->thread.debug.iac1 = bp_info->addr;
- child->thread.debug.iac2 = bp_info->addr2;
- child->thread.debug.dbcr0 |= DBCR0_IAC1;
- if (bp_info->addr_mode ==
- PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE)
- dbcr_iac_range(child) |= DBCR_IAC12X;
- else
- dbcr_iac_range(child) |= DBCR_IAC12I;
-#if CONFIG_PPC_ADV_DEBUG_IACS > 2
- } else if ((!slot3_in_use) && (!slot4_in_use)) {
- slot = 3;
- child->thread.debug.iac3 = bp_info->addr;
- child->thread.debug.iac4 = bp_info->addr2;
- child->thread.debug.dbcr0 |= DBCR0_IAC3;
- if (bp_info->addr_mode ==
- PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE)
- dbcr_iac_range(child) |= DBCR_IAC34X;
- else
- dbcr_iac_range(child) |= DBCR_IAC34I;
-#endif
- } else
- return -ENOSPC;
- } else {
- /* We only need one. If possible leave a pair free in
- * case a range is needed later
- */
- if (!slot1_in_use) {
- /*
- * Don't use iac1 if iac1-iac2 are free and either
- * iac3 or iac4 (but not both) are free
- */
- if (slot2_in_use || (slot3_in_use == slot4_in_use)) {
- slot = 1;
- child->thread.debug.iac1 = bp_info->addr;
- child->thread.debug.dbcr0 |= DBCR0_IAC1;
- goto out;
- }
- }
- if (!slot2_in_use) {
- slot = 2;
- child->thread.debug.iac2 = bp_info->addr;
- child->thread.debug.dbcr0 |= DBCR0_IAC2;
-#if CONFIG_PPC_ADV_DEBUG_IACS > 2
- } else if (!slot3_in_use) {
- slot = 3;
- child->thread.debug.iac3 = bp_info->addr;
- child->thread.debug.dbcr0 |= DBCR0_IAC3;
- } else if (!slot4_in_use) {
- slot = 4;
- child->thread.debug.iac4 = bp_info->addr;
- child->thread.debug.dbcr0 |= DBCR0_IAC4;
-#endif
- } else
- return -ENOSPC;
- }
-out:
- child->thread.debug.dbcr0 |= DBCR0_IDM;
- child->thread.regs->msr |= MSR_DE;
-
- return slot;
-}
-
-static int del_instruction_bp(struct task_struct *child, int slot)
-{
- switch (slot) {
- case 1:
- if ((child->thread.debug.dbcr0 & DBCR0_IAC1) == 0)
- return -ENOENT;
-
- if (dbcr_iac_range(child) & DBCR_IAC12MODE) {
- /* address range - clear slots 1 & 2 */
- child->thread.debug.iac2 = 0;
- dbcr_iac_range(child) &= ~DBCR_IAC12MODE;
- }
- child->thread.debug.iac1 = 0;
- child->thread.debug.dbcr0 &= ~DBCR0_IAC1;
- break;
- case 2:
- if ((child->thread.debug.dbcr0 & DBCR0_IAC2) == 0)
- return -ENOENT;
-
- if (dbcr_iac_range(child) & DBCR_IAC12MODE)
- /* used in a range */
- return -EINVAL;
- child->thread.debug.iac2 = 0;
- child->thread.debug.dbcr0 &= ~DBCR0_IAC2;
- break;
-#if CONFIG_PPC_ADV_DEBUG_IACS > 2
- case 3:
- if ((child->thread.debug.dbcr0 & DBCR0_IAC3) == 0)
- return -ENOENT;
-
- if (dbcr_iac_range(child) & DBCR_IAC34MODE) {
- /* address range - clear slots 3 & 4 */
- child->thread.debug.iac4 = 0;
- dbcr_iac_range(child) &= ~DBCR_IAC34MODE;
- }
- child->thread.debug.iac3 = 0;
- child->thread.debug.dbcr0 &= ~DBCR0_IAC3;
- break;
- case 4:
- if ((child->thread.debug.dbcr0 & DBCR0_IAC4) == 0)
- return -ENOENT;
-
- if (dbcr_iac_range(child) & DBCR_IAC34MODE)
- /* Used in a range */
- return -EINVAL;
- child->thread.debug.iac4 = 0;
- child->thread.debug.dbcr0 &= ~DBCR0_IAC4;
- break;
-#endif
- default:
- return -EINVAL;
- }
- return 0;
-}
-
-static int set_dac(struct task_struct *child, struct ppc_hw_breakpoint *bp_info)
-{
- int byte_enable =
- (bp_info->condition_mode >> PPC_BREAKPOINT_CONDITION_BE_SHIFT)
- & 0xf;
- int condition_mode =
- bp_info->condition_mode & PPC_BREAKPOINT_CONDITION_MODE;
- int slot;
-
- if (byte_enable && (condition_mode == 0))
- return -EINVAL;
-
- if (bp_info->addr >= TASK_SIZE)
- return -EIO;
-
- if ((dbcr_dac(child) & (DBCR_DAC1R | DBCR_DAC1W)) == 0) {
- slot = 1;
- if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
- dbcr_dac(child) |= DBCR_DAC1R;
- if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
- dbcr_dac(child) |= DBCR_DAC1W;
- child->thread.debug.dac1 = (unsigned long)bp_info->addr;
-#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
- if (byte_enable) {
- child->thread.debug.dvc1 =
- (unsigned long)bp_info->condition_value;
- child->thread.debug.dbcr2 |=
- ((byte_enable << DBCR2_DVC1BE_SHIFT) |
- (condition_mode << DBCR2_DVC1M_SHIFT));
- }
-#endif
-#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
- } else if (child->thread.debug.dbcr2 & DBCR2_DAC12MODE) {
- /* Both dac1 and dac2 are part of a range */
- return -ENOSPC;
-#endif
- } else if ((dbcr_dac(child) & (DBCR_DAC2R | DBCR_DAC2W)) == 0) {
- slot = 2;
- if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
- dbcr_dac(child) |= DBCR_DAC2R;
- if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
- dbcr_dac(child) |= DBCR_DAC2W;
- child->thread.debug.dac2 = (unsigned long)bp_info->addr;
-#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
- if (byte_enable) {
- child->thread.debug.dvc2 =
- (unsigned long)bp_info->condition_value;
- child->thread.debug.dbcr2 |=
- ((byte_enable << DBCR2_DVC2BE_SHIFT) |
- (condition_mode << DBCR2_DVC2M_SHIFT));
- }
-#endif
- } else
- return -ENOSPC;
- child->thread.debug.dbcr0 |= DBCR0_IDM;
- child->thread.regs->msr |= MSR_DE;
-
- return slot + 4;
-}
-
-static int del_dac(struct task_struct *child, int slot)
-{
- if (slot == 1) {
- if ((dbcr_dac(child) & (DBCR_DAC1R | DBCR_DAC1W)) == 0)
- return -ENOENT;
-
- child->thread.debug.dac1 = 0;
- dbcr_dac(child) &= ~(DBCR_DAC1R | DBCR_DAC1W);
-#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
- if (child->thread.debug.dbcr2 & DBCR2_DAC12MODE) {
- child->thread.debug.dac2 = 0;
- child->thread.debug.dbcr2 &= ~DBCR2_DAC12MODE;
- }
- child->thread.debug.dbcr2 &= ~(DBCR2_DVC1M | DBCR2_DVC1BE);
-#endif
-#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
- child->thread.debug.dvc1 = 0;
-#endif
- } else if (slot == 2) {
- if ((dbcr_dac(child) & (DBCR_DAC2R | DBCR_DAC2W)) == 0)
- return -ENOENT;
-
-#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
- if (child->thread.debug.dbcr2 & DBCR2_DAC12MODE)
- /* Part of a range */
- return -EINVAL;
- child->thread.debug.dbcr2 &= ~(DBCR2_DVC2M | DBCR2_DVC2BE);
-#endif
-#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
- child->thread.debug.dvc2 = 0;
-#endif
- child->thread.debug.dac2 = 0;
- dbcr_dac(child) &= ~(DBCR_DAC2R | DBCR_DAC2W);
- } else
- return -EINVAL;
-
- return 0;
-}
-#endif /* CONFIG_PPC_ADV_DEBUG_REGS */
-
-#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
-static int set_dac_range(struct task_struct *child,
- struct ppc_hw_breakpoint *bp_info)
-{
- int mode = bp_info->addr_mode & PPC_BREAKPOINT_MODE_MASK;
-
- /* We don't allow range watchpoints to be used with DVC */
- if (bp_info->condition_mode)
- return -EINVAL;
-
- /*
- * Best effort to verify the address range. The user/supervisor bits
- * prevent trapping in kernel space, but let's fail on an obvious bad
- * range. The simple test on the mask is not fool-proof, and any
- * exclusive range will spill over into kernel space.
- */
- if (bp_info->addr >= TASK_SIZE)
- return -EIO;
- if (mode == PPC_BREAKPOINT_MODE_MASK) {
- /*
- * dac2 is a bitmask. Don't allow a mask that makes a
- * kernel space address from a valid dac1 value
- */
- if (~((unsigned long)bp_info->addr2) >= TASK_SIZE)
- return -EIO;
- } else {
- /*
- * For range breakpoints, addr2 must also be a valid address
- */
- if (bp_info->addr2 >= TASK_SIZE)
- return -EIO;
- }
-
- if (child->thread.debug.dbcr0 &
- (DBCR0_DAC1R | DBCR0_DAC1W | DBCR0_DAC2R | DBCR0_DAC2W))
- return -ENOSPC;
-
- if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
- child->thread.debug.dbcr0 |= (DBCR0_DAC1R | DBCR0_IDM);
- if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
- child->thread.debug.dbcr0 |= (DBCR0_DAC1W | DBCR0_IDM);
- child->thread.debug.dac1 = bp_info->addr;
- child->thread.debug.dac2 = bp_info->addr2;
- if (mode == PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE)
- child->thread.debug.dbcr2 |= DBCR2_DAC12M;
- else if (mode == PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE)
- child->thread.debug.dbcr2 |= DBCR2_DAC12MX;
- else /* PPC_BREAKPOINT_MODE_MASK */
- child->thread.debug.dbcr2 |= DBCR2_DAC12MM;
- child->thread.regs->msr |= MSR_DE;
-
- return 5;
-}
-#endif /* CONFIG_PPC_ADV_DEBUG_DAC_RANGE */
-
-static long ppc_set_hwdebug(struct task_struct *child,
- struct ppc_hw_breakpoint *bp_info)
-{
-#ifdef CONFIG_HAVE_HW_BREAKPOINT
- int len = 0;
- struct thread_struct *thread = &(child->thread);
- struct perf_event *bp;
- struct perf_event_attr attr;
-#endif /* CONFIG_HAVE_HW_BREAKPOINT */
-#ifndef CONFIG_PPC_ADV_DEBUG_REGS
- struct arch_hw_breakpoint brk;
-#endif
-
- if (bp_info->version != 1)
- return -ENOTSUPP;
-#ifdef CONFIG_PPC_ADV_DEBUG_REGS
- /*
- * Check for invalid flags and combinations
- */
- if ((bp_info->trigger_type == 0) ||
- (bp_info->trigger_type & ~(PPC_BREAKPOINT_TRIGGER_EXECUTE |
- PPC_BREAKPOINT_TRIGGER_RW)) ||
- (bp_info->addr_mode & ~PPC_BREAKPOINT_MODE_MASK) ||
- (bp_info->condition_mode &
- ~(PPC_BREAKPOINT_CONDITION_MODE |
- PPC_BREAKPOINT_CONDITION_BE_ALL)))
- return -EINVAL;
-#if CONFIG_PPC_ADV_DEBUG_DVCS == 0
- if (bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE)
- return -EINVAL;
-#endif
-
- if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_EXECUTE) {
- if ((bp_info->trigger_type != PPC_BREAKPOINT_TRIGGER_EXECUTE) ||
- (bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE))
- return -EINVAL;
- return set_instruction_bp(child, bp_info);
- }
- if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_EXACT)
- return set_dac(child, bp_info);
-
-#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
- return set_dac_range(child, bp_info);
-#else
- return -EINVAL;
-#endif
-#else /* !CONFIG_PPC_ADV_DEBUG_DVCS */
- /*
- * We only support one data breakpoint
- */
- if ((bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_RW) == 0 ||
- (bp_info->trigger_type & ~PPC_BREAKPOINT_TRIGGER_RW) != 0 ||
- bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE)
- return -EINVAL;
-
- if ((unsigned long)bp_info->addr >= TASK_SIZE)
- return -EIO;
-
- brk.address = bp_info->addr & ~HW_BREAKPOINT_ALIGN;
- brk.type = HW_BRK_TYPE_TRANSLATE;
- brk.len = DABR_MAX_LEN;
- if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
- brk.type |= HW_BRK_TYPE_READ;
- if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
- brk.type |= HW_BRK_TYPE_WRITE;
-#ifdef CONFIG_HAVE_HW_BREAKPOINT
- if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE)
- len = bp_info->addr2 - bp_info->addr;
- else if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_EXACT)
- len = 1;
- else
- return -EINVAL;
- bp = thread->ptrace_bps[0];
- if (bp)
- return -ENOSPC;
-
- /* Create a new breakpoint request if one doesn't exist already */
- hw_breakpoint_init(&attr);
- attr.bp_addr = (unsigned long)bp_info->addr;
- attr.bp_len = len;
- arch_bp_generic_fields(brk.type, &attr.bp_type);
-
- thread->ptrace_bps[0] = bp = register_user_hw_breakpoint(&attr,
- ptrace_triggered, NULL, child);
- if (IS_ERR(bp)) {
- thread->ptrace_bps[0] = NULL;
- return PTR_ERR(bp);
- }
-
- return 1;
-#endif /* CONFIG_HAVE_HW_BREAKPOINT */
-
- if (bp_info->addr_mode != PPC_BREAKPOINT_MODE_EXACT)
- return -EINVAL;
-
- if (child->thread.hw_brk.address)
- return -ENOSPC;
-
- if (!ppc_breakpoint_available())
- return -ENODEV;
-
- child->thread.hw_brk = brk;
-
- return 1;
-#endif /* !CONFIG_PPC_ADV_DEBUG_DVCS */
-}
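Userspace reaches ppc_set_hwdebug() through the PPC_PTRACE_SETHWDEBUG request with a struct ppc_hw_breakpoint. A minimal sketch requesting an exact-address write watchpoint on an already-stopped tracee:

    #include <string.h>
    #include <sys/ptrace.h>
    #include <sys/types.h>
    #include <asm/ptrace.h>     /* PPC_PTRACE_SETHWDEBUG, struct ppc_hw_breakpoint */

    /* Returns the positive slot number to pass to PPC_PTRACE_DELHWDEBUG later. */
    static long set_hw_write_bp(pid_t pid, unsigned long addr)
    {
        struct ppc_hw_breakpoint bp;

        memset(&bp, 0, sizeof(bp));
        bp.version        = 1;
        bp.trigger_type   = PPC_BREAKPOINT_TRIGGER_WRITE;
        bp.addr_mode      = PPC_BREAKPOINT_MODE_EXACT;
        bp.condition_mode = PPC_BREAKPOINT_CONDITION_NONE;
        bp.addr           = addr;

        return ptrace(PPC_PTRACE_SETHWDEBUG, pid, 0, &bp);
    }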
-
-static long ppc_del_hwdebug(struct task_struct *child, long data)
-{
-#ifdef CONFIG_HAVE_HW_BREAKPOINT
- int ret = 0;
- struct thread_struct *thread = &(child->thread);
- struct perf_event *bp;
-#endif /* CONFIG_HAVE_HW_BREAKPOINT */
-#ifdef CONFIG_PPC_ADV_DEBUG_REGS
- int rc;
-
- if (data <= 4)
- rc = del_instruction_bp(child, (int)data);
- else
- rc = del_dac(child, (int)data - 4);
-
- if (!rc) {
- if (!DBCR_ACTIVE_EVENTS(child->thread.debug.dbcr0,
- child->thread.debug.dbcr1)) {
- child->thread.debug.dbcr0 &= ~DBCR0_IDM;
- child->thread.regs->msr &= ~MSR_DE;
- }
- }
- return rc;
-#else
- if (data != 1)
- return -EINVAL;
-
-#ifdef CONFIG_HAVE_HW_BREAKPOINT
- bp = thread->ptrace_bps[0];
- if (bp) {
- unregister_hw_breakpoint(bp);
- thread->ptrace_bps[0] = NULL;
- } else
- ret = -ENOENT;
- return ret;
-#else /* CONFIG_HAVE_HW_BREAKPOINT */
- if (child->thread.hw_brk.address == 0)
- return -ENOENT;
-
- child->thread.hw_brk.address = 0;
- child->thread.hw_brk.type = 0;
-#endif /* CONFIG_HAVE_HW_BREAKPOINT */
-
- return 0;
-#endif
-}
-
-long arch_ptrace(struct task_struct *child, long request,
- unsigned long addr, unsigned long data)
-{
- int ret = -EPERM;
- void __user *datavp = (void __user *) data;
- unsigned long __user *datalp = datavp;
-
- switch (request) {
- /* read the word at location addr in the USER area. */
- case PTRACE_PEEKUSR: {
- unsigned long index, tmp;
-
- ret = -EIO;
- /* convert to index and check */
-#ifdef CONFIG_PPC32
- index = addr >> 2;
- if ((addr & 3) || (index > PT_FPSCR)
- || (child->thread.regs == NULL))
-#else
- index = addr >> 3;
- if ((addr & 7) || (index > PT_FPSCR))
-#endif
- break;
-
- CHECK_FULL_REGS(child->thread.regs);
- if (index < PT_FPR0) {
- ret = ptrace_get_reg(child, (int) index, &tmp);
- if (ret)
- break;
- } else {
- unsigned int fpidx = index - PT_FPR0;
-
- flush_fp_to_thread(child);
- if (fpidx < (PT_FPSCR - PT_FPR0))
- memcpy(&tmp, &child->thread.TS_FPR(fpidx),
- sizeof(long));
- else
- tmp = child->thread.fp_state.fpscr;
- }
- ret = put_user(tmp, datalp);
- break;
- }
-
- /* write the word at location addr in the USER area */
- case PTRACE_POKEUSR: {
- unsigned long index;
-
- ret = -EIO;
- /* convert to index and check */
-#ifdef CONFIG_PPC32
- index = addr >> 2;
- if ((addr & 3) || (index > PT_FPSCR)
- || (child->thread.regs == NULL))
-#else
- index = addr >> 3;
- if ((addr & 7) || (index > PT_FPSCR))
-#endif
- break;
-
- CHECK_FULL_REGS(child->thread.regs);
- if (index < PT_FPR0) {
- ret = ptrace_put_reg(child, index, data);
- } else {
- unsigned int fpidx = index - PT_FPR0;
-
- flush_fp_to_thread(child);
- if (fpidx < (PT_FPSCR - PT_FPR0))
- memcpy(&child->thread.TS_FPR(fpidx), &data,
- sizeof(long));
- else
- child->thread.fp_state.fpscr = data;
- ret = 0;
- }
- break;
- }
-
- case PPC_PTRACE_GETHWDBGINFO: {
- struct ppc_debug_info dbginfo;
-
- dbginfo.version = 1;
-#ifdef CONFIG_PPC_ADV_DEBUG_REGS
- dbginfo.num_instruction_bps = CONFIG_PPC_ADV_DEBUG_IACS;
- dbginfo.num_data_bps = CONFIG_PPC_ADV_DEBUG_DACS;
- dbginfo.num_condition_regs = CONFIG_PPC_ADV_DEBUG_DVCS;
- dbginfo.data_bp_alignment = 4;
- dbginfo.sizeof_condition = 4;
- dbginfo.features = PPC_DEBUG_FEATURE_INSN_BP_RANGE |
- PPC_DEBUG_FEATURE_INSN_BP_MASK;
-#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
- dbginfo.features |=
- PPC_DEBUG_FEATURE_DATA_BP_RANGE |
- PPC_DEBUG_FEATURE_DATA_BP_MASK;
-#endif
-#else /* !CONFIG_PPC_ADV_DEBUG_REGS */
- dbginfo.num_instruction_bps = 0;
- if (ppc_breakpoint_available())
- dbginfo.num_data_bps = 1;
- else
- dbginfo.num_data_bps = 0;
- dbginfo.num_condition_regs = 0;
-#ifdef CONFIG_PPC64
- dbginfo.data_bp_alignment = 8;
-#else
- dbginfo.data_bp_alignment = 4;
-#endif
- dbginfo.sizeof_condition = 0;
-#ifdef CONFIG_HAVE_HW_BREAKPOINT
- dbginfo.features = PPC_DEBUG_FEATURE_DATA_BP_RANGE;
- if (dawr_enabled())
- dbginfo.features |= PPC_DEBUG_FEATURE_DATA_BP_DAWR;
-#else
- dbginfo.features = 0;
-#endif /* CONFIG_HAVE_HW_BREAKPOINT */
-#endif /* CONFIG_PPC_ADV_DEBUG_REGS */
-
- if (copy_to_user(datavp, &dbginfo,
- sizeof(struct ppc_debug_info)))
- return -EFAULT;
- return 0;
- }
-
- case PPC_PTRACE_SETHWDEBUG: {
- struct ppc_hw_breakpoint bp_info;
-
- if (copy_from_user(&bp_info, datavp,
- sizeof(struct ppc_hw_breakpoint)))
- return -EFAULT;
- return ppc_set_hwdebug(child, &bp_info);
- }
-
- case PPC_PTRACE_DELHWDEBUG: {
- ret = ppc_del_hwdebug(child, data);
- break;
- }
-
- case PTRACE_GET_DEBUGREG: {
-#ifndef CONFIG_PPC_ADV_DEBUG_REGS
- unsigned long dabr_fake;
-#endif
- ret = -EINVAL;
- /* We only support one DABR and no IABRS at the moment */
- if (addr > 0)
- break;
-#ifdef CONFIG_PPC_ADV_DEBUG_REGS
- ret = put_user(child->thread.debug.dac1, datalp);
-#else
- dabr_fake = ((child->thread.hw_brk.address & (~HW_BRK_TYPE_DABR)) |
- (child->thread.hw_brk.type & HW_BRK_TYPE_DABR));
- ret = put_user(dabr_fake, datalp);
-#endif
- break;
- }
-
- case PTRACE_SET_DEBUGREG:
- ret = ptrace_set_debugreg(child, addr, data);
- break;
-
-#ifdef CONFIG_PPC64
- case PTRACE_GETREGS64:
-#endif
- case PTRACE_GETREGS: /* Get all pt_regs from the child. */
- return copy_regset_to_user(child, &user_ppc_native_view,
- REGSET_GPR,
- 0, sizeof(struct user_pt_regs),
- datavp);
-
-#ifdef CONFIG_PPC64
- case PTRACE_SETREGS64:
-#endif
- case PTRACE_SETREGS: /* Set all gp regs in the child. */
- return copy_regset_from_user(child, &user_ppc_native_view,
- REGSET_GPR,
- 0, sizeof(struct user_pt_regs),
- datavp);
-
- case PTRACE_GETFPREGS: /* Get the child FPU state (FPR0...31 + FPSCR) */
- return copy_regset_to_user(child, &user_ppc_native_view,
- REGSET_FPR,
- 0, sizeof(elf_fpregset_t),
- datavp);
-
- case PTRACE_SETFPREGS: /* Set the child FPU state (FPR0...31 + FPSCR) */
- return copy_regset_from_user(child, &user_ppc_native_view,
- REGSET_FPR,
- 0, sizeof(elf_fpregset_t),
- datavp);
-
-#ifdef CONFIG_ALTIVEC
- case PTRACE_GETVRREGS:
- return copy_regset_to_user(child, &user_ppc_native_view,
- REGSET_VMX,
- 0, (33 * sizeof(vector128) +
- sizeof(u32)),
- datavp);
-
- case PTRACE_SETVRREGS:
- return copy_regset_from_user(child, &user_ppc_native_view,
- REGSET_VMX,
- 0, (33 * sizeof(vector128) +
- sizeof(u32)),
- datavp);
-#endif
-#ifdef CONFIG_VSX
- case PTRACE_GETVSRREGS:
- return copy_regset_to_user(child, &user_ppc_native_view,
- REGSET_VSX,
- 0, 32 * sizeof(double),
- datavp);
-
- case PTRACE_SETVSRREGS:
- return copy_regset_from_user(child, &user_ppc_native_view,
- REGSET_VSX,
- 0, 32 * sizeof(double),
- datavp);
-#endif
-#ifdef CONFIG_SPE
- case PTRACE_GETEVRREGS:
- /* Get the child spe register state. */
- return copy_regset_to_user(child, &user_ppc_native_view,
- REGSET_SPE, 0, 35 * sizeof(u32),
- datavp);
-
- case PTRACE_SETEVRREGS:
- /* Set the child spe register state. */
- return copy_regset_from_user(child, &user_ppc_native_view,
- REGSET_SPE, 0, 35 * sizeof(u32),
- datavp);
-#endif
-
- default:
- ret = ptrace_request(child, request, addr, data);
- break;
- }
- return ret;
-}
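A tracer typically starts by probing the hardware debug capabilities advertised through PPC_PTRACE_GETHWDBGINFO before choosing between PTRACE_SET_DEBUGREG and PPC_PTRACE_SETHWDEBUG. A minimal sketch of that query:

    #include <stdio.h>
    #include <sys/ptrace.h>
    #include <sys/types.h>
    #include <asm/ptrace.h>     /* PPC_PTRACE_GETHWDBGINFO, struct ppc_debug_info */

    static int probe_hw_debug(pid_t pid)
    {
        struct ppc_debug_info info;

        if (ptrace(PPC_PTRACE_GETHWDBGINFO, pid, 0, &info) < 0)
            return -1;

        printf("data bps: %u, insn bps: %u, alignment: %u, features: 0x%llx\n",
               info.num_data_bps, info.num_instruction_bps,
               info.data_bp_alignment, (unsigned long long)info.features);
        return 0;
    }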
-
-#ifdef CONFIG_SECCOMP
-static int do_seccomp(struct pt_regs *regs)
-{
- if (!test_thread_flag(TIF_SECCOMP))
- return 0;
-
- /*
- * The ABI we present to seccomp tracers is that r3 contains
- * the syscall return value and orig_gpr3 contains the first
- * syscall parameter. This is different to the ptrace ABI where
- * both r3 and orig_gpr3 contain the first syscall parameter.
- */
- regs->gpr[3] = -ENOSYS;
-
- /*
- * We use the __ version here because we have already checked
- * TIF_SECCOMP. If this fails, there is nothing left to do, we
- * have already loaded -ENOSYS into r3, or seccomp has put
- * something else in r3 (via SECCOMP_RET_ERRNO/TRACE).
- */
- if (__secure_computing(NULL))
- return -1;
-
- /*
- * The syscall was allowed by seccomp, restore the register
- * state to what audit expects.
- * Note that we use orig_gpr3, which means a seccomp tracer can
- * modify the first syscall parameter (in orig_gpr3) and also
- * allow the syscall to proceed.
- */
- regs->gpr[3] = regs->orig_gpr3;
-
- return 0;
-}
-#else
-static inline int do_seccomp(struct pt_regs *regs) { return 0; }
-#endif /* CONFIG_SECCOMP */
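The r3 / orig_gpr3 split described above is visible to a tracer at a syscall stop: the syscall number is in r0, the first argument is preserved in orig_gpr3, and r3 only becomes the return value once the syscall has run or been rejected. A minimal sketch using the PT_* indices (PTRACE_PEEKUSER addresses are index * sizeof(long)):

    #include <stdio.h>
    #include <sys/ptrace.h>
    #include <sys/types.h>
    #include <asm/ptrace.h>     /* PT_R0, PT_R3, PT_ORIG_R3 */

    /* Dump the syscall number, first argument and (eventual) return value. */
    static void dump_syscall(pid_t pid)
    {
        long nr   = ptrace(PTRACE_PEEKUSER, pid, PT_R0 * sizeof(long), 0);
        long arg0 = ptrace(PTRACE_PEEKUSER, pid, PT_ORIG_R3 * sizeof(long), 0);
        long r3   = ptrace(PTRACE_PEEKUSER, pid, PT_R3 * sizeof(long), 0);

        printf("syscall %ld(arg0=%#lx) -> r3=%#lx\n", nr, arg0, r3);
    }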
-
-/**
- * do_syscall_trace_enter() - Do syscall tracing on kernel entry.
- * @regs: the pt_regs of the task to trace (current)
- *
- * Performs various types of tracing on syscall entry. This includes seccomp,
- * ptrace, syscall tracepoints and audit.
- *
- * The pt_regs are potentially visible to userspace via ptrace, so their
- * contents is ABI.
- *
- * One or more of the tracers may modify the contents of pt_regs, in particular
- * to modify arguments or even the syscall number itself.
- *
- * It's also possible that a tracer can choose to reject the system call. In
- * that case this function will return an illegal syscall number, and will put
- * an appropriate return value in regs->r3.
- *
- * Return: the (possibly changed) syscall number.
- */
-long do_syscall_trace_enter(struct pt_regs *regs)
-{
- u32 flags;
-
- user_exit();
-
- flags = READ_ONCE(current_thread_info()->flags) &
- (_TIF_SYSCALL_EMU | _TIF_SYSCALL_TRACE);
-
- if (flags) {
- int rc = tracehook_report_syscall_entry(regs);
-
- if (unlikely(flags & _TIF_SYSCALL_EMU)) {
- /*
- * A nonzero return code from
- * tracehook_report_syscall_entry() tells us to prevent
- * the syscall execution, but we are not going to
- * execute it anyway.
- *
- * Returning -1 will skip the syscall execution. We want
- * to avoid clobbering any registers, so we don't goto
- * the skip label below.
- */
- return -1;
- }
-
- if (rc) {
- /*
- * The tracer decided to abort the syscall. Note that
- * the tracer may also just change regs->gpr[0] to an
- * invalid syscall number, that is handled below on the
- * exit path.
- */
- goto skip;
- }
- }
-
- /* Run seccomp after ptrace; allow it to set gpr[3]. */
- if (do_seccomp(regs))
- return -1;
-
- /* Avoid trace and audit when syscall is invalid. */
- if (regs->gpr[0] >= NR_syscalls)
- goto skip;
-
- if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
- trace_sys_enter(regs, regs->gpr[0]);
-
-#ifdef CONFIG_PPC64
- if (!is_32bit_task())
- audit_syscall_entry(regs->gpr[0], regs->gpr[3], regs->gpr[4],
- regs->gpr[5], regs->gpr[6]);
- else
-#endif
- audit_syscall_entry(regs->gpr[0],
- regs->gpr[3] & 0xffffffff,
- regs->gpr[4] & 0xffffffff,
- regs->gpr[5] & 0xffffffff,
- regs->gpr[6] & 0xffffffff);
-
- /* Return the possibly modified but valid syscall number */
- return regs->gpr[0];
-
-skip:
- /*
- * If we are aborting explicitly, or if the syscall number is
- * now invalid, set the return value to -ENOSYS.
- */
- regs->gpr[3] = -ENOSYS;
- return -1;
-}
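As the comments note, a tracer can veto a syscall at the entry stop simply by rewriting gpr[0] to an invalid syscall number; the exit path then hands back -ENOSYS. A hedged tracer-side sketch (0xffff is assumed here to be well above NR_syscalls):

    #include <sys/ptrace.h>
    #include <sys/types.h>
    #include <sys/wait.h>
    #include <asm/ptrace.h>     /* PT_R0 */

    /* At a syscall-entry stop, make the pending syscall number invalid. */
    static long veto_syscall(pid_t pid)
    {
        return ptrace(PTRACE_POKEUSER, pid, PT_R0 * sizeof(long), 0xffffUL);
    }

    /* Typical driver loop: stop the child at every syscall entry and exit. */
    static void trace_loop(pid_t pid)
    {
        int status;

        while (waitpid(pid, &status, 0) > 0 && !WIFEXITED(status))
            ptrace(PTRACE_SYSCALL, pid, NULL, NULL);
    }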
-
-void do_syscall_trace_leave(struct pt_regs *regs)
-{
- int step;
-
- audit_syscall_exit(regs);
-
- if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
- trace_sys_exit(regs, regs->result);
-
- step = test_thread_flag(TIF_SINGLESTEP);
- if (step || test_thread_flag(TIF_SYSCALL_TRACE))
- tracehook_report_syscall_exit(regs, step);
-
- user_enter();
-}
-
-void __init pt_regs_check(void);
-
-/*
- * Dummy function, its purpose is to break the build if struct pt_regs and
- * struct user_pt_regs don't match.
- */
-void __init pt_regs_check(void)
-{
- BUILD_BUG_ON(offsetof(struct pt_regs, gpr) !=
- offsetof(struct user_pt_regs, gpr));
- BUILD_BUG_ON(offsetof(struct pt_regs, nip) !=
- offsetof(struct user_pt_regs, nip));
- BUILD_BUG_ON(offsetof(struct pt_regs, msr) !=
- offsetof(struct user_pt_regs, msr));
- BUILD_BUG_ON(offsetof(struct pt_regs, msr) !=
- offsetof(struct user_pt_regs, msr));
- BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
- offsetof(struct user_pt_regs, orig_gpr3));
- BUILD_BUG_ON(offsetof(struct pt_regs, ctr) !=
- offsetof(struct user_pt_regs, ctr));
- BUILD_BUG_ON(offsetof(struct pt_regs, link) !=
- offsetof(struct user_pt_regs, link));
- BUILD_BUG_ON(offsetof(struct pt_regs, xer) !=
- offsetof(struct user_pt_regs, xer));
- BUILD_BUG_ON(offsetof(struct pt_regs, ccr) !=
- offsetof(struct user_pt_regs, ccr));
-#ifdef __powerpc64__
- BUILD_BUG_ON(offsetof(struct pt_regs, softe) !=
- offsetof(struct user_pt_regs, softe));
-#else
- BUILD_BUG_ON(offsetof(struct pt_regs, mq) !=
- offsetof(struct user_pt_regs, mq));
-#endif
- BUILD_BUG_ON(offsetof(struct pt_regs, trap) !=
- offsetof(struct user_pt_regs, trap));
- BUILD_BUG_ON(offsetof(struct pt_regs, dar) !=
- offsetof(struct user_pt_regs, dar));
- BUILD_BUG_ON(offsetof(struct pt_regs, dsisr) !=
- offsetof(struct user_pt_regs, dsisr));
- BUILD_BUG_ON(offsetof(struct pt_regs, result) !=
- offsetof(struct user_pt_regs, result));
-
- BUILD_BUG_ON(sizeof(struct user_pt_regs) > sizeof(struct pt_regs));
-
- // Now check that the pt_regs offsets match the uapi #defines
- #define CHECK_REG(_pt, _reg) \
- BUILD_BUG_ON(_pt != (offsetof(struct user_pt_regs, _reg) / \
- sizeof(unsigned long)));
-
- CHECK_REG(PT_R0, gpr[0]);
- CHECK_REG(PT_R1, gpr[1]);
- CHECK_REG(PT_R2, gpr[2]);
- CHECK_REG(PT_R3, gpr[3]);
- CHECK_REG(PT_R4, gpr[4]);
- CHECK_REG(PT_R5, gpr[5]);
- CHECK_REG(PT_R6, gpr[6]);
- CHECK_REG(PT_R7, gpr[7]);
- CHECK_REG(PT_R8, gpr[8]);
- CHECK_REG(PT_R9, gpr[9]);
- CHECK_REG(PT_R10, gpr[10]);
- CHECK_REG(PT_R11, gpr[11]);
- CHECK_REG(PT_R12, gpr[12]);
- CHECK_REG(PT_R13, gpr[13]);
- CHECK_REG(PT_R14, gpr[14]);
- CHECK_REG(PT_R15, gpr[15]);
- CHECK_REG(PT_R16, gpr[16]);
- CHECK_REG(PT_R17, gpr[17]);
- CHECK_REG(PT_R18, gpr[18]);
- CHECK_REG(PT_R19, gpr[19]);
- CHECK_REG(PT_R20, gpr[20]);
- CHECK_REG(PT_R21, gpr[21]);
- CHECK_REG(PT_R22, gpr[22]);
- CHECK_REG(PT_R23, gpr[23]);
- CHECK_REG(PT_R24, gpr[24]);
- CHECK_REG(PT_R25, gpr[25]);
- CHECK_REG(PT_R26, gpr[26]);
- CHECK_REG(PT_R27, gpr[27]);
- CHECK_REG(PT_R28, gpr[28]);
- CHECK_REG(PT_R29, gpr[29]);
- CHECK_REG(PT_R30, gpr[30]);
- CHECK_REG(PT_R31, gpr[31]);
- CHECK_REG(PT_NIP, nip);
- CHECK_REG(PT_MSR, msr);
- CHECK_REG(PT_ORIG_R3, orig_gpr3);
- CHECK_REG(PT_CTR, ctr);
- CHECK_REG(PT_LNK, link);
- CHECK_REG(PT_XER, xer);
- CHECK_REG(PT_CCR, ccr);
-#ifdef CONFIG_PPC64
- CHECK_REG(PT_SOFTE, softe);
-#else
- CHECK_REG(PT_MQ, mq);
-#endif
- CHECK_REG(PT_TRAP, trap);
- CHECK_REG(PT_DAR, dar);
- CHECK_REG(PT_DSISR, dsisr);
- CHECK_REG(PT_RESULT, result);
- #undef CHECK_REG
-
- BUILD_BUG_ON(PT_REGS_COUNT != sizeof(struct user_pt_regs) / sizeof(unsigned long));
-
- /*
- * PT_DSCR isn't a real reg, but it's important that it doesn't overlap the
- * real registers.
- */
- BUILD_BUG_ON(PT_DSCR < sizeof(struct user_pt_regs) / sizeof(unsigned long));
-}
diff --git a/arch/powerpc/kernel/ptrace/Makefile b/arch/powerpc/kernel/ptrace/Makefile
new file mode 100644
index 000000000000..77abd1a5a508
--- /dev/null
+++ b/arch/powerpc/kernel/ptrace/Makefile
@@ -0,0 +1,21 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the linux kernel.
+#
+
+CFLAGS_ptrace-view.o += -DUTS_MACHINE='"$(UTS_MACHINE)"'
+
+obj-y += ptrace.o ptrace-view.o
+obj-y += ptrace-fpu.o
+obj-$(CONFIG_COMPAT) += ptrace32.o
+obj-$(CONFIG_VSX) += ptrace-vsx.o
+ifneq ($(CONFIG_VSX),y)
+obj-y += ptrace-novsx.o
+endif
+obj-$(CONFIG_ALTIVEC) += ptrace-altivec.o
+obj-$(CONFIG_SPE) += ptrace-spe.o
+obj-$(CONFIG_PPC_TRANSACTIONAL_MEM) += ptrace-tm.o
+obj-$(CONFIG_PPC_ADV_DEBUG_REGS) += ptrace-adv.o
+ifneq ($(CONFIG_PPC_ADV_DEBUG_REGS),y)
+obj-y += ptrace-noadv.o
+endif
diff --git a/arch/powerpc/kernel/ptrace/ptrace-adv.c b/arch/powerpc/kernel/ptrace/ptrace-adv.c
new file mode 100644
index 000000000000..399f5d94a3df
--- /dev/null
+++ b/arch/powerpc/kernel/ptrace/ptrace-adv.c
@@ -0,0 +1,494 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/regset.h>
+#include <linux/hw_breakpoint.h>
+
+#include "ptrace-decl.h"
+
+void user_enable_single_step(struct task_struct *task)
+{
+ struct pt_regs *regs = task->thread.regs;
+
+ if (regs != NULL) {
+ task->thread.debug.dbcr0 &= ~DBCR0_BT;
+ task->thread.debug.dbcr0 |= DBCR0_IDM | DBCR0_IC;
+ regs_set_return_msr(regs, regs->msr | MSR_DE);
+ }
+ set_tsk_thread_flag(task, TIF_SINGLESTEP);
+}
+
+void user_enable_block_step(struct task_struct *task)
+{
+ struct pt_regs *regs = task->thread.regs;
+
+ if (regs != NULL) {
+ task->thread.debug.dbcr0 &= ~DBCR0_IC;
+ task->thread.debug.dbcr0 = DBCR0_IDM | DBCR0_BT;
+ regs_set_return_msr(regs, regs->msr | MSR_DE);
+ }
+ set_tsk_thread_flag(task, TIF_SINGLESTEP);
+}
+
+void user_disable_single_step(struct task_struct *task)
+{
+ struct pt_regs *regs = task->thread.regs;
+
+ if (regs != NULL) {
+ /*
+ * The logic to disable single stepping should be as
+ * simple as turning off the Instruction Complete flag.
+ * And, after doing so, if all debug flags are off, turn
+ * off DBCR0(IDM) and MSR(DE) .... Torez
+ */
+ task->thread.debug.dbcr0 &= ~(DBCR0_IC | DBCR0_BT);
+ /*
+ * Test to see if any of the DBCR_ACTIVE_EVENTS bits are set.
+ */
+ if (!DBCR_ACTIVE_EVENTS(task->thread.debug.dbcr0,
+ task->thread.debug.dbcr1)) {
+ /*
+ * All debug events were off.....
+ */
+ task->thread.debug.dbcr0 &= ~DBCR0_IDM;
+ regs_set_return_msr(regs, regs->msr & ~MSR_DE);
+ }
+ }
+ clear_tsk_thread_flag(task, TIF_SINGLESTEP);
+}
+
+void ppc_gethwdinfo(struct ppc_debug_info *dbginfo)
+{
+ dbginfo->version = 1;
+ dbginfo->num_instruction_bps = CONFIG_PPC_ADV_DEBUG_IACS;
+ dbginfo->num_data_bps = CONFIG_PPC_ADV_DEBUG_DACS;
+ dbginfo->num_condition_regs = CONFIG_PPC_ADV_DEBUG_DVCS;
+ dbginfo->data_bp_alignment = 4;
+ dbginfo->sizeof_condition = 4;
+ dbginfo->features = PPC_DEBUG_FEATURE_INSN_BP_RANGE |
+ PPC_DEBUG_FEATURE_INSN_BP_MASK;
+ if (IS_ENABLED(CONFIG_PPC_ADV_DEBUG_DAC_RANGE))
+ dbginfo->features |= PPC_DEBUG_FEATURE_DATA_BP_RANGE |
+ PPC_DEBUG_FEATURE_DATA_BP_MASK;
+}
+
+int ptrace_get_debugreg(struct task_struct *child, unsigned long addr,
+ unsigned long __user *datalp)
+{
+ /* We only support one DABR and no IABRS at the moment */
+ if (addr > 0)
+ return -EINVAL;
+ return put_user(child->thread.debug.dac1, datalp);
+}
+
+int ptrace_set_debugreg(struct task_struct *task, unsigned long addr, unsigned long data)
+{
+ struct pt_regs *regs = task->thread.regs;
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+ int ret;
+ struct thread_struct *thread = &task->thread;
+ struct perf_event *bp;
+ struct perf_event_attr attr;
+#endif /* CONFIG_HAVE_HW_BREAKPOINT */
+
+ /* For ppc64 we support one DABR and no IABR's at the moment (ppc64).
+ * For embedded processors we support one DAC and no IAC's at the
+ * moment.
+ */
+ if (addr > 0)
+ return -EINVAL;
+
+ /* The bottom 3 bits in dabr are flags */
+ if ((data & ~0x7UL) >= TASK_SIZE)
+ return -EIO;
+
+ /* As described above, it was assumed 3 bits were passed with the data
+ * address, but we will assume only the mode bits will be passed
+ * as to not cause alignment restrictions for DAC-based processors.
+ */
+
+ /* DAC's hold the whole address without any mode flags */
+ task->thread.debug.dac1 = data & ~0x3UL;
+
+ if (task->thread.debug.dac1 == 0) {
+ dbcr_dac(task) &= ~(DBCR_DAC1R | DBCR_DAC1W);
+ if (!DBCR_ACTIVE_EVENTS(task->thread.debug.dbcr0,
+ task->thread.debug.dbcr1)) {
+ regs_set_return_msr(regs, regs->msr & ~MSR_DE);
+ task->thread.debug.dbcr0 &= ~DBCR0_IDM;
+ }
+ return 0;
+ }
+
+ /* Read or Write bits must be set */
+
+ if (!(data & 0x3UL))
+ return -EINVAL;
+
+ /* Set the Internal Debugging flag (IDM bit 1) for the DBCR0 register */
+ task->thread.debug.dbcr0 |= DBCR0_IDM;
+
+ /* Check for write and read flags and set DBCR0 accordingly */
+ dbcr_dac(task) &= ~(DBCR_DAC1R | DBCR_DAC1W);
+ if (data & 0x1UL)
+ dbcr_dac(task) |= DBCR_DAC1R;
+ if (data & 0x2UL)
+ dbcr_dac(task) |= DBCR_DAC1W;
+ regs_set_return_msr(regs, regs->msr | MSR_DE);
+ return 0;
+}
+
+static long set_instruction_bp(struct task_struct *child,
+ struct ppc_hw_breakpoint *bp_info)
+{
+ int slot;
+ int slot1_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC1) != 0);
+ int slot2_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC2) != 0);
+ int slot3_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC3) != 0);
+ int slot4_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC4) != 0);
+
+ if (dbcr_iac_range(child) & DBCR_IAC12MODE)
+ slot2_in_use = 1;
+ if (dbcr_iac_range(child) & DBCR_IAC34MODE)
+ slot4_in_use = 1;
+
+ if (bp_info->addr >= TASK_SIZE)
+ return -EIO;
+
+ if (bp_info->addr_mode != PPC_BREAKPOINT_MODE_EXACT) {
+ /* Make sure range is valid. */
+ if (bp_info->addr2 >= TASK_SIZE)
+ return -EIO;
+
+	/* We need a pair of IAC registers */
+ if (!slot1_in_use && !slot2_in_use) {
+ slot = 1;
+ child->thread.debug.iac1 = bp_info->addr;
+ child->thread.debug.iac2 = bp_info->addr2;
+ child->thread.debug.dbcr0 |= DBCR0_IAC1;
+ if (bp_info->addr_mode ==
+ PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE)
+ dbcr_iac_range(child) |= DBCR_IAC12X;
+ else
+ dbcr_iac_range(child) |= DBCR_IAC12I;
+#if CONFIG_PPC_ADV_DEBUG_IACS > 2
+ } else if ((!slot3_in_use) && (!slot4_in_use)) {
+ slot = 3;
+ child->thread.debug.iac3 = bp_info->addr;
+ child->thread.debug.iac4 = bp_info->addr2;
+ child->thread.debug.dbcr0 |= DBCR0_IAC3;
+ if (bp_info->addr_mode ==
+ PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE)
+ dbcr_iac_range(child) |= DBCR_IAC34X;
+ else
+ dbcr_iac_range(child) |= DBCR_IAC34I;
+#endif
+ } else {
+ return -ENOSPC;
+ }
+ } else {
+ /* We only need one. If possible leave a pair free in
+ * case a range is needed later
+ */
+ if (!slot1_in_use) {
+ /*
+ * Don't use iac1 if iac1-iac2 are free and either
+ * iac3 or iac4 (but not both) are free
+ */
+ if (slot2_in_use || slot3_in_use == slot4_in_use) {
+ slot = 1;
+ child->thread.debug.iac1 = bp_info->addr;
+ child->thread.debug.dbcr0 |= DBCR0_IAC1;
+ goto out;
+ }
+ }
+ if (!slot2_in_use) {
+ slot = 2;
+ child->thread.debug.iac2 = bp_info->addr;
+ child->thread.debug.dbcr0 |= DBCR0_IAC2;
+#if CONFIG_PPC_ADV_DEBUG_IACS > 2
+ } else if (!slot3_in_use) {
+ slot = 3;
+ child->thread.debug.iac3 = bp_info->addr;
+ child->thread.debug.dbcr0 |= DBCR0_IAC3;
+ } else if (!slot4_in_use) {
+ slot = 4;
+ child->thread.debug.iac4 = bp_info->addr;
+ child->thread.debug.dbcr0 |= DBCR0_IAC4;
+#endif
+ } else {
+ return -ENOSPC;
+ }
+ }
+out:
+ child->thread.debug.dbcr0 |= DBCR0_IDM;
+ regs_set_return_msr(child->thread.regs, child->thread.regs->msr | MSR_DE);
+
+ return slot;
+}
+
+static int del_instruction_bp(struct task_struct *child, int slot)
+{
+ switch (slot) {
+ case 1:
+ if ((child->thread.debug.dbcr0 & DBCR0_IAC1) == 0)
+ return -ENOENT;
+
+ if (dbcr_iac_range(child) & DBCR_IAC12MODE) {
+ /* address range - clear slots 1 & 2 */
+ child->thread.debug.iac2 = 0;
+ dbcr_iac_range(child) &= ~DBCR_IAC12MODE;
+ }
+ child->thread.debug.iac1 = 0;
+ child->thread.debug.dbcr0 &= ~DBCR0_IAC1;
+ break;
+ case 2:
+ if ((child->thread.debug.dbcr0 & DBCR0_IAC2) == 0)
+ return -ENOENT;
+
+ if (dbcr_iac_range(child) & DBCR_IAC12MODE)
+ /* used in a range */
+ return -EINVAL;
+ child->thread.debug.iac2 = 0;
+ child->thread.debug.dbcr0 &= ~DBCR0_IAC2;
+ break;
+#if CONFIG_PPC_ADV_DEBUG_IACS > 2
+ case 3:
+ if ((child->thread.debug.dbcr0 & DBCR0_IAC3) == 0)
+ return -ENOENT;
+
+ if (dbcr_iac_range(child) & DBCR_IAC34MODE) {
+ /* address range - clear slots 3 & 4 */
+ child->thread.debug.iac4 = 0;
+ dbcr_iac_range(child) &= ~DBCR_IAC34MODE;
+ }
+ child->thread.debug.iac3 = 0;
+ child->thread.debug.dbcr0 &= ~DBCR0_IAC3;
+ break;
+ case 4:
+ if ((child->thread.debug.dbcr0 & DBCR0_IAC4) == 0)
+ return -ENOENT;
+
+ if (dbcr_iac_range(child) & DBCR_IAC34MODE)
+ /* Used in a range */
+ return -EINVAL;
+ child->thread.debug.iac4 = 0;
+ child->thread.debug.dbcr0 &= ~DBCR0_IAC4;
+ break;
+#endif
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int set_dac(struct task_struct *child, struct ppc_hw_breakpoint *bp_info)
+{
+ int byte_enable =
+ (bp_info->condition_mode >> PPC_BREAKPOINT_CONDITION_BE_SHIFT)
+ & 0xf;
+ int condition_mode =
+ bp_info->condition_mode & PPC_BREAKPOINT_CONDITION_MODE;
+ int slot;
+
+ if (byte_enable && condition_mode == 0)
+ return -EINVAL;
+
+ if (bp_info->addr >= TASK_SIZE)
+ return -EIO;
+
+ if ((dbcr_dac(child) & (DBCR_DAC1R | DBCR_DAC1W)) == 0) {
+ slot = 1;
+ if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
+ dbcr_dac(child) |= DBCR_DAC1R;
+ if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
+ dbcr_dac(child) |= DBCR_DAC1W;
+ child->thread.debug.dac1 = (unsigned long)bp_info->addr;
+#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
+ if (byte_enable) {
+ child->thread.debug.dvc1 =
+ (unsigned long)bp_info->condition_value;
+ child->thread.debug.dbcr2 |=
+ ((byte_enable << DBCR2_DVC1BE_SHIFT) |
+ (condition_mode << DBCR2_DVC1M_SHIFT));
+ }
+#endif
+#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
+ } else if (child->thread.debug.dbcr2 & DBCR2_DAC12MODE) {
+ /* Both dac1 and dac2 are part of a range */
+ return -ENOSPC;
+#endif
+ } else if ((dbcr_dac(child) & (DBCR_DAC2R | DBCR_DAC2W)) == 0) {
+ slot = 2;
+ if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
+ dbcr_dac(child) |= DBCR_DAC2R;
+ if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
+ dbcr_dac(child) |= DBCR_DAC2W;
+ child->thread.debug.dac2 = (unsigned long)bp_info->addr;
+#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
+ if (byte_enable) {
+ child->thread.debug.dvc2 =
+ (unsigned long)bp_info->condition_value;
+ child->thread.debug.dbcr2 |=
+ ((byte_enable << DBCR2_DVC2BE_SHIFT) |
+ (condition_mode << DBCR2_DVC2M_SHIFT));
+ }
+#endif
+ } else {
+ return -ENOSPC;
+ }
+ child->thread.debug.dbcr0 |= DBCR0_IDM;
+ regs_set_return_msr(child->thread.regs, child->thread.regs->msr | MSR_DE);
+
+ return slot + 4;
+}
+
+static int del_dac(struct task_struct *child, int slot)
+{
+ if (slot == 1) {
+ if ((dbcr_dac(child) & (DBCR_DAC1R | DBCR_DAC1W)) == 0)
+ return -ENOENT;
+
+ child->thread.debug.dac1 = 0;
+ dbcr_dac(child) &= ~(DBCR_DAC1R | DBCR_DAC1W);
+#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
+ if (child->thread.debug.dbcr2 & DBCR2_DAC12MODE) {
+ child->thread.debug.dac2 = 0;
+ child->thread.debug.dbcr2 &= ~DBCR2_DAC12MODE;
+ }
+ child->thread.debug.dbcr2 &= ~(DBCR2_DVC1M | DBCR2_DVC1BE);
+#endif
+#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
+ child->thread.debug.dvc1 = 0;
+#endif
+ } else if (slot == 2) {
+ if ((dbcr_dac(child) & (DBCR_DAC2R | DBCR_DAC2W)) == 0)
+ return -ENOENT;
+
+#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
+ if (child->thread.debug.dbcr2 & DBCR2_DAC12MODE)
+ /* Part of a range */
+ return -EINVAL;
+ child->thread.debug.dbcr2 &= ~(DBCR2_DVC2M | DBCR2_DVC2BE);
+#endif
+#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
+ child->thread.debug.dvc2 = 0;
+#endif
+ child->thread.debug.dac2 = 0;
+ dbcr_dac(child) &= ~(DBCR_DAC2R | DBCR_DAC2W);
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
+static int set_dac_range(struct task_struct *child,
+ struct ppc_hw_breakpoint *bp_info)
+{
+ int mode = bp_info->addr_mode & PPC_BREAKPOINT_MODE_MASK;
+
+ /* We don't allow range watchpoints to be used with DVC */
+ if (bp_info->condition_mode)
+ return -EINVAL;
+
+ /*
+ * Best effort to verify the address range. The user/supervisor bits
+ * prevent trapping in kernel space, but let's fail on an obvious bad
+ * range. The simple test on the mask is not fool-proof, and any
+ * exclusive range will spill over into kernel space.
+ */
+ if (bp_info->addr >= TASK_SIZE)
+ return -EIO;
+ if (mode == PPC_BREAKPOINT_MODE_MASK) {
+ /*
+ * dac2 is a bitmask. Don't allow a mask that makes a
+ * kernel space address from a valid dac1 value
+ */
+ if (~((unsigned long)bp_info->addr2) >= TASK_SIZE)
+ return -EIO;
+ } else {
+ /*
+ * For range breakpoints, addr2 must also be a valid address
+ */
+ if (bp_info->addr2 >= TASK_SIZE)
+ return -EIO;
+ }
+
+ if (child->thread.debug.dbcr0 &
+ (DBCR0_DAC1R | DBCR0_DAC1W | DBCR0_DAC2R | DBCR0_DAC2W))
+ return -ENOSPC;
+
+ if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
+ child->thread.debug.dbcr0 |= (DBCR0_DAC1R | DBCR0_IDM);
+ if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
+ child->thread.debug.dbcr0 |= (DBCR0_DAC1W | DBCR0_IDM);
+ child->thread.debug.dac1 = bp_info->addr;
+ child->thread.debug.dac2 = bp_info->addr2;
+ if (mode == PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE)
+ child->thread.debug.dbcr2 |= DBCR2_DAC12M;
+ else if (mode == PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE)
+ child->thread.debug.dbcr2 |= DBCR2_DAC12MX;
+ else /* PPC_BREAKPOINT_MODE_MASK */
+ child->thread.debug.dbcr2 |= DBCR2_DAC12MM;
+ regs_set_return_msr(child->thread.regs, child->thread.regs->msr | MSR_DE);
+
+ return 5;
+}
+#endif /* CONFIG_PPC_ADV_DEBUG_DAC_RANGE */
+
+long ppc_set_hwdebug(struct task_struct *child, struct ppc_hw_breakpoint *bp_info)
+{
+ if (bp_info->version != 1)
+ return -ENOTSUPP;
+ /*
+ * Check for invalid flags and combinations
+ */
+ if (bp_info->trigger_type == 0 ||
+ (bp_info->trigger_type & ~(PPC_BREAKPOINT_TRIGGER_EXECUTE |
+ PPC_BREAKPOINT_TRIGGER_RW)) ||
+ (bp_info->addr_mode & ~PPC_BREAKPOINT_MODE_MASK) ||
+ (bp_info->condition_mode &
+ ~(PPC_BREAKPOINT_CONDITION_MODE |
+ PPC_BREAKPOINT_CONDITION_BE_ALL)))
+ return -EINVAL;
+#if CONFIG_PPC_ADV_DEBUG_DVCS == 0
+ if (bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE)
+ return -EINVAL;
+#endif
+
+ if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_EXECUTE) {
+ if (bp_info->trigger_type != PPC_BREAKPOINT_TRIGGER_EXECUTE ||
+ bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE)
+ return -EINVAL;
+ return set_instruction_bp(child, bp_info);
+ }
+ if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_EXACT)
+ return set_dac(child, bp_info);
+
+#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
+ return set_dac_range(child, bp_info);
+#else
+ return -EINVAL;
+#endif
+}
+
+long ppc_del_hwdebug(struct task_struct *child, long data)
+{
+ int rc;
+
+ if (data <= 4)
+ rc = del_instruction_bp(child, (int)data);
+ else
+ rc = del_dac(child, (int)data - 4);
+
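+ /* If no debug events remain, drop internal debug mode and MSR_DE */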
+ if (!rc) {
+ if (!DBCR_ACTIVE_EVENTS(child->thread.debug.dbcr0,
+ child->thread.debug.dbcr1)) {
+ child->thread.debug.dbcr0 &= ~DBCR0_IDM;
+ regs_set_return_msr(child->thread.regs,
+ child->thread.regs->msr & ~MSR_DE);
+ }
+ }
+ return rc;
+}
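+
+/*
+ * Hypothetical userspace sketch of driving the two entry points above via
+ * ptrace (pid and watch_addr are placeholders, error handling omitted):
+ *
+ *   struct ppc_hw_breakpoint bp = {
+ *           .version        = 1,
+ *           .trigger_type   = PPC_BREAKPOINT_TRIGGER_WRITE,
+ *           .addr_mode      = PPC_BREAKPOINT_MODE_EXACT,
+ *           .condition_mode = PPC_BREAKPOINT_CONDITION_NONE,
+ *           .addr           = (__u64)watch_addr,
+ *   };
+ *   int handle = ptrace(PPC_PTRACE_SETHWDEBUG, pid, 0, &bp);
+ *   (and when the watchpoint is no longer needed)
+ *   ptrace(PPC_PTRACE_DELHWDEBUG, pid, 0, handle);
+ */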
diff --git a/arch/powerpc/kernel/ptrace/ptrace-altivec.c b/arch/powerpc/kernel/ptrace/ptrace-altivec.c
new file mode 100644
index 000000000000..0d9bc4bd4972
--- /dev/null
+++ b/arch/powerpc/kernel/ptrace/ptrace-altivec.c
@@ -0,0 +1,115 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/regset.h>
+#include <linux/elf.h>
+
+#include <asm/switch_to.h>
+
+#include "ptrace-decl.h"
+
+/*
+ * Get/set all the altivec registers vr0..vr31, vscr, vrsave, in one go.
+ * The transfer totals 34 quadwords. Quadwords 0-31 contain the
+ * corresponding vector registers. Quadword 32 contains the vscr as the
+ * last word (offset 12) within that quadword. Quadword 33 contains the
+ * vrsave as the first word (offset 0) within the quadword.
+ *
+ * This definition of the VMX state is compatible with the current PPC32
+ * ptrace interface. This allows signal handling and ptrace to use the
+ * same structures. This also simplifies the implementation of a bi-arch
+ * (combined 32- and 64-bit) gdb.
+ */
+
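+/*
+ * Hypothetical userspace sketch of fetching this regset in one go
+ * (pid is a placeholder; NT_PPC_VMX selects the AltiVec regset):
+ *
+ *   unsigned char buf[34 * 16];
+ *   struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
+ *   ptrace(PTRACE_GETREGSET, pid, NT_PPC_VMX, &iov);
+ */
+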
+int vr_active(struct task_struct *target, const struct user_regset *regset)
+{
+ flush_altivec_to_thread(target);
+ return target->thread.used_vr ? regset->n : 0;
+}
+
+/*
+ * Regardless of transactions, 'vr_state' holds the current running
+ * value of all the VMX registers and 'ckvr_state' holds the last
+ * checkpointed value of all the VMX registers for the current
+ * transaction to fall back on in case it aborts.
+ *
+ * Userspace interface buffer layout:
+ *
+ * struct data {
+ * vector128 vr[32];
+ * vector128 vscr;
+ * vector128 vrsave;
+ * };
+ */
+int vr_get(struct task_struct *target, const struct user_regset *regset,
+ struct membuf to)
+{
+ union {
+ elf_vrreg_t reg;
+ u32 word;
+ } vrsave;
+
+ flush_altivec_to_thread(target);
+
+ BUILD_BUG_ON(offsetof(struct thread_vr_state, vscr) !=
+ offsetof(struct thread_vr_state, vr[32]));
+
+ membuf_write(&to, &target->thread.vr_state, 33 * sizeof(vector128));
+ /*
+ * Copy out only the low-order word of vrsave.
+ */
+ memset(&vrsave, 0, sizeof(vrsave));
+ vrsave.word = target->thread.vrsave;
+ return membuf_write(&to, &vrsave, sizeof(vrsave));
+}
+
+/*
+ * Regardless of transactions, 'vr_state' holds the current running
+ * value of all the VMX registers and 'ckvr_state' holds the last
+ * checkpointed value of all the VMX registers for the current
+ * transaction to fall back on in case it aborts.
+ *
+ * Userspace interface buffer layout:
+ *
+ * struct data {
+ * vector128 vr[32];
+ * vector128 vscr;
+ * vector128 vrsave;
+ * };
+ */
+int vr_set(struct task_struct *target, const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ int ret;
+
+ flush_altivec_to_thread(target);
+
+ BUILD_BUG_ON(offsetof(struct thread_vr_state, vscr) !=
+ offsetof(struct thread_vr_state, vr[32]));
+
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &target->thread.vr_state, 0,
+ 33 * sizeof(vector128));
+ if (!ret && count > 0) {
+ /*
+ * We use only the first word of vrsave.
+ */
+ int start, end;
+ union {
+ elf_vrreg_t reg;
+ u32 word;
+ } vrsave;
+ memset(&vrsave, 0, sizeof(vrsave));
+
+ vrsave.word = target->thread.vrsave;
+
+ start = 33 * sizeof(vector128);
+ end = start + sizeof(vrsave);
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &vrsave,
+ start, end);
+ if (!ret)
+ target->thread.vrsave = vrsave.word;
+ }
+
+ return ret;
+}
diff --git a/arch/powerpc/kernel/ptrace/ptrace-decl.h b/arch/powerpc/kernel/ptrace/ptrace-decl.h
new file mode 100644
index 000000000000..eafe5f0f6289
--- /dev/null
+++ b/arch/powerpc/kernel/ptrace/ptrace-decl.h
@@ -0,0 +1,177 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+/*
+ * Set of msr bits that gdb can change on behalf of a process.
+ */
+#ifdef CONFIG_PPC_ADV_DEBUG_REGS
+#define MSR_DEBUGCHANGE 0
+#else
+#define MSR_DEBUGCHANGE (MSR_SE | MSR_BE)
+#endif
+
+/*
+ * Max register writeable via put_reg
+ */
+#ifdef CONFIG_PPC32
+#define PT_MAX_PUT_REG PT_MQ
+#else
+#define PT_MAX_PUT_REG PT_CCR
+#endif
+
+#define TVSO(f) (offsetof(struct thread_vr_state, f))
+#define TFSO(f) (offsetof(struct thread_fp_state, f))
+#define TSO(f) (offsetof(struct thread_struct, f))
+
+/*
+ * These are our native regset flavors.
+ */
+enum powerpc_regset {
+ REGSET_GPR,
+ REGSET_FPR,
+#ifdef CONFIG_ALTIVEC
+ REGSET_VMX,
+#endif
+#ifdef CONFIG_VSX
+ REGSET_VSX,
+#endif
+#ifdef CONFIG_SPE
+ REGSET_SPE,
+#endif
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ REGSET_TM_CGPR, /* TM checkpointed GPR registers */
+ REGSET_TM_CFPR, /* TM checkpointed FPR registers */
+ REGSET_TM_CVMX, /* TM checkpointed VMX registers */
+ REGSET_TM_CVSX, /* TM checkpointed VSX registers */
+ REGSET_TM_SPR, /* TM specific SPR registers */
+ REGSET_TM_CTAR, /* TM checkpointed TAR register */
+ REGSET_TM_CPPR, /* TM checkpointed PPR register */
+ REGSET_TM_CDSCR, /* TM checkpointed DSCR register */
+#endif
+#ifdef CONFIG_PPC64
+ REGSET_PPR, /* PPR register */
+ REGSET_DSCR, /* DSCR register */
+#endif
+#ifdef CONFIG_PPC_BOOK3S_64
+ REGSET_TAR, /* TAR register */
+ REGSET_EBB, /* EBB registers */
+ REGSET_PMR, /* Performance Monitor Registers */
+#endif
+#ifdef CONFIG_PPC_MEM_KEYS
+ REGSET_PKEY, /* AMR register */
+#endif
+};
+
+/* ptrace-(no)vsx */
+
+user_regset_get2_fn fpr_get;
+int fpr_set(struct task_struct *target, const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf);
+
+/* ptrace-vsx */
+
+int vsr_active(struct task_struct *target, const struct user_regset *regset);
+user_regset_get2_fn vsr_get;
+int vsr_set(struct task_struct *target, const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf);
+
+/* ptrace-altivec */
+
+int vr_active(struct task_struct *target, const struct user_regset *regset);
+user_regset_get2_fn vr_get;
+int vr_set(struct task_struct *target, const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf);
+
+/* ptrace-spe */
+
+int evr_active(struct task_struct *target, const struct user_regset *regset);
+user_regset_get2_fn evr_get;
+int evr_set(struct task_struct *target, const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf);
+
+/* ptrace */
+
+int gpr32_get_common(struct task_struct *target,
+ const struct user_regset *regset,
+ struct membuf to,
+ unsigned long *regs);
+int gpr32_set_common(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf,
+ unsigned long *regs);
+
+/* ptrace-tm */
+
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+void flush_tmregs_to_thread(struct task_struct *tsk);
+#else
+static inline void flush_tmregs_to_thread(struct task_struct *tsk) { }
+#endif
+
+int tm_cgpr_active(struct task_struct *target, const struct user_regset *regset);
+user_regset_get2_fn tm_cgpr_get;
+int tm_cgpr_set(struct task_struct *target, const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf);
+int tm_cfpr_active(struct task_struct *target, const struct user_regset *regset);
+user_regset_get2_fn tm_cfpr_get;
+int tm_cfpr_set(struct task_struct *target, const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf);
+int tm_cvmx_active(struct task_struct *target, const struct user_regset *regset);
+user_regset_get2_fn tm_cvmx_get;
+int tm_cvmx_set(struct task_struct *target, const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf);
+int tm_cvsx_active(struct task_struct *target, const struct user_regset *regset);
+user_regset_get2_fn tm_cvsx_get;
+int tm_cvsx_set(struct task_struct *target, const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf);
+int tm_spr_active(struct task_struct *target, const struct user_regset *regset);
+user_regset_get2_fn tm_spr_get;
+int tm_spr_set(struct task_struct *target, const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf);
+int tm_tar_active(struct task_struct *target, const struct user_regset *regset);
+user_regset_get2_fn tm_tar_get;
+int tm_tar_set(struct task_struct *target, const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf);
+int tm_ppr_active(struct task_struct *target, const struct user_regset *regset);
+user_regset_get2_fn tm_ppr_get;
+int tm_ppr_set(struct task_struct *target, const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf);
+int tm_dscr_active(struct task_struct *target, const struct user_regset *regset);
+user_regset_get2_fn tm_dscr_get;
+int tm_dscr_set(struct task_struct *target, const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf);
+user_regset_get2_fn tm_cgpr32_get;
+int tm_cgpr32_set(struct task_struct *target, const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf);
+
+/* ptrace-view */
+
+int ptrace_get_reg(struct task_struct *task, int regno, unsigned long *data);
+int ptrace_put_reg(struct task_struct *task, int regno, unsigned long data);
+
+extern const struct user_regset_view user_ppc_native_view;
+
+/* ptrace-fpu */
+int ptrace_get_fpr(struct task_struct *child, int index, unsigned long *data);
+int ptrace_put_fpr(struct task_struct *child, int index, unsigned long data);
+
+/* ptrace-(no)adv */
+void ppc_gethwdinfo(struct ppc_debug_info *dbginfo);
+int ptrace_get_debugreg(struct task_struct *child, unsigned long addr,
+ unsigned long __user *datalp);
+int ptrace_set_debugreg(struct task_struct *task, unsigned long addr, unsigned long data);
+long ppc_set_hwdebug(struct task_struct *child, struct ppc_hw_breakpoint *bp_info);
+long ppc_del_hwdebug(struct task_struct *child, long data);
diff --git a/arch/powerpc/kernel/ptrace/ptrace-fpu.c b/arch/powerpc/kernel/ptrace/ptrace-fpu.c
new file mode 100644
index 000000000000..09c49632bfe5
--- /dev/null
+++ b/arch/powerpc/kernel/ptrace/ptrace-fpu.c
@@ -0,0 +1,58 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/regset.h>
+
+#include <asm/switch_to.h>
+
+#include "ptrace-decl.h"
+
+int ptrace_get_fpr(struct task_struct *child, int index, unsigned long *data)
+{
+#ifdef CONFIG_PPC_FPU_REGS
+ unsigned int fpidx = index - PT_FPR0;
+#endif
+
+ if (index > PT_FPSCR)
+ return -EIO;
+
+#ifdef CONFIG_PPC_FPU_REGS
+ flush_fp_to_thread(child);
+ if (fpidx < (PT_FPSCR - PT_FPR0)) {
+ if (IS_ENABLED(CONFIG_PPC32))
+ // On 32-bit the index we are passed refers to 32-bit words
+ *data = ((u32 *)child->thread.fp_state.fpr)[fpidx];
+ else
+ memcpy(data, &child->thread.TS_FPR(fpidx), sizeof(long));
+ } else
+ *data = child->thread.fp_state.fpscr;
+#else
+ *data = 0;
+#endif
+
+ return 0;
+}
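+
+/*
+ * Hypothetical userspace sketch (64-bit; pid and n are placeholders):
+ * individual FPRs are reached through PTRACE_PEEKUSER offsets starting
+ * at PT_FPR0, e.g.
+ *
+ *   long fpr_n = ptrace(PTRACE_PEEKUSER, pid,
+ *                       (PT_FPR0 + n) * sizeof(long), NULL);
+ */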
+
+int ptrace_put_fpr(struct task_struct *child, int index, unsigned long data)
+{
+#ifdef CONFIG_PPC_FPU_REGS
+ unsigned int fpidx = index - PT_FPR0;
+#endif
+
+ if (index > PT_FPSCR)
+ return -EIO;
+
+#ifdef CONFIG_PPC_FPU_REGS
+ flush_fp_to_thread(child);
+ if (fpidx < (PT_FPSCR - PT_FPR0)) {
+ if (IS_ENABLED(CONFIG_PPC32))
+ // On 32-bit the index we are passed refers to 32-bit words
+ ((u32 *)child->thread.fp_state.fpr)[fpidx] = data;
+ else
+ memcpy(&child->thread.TS_FPR(fpidx), &data, sizeof(long));
+ } else
+ child->thread.fp_state.fpscr = data;
+#endif
+
+ return 0;
+}
+
diff --git a/arch/powerpc/kernel/ptrace/ptrace-noadv.c b/arch/powerpc/kernel/ptrace/ptrace-noadv.c
new file mode 100644
index 000000000000..a5dd7d2e2c9e
--- /dev/null
+++ b/arch/powerpc/kernel/ptrace/ptrace-noadv.c
@@ -0,0 +1,298 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/regset.h>
+#include <linux/hw_breakpoint.h>
+
+#include <asm/debug.h>
+
+#include "ptrace-decl.h"
+
+void user_enable_single_step(struct task_struct *task)
+{
+ struct pt_regs *regs = task->thread.regs;
+
+ if (regs != NULL)
+ regs_set_return_msr(regs, (regs->msr & ~MSR_BE) | MSR_SE);
+ set_tsk_thread_flag(task, TIF_SINGLESTEP);
+}
+
+void user_enable_block_step(struct task_struct *task)
+{
+ struct pt_regs *regs = task->thread.regs;
+
+ if (regs != NULL)
+ regs_set_return_msr(regs, (regs->msr & ~MSR_SE) | MSR_BE);
+ set_tsk_thread_flag(task, TIF_SINGLESTEP);
+}
+
+void user_disable_single_step(struct task_struct *task)
+{
+ struct pt_regs *regs = task->thread.regs;
+
+ if (regs != NULL)
+ regs_set_return_msr(regs, regs->msr & ~(MSR_SE | MSR_BE));
+
+ clear_tsk_thread_flag(task, TIF_SINGLESTEP);
+}
+
+void ppc_gethwdinfo(struct ppc_debug_info *dbginfo)
+{
+ dbginfo->version = 1;
+ dbginfo->num_instruction_bps = 0;
+ if (ppc_breakpoint_available())
+ dbginfo->num_data_bps = nr_wp_slots();
+ else
+ dbginfo->num_data_bps = 0;
+ dbginfo->num_condition_regs = 0;
+ dbginfo->data_bp_alignment = sizeof(long);
+ dbginfo->sizeof_condition = 0;
+ if (IS_ENABLED(CONFIG_HAVE_HW_BREAKPOINT)) {
+ dbginfo->features = PPC_DEBUG_FEATURE_DATA_BP_RANGE;
+ if (dawr_enabled())
+ dbginfo->features |= PPC_DEBUG_FEATURE_DATA_BP_DAWR;
+ } else {
+ dbginfo->features = 0;
+ }
+ if (cpu_has_feature(CPU_FTR_ARCH_31))
+ dbginfo->features |= PPC_DEBUG_FEATURE_DATA_BP_ARCH_31;
+}
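+
+/*
+ * Hypothetical userspace sketch (pid is a placeholder): the structure
+ * filled in above is what a debugger sees from
+ *
+ *   struct ppc_debug_info dbginfo;
+ *   ptrace(PPC_PTRACE_GETHWDBGINFO, pid, 0, &dbginfo);
+ */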
+
+int ptrace_get_debugreg(struct task_struct *child, unsigned long addr,
+ unsigned long __user *datalp)
+{
+ unsigned long dabr_fake;
+
+ /* We only support one DABR and no IABRs at the moment */
+ if (addr > 0)
+ return -EINVAL;
+ dabr_fake = ((child->thread.hw_brk[0].address & (~HW_BRK_TYPE_DABR)) |
+ (child->thread.hw_brk[0].type & HW_BRK_TYPE_DABR));
+ return put_user(dabr_fake, datalp);
+}
+
+/*
+ * ptrace_set_debugreg() fakes DABR, and there is only one DABR. So even
+ * if the internal hw supports more than one watchpoint, we support only
+ * one watchpoint with this interface.
+ */
+int ptrace_set_debugreg(struct task_struct *task, unsigned long addr, unsigned long data)
+{
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+ int ret;
+ struct thread_struct *thread = &task->thread;
+ struct perf_event *bp;
+ struct perf_event_attr attr;
+#endif /* CONFIG_HAVE_HW_BREAKPOINT */
+ bool set_bp = true;
+ struct arch_hw_breakpoint hw_brk;
+
+ /* For ppc64 we support one DABR and no IABRs at the moment.
+ * For embedded processors we support one DAC and no IACs at the
+ * moment.
+ */
+ if (addr > 0)
+ return -EINVAL;
+
+ /* The bottom 3 bits in dabr are flags */
+ if ((data & ~0x7UL) >= TASK_SIZE)
+ return -EIO;
+
+ /* For processors using DABR (i.e. 970), the bottom 3 bits are flags.
+ * It was assumed, on previous implementations, that 3 bits were
+ * passed together with the data address, fitting the design of the
+ * DABR register, as follows:
+ *
+ * bit 0: Read flag
+ * bit 1: Write flag
+ * bit 2: Breakpoint translation
+ *
+ * Thus, we use them here in the same way.
+ */
+
+ /* Ensure breakpoint translation bit is set */
+ if (data && !(data & HW_BRK_TYPE_TRANSLATE))
+ return -EIO;
+ hw_brk.address = data & (~HW_BRK_TYPE_DABR);
+ hw_brk.type = (data & HW_BRK_TYPE_DABR) | HW_BRK_TYPE_PRIV_ALL;
+ hw_brk.len = DABR_MAX_LEN;
+ hw_brk.hw_len = DABR_MAX_LEN;
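+ /* data of zero, or with no R/W bits set, means clear the existing breakpoint */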
+ set_bp = (data) && (hw_brk.type & HW_BRK_TYPE_RDWR);
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+ bp = thread->ptrace_bps[0];
+ if (!set_bp) {
+ if (bp) {
+ unregister_hw_breakpoint(bp);
+ thread->ptrace_bps[0] = NULL;
+ }
+ return 0;
+ }
+ if (bp) {
+ attr = bp->attr;
+ attr.bp_addr = hw_brk.address;
+ attr.bp_len = DABR_MAX_LEN;
+ arch_bp_generic_fields(hw_brk.type, &attr.bp_type);
+
+ /* Enable breakpoint */
+ attr.disabled = false;
+
+ ret = modify_user_hw_breakpoint(bp, &attr);
+ if (ret)
+ return ret;
+
+ thread->ptrace_bps[0] = bp;
+ thread->hw_brk[0] = hw_brk;
+ return 0;
+ }
+
+ /* Create a new breakpoint request if one doesn't exist already */
+ hw_breakpoint_init(&attr);
+ attr.bp_addr = hw_brk.address;
+ attr.bp_len = DABR_MAX_LEN;
+ arch_bp_generic_fields(hw_brk.type,
+ &attr.bp_type);
+
+ thread->ptrace_bps[0] = bp = register_user_hw_breakpoint(&attr,
+ ptrace_triggered, NULL, task);
+ if (IS_ERR(bp)) {
+ thread->ptrace_bps[0] = NULL;
+ return PTR_ERR(bp);
+ }
+
+#else /* !CONFIG_HAVE_HW_BREAKPOINT */
+ if (set_bp && (!ppc_breakpoint_available()))
+ return -ENODEV;
+#endif /* CONFIG_HAVE_HW_BREAKPOINT */
+ task->thread.hw_brk[0] = hw_brk;
+ return 0;
+}
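+
+/*
+ * Hypothetical userspace sketch (pid and watch_addr are placeholders):
+ * the legacy interface above is driven with the flag bits described in
+ * the comment, e.g. a read/write, translated watchpoint:
+ *
+ *   ptrace(PTRACE_SET_DEBUGREG, pid, 0, watch_addr | 0x7);
+ */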
+
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+static int find_empty_ptrace_bp(struct thread_struct *thread)
+{
+ int i;
+
+ for (i = 0; i < nr_wp_slots(); i++) {
+ if (!thread->ptrace_bps[i])
+ return i;
+ }
+ return -1;
+}
+#endif
+
+static int find_empty_hw_brk(struct thread_struct *thread)
+{
+ int i;
+
+ for (i = 0; i < nr_wp_slots(); i++) {
+ if (!thread->hw_brk[i].address)
+ return i;
+ }
+ return -1;
+}
+
+long ppc_set_hwdebug(struct task_struct *child, struct ppc_hw_breakpoint *bp_info)
+{
+ int i;
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+ int len = 0;
+ struct thread_struct *thread = &child->thread;
+ struct perf_event *bp;
+ struct perf_event_attr attr;
+#endif /* CONFIG_HAVE_HW_BREAKPOINT */
+ struct arch_hw_breakpoint brk;
+
+ if (bp_info->version != 1)
+ return -ENOTSUPP;
+ /*
+ * We only support one data breakpoint
+ */
+ if ((bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_RW) == 0 ||
+ (bp_info->trigger_type & ~PPC_BREAKPOINT_TRIGGER_RW) != 0 ||
+ bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE)
+ return -EINVAL;
+
+ if ((unsigned long)bp_info->addr >= TASK_SIZE)
+ return -EIO;
+
+ brk.address = ALIGN_DOWN(bp_info->addr, HW_BREAKPOINT_SIZE);
+ brk.type = HW_BRK_TYPE_TRANSLATE | HW_BRK_TYPE_PRIV_ALL;
+ brk.len = DABR_MAX_LEN;
+ brk.hw_len = DABR_MAX_LEN;
+ if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
+ brk.type |= HW_BRK_TYPE_READ;
+ if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
+ brk.type |= HW_BRK_TYPE_WRITE;
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+ if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE)
+ len = bp_info->addr2 - bp_info->addr;
+ else if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_EXACT)
+ len = 1;
+ else
+ return -EINVAL;
+
+ i = find_empty_ptrace_bp(thread);
+ if (i < 0)
+ return -ENOSPC;
+
+ /* Create a new breakpoint request if one doesn't exist already */
+ hw_breakpoint_init(&attr);
+ attr.bp_addr = (unsigned long)bp_info->addr;
+ attr.bp_len = len;
+ arch_bp_generic_fields(brk.type, &attr.bp_type);
+
+ bp = register_user_hw_breakpoint(&attr, ptrace_triggered, NULL, child);
+ thread->ptrace_bps[i] = bp;
+ if (IS_ERR(bp)) {
+ thread->ptrace_bps[i] = NULL;
+ return PTR_ERR(bp);
+ }
+
+ return i + 1;
+#endif /* CONFIG_HAVE_HW_BREAKPOINT */
+
+ if (bp_info->addr_mode != PPC_BREAKPOINT_MODE_EXACT)
+ return -EINVAL;
+
+ i = find_empty_hw_brk(&child->thread);
+ if (i < 0)
+ return -ENOSPC;
+
+ if (!ppc_breakpoint_available())
+ return -ENODEV;
+
+ child->thread.hw_brk[i] = brk;
+
+ return i + 1;
+}
+
+long ppc_del_hwdebug(struct task_struct *child, long data)
+{
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+ int ret = 0;
+ struct thread_struct *thread = &child->thread;
+ struct perf_event *bp;
+#endif /* CONFIG_HAVE_HW_BREAKPOINT */
+ if (data < 1 || data > nr_wp_slots())
+ return -EINVAL;
+
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+ bp = thread->ptrace_bps[data - 1];
+ if (bp) {
+ unregister_hw_breakpoint(bp);
+ thread->ptrace_bps[data - 1] = NULL;
+ } else {
+ ret = -ENOENT;
+ }
+ return ret;
+#else /* CONFIG_HAVE_HW_BREAKPOINT */
+ if (!(child->thread.hw_brk[data - 1].flags & HW_BRK_FLAG_DISABLED) &&
+ child->thread.hw_brk[data - 1].address == 0)
+ return -ENOENT;
+
+ child->thread.hw_brk[data - 1].address = 0;
+ child->thread.hw_brk[data - 1].type = 0;
+ child->thread.hw_brk[data - 1].flags = 0;
+#endif /* CONFIG_HAVE_HW_BREAKPOINT */
+
+ return 0;
+}
diff --git a/arch/powerpc/kernel/ptrace/ptrace-novsx.c b/arch/powerpc/kernel/ptrace/ptrace-novsx.c
new file mode 100644
index 000000000000..7433f3db979a
--- /dev/null
+++ b/arch/powerpc/kernel/ptrace/ptrace-novsx.c
@@ -0,0 +1,64 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/regset.h>
+
+#include <asm/switch_to.h>
+
+#include "ptrace-decl.h"
+
+/*
+ * Regardless of transactions, 'fp_state' holds the current running
+ * value of all FPR registers and 'ckfp_state' holds the last checkpointed
+ * value of all FPR registers for the current transaction.
+ *
+ * Userspace interface buffer layout:
+ *
+ * struct data {
+ * u64 fpr[32];
+ * u64 fpscr;
+ * };
+ */
+int fpr_get(struct task_struct *target, const struct user_regset *regset,
+ struct membuf to)
+{
+#ifdef CONFIG_PPC_FPU_REGS
+ BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) !=
+ offsetof(struct thread_fp_state, fpr[32]));
+
+ flush_fp_to_thread(target);
+
+ return membuf_write(&to, &target->thread.fp_state, 33 * sizeof(u64));
+#else
+ return membuf_write(&to, &empty_zero_page, 33 * sizeof(u64));
+#endif
+}
+
+/*
+ * Regardless of transactions, 'fp_state' holds the current running
+ * value of all FPR registers and 'ckfp_state' holds the last checkpointed
+ * value of all FPR registers for the current transaction.
+ *
+ * Userspace interface buffer layout:
+ *
+ * struct data {
+ * u64 fpr[32];
+ * u64 fpscr;
+ * };
+ *
+ */
+int fpr_set(struct task_struct *target, const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+#ifdef CONFIG_PPC_FPU_REGS
+ BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) !=
+ offsetof(struct thread_fp_state, fpr[32]));
+
+ flush_fp_to_thread(target);
+
+ return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &target->thread.fp_state, 0, -1);
+#else
+ return 0;
+#endif
+}
diff --git a/arch/powerpc/kernel/ptrace/ptrace-spe.c b/arch/powerpc/kernel/ptrace/ptrace-spe.c
new file mode 100644
index 000000000000..47034d069045
--- /dev/null
+++ b/arch/powerpc/kernel/ptrace/ptrace-spe.c
@@ -0,0 +1,60 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/regset.h>
+
+#include <asm/switch_to.h>
+
+#include "ptrace-decl.h"
+
+/*
+ * For get_evrregs/set_evrregs functions 'data' has the following layout:
+ *
+ * struct {
+ * u32 evr[32];
+ * u64 acc;
+ * u32 spefscr;
+ * }
+ */
+
+int evr_active(struct task_struct *target, const struct user_regset *regset)
+{
+ flush_spe_to_thread(target);
+ return target->thread.used_spe ? regset->n : 0;
+}
+
+int evr_get(struct task_struct *target, const struct user_regset *regset,
+ struct membuf to)
+{
+ flush_spe_to_thread(target);
+
+ membuf_write(&to, &target->thread.evr, sizeof(target->thread.evr));
+
+ BUILD_BUG_ON(offsetof(struct thread_struct, acc) + sizeof(u64) !=
+ offsetof(struct thread_struct, spefscr));
+
+ return membuf_write(&to, &target->thread.acc,
+ sizeof(u64) + sizeof(u32));
+}
+
+int evr_set(struct task_struct *target, const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ int ret;
+
+ flush_spe_to_thread(target);
+
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &target->thread.evr,
+ 0, sizeof(target->thread.evr));
+
+ BUILD_BUG_ON(offsetof(struct thread_struct, acc) + sizeof(u64) !=
+ offsetof(struct thread_struct, spefscr));
+
+ if (!ret)
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &target->thread.acc,
+ sizeof(target->thread.evr), -1);
+
+ return ret;
+}
diff --git a/arch/powerpc/kernel/ptrace/ptrace-tm.c b/arch/powerpc/kernel/ptrace/ptrace-tm.c
new file mode 100644
index 000000000000..44045363a903
--- /dev/null
+++ b/arch/powerpc/kernel/ptrace/ptrace-tm.c
@@ -0,0 +1,788 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/regset.h>
+
+#include <asm/switch_to.h>
+#include <asm/tm.h>
+#include <asm/asm-prototypes.h>
+
+#include "ptrace-decl.h"
+
+void flush_tmregs_to_thread(struct task_struct *tsk)
+{
+ /*
+ * If task is not current, it will have been flushed already to
+ * its thread_struct during __switch_to().
+ *
+ * A reclaim flushes ALL the state, or, if not in TM, saves the TM SPRs
+ * from the live registers into the appropriate thread structures.
+ */
+
+ if (!cpu_has_feature(CPU_FTR_TM) || tsk != current)
+ return;
+
+ if (MSR_TM_SUSPENDED(mfmsr())) {
+ tm_reclaim_current(TM_CAUSE_SIGNAL);
+ } else {
+ tm_enable();
+ tm_save_sprs(&tsk->thread);
+ }
+}
+
+static unsigned long get_user_ckpt_msr(struct task_struct *task)
+{
+ return task->thread.ckpt_regs.msr | task->thread.fpexc_mode;
+}
+
+static int set_user_ckpt_msr(struct task_struct *task, unsigned long msr)
+{
+ task->thread.ckpt_regs.msr &= ~MSR_DEBUGCHANGE;
+ task->thread.ckpt_regs.msr |= msr & MSR_DEBUGCHANGE;
+ return 0;
+}
+
+static int set_user_ckpt_trap(struct task_struct *task, unsigned long trap)
+{
+ set_trap(&task->thread.ckpt_regs, trap);
+ return 0;
+}
+
+/**
+ * tm_cgpr_active - get active number of registers in CGPR
+ * @target: The target task.
+ * @regset: The user regset structure.
+ *
+ * This function checks for the active number of available
+ * registers in the transaction checkpointed GPR category.
+ */
+int tm_cgpr_active(struct task_struct *target, const struct user_regset *regset)
+{
+ if (!cpu_has_feature(CPU_FTR_TM))
+ return -ENODEV;
+
+ if (!MSR_TM_ACTIVE(target->thread.regs->msr))
+ return 0;
+
+ return regset->n;
+}
+
+/**
+ * tm_cgpr_get - get CGPR registers
+ * @target: The target task.
+ * @regset: The user regset structure.
+ * @to: Destination of copy.
+ *
+ * This function gets transaction checkpointed GPR registers.
+ *
+ * When the transaction is active, 'ckpt_regs' holds all the checkpointed
+ * GPR register values for the current transaction to fall back on if it
+ * aborts in between. This function gets those checkpointed GPR registers.
+ * The userspace interface buffer layout is as follows.
+ *
+ * struct data {
+ * struct pt_regs ckpt_regs;
+ * };
+ */
+int tm_cgpr_get(struct task_struct *target, const struct user_regset *regset,
+ struct membuf to)
+{
+ struct membuf to_msr = membuf_at(&to, offsetof(struct pt_regs, msr));
+#ifdef CONFIG_PPC64
+ struct membuf to_softe = membuf_at(&to, offsetof(struct pt_regs, softe));
+#endif
+
+ if (!cpu_has_feature(CPU_FTR_TM))
+ return -ENODEV;
+
+ if (!MSR_TM_ACTIVE(target->thread.regs->msr))
+ return -ENODATA;
+
+ flush_tmregs_to_thread(target);
+ flush_fp_to_thread(target);
+ flush_altivec_to_thread(target);
+
+ membuf_write(&to, &target->thread.ckpt_regs, sizeof(struct user_pt_regs));
+
+ membuf_store(&to_msr, get_user_ckpt_msr(target));
+#ifdef CONFIG_PPC64
+ membuf_store(&to_softe, 0x1ul);
+#endif
+ return membuf_zero(&to, ELF_NGREG * sizeof(unsigned long) -
+ sizeof(struct user_pt_regs));
+}
+
+/**
+ * tm_cgpr_set - set the CGPR registers
+ * @target: The target task.
+ * @regset: The user regset structure.
+ * @pos: The buffer position.
+ * @count: Number of bytes to copy.
+ * @kbuf: Kernel buffer to copy into.
+ * @ubuf: User buffer to copy from.
+ *
+ * This function sets the transaction-checkpointed GPR registers.
+ *
+ * When the transaction is active, 'ckpt_regs' holds the checkpointed
+ * GPR register values for the current transaction to fall back on if it
+ * aborts in between. This function sets those checkpointed GPR registers.
+ * The userspace interface buffer layout is as follows.
+ *
+ * struct data {
+ * struct pt_regs ckpt_regs;
+ * };
+ */
+int tm_cgpr_set(struct task_struct *target, const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ unsigned long reg;
+ int ret;
+
+ if (!cpu_has_feature(CPU_FTR_TM))
+ return -ENODEV;
+
+ if (!MSR_TM_ACTIVE(target->thread.regs->msr))
+ return -ENODATA;
+
+ flush_tmregs_to_thread(target);
+ flush_fp_to_thread(target);
+ flush_altivec_to_thread(target);
+
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &target->thread.ckpt_regs,
+ 0, PT_MSR * sizeof(reg));
+
+ if (!ret && count > 0) {
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
+ PT_MSR * sizeof(reg),
+ (PT_MSR + 1) * sizeof(reg));
+ if (!ret)
+ ret = set_user_ckpt_msr(target, reg);
+ }
+
+ BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
+ offsetof(struct pt_regs, msr) + sizeof(long));
+
+ if (!ret)
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &target->thread.ckpt_regs.orig_gpr3,
+ PT_ORIG_R3 * sizeof(reg),
+ (PT_MAX_PUT_REG + 1) * sizeof(reg));
+
+ if (PT_MAX_PUT_REG + 1 < PT_TRAP && !ret)
+ ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
+ (PT_MAX_PUT_REG + 1) * sizeof(reg),
+ PT_TRAP * sizeof(reg));
+
+ if (!ret && count > 0) {
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
+ PT_TRAP * sizeof(reg),
+ (PT_TRAP + 1) * sizeof(reg));
+ if (!ret)
+ ret = set_user_ckpt_trap(target, reg);
+ }
+
+ if (!ret)
+ ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
+ (PT_TRAP + 1) * sizeof(reg), -1);
+
+ return ret;
+}
+
+/**
+ * tm_cfpr_active - get active number of registers in CFPR
+ * @target: The target task.
+ * @regset: The user regset structure.
+ *
+ * This function checks for the active number of available
+ * registers in the transaction checkpointed FPR category.
+ */
+int tm_cfpr_active(struct task_struct *target, const struct user_regset *regset)
+{
+ if (!cpu_has_feature(CPU_FTR_TM))
+ return -ENODEV;
+
+ if (!MSR_TM_ACTIVE(target->thread.regs->msr))
+ return 0;
+
+ return regset->n;
+}
+
+/**
+ * tm_cfpr_get - get CFPR registers
+ * @target: The target task.
+ * @regset: The user regset structure.
+ * @to: Destination of copy.
+ *
+ * This function gets the transaction-checkpointed FPR registers.
+ *
+ * When the transaction is active 'ckfp_state' holds the checkpointed
+ * values for the current transaction to fall back on if it aborts
+ * in between. This function gets those checkpointed FPR registers.
+ * The userspace interface buffer layout is as follows.
+ *
+ * struct data {
+ * u64 fpr[32];
+ * u64 fpscr;
+ * };
+ */
+int tm_cfpr_get(struct task_struct *target, const struct user_regset *regset,
+ struct membuf to)
+{
+ u64 buf[33];
+ int i;
+
+ if (!cpu_has_feature(CPU_FTR_TM))
+ return -ENODEV;
+
+ if (!MSR_TM_ACTIVE(target->thread.regs->msr))
+ return -ENODATA;
+
+ flush_tmregs_to_thread(target);
+ flush_fp_to_thread(target);
+ flush_altivec_to_thread(target);
+
+ /* copy to local buffer then write that out */
+ for (i = 0; i < 32 ; i++)
+ buf[i] = target->thread.TS_CKFPR(i);
+ buf[32] = target->thread.ckfp_state.fpscr;
+ return membuf_write(&to, buf, sizeof(buf));
+}
+
+/**
+ * tm_cfpr_set - set CFPR registers
+ * @target: The target task.
+ * @regset: The user regset structure.
+ * @pos: The buffer position.
+ * @count: Number of bytes to copy.
+ * @kbuf: Kernel buffer to copy into.
+ * @ubuf: User buffer to copy from.
+ *
+ * This function sets the transaction-checkpointed FPR registers.
+ *
+ * When the transaction is active 'ckfp_state' holds the checkpointed
+ * FPR register values for the current transaction to fall back on
+ * if it aborts in between. This function sets these checkpointed
+ * FPR registers. The userspace interface buffer layout is as follows.
+ *
+ * struct data {
+ * u64 fpr[32];
+ * u64 fpscr;
+ * };
+ */
+int tm_cfpr_set(struct task_struct *target, const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ u64 buf[33];
+ int i;
+
+ if (!cpu_has_feature(CPU_FTR_TM))
+ return -ENODEV;
+
+ if (!MSR_TM_ACTIVE(target->thread.regs->msr))
+ return -ENODATA;
+
+ flush_tmregs_to_thread(target);
+ flush_fp_to_thread(target);
+ flush_altivec_to_thread(target);
+
+ for (i = 0; i < 32; i++)
+ buf[i] = target->thread.TS_CKFPR(i);
+ buf[32] = target->thread.ckfp_state.fpscr;
+
+ /* copy to local buffer then write that out */
+ i = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
+ if (i)
+ return i;
+ for (i = 0; i < 32 ; i++)
+ target->thread.TS_CKFPR(i) = buf[i];
+ target->thread.ckfp_state.fpscr = buf[32];
+ return 0;
+}
+
+/**
+ * tm_cvmx_active - get active number of registers in CVMX
+ * @target: The target task.
+ * @regset: The user regset structure.
+ *
+ * This function checks for the active number of available
+ * registers in the checkpointed VMX category.
+ */
+int tm_cvmx_active(struct task_struct *target, const struct user_regset *regset)
+{
+ if (!cpu_has_feature(CPU_FTR_TM))
+ return -ENODEV;
+
+ if (!MSR_TM_ACTIVE(target->thread.regs->msr))
+ return 0;
+
+ return regset->n;
+}
+
+/**
+ * tm_cvmx_get - get CVMX registers
+ * @target: The target task.
+ * @regset: The user regset structure.
+ * @to: Destination of copy.
+ *
+ * This function gets the transaction-checkpointed VMX registers.
+ *
+ * When the transaction is active 'ckvr_state' and 'ckvrsave' hold
+ * the checkpointed values for the current transaction to fall
+ * back on if it aborts in between. The userspace interface buffer
+ * layout is as follows.
+ *
+ * struct data {
+ * vector128 vr[32];
+ * vector128 vscr;
+ * vector128 vrsave;
+ * };
+ */
+int tm_cvmx_get(struct task_struct *target, const struct user_regset *regset,
+ struct membuf to)
+{
+ union {
+ elf_vrreg_t reg;
+ u32 word;
+ } vrsave;
+ BUILD_BUG_ON(TVSO(vscr) != TVSO(vr[32]));
+
+ if (!cpu_has_feature(CPU_FTR_TM))
+ return -ENODEV;
+
+ if (!MSR_TM_ACTIVE(target->thread.regs->msr))
+ return -ENODATA;
+
+ /* Flush the state */
+ flush_tmregs_to_thread(target);
+ flush_fp_to_thread(target);
+ flush_altivec_to_thread(target);
+
+ membuf_write(&to, &target->thread.ckvr_state, 33 * sizeof(vector128));
+ /*
+ * Copy out only the low-order word of vrsave.
+ */
+ memset(&vrsave, 0, sizeof(vrsave));
+ vrsave.word = target->thread.ckvrsave;
+ return membuf_write(&to, &vrsave, sizeof(vrsave));
+}
+
+/**
+ * tm_cvmx_set - set CVMX registers
+ * @target: The target task.
+ * @regset: The user regset structure.
+ * @pos: The buffer position.
+ * @count: Number of bytes to copy.
+ * @kbuf: Kernel buffer to copy into.
+ * @ubuf: User buffer to copy from.
+ *
+ * This function sets the transaction-checkpointed VMX registers.
+ *
+ * When the transaction is active 'ckvr_state' and 'ckvrsave' hold
+ * the checkpointed values for the current transaction to fall
+ * back on if it aborts in between. The userspace interface buffer
+ * layout is as follows.
+ *
+ * struct data {
+ * vector128 vr[32];
+ * vector128 vscr;
+ * vector128 vrsave;
+ * };
+ */
+int tm_cvmx_set(struct task_struct *target, const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ int ret;
+
+ BUILD_BUG_ON(TVSO(vscr) != TVSO(vr[32]));
+
+ if (!cpu_has_feature(CPU_FTR_TM))
+ return -ENODEV;
+
+ if (!MSR_TM_ACTIVE(target->thread.regs->msr))
+ return -ENODATA;
+
+ flush_tmregs_to_thread(target);
+ flush_fp_to_thread(target);
+ flush_altivec_to_thread(target);
+
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &target->thread.ckvr_state,
+ 0, 33 * sizeof(vector128));
+ if (!ret && count > 0) {
+ /*
+ * We use only the low-order word of vrsave.
+ */
+ union {
+ elf_vrreg_t reg;
+ u32 word;
+ } vrsave;
+ memset(&vrsave, 0, sizeof(vrsave));
+ vrsave.word = target->thread.ckvrsave;
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &vrsave,
+ 33 * sizeof(vector128), -1);
+ if (!ret)
+ target->thread.ckvrsave = vrsave.word;
+ }
+
+ return ret;
+}
+
+/**
+ * tm_cvsx_active - get active number of registers in CVSX
+ * @target: The target task.
+ * @regset: The user regset structure.
+ *
+ * This function checks for the active number of available
+ * registers in the transaction checkpointed VSX category.
+ */
+int tm_cvsx_active(struct task_struct *target, const struct user_regset *regset)
+{
+ if (!cpu_has_feature(CPU_FTR_TM))
+ return -ENODEV;
+
+ if (!MSR_TM_ACTIVE(target->thread.regs->msr))
+ return 0;
+
+ flush_vsx_to_thread(target);
+ return target->thread.used_vsr ? regset->n : 0;
+}
+
+/**
+ * tm_cvsx_get - get CVSX registers
+ * @target: The target task.
+ * @regset: The user regset structure.
+ * @to: Destination of copy.
+ *
+ * This function gets the transaction-checkpointed VSX registers.
+ *
+ * When the transaction is active 'ckfp_state' holds the checkpointed
+ * values for the current transaction to fall back on if it aborts
+ * in between. This function gets those checkpointed VSX registers.
+ * The userspace interface buffer layout is as follows.
+ *
+ * struct data {
+ * u64 vsx[32];
+ * };
+ */
+int tm_cvsx_get(struct task_struct *target, const struct user_regset *regset,
+ struct membuf to)
+{
+ u64 buf[32];
+ int i;
+
+ if (!cpu_has_feature(CPU_FTR_TM))
+ return -ENODEV;
+
+ if (!MSR_TM_ACTIVE(target->thread.regs->msr))
+ return -ENODATA;
+
+ /* Flush the state */
+ flush_tmregs_to_thread(target);
+ flush_fp_to_thread(target);
+ flush_altivec_to_thread(target);
+ flush_vsx_to_thread(target);
+
+ for (i = 0; i < 32 ; i++)
+ buf[i] = target->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET];
+ return membuf_write(&to, buf, 32 * sizeof(double));
+}
+
+/**
+ * tm_cvsx_set - set CVSX registers
+ * @target: The target task.
+ * @regset: The user regset structure.
+ * @pos: The buffer position.
+ * @count: Number of bytes to copy.
+ * @kbuf: Kernel buffer to copy into.
+ * @ubuf: User buffer to copy from.
+ *
+ * This function sets the transaction-checkpointed VSX registers.
+ *
+ * When the transaction is active 'ckfp_state' holds the checkpointed
+ * VSX register values for the current transaction to fall back on
+ * if it aborts in between. This function sets these checkpointed
+ * VSX registers. The userspace interface buffer layout is as follows.
+ *
+ * struct data {
+ * u64 vsx[32];
+ * };
+ */
+int tm_cvsx_set(struct task_struct *target, const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ u64 buf[32];
+ int ret, i;
+
+ if (!cpu_has_feature(CPU_FTR_TM))
+ return -ENODEV;
+
+ if (!MSR_TM_ACTIVE(target->thread.regs->msr))
+ return -ENODATA;
+
+ /* Flush the state */
+ flush_tmregs_to_thread(target);
+ flush_fp_to_thread(target);
+ flush_altivec_to_thread(target);
+ flush_vsx_to_thread(target);
+
+ for (i = 0; i < 32 ; i++)
+ buf[i] = target->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET];
+
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ buf, 0, 32 * sizeof(double));
+ if (!ret)
+ for (i = 0; i < 32 ; i++)
+ target->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];
+
+ return ret;
+}
+
+/**
+ * tm_spr_active - get active number of registers in TM SPR
+ * @target: The target task.
+ * @regset: The user regset structure.
+ *
+ * This function checks the active number of available
+ * registers in the transactional memory SPR category.
+ */
+int tm_spr_active(struct task_struct *target, const struct user_regset *regset)
+{
+ if (!cpu_has_feature(CPU_FTR_TM))
+ return -ENODEV;
+
+ return regset->n;
+}
+
+/**
+ * tm_spr_get - get the TM related SPR registers
+ * @target: The target task.
+ * @regset: The user regset structure.
+ * @to: Destination of copy.
+ *
+ * This function gets transactional memory related SPR registers.
+ * The userspace interface buffer layout is as follows.
+ *
+ * struct {
+ * u64 tm_tfhar;
+ * u64 tm_texasr;
+ * u64 tm_tfiar;
+ * };
+ */
+int tm_spr_get(struct task_struct *target, const struct user_regset *regset,
+ struct membuf to)
+{
+ /* Build tests */
+ BUILD_BUG_ON(TSO(tm_tfhar) + sizeof(u64) != TSO(tm_texasr));
+ BUILD_BUG_ON(TSO(tm_texasr) + sizeof(u64) != TSO(tm_tfiar));
+ BUILD_BUG_ON(TSO(tm_tfiar) + sizeof(u64) != TSO(ckpt_regs));
+
+ if (!cpu_has_feature(CPU_FTR_TM))
+ return -ENODEV;
+
+ /* Flush the states */
+ flush_tmregs_to_thread(target);
+ flush_fp_to_thread(target);
+ flush_altivec_to_thread(target);
+
+ /* TFHAR register */
+ membuf_write(&to, &target->thread.tm_tfhar, sizeof(u64));
+ /* TEXASR register */
+ membuf_write(&to, &target->thread.tm_texasr, sizeof(u64));
+ /* TFIAR register */
+ return membuf_write(&to, &target->thread.tm_tfiar, sizeof(u64));
+}
+
+/**
+ * tm_spr_set - set the TM related SPR registers
+ * @target: The target task.
+ * @regset: The user regset structure.
+ * @pos: The buffer position.
+ * @count: Number of bytes to copy.
+ * @kbuf: Kernel buffer to copy into.
+ * @ubuf: User buffer to copy from.
+ *
+ * This function sets transactional memory related SPR registers.
+ * The userspace interface buffer layout is as follows.
+ *
+ * struct {
+ * u64 tm_tfhar;
+ * u64 tm_texasr;
+ * u64 tm_tfiar;
+ * };
+ */
+int tm_spr_set(struct task_struct *target, const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ int ret;
+
+ /* Build tests */
+ BUILD_BUG_ON(TSO(tm_tfhar) + sizeof(u64) != TSO(tm_texasr));
+ BUILD_BUG_ON(TSO(tm_texasr) + sizeof(u64) != TSO(tm_tfiar));
+ BUILD_BUG_ON(TSO(tm_tfiar) + sizeof(u64) != TSO(ckpt_regs));
+
+ if (!cpu_has_feature(CPU_FTR_TM))
+ return -ENODEV;
+
+ /* Flush the states */
+ flush_tmregs_to_thread(target);
+ flush_fp_to_thread(target);
+ flush_altivec_to_thread(target);
+
+ /* TFHAR register */
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &target->thread.tm_tfhar, 0, sizeof(u64));
+
+ /* TEXASR register */
+ if (!ret)
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &target->thread.tm_texasr, sizeof(u64),
+ 2 * sizeof(u64));
+
+ /* TFIAR register */
+ if (!ret)
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &target->thread.tm_tfiar,
+ 2 * sizeof(u64), 3 * sizeof(u64));
+ return ret;
+}
+
+int tm_tar_active(struct task_struct *target, const struct user_regset *regset)
+{
+ if (!cpu_has_feature(CPU_FTR_TM))
+ return -ENODEV;
+
+ if (MSR_TM_ACTIVE(target->thread.regs->msr))
+ return regset->n;
+
+ return 0;
+}
+
+int tm_tar_get(struct task_struct *target, const struct user_regset *regset,
+ struct membuf to)
+{
+ if (!cpu_has_feature(CPU_FTR_TM))
+ return -ENODEV;
+
+ if (!MSR_TM_ACTIVE(target->thread.regs->msr))
+ return -ENODATA;
+
+ return membuf_write(&to, &target->thread.tm_tar, sizeof(u64));
+}
+
+int tm_tar_set(struct task_struct *target, const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ int ret;
+
+ if (!cpu_has_feature(CPU_FTR_TM))
+ return -ENODEV;
+
+ if (!MSR_TM_ACTIVE(target->thread.regs->msr))
+ return -ENODATA;
+
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &target->thread.tm_tar, 0, sizeof(u64));
+ return ret;
+}
+
+int tm_ppr_active(struct task_struct *target, const struct user_regset *regset)
+{
+ if (!cpu_has_feature(CPU_FTR_TM))
+ return -ENODEV;
+
+ if (MSR_TM_ACTIVE(target->thread.regs->msr))
+ return regset->n;
+
+ return 0;
+}
+
+int tm_ppr_get(struct task_struct *target, const struct user_regset *regset,
+ struct membuf to)
+{
+ if (!cpu_has_feature(CPU_FTR_TM))
+ return -ENODEV;
+
+ if (!MSR_TM_ACTIVE(target->thread.regs->msr))
+ return -ENODATA;
+
+ return membuf_write(&to, &target->thread.tm_ppr, sizeof(u64));
+}
+
+int tm_ppr_set(struct task_struct *target, const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ int ret;
+
+ if (!cpu_has_feature(CPU_FTR_TM))
+ return -ENODEV;
+
+ if (!MSR_TM_ACTIVE(target->thread.regs->msr))
+ return -ENODATA;
+
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &target->thread.tm_ppr, 0, sizeof(u64));
+ return ret;
+}
+
+int tm_dscr_active(struct task_struct *target, const struct user_regset *regset)
+{
+ if (!cpu_has_feature(CPU_FTR_TM))
+ return -ENODEV;
+
+ if (MSR_TM_ACTIVE(target->thread.regs->msr))
+ return regset->n;
+
+ return 0;
+}
+
+int tm_dscr_get(struct task_struct *target, const struct user_regset *regset,
+ struct membuf to)
+{
+ if (!cpu_has_feature(CPU_FTR_TM))
+ return -ENODEV;
+
+ if (!MSR_TM_ACTIVE(target->thread.regs->msr))
+ return -ENODATA;
+
+ return membuf_write(&to, &target->thread.tm_dscr, sizeof(u64));
+}
+
+int tm_dscr_set(struct task_struct *target, const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ int ret;
+
+ if (!cpu_has_feature(CPU_FTR_TM))
+ return -ENODEV;
+
+ if (!MSR_TM_ACTIVE(target->thread.regs->msr))
+ return -ENODATA;
+
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &target->thread.tm_dscr, 0, sizeof(u64));
+ return ret;
+}
+
+int tm_cgpr32_get(struct task_struct *target, const struct user_regset *regset,
+ struct membuf to)
+{
+ gpr32_get_common(target, regset, to,
+ &target->thread.ckpt_regs.gpr[0]);
+ return membuf_zero(&to, ELF_NGREG * sizeof(u32));
+}
+
+int tm_cgpr32_set(struct task_struct *target, const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ return gpr32_set_common(target, regset, pos, count, kbuf, ubuf,
+ &target->thread.ckpt_regs.gpr[0]);
+}
diff --git a/arch/powerpc/kernel/ptrace/ptrace-view.c b/arch/powerpc/kernel/ptrace/ptrace-view.c
new file mode 100644
index 000000000000..076d867412c7
--- /dev/null
+++ b/arch/powerpc/kernel/ptrace/ptrace-view.c
@@ -0,0 +1,847 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/regset.h>
+#include <linux/elf.h>
+#include <linux/nospec.h>
+#include <linux/pkeys.h>
+
+#include "ptrace-decl.h"
+
+struct pt_regs_offset {
+ const char *name;
+ int offset;
+};
+
+#define STR(s) #s /* convert to string */
+#define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
+#define GPR_OFFSET_NAME(num) \
+ {.name = STR(r##num), .offset = offsetof(struct pt_regs, gpr[num])}, \
+ {.name = STR(gpr##num), .offset = offsetof(struct pt_regs, gpr[num])}
+#define REG_OFFSET_END {.name = NULL, .offset = 0}
+
+static const struct pt_regs_offset regoffset_table[] = {
+ GPR_OFFSET_NAME(0),
+ GPR_OFFSET_NAME(1),
+ GPR_OFFSET_NAME(2),
+ GPR_OFFSET_NAME(3),
+ GPR_OFFSET_NAME(4),
+ GPR_OFFSET_NAME(5),
+ GPR_OFFSET_NAME(6),
+ GPR_OFFSET_NAME(7),
+ GPR_OFFSET_NAME(8),
+ GPR_OFFSET_NAME(9),
+ GPR_OFFSET_NAME(10),
+ GPR_OFFSET_NAME(11),
+ GPR_OFFSET_NAME(12),
+ GPR_OFFSET_NAME(13),
+ GPR_OFFSET_NAME(14),
+ GPR_OFFSET_NAME(15),
+ GPR_OFFSET_NAME(16),
+ GPR_OFFSET_NAME(17),
+ GPR_OFFSET_NAME(18),
+ GPR_OFFSET_NAME(19),
+ GPR_OFFSET_NAME(20),
+ GPR_OFFSET_NAME(21),
+ GPR_OFFSET_NAME(22),
+ GPR_OFFSET_NAME(23),
+ GPR_OFFSET_NAME(24),
+ GPR_OFFSET_NAME(25),
+ GPR_OFFSET_NAME(26),
+ GPR_OFFSET_NAME(27),
+ GPR_OFFSET_NAME(28),
+ GPR_OFFSET_NAME(29),
+ GPR_OFFSET_NAME(30),
+ GPR_OFFSET_NAME(31),
+ REG_OFFSET_NAME(nip),
+ REG_OFFSET_NAME(msr),
+ REG_OFFSET_NAME(ctr),
+ REG_OFFSET_NAME(link),
+ REG_OFFSET_NAME(xer),
+ REG_OFFSET_NAME(ccr),
+#ifdef CONFIG_PPC64
+ REG_OFFSET_NAME(softe),
+#else
+ REG_OFFSET_NAME(mq),
+#endif
+ REG_OFFSET_NAME(trap),
+ REG_OFFSET_NAME(dar),
+ REG_OFFSET_NAME(dsisr),
+ REG_OFFSET_END,
+};
+
+/**
+ * regs_query_register_offset() - query register offset from its name
+ * @name: the name of a register
+ *
+ * regs_query_register_offset() returns the offset of a register in struct
+ * pt_regs from its name. If the name is invalid, this returns -EINVAL.
+ */
+int regs_query_register_offset(const char *name)
+{
+ const struct pt_regs_offset *roff;
+ for (roff = regoffset_table; roff->name != NULL; roff++)
+ if (!strcmp(roff->name, name))
+ return roff->offset;
+ return -EINVAL;
+}
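+
+/* e.g. regs_query_register_offset("nip") returns offsetof(struct pt_regs, nip) */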
+
+/**
+ * regs_query_register_name() - query register name from its offset
+ * @offset: the offset of a register in struct pt_regs.
+ *
+ * regs_query_register_name() returns the name of a register from its
+ * offset in struct pt_regs. If the @offset is invalid, this returns NULL.
+ */
+const char *regs_query_register_name(unsigned int offset)
+{
+ const struct pt_regs_offset *roff;
+ for (roff = regoffset_table; roff->name != NULL; roff++)
+ if (roff->offset == offset)
+ return roff->name;
+ return NULL;
+}
+
+/*
+ * This does not yet catch signals sent when the child dies;
+ * that is handled in exit.c or in signal.c.
+ */
+
+static unsigned long get_user_msr(struct task_struct *task)
+{
+ return task->thread.regs->msr | task->thread.fpexc_mode;
+}
+
+static __always_inline int set_user_msr(struct task_struct *task, unsigned long msr)
+{
+ unsigned long newmsr = (task->thread.regs->msr & ~MSR_DEBUGCHANGE) |
+ (msr & MSR_DEBUGCHANGE);
+ regs_set_return_msr(task->thread.regs, newmsr);
+ return 0;
+}
+
+#ifdef CONFIG_PPC64
+static int get_user_dscr(struct task_struct *task, unsigned long *data)
+{
+ *data = task->thread.dscr;
+ return 0;
+}
+
+static int set_user_dscr(struct task_struct *task, unsigned long dscr)
+{
+ task->thread.dscr = dscr;
+ task->thread.dscr_inherit = 1;
+ return 0;
+}
+#else
+static int get_user_dscr(struct task_struct *task, unsigned long *data)
+{
+ return -EIO;
+}
+
+static int set_user_dscr(struct task_struct *task, unsigned long dscr)
+{
+ return -EIO;
+}
+#endif
+
+/*
+ * We prevent mucking around with the reserved area of the trap word,
+ * which is used internally by the kernel.
+ */
+static __always_inline int set_user_trap(struct task_struct *task, unsigned long trap)
+{
+ set_trap(task->thread.regs, trap);
+ return 0;
+}
+
+/*
+ * Get contents of register REGNO in task TASK.
+ */
+int ptrace_get_reg(struct task_struct *task, int regno, unsigned long *data)
+{
+ unsigned int regs_max;
+
+ if (task->thread.regs == NULL || !data)
+ return -EIO;
+
+ if (regno == PT_MSR) {
+ *data = get_user_msr(task);
+ return 0;
+ }
+
+ if (regno == PT_DSCR)
+ return get_user_dscr(task, data);
+
+ /*
+ * softe copies the paca->irq_soft_mask variable state. Since irq_soft_mask
+ * is no longer used as a flag, let's force userspace to always see the softe
+ * value as 1, which means interrupts are not soft-disabled.
+ */
+ if (IS_ENABLED(CONFIG_PPC64) && regno == PT_SOFTE) {
+ *data = 1;
+ return 0;
+ }
+
+ regs_max = sizeof(struct user_pt_regs) / sizeof(unsigned long);
+ if (regno < regs_max) {
+ regno = array_index_nospec(regno, regs_max);
+ *data = ((unsigned long *)task->thread.regs)[regno];
+ return 0;
+ }
+
+ return -EIO;
+}
+
+/*
+ * Write contents of register REGNO in task TASK.
+ */
+int ptrace_put_reg(struct task_struct *task, int regno, unsigned long data)
+{
+ if (task->thread.regs == NULL)
+ return -EIO;
+
+ if (regno == PT_MSR)
+ return set_user_msr(task, data);
+ if (regno == PT_TRAP)
+ return set_user_trap(task, data);
+ if (regno == PT_DSCR)
+ return set_user_dscr(task, data);
+
+ if (regno <= PT_MAX_PUT_REG) {
+ regno = array_index_nospec(regno, PT_MAX_PUT_REG + 1);
+ ((unsigned long *)task->thread.regs)[regno] = data;
+ return 0;
+ }
+ return -EIO;
+}
+
+static int gpr_get(struct task_struct *target, const struct user_regset *regset,
+ struct membuf to)
+{
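+ /* reserve the msr and softe slots; their values are patched in after the bulk copy */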
+ struct membuf to_msr = membuf_at(&to, offsetof(struct pt_regs, msr));
+#ifdef CONFIG_PPC64
+ struct membuf to_softe = membuf_at(&to, offsetof(struct pt_regs, softe));
+#endif
+ if (target->thread.regs == NULL)
+ return -EIO;
+
+ membuf_write(&to, target->thread.regs, sizeof(struct user_pt_regs));
+
+ membuf_store(&to_msr, get_user_msr(target));
+#ifdef CONFIG_PPC64
+ membuf_store(&to_softe, 0x1ul);
+#endif
+ return membuf_zero(&to, ELF_NGREG * sizeof(unsigned long) -
+ sizeof(struct user_pt_regs));
+}
+
+static int gpr_set(struct task_struct *target, const struct user_regset *regset,
+ unsigned int pos, unsigned int count, const void *kbuf,
+ const void __user *ubuf)
+{
+ unsigned long reg;
+ int ret;
+
+ if (target->thread.regs == NULL)
+ return -EIO;
+
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ target->thread.regs,
+ 0, PT_MSR * sizeof(reg));
+
+ if (!ret && count > 0) {
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
+ PT_MSR * sizeof(reg),
+ (PT_MSR + 1) * sizeof(reg));
+ if (!ret)
+ ret = set_user_msr(target, reg);
+ }
+
+ BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
+ offsetof(struct pt_regs, msr) + sizeof(long));
+
+ if (!ret)
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &target->thread.regs->orig_gpr3,
+ PT_ORIG_R3 * sizeof(reg),
+ (PT_MAX_PUT_REG + 1) * sizeof(reg));
+
+ if (PT_MAX_PUT_REG + 1 < PT_TRAP && !ret)
+ ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
+ (PT_MAX_PUT_REG + 1) * sizeof(reg),
+ PT_TRAP * sizeof(reg));
+
+ if (!ret && count > 0) {
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
+ PT_TRAP * sizeof(reg),
+ (PT_TRAP + 1) * sizeof(reg));
+ if (!ret)
+ ret = set_user_trap(target, reg);
+ }
+
+ if (!ret)
+ ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
+ (PT_TRAP + 1) * sizeof(reg), -1);
+
+ return ret;
+}
+
+#ifdef CONFIG_PPC64
+static int ppr_get(struct task_struct *target, const struct user_regset *regset,
+ struct membuf to)
+{
+ return membuf_write(&to, &target->thread.regs->ppr, sizeof(u64));
+}
+
+static int ppr_set(struct task_struct *target, const struct user_regset *regset,
+ unsigned int pos, unsigned int count, const void *kbuf,
+ const void __user *ubuf)
+{
+ return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &target->thread.regs->ppr, 0, sizeof(u64));
+}
+
+static int dscr_get(struct task_struct *target, const struct user_regset *regset,
+ struct membuf to)
+{
+ return membuf_write(&to, &target->thread.dscr, sizeof(u64));
+}
+static int dscr_set(struct task_struct *target, const struct user_regset *regset,
+ unsigned int pos, unsigned int count, const void *kbuf,
+ const void __user *ubuf)
+{
+ return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &target->thread.dscr, 0, sizeof(u64));
+}
+#endif
+#ifdef CONFIG_PPC_BOOK3S_64
+static int tar_get(struct task_struct *target, const struct user_regset *regset,
+ struct membuf to)
+{
+ return membuf_write(&to, &target->thread.tar, sizeof(u64));
+}
+static int tar_set(struct task_struct *target, const struct user_regset *regset,
+ unsigned int pos, unsigned int count, const void *kbuf,
+ const void __user *ubuf)
+{
+ return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &target->thread.tar, 0, sizeof(u64));
+}
+
+static int ebb_active(struct task_struct *target, const struct user_regset *regset)
+{
+ if (!cpu_has_feature(CPU_FTR_ARCH_207S))
+ return -ENODEV;
+
+ if (target->thread.used_ebb)
+ return regset->n;
+
+ return 0;
+}
+
+static int ebb_get(struct task_struct *target, const struct user_regset *regset,
+ struct membuf to)
+{
+ /* Build tests */
+ BUILD_BUG_ON(TSO(ebbrr) + sizeof(unsigned long) != TSO(ebbhr));
+ BUILD_BUG_ON(TSO(ebbhr) + sizeof(unsigned long) != TSO(bescr));
+
+ if (!cpu_has_feature(CPU_FTR_ARCH_207S))
+ return -ENODEV;
+
+ if (!target->thread.used_ebb)
+ return -ENODATA;
+
+ return membuf_write(&to, &target->thread.ebbrr, 3 * sizeof(unsigned long));
+}
+
+static int ebb_set(struct task_struct *target, const struct user_regset *regset,
+ unsigned int pos, unsigned int count, const void *kbuf,
+ const void __user *ubuf)
+{
+ int ret = 0;
+
+ /* Build tests */
+ BUILD_BUG_ON(TSO(ebbrr) + sizeof(unsigned long) != TSO(ebbhr));
+ BUILD_BUG_ON(TSO(ebbhr) + sizeof(unsigned long) != TSO(bescr));
+
+ if (!cpu_has_feature(CPU_FTR_ARCH_207S))
+ return -ENODEV;
+
+ if (target->thread.used_ebb)
+ return -ENODATA;
+
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &target->thread.ebbrr,
+ 0, sizeof(unsigned long));
+
+ if (!ret)
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &target->thread.ebbhr, sizeof(unsigned long),
+ 2 * sizeof(unsigned long));
+
+ if (!ret)
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &target->thread.bescr, 2 * sizeof(unsigned long),
+ 3 * sizeof(unsigned long));
+
+ return ret;
+}
+static int pmu_active(struct task_struct *target, const struct user_regset *regset)
+{
+ if (!cpu_has_feature(CPU_FTR_ARCH_207S))
+ return -ENODEV;
+
+ return regset->n;
+}
+
+static int pmu_get(struct task_struct *target, const struct user_regset *regset,
+ struct membuf to)
+{
+ /* Build tests */
+ BUILD_BUG_ON(TSO(siar) + sizeof(unsigned long) != TSO(sdar));
+ BUILD_BUG_ON(TSO(sdar) + sizeof(unsigned long) != TSO(sier));
+ BUILD_BUG_ON(TSO(sier) + sizeof(unsigned long) != TSO(mmcr2));
+ BUILD_BUG_ON(TSO(mmcr2) + sizeof(unsigned long) != TSO(mmcr0));
+
+ if (!cpu_has_feature(CPU_FTR_ARCH_207S))
+ return -ENODEV;
+
+ return membuf_write(&to, &target->thread.siar, 5 * sizeof(unsigned long));
+}
+
+static int pmu_set(struct task_struct *target, const struct user_regset *regset,
+ unsigned int pos, unsigned int count, const void *kbuf,
+ const void __user *ubuf)
+{
+ int ret = 0;
+
+ /* Build tests */
+ BUILD_BUG_ON(TSO(siar) + sizeof(unsigned long) != TSO(sdar));
+ BUILD_BUG_ON(TSO(sdar) + sizeof(unsigned long) != TSO(sier));
+ BUILD_BUG_ON(TSO(sier) + sizeof(unsigned long) != TSO(mmcr2));
+ BUILD_BUG_ON(TSO(mmcr2) + sizeof(unsigned long) != TSO(mmcr0));
+
+ if (!cpu_has_feature(CPU_FTR_ARCH_207S))
+ return -ENODEV;
+
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &target->thread.siar,
+ 0, sizeof(unsigned long));
+
+ if (!ret)
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &target->thread.sdar, sizeof(unsigned long),
+ 2 * sizeof(unsigned long));
+
+ if (!ret)
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &target->thread.sier, 2 * sizeof(unsigned long),
+ 3 * sizeof(unsigned long));
+
+ if (!ret)
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &target->thread.mmcr2, 3 * sizeof(unsigned long),
+ 4 * sizeof(unsigned long));
+
+ if (!ret)
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &target->thread.mmcr0, 4 * sizeof(unsigned long),
+ 5 * sizeof(unsigned long));
+ return ret;
+}
+#endif
+
+#ifdef CONFIG_PPC_MEM_KEYS
+static int pkey_active(struct task_struct *target, const struct user_regset *regset)
+{
+ if (!arch_pkeys_enabled())
+ return -ENODEV;
+
+ return regset->n;
+}
+
+static int pkey_get(struct task_struct *target, const struct user_regset *regset,
+ struct membuf to)
+{
+ if (!arch_pkeys_enabled())
+ return -ENODEV;
+
+ membuf_store(&to, target->thread.regs->amr);
+ membuf_store(&to, target->thread.regs->iamr);
+ return membuf_store(&to, default_uamor);
+}
+
+static int pkey_set(struct task_struct *target, const struct user_regset *regset,
+ unsigned int pos, unsigned int count, const void *kbuf,
+ const void __user *ubuf)
+{
+ u64 new_amr;
+ int ret;
+
+ if (!arch_pkeys_enabled())
+ return -ENODEV;
+
+ /* Only the AMR can be set from userspace */
+ if (pos != 0 || count != sizeof(new_amr))
+ return -EINVAL;
+
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &new_amr, 0, sizeof(new_amr));
+ if (ret)
+ return ret;
+
+ /*
+ * UAMOR determines which bits of the AMR can be set from userspace.
+ * A UAMOR value of 0b11 for a key indicates that its AMR bits can be
+ * modified from userspace. If the kernel is using a specific key, we
+ * prevent userspace from modifying the AMR bits for that key by
+ * masking them via a UAMOR value of 0b00.
+ *
+ * Pick the AMR values for the keys that the kernel is using; these
+ * are indicated by the ~default_uamor bits.
+ */
+ target->thread.regs->amr = (new_amr & default_uamor) |
+ (target->thread.regs->amr & ~default_uamor);
+
+ return 0;
+}
+#endif /* CONFIG_PPC_MEM_KEYS */
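
Editor's aside: the masking expression in pkey_set() above can be checked with plain 64-bit arithmetic. The stand-alone sketch below is illustrative only (not part of the patch); the key layout and the default_uamor/AMR values are made up. It shows that only the bits enabled in default_uamor are taken from the tracer-supplied value, while kernel-owned key bits keep their current AMR contents.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical values: pkey 1 is user-modifiable, pkey 2 is kernel-owned. */
	uint64_t default_uamor = 0x3000000000000000ULL; /* UAMOR: 0b11 for key 1 only   */
	uint64_t cur_amr       = 0x0C00000000000000ULL; /* current AMR: key 2 bits set  */
	uint64_t new_amr       = 0xFC00000000000000ULL; /* tracer asks for keys 1 and 2 */

	/* Same expression as pkey_set() above. */
	uint64_t amr = (new_amr & default_uamor) | (cur_amr & ~default_uamor);

	/* Prints 0x3c00000000000000: key 1 taken from new_amr, key 2 left as-is. */
	printf("resulting AMR: 0x%016llx\n", (unsigned long long)amr);
	return 0;
}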
+
+static const struct user_regset native_regsets[] = {
+ [REGSET_GPR] = {
+ .core_note_type = NT_PRSTATUS, .n = ELF_NGREG,
+ .size = sizeof(long), .align = sizeof(long),
+ .regset_get = gpr_get, .set = gpr_set
+ },
+ [REGSET_FPR] = {
+ .core_note_type = NT_PRFPREG, .n = ELF_NFPREG,
+ .size = sizeof(double), .align = sizeof(double),
+ .regset_get = fpr_get, .set = fpr_set
+ },
+#ifdef CONFIG_ALTIVEC
+ [REGSET_VMX] = {
+ .core_note_type = NT_PPC_VMX, .n = 34,
+ .size = sizeof(vector128), .align = sizeof(vector128),
+ .active = vr_active, .regset_get = vr_get, .set = vr_set
+ },
+#endif
+#ifdef CONFIG_VSX
+ [REGSET_VSX] = {
+ .core_note_type = NT_PPC_VSX, .n = 32,
+ .size = sizeof(double), .align = sizeof(double),
+ .active = vsr_active, .regset_get = vsr_get, .set = vsr_set
+ },
+#endif
+#ifdef CONFIG_SPE
+ [REGSET_SPE] = {
+ .core_note_type = NT_PPC_SPE, .n = 35,
+ .size = sizeof(u32), .align = sizeof(u32),
+ .active = evr_active, .regset_get = evr_get, .set = evr_set
+ },
+#endif
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ [REGSET_TM_CGPR] = {
+ .core_note_type = NT_PPC_TM_CGPR, .n = ELF_NGREG,
+ .size = sizeof(long), .align = sizeof(long),
+ .active = tm_cgpr_active, .regset_get = tm_cgpr_get, .set = tm_cgpr_set
+ },
+ [REGSET_TM_CFPR] = {
+ .core_note_type = NT_PPC_TM_CFPR, .n = ELF_NFPREG,
+ .size = sizeof(double), .align = sizeof(double),
+ .active = tm_cfpr_active, .regset_get = tm_cfpr_get, .set = tm_cfpr_set
+ },
+ [REGSET_TM_CVMX] = {
+ .core_note_type = NT_PPC_TM_CVMX, .n = ELF_NVMX,
+ .size = sizeof(vector128), .align = sizeof(vector128),
+ .active = tm_cvmx_active, .regset_get = tm_cvmx_get, .set = tm_cvmx_set
+ },
+ [REGSET_TM_CVSX] = {
+ .core_note_type = NT_PPC_TM_CVSX, .n = ELF_NVSX,
+ .size = sizeof(double), .align = sizeof(double),
+ .active = tm_cvsx_active, .regset_get = tm_cvsx_get, .set = tm_cvsx_set
+ },
+ [REGSET_TM_SPR] = {
+ .core_note_type = NT_PPC_TM_SPR, .n = ELF_NTMSPRREG,
+ .size = sizeof(u64), .align = sizeof(u64),
+ .active = tm_spr_active, .regset_get = tm_spr_get, .set = tm_spr_set
+ },
+ [REGSET_TM_CTAR] = {
+ .core_note_type = NT_PPC_TM_CTAR, .n = 1,
+ .size = sizeof(u64), .align = sizeof(u64),
+ .active = tm_tar_active, .regset_get = tm_tar_get, .set = tm_tar_set
+ },
+ [REGSET_TM_CPPR] = {
+ .core_note_type = NT_PPC_TM_CPPR, .n = 1,
+ .size = sizeof(u64), .align = sizeof(u64),
+ .active = tm_ppr_active, .regset_get = tm_ppr_get, .set = tm_ppr_set
+ },
+ [REGSET_TM_CDSCR] = {
+ .core_note_type = NT_PPC_TM_CDSCR, .n = 1,
+ .size = sizeof(u64), .align = sizeof(u64),
+ .active = tm_dscr_active, .regset_get = tm_dscr_get, .set = tm_dscr_set
+ },
+#endif
+#ifdef CONFIG_PPC64
+ [REGSET_PPR] = {
+ .core_note_type = NT_PPC_PPR, .n = 1,
+ .size = sizeof(u64), .align = sizeof(u64),
+ .regset_get = ppr_get, .set = ppr_set
+ },
+ [REGSET_DSCR] = {
+ .core_note_type = NT_PPC_DSCR, .n = 1,
+ .size = sizeof(u64), .align = sizeof(u64),
+ .regset_get = dscr_get, .set = dscr_set
+ },
+#endif
+#ifdef CONFIG_PPC_BOOK3S_64
+ [REGSET_TAR] = {
+ .core_note_type = NT_PPC_TAR, .n = 1,
+ .size = sizeof(u64), .align = sizeof(u64),
+ .regset_get = tar_get, .set = tar_set
+ },
+ [REGSET_EBB] = {
+ .core_note_type = NT_PPC_EBB, .n = ELF_NEBB,
+ .size = sizeof(u64), .align = sizeof(u64),
+ .active = ebb_active, .regset_get = ebb_get, .set = ebb_set
+ },
+ [REGSET_PMR] = {
+ .core_note_type = NT_PPC_PMU, .n = ELF_NPMU,
+ .size = sizeof(u64), .align = sizeof(u64),
+ .active = pmu_active, .regset_get = pmu_get, .set = pmu_set
+ },
+#endif
+#ifdef CONFIG_PPC_MEM_KEYS
+ [REGSET_PKEY] = {
+ .core_note_type = NT_PPC_PKEY, .n = ELF_NPKEY,
+ .size = sizeof(u64), .align = sizeof(u64),
+ .active = pkey_active, .regset_get = pkey_get, .set = pkey_set
+ },
+#endif
+};
+
+const struct user_regset_view user_ppc_native_view = {
+ .name = UTS_MACHINE, .e_machine = ELF_ARCH, .ei_osabi = ELF_OSABI,
+ .regsets = native_regsets, .n = ARRAY_SIZE(native_regsets)
+};
+
+#include <linux/compat.h>
+
+int gpr32_get_common(struct task_struct *target,
+ const struct user_regset *regset,
+ struct membuf to, unsigned long *regs)
+{
+ int i;
+
+ for (i = 0; i < PT_MSR; i++)
+ membuf_store(&to, (u32)regs[i]);
+ membuf_store(&to, (u32)get_user_msr(target));
+ for (i++ ; i < PT_REGS_COUNT; i++)
+ membuf_store(&to, (u32)regs[i]);
+ return membuf_zero(&to, (ELF_NGREG - PT_REGS_COUNT) * sizeof(u32));
+}
+
+int gpr32_set_common(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf,
+ unsigned long *regs)
+{
+ const compat_ulong_t *k = kbuf;
+ const compat_ulong_t __user *u = ubuf;
+ compat_ulong_t reg;
+
+ if (!kbuf && !user_read_access_begin(u, count))
+ return -EFAULT;
+
+ pos /= sizeof(reg);
+ count /= sizeof(reg);
+
+ if (kbuf)
+ for (; count > 0 && pos < PT_MSR; --count)
+ regs[pos++] = *k++;
+ else
+ for (; count > 0 && pos < PT_MSR; --count) {
+ unsafe_get_user(reg, u++, Efault);
+ regs[pos++] = reg;
+ }
+
+ if (count > 0 && pos == PT_MSR) {
+ if (kbuf)
+ reg = *k++;
+ else
+ unsafe_get_user(reg, u++, Efault);
+ set_user_msr(target, reg);
+ ++pos;
+ --count;
+ }
+
+ if (kbuf) {
+ for (; count > 0 && pos <= PT_MAX_PUT_REG; --count)
+ regs[pos++] = *k++;
+ for (; count > 0 && pos < PT_TRAP; --count, ++pos)
+ ++k;
+ } else {
+ for (; count > 0 && pos <= PT_MAX_PUT_REG; --count) {
+ unsafe_get_user(reg, u++, Efault);
+ regs[pos++] = reg;
+ }
+ for (; count > 0 && pos < PT_TRAP; --count, ++pos)
+ unsafe_get_user(reg, u++, Efault);
+ }
+
+ if (count > 0 && pos == PT_TRAP) {
+ if (kbuf)
+ reg = *k++;
+ else
+ unsafe_get_user(reg, u++, Efault);
+ set_user_trap(target, reg);
+ ++pos;
+ --count;
+ }
+ if (!kbuf)
+ user_read_access_end();
+
+ kbuf = k;
+ ubuf = u;
+ pos *= sizeof(reg);
+ count *= sizeof(reg);
+ return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
+ (PT_TRAP + 1) * sizeof(reg), -1);
+
+Efault:
+ user_read_access_end();
+ return -EFAULT;
+}
+
+static int gpr32_get(struct task_struct *target,
+ const struct user_regset *regset,
+ struct membuf to)
+{
+ if (target->thread.regs == NULL)
+ return -EIO;
+
+ return gpr32_get_common(target, regset, to,
+ &target->thread.regs->gpr[0]);
+}
+
+static int gpr32_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ if (target->thread.regs == NULL)
+ return -EIO;
+
+ return gpr32_set_common(target, regset, pos, count, kbuf, ubuf,
+ &target->thread.regs->gpr[0]);
+}
+
+/*
+ * These are the regset flavors matching the CONFIG_PPC32 native set.
+ */
+static const struct user_regset compat_regsets[] = {
+ [REGSET_GPR] = {
+ .core_note_type = NT_PRSTATUS, .n = ELF_NGREG,
+ .size = sizeof(compat_long_t), .align = sizeof(compat_long_t),
+ .regset_get = gpr32_get, .set = gpr32_set
+ },
+ [REGSET_FPR] = {
+ .core_note_type = NT_PRFPREG, .n = ELF_NFPREG,
+ .size = sizeof(double), .align = sizeof(double),
+ .regset_get = fpr_get, .set = fpr_set
+ },
+#ifdef CONFIG_ALTIVEC
+ [REGSET_VMX] = {
+ .core_note_type = NT_PPC_VMX, .n = 34,
+ .size = sizeof(vector128), .align = sizeof(vector128),
+ .active = vr_active, .regset_get = vr_get, .set = vr_set
+ },
+#endif
+#ifdef CONFIG_SPE
+ [REGSET_SPE] = {
+ .core_note_type = NT_PPC_SPE, .n = 35,
+ .size = sizeof(u32), .align = sizeof(u32),
+ .active = evr_active, .regset_get = evr_get, .set = evr_set
+ },
+#endif
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ [REGSET_TM_CGPR] = {
+ .core_note_type = NT_PPC_TM_CGPR, .n = ELF_NGREG,
+ .size = sizeof(long), .align = sizeof(long),
+ .active = tm_cgpr_active,
+ .regset_get = tm_cgpr32_get, .set = tm_cgpr32_set
+ },
+ [REGSET_TM_CFPR] = {
+ .core_note_type = NT_PPC_TM_CFPR, .n = ELF_NFPREG,
+ .size = sizeof(double), .align = sizeof(double),
+ .active = tm_cfpr_active, .regset_get = tm_cfpr_get, .set = tm_cfpr_set
+ },
+ [REGSET_TM_CVMX] = {
+ .core_note_type = NT_PPC_TM_CVMX, .n = ELF_NVMX,
+ .size = sizeof(vector128), .align = sizeof(vector128),
+ .active = tm_cvmx_active, .regset_get = tm_cvmx_get, .set = tm_cvmx_set
+ },
+ [REGSET_TM_CVSX] = {
+ .core_note_type = NT_PPC_TM_CVSX, .n = ELF_NVSX,
+ .size = sizeof(double), .align = sizeof(double),
+ .active = tm_cvsx_active, .regset_get = tm_cvsx_get, .set = tm_cvsx_set
+ },
+ [REGSET_TM_SPR] = {
+ .core_note_type = NT_PPC_TM_SPR, .n = ELF_NTMSPRREG,
+ .size = sizeof(u64), .align = sizeof(u64),
+ .active = tm_spr_active, .regset_get = tm_spr_get, .set = tm_spr_set
+ },
+ [REGSET_TM_CTAR] = {
+ .core_note_type = NT_PPC_TM_CTAR, .n = 1,
+ .size = sizeof(u64), .align = sizeof(u64),
+ .active = tm_tar_active, .regset_get = tm_tar_get, .set = tm_tar_set
+ },
+ [REGSET_TM_CPPR] = {
+ .core_note_type = NT_PPC_TM_CPPR, .n = 1,
+ .size = sizeof(u64), .align = sizeof(u64),
+ .active = tm_ppr_active, .regset_get = tm_ppr_get, .set = tm_ppr_set
+ },
+ [REGSET_TM_CDSCR] = {
+ .core_note_type = NT_PPC_TM_CDSCR, .n = 1,
+ .size = sizeof(u64), .align = sizeof(u64),
+ .active = tm_dscr_active, .regset_get = tm_dscr_get, .set = tm_dscr_set
+ },
+#endif
+#ifdef CONFIG_PPC64
+ [REGSET_PPR] = {
+ .core_note_type = NT_PPC_PPR, .n = 1,
+ .size = sizeof(u64), .align = sizeof(u64),
+ .regset_get = ppr_get, .set = ppr_set
+ },
+ [REGSET_DSCR] = {
+ .core_note_type = NT_PPC_DSCR, .n = 1,
+ .size = sizeof(u64), .align = sizeof(u64),
+ .regset_get = dscr_get, .set = dscr_set
+ },
+#endif
+#ifdef CONFIG_PPC_BOOK3S_64
+ [REGSET_TAR] = {
+ .core_note_type = NT_PPC_TAR, .n = 1,
+ .size = sizeof(u64), .align = sizeof(u64),
+ .regset_get = tar_get, .set = tar_set
+ },
+ [REGSET_EBB] = {
+ .core_note_type = NT_PPC_EBB, .n = ELF_NEBB,
+ .size = sizeof(u64), .align = sizeof(u64),
+ .active = ebb_active, .regset_get = ebb_get, .set = ebb_set
+ },
+#endif
+};
+
+static const struct user_regset_view user_ppc_compat_view = {
+ .name = "ppc", .e_machine = EM_PPC, .ei_osabi = ELF_OSABI,
+ .regsets = compat_regsets, .n = ARRAY_SIZE(compat_regsets)
+};
+
+const struct user_regset_view *task_user_regset_view(struct task_struct *task)
+{
+ if (IS_ENABLED(CONFIG_COMPAT) && is_tsk_32bit_task(task))
+ return &user_ppc_compat_view;
+ return &user_ppc_native_view;
+}
diff --git a/arch/powerpc/kernel/ptrace/ptrace-vsx.c b/arch/powerpc/kernel/ptrace/ptrace-vsx.c
new file mode 100644
index 000000000000..7df08004c47d
--- /dev/null
+++ b/arch/powerpc/kernel/ptrace/ptrace-vsx.c
@@ -0,0 +1,148 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/regset.h>
+
+#include <asm/switch_to.h>
+
+#include "ptrace-decl.h"
+
+/*
+ * Regardless of transactions, 'fp_state' holds the current running
+ * value of all FPR registers and 'ckfp_state' holds the last checkpointed
+ * value of all FPR registers for the current transaction.
+ *
+ * Userspace interface buffer layout:
+ *
+ * struct data {
+ * u64 fpr[32];
+ * u64 fpscr;
+ * };
+ */
+int fpr_get(struct task_struct *target, const struct user_regset *regset,
+ struct membuf to)
+{
+ u64 buf[33];
+ int i;
+
+ flush_fp_to_thread(target);
+
+ /* copy to local buffer then write that out */
+ for (i = 0; i < 32 ; i++)
+ buf[i] = target->thread.TS_FPR(i);
+ buf[32] = target->thread.fp_state.fpscr;
+ return membuf_write(&to, buf, 33 * sizeof(u64));
+}
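
Editor's note: a tracer consumes this regset through the generic PTRACE_GETREGSET interface. The stand-alone sketch below is illustrative only (not part of the patch; the struct and helper names are made up) and assumes the target task is already ptrace-attached and stopped.

#include <elf.h>		/* NT_PRFPREG */
#include <stdint.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>

struct ppc_fp_data {		/* matches the layout documented above */
	uint64_t fpr[32];
	uint64_t fpscr;
};

static int dump_fpscr(pid_t pid)
{
	struct ppc_fp_data fp;
	struct iovec iov = { .iov_base = &fp, .iov_len = sizeof(fp) };

	if (ptrace(PTRACE_GETREGSET, pid, (void *)NT_PRFPREG, &iov) == -1) {
		perror("PTRACE_GETREGSET");
		return -1;
	}
	printf("fpscr = 0x%016llx\n", (unsigned long long)fp.fpscr);
	return 0;
}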
+
+/*
+ * Regardless of transactions, 'fp_state' holds the current running
+ * value of all FPR registers and 'ckfp_state' holds the last checkpointed
+ * value of all FPR registers for the current transaction.
+ *
+ * Userspace interface buffer layout:
+ *
+ * struct data {
+ * u64 fpr[32];
+ * u64 fpscr;
+ * };
+ *
+ */
+int fpr_set(struct task_struct *target, const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ u64 buf[33];
+ int i;
+
+ flush_fp_to_thread(target);
+
+ for (i = 0; i < 32 ; i++)
+ buf[i] = target->thread.TS_FPR(i);
+ buf[32] = target->thread.fp_state.fpscr;
+
+ /* copy to local buffer then write that out */
+ i = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
+ if (i)
+ return i;
+
+ for (i = 0; i < 32 ; i++)
+ target->thread.TS_FPR(i) = buf[i];
+ target->thread.fp_state.fpscr = buf[32];
+ return 0;
+}
+
+/*
+ * Currently, to get and set all of the VSX state, you need to call
+ * the FP and VMX functions as well. This only gets/sets the lower 32
+ * 128-bit VSX registers.
+ */
+
+int vsr_active(struct task_struct *target, const struct user_regset *regset)
+{
+ flush_vsx_to_thread(target);
+ return target->thread.used_vsr ? regset->n : 0;
+}
+
+/*
+ * Regardless of transactions, 'fp_state' holds the current running
+ * value of all FPR registers and 'ckfp_state' holds the last
+ * checkpointed value of all FPR registers for the current
+ * transaction.
+ *
+ * Userspace interface buffer layout:
+ *
+ * struct data {
+ * u64 vsx[32];
+ * };
+ */
+int vsr_get(struct task_struct *target, const struct user_regset *regset,
+ struct membuf to)
+{
+ u64 buf[32];
+ int i;
+
+ flush_tmregs_to_thread(target);
+ flush_fp_to_thread(target);
+ flush_altivec_to_thread(target);
+ flush_vsx_to_thread(target);
+
+ for (i = 0; i < 32 ; i++)
+ buf[i] = target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET];
+
+ return membuf_write(&to, buf, 32 * sizeof(double));
+}
+
+/*
+ * Regardless of transactions, 'fp_state' holds the current running
+ * value of all FPR registers and 'ckfp_state' holds the last
+ * checkpointed value of all FPR registers for the current
+ * transaction.
+ *
+ * Userspace interface buffer layout:
+ *
+ * struct data {
+ * u64 vsx[32];
+ * };
+ */
+int vsr_set(struct task_struct *target, const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ u64 buf[32];
+ int ret, i;
+
+ flush_tmregs_to_thread(target);
+ flush_fp_to_thread(target);
+ flush_altivec_to_thread(target);
+ flush_vsx_to_thread(target);
+
+ for (i = 0; i < 32 ; i++)
+ buf[i] = target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET];
+
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ buf, 0, 32 * sizeof(double));
+ if (!ret)
+ for (i = 0; i < 32 ; i++)
+ target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];
+
+ return ret;
+}
diff --git a/arch/powerpc/kernel/ptrace/ptrace.c b/arch/powerpc/kernel/ptrace/ptrace.c
new file mode 100644
index 000000000000..5d7a72b41ae7
--- /dev/null
+++ b/arch/powerpc/kernel/ptrace/ptrace.c
@@ -0,0 +1,450 @@
+/*
+ * PowerPC version
+ * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
+ *
+ * Derived from "arch/m68k/kernel/ptrace.c"
+ * Copyright (C) 1994 by Hamish Macdonald
+ * Taken from linux/kernel/ptrace.c and modified for M680x0.
+ * linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds
+ *
+ * Modified by Cort Dougan (cort@hq.fsmlabs.com)
+ * and Paul Mackerras (paulus@samba.org).
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file README.legal in the main directory of
+ * this archive for more details.
+ */
+
+#include <linux/regset.h>
+#include <linux/ptrace.h>
+#include <linux/audit.h>
+#include <linux/context_tracking.h>
+#include <linux/syscalls.h>
+
+#include <asm/switch_to.h>
+#include <asm/debug.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/syscalls.h>
+
+#include "ptrace-decl.h"
+
+/*
+ * Called by kernel/ptrace.c when detaching..
+ *
+ * Make sure single step bits etc are not set.
+ */
+void ptrace_disable(struct task_struct *child)
+{
+ /* make sure the single step bit is not set. */
+ user_disable_single_step(child);
+}
+
+long arch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
+{
+ int ret = -EPERM;
+ void __user *datavp = (void __user *) data;
+ unsigned long __user *datalp = datavp;
+
+ switch (request) {
+ /* read the word at location addr in the USER area. */
+ case PTRACE_PEEKUSR: {
+ unsigned long index, tmp;
+
+ ret = -EIO;
+ /* convert to index and check */
+ index = addr / sizeof(long);
+ if ((addr & (sizeof(long) - 1)) || !child->thread.regs)
+ break;
+
+ if (index < PT_FPR0)
+ ret = ptrace_get_reg(child, (int) index, &tmp);
+ else
+ ret = ptrace_get_fpr(child, index, &tmp);
+
+ if (ret)
+ break;
+ ret = put_user(tmp, datalp);
+ break;
+ }
+
+ /* write the word at location addr in the USER area */
+ case PTRACE_POKEUSR: {
+ unsigned long index;
+
+ ret = -EIO;
+ /* convert to index and check */
+ index = addr / sizeof(long);
+ if ((addr & (sizeof(long) - 1)) || !child->thread.regs)
+ break;
+
+ if (index < PT_FPR0)
+ ret = ptrace_put_reg(child, index, data);
+ else
+ ret = ptrace_put_fpr(child, index, data);
+ break;
+ }
+
+ case PPC_PTRACE_GETHWDBGINFO: {
+ struct ppc_debug_info dbginfo;
+
+ ppc_gethwdinfo(&dbginfo);
+
+ if (copy_to_user(datavp, &dbginfo,
+ sizeof(struct ppc_debug_info)))
+ return -EFAULT;
+ return 0;
+ }
+
+ case PPC_PTRACE_SETHWDEBUG: {
+ struct ppc_hw_breakpoint bp_info;
+
+ if (copy_from_user(&bp_info, datavp,
+ sizeof(struct ppc_hw_breakpoint)))
+ return -EFAULT;
+ return ppc_set_hwdebug(child, &bp_info);
+ }
+
+ case PPC_PTRACE_DELHWDEBUG: {
+ ret = ppc_del_hwdebug(child, data);
+ break;
+ }
+
+ case PTRACE_GET_DEBUGREG:
+ ret = ptrace_get_debugreg(child, addr, datalp);
+ break;
+
+ case PTRACE_SET_DEBUGREG:
+ ret = ptrace_set_debugreg(child, addr, data);
+ break;
+
+#ifdef CONFIG_PPC64
+ case PTRACE_GETREGS64:
+#endif
+ case PTRACE_GETREGS: /* Get all pt_regs from the child. */
+ return copy_regset_to_user(child, &user_ppc_native_view,
+ REGSET_GPR,
+ 0, sizeof(struct user_pt_regs),
+ datavp);
+
+#ifdef CONFIG_PPC64
+ case PTRACE_SETREGS64:
+#endif
+ case PTRACE_SETREGS: /* Set all gp regs in the child. */
+ return copy_regset_from_user(child, &user_ppc_native_view,
+ REGSET_GPR,
+ 0, sizeof(struct user_pt_regs),
+ datavp);
+
+ case PTRACE_GETFPREGS: /* Get the child FPU state (FPR0...31 + FPSCR) */
+ return copy_regset_to_user(child, &user_ppc_native_view,
+ REGSET_FPR,
+ 0, sizeof(elf_fpregset_t),
+ datavp);
+
+ case PTRACE_SETFPREGS: /* Set the child FPU state (FPR0...31 + FPSCR) */
+ return copy_regset_from_user(child, &user_ppc_native_view,
+ REGSET_FPR,
+ 0, sizeof(elf_fpregset_t),
+ datavp);
+
+#ifdef CONFIG_ALTIVEC
+ case PTRACE_GETVRREGS:
+ return copy_regset_to_user(child, &user_ppc_native_view,
+ REGSET_VMX,
+ 0, (33 * sizeof(vector128) +
+ sizeof(u32)),
+ datavp);
+
+ case PTRACE_SETVRREGS:
+ return copy_regset_from_user(child, &user_ppc_native_view,
+ REGSET_VMX,
+ 0, (33 * sizeof(vector128) +
+ sizeof(u32)),
+ datavp);
+#endif
+#ifdef CONFIG_VSX
+ case PTRACE_GETVSRREGS:
+ return copy_regset_to_user(child, &user_ppc_native_view,
+ REGSET_VSX,
+ 0, 32 * sizeof(double),
+ datavp);
+
+ case PTRACE_SETVSRREGS:
+ return copy_regset_from_user(child, &user_ppc_native_view,
+ REGSET_VSX,
+ 0, 32 * sizeof(double),
+ datavp);
+#endif
+#ifdef CONFIG_SPE
+ case PTRACE_GETEVRREGS:
+ /* Get the child spe register state. */
+ return copy_regset_to_user(child, &user_ppc_native_view,
+ REGSET_SPE, 0, 35 * sizeof(u32),
+ datavp);
+
+ case PTRACE_SETEVRREGS:
+ /* Set the child spe register state. */
+ return copy_regset_from_user(child, &user_ppc_native_view,
+ REGSET_SPE, 0, 35 * sizeof(u32),
+ datavp);
+#endif
+
+ default:
+ ret = ptrace_request(child, request, addr, data);
+ break;
+ }
+ return ret;
+}
+
+#ifdef CONFIG_SECCOMP
+static int do_seccomp(struct pt_regs *regs)
+{
+ if (!test_thread_flag(TIF_SECCOMP))
+ return 0;
+
+ /*
+ * The ABI we present to seccomp tracers is that r3 contains
+ * the syscall return value and orig_gpr3 contains the first
+ * syscall parameter. This is different to the ptrace ABI where
+ * both r3 and orig_gpr3 contain the first syscall parameter.
+ */
+ regs->gpr[3] = -ENOSYS;
+
+ /*
+ * We use the __ version here because we have already checked
+ * TIF_SECCOMP. If this fails, there is nothing left to do, we
+ * have already loaded -ENOSYS into r3, or seccomp has put
+ * something else in r3 (via SECCOMP_RET_ERRNO/TRACE).
+ */
+ if (__secure_computing(NULL))
+ return -1;
+
+ /*
+ * The syscall was allowed by seccomp, restore the register
+ * state to what audit expects.
+ * Note that we use orig_gpr3, which means a seccomp tracer can
+ * modify the first syscall parameter (in orig_gpr3) and also
+ * allow the syscall to proceed.
+ */
+ regs->gpr[3] = regs->orig_gpr3;
+
+ return 0;
+}
+#else
+static inline int do_seccomp(struct pt_regs *regs) { return 0; }
+#endif /* CONFIG_SECCOMP */
+
+/**
+ * do_syscall_trace_enter() - Do syscall tracing on kernel entry.
+ * @regs: the pt_regs of the task to trace (current)
+ *
+ * Performs various types of tracing on syscall entry. This includes seccomp,
+ * ptrace, syscall tracepoints and audit.
+ *
+ * The pt_regs are potentially visible to userspace via ptrace, so their
+ * contents are ABI.
+ *
+ * One or more of the tracers may modify the contents of pt_regs, in particular
+ * to modify arguments or even the syscall number itself.
+ *
+ * It's also possible that a tracer can choose to reject the system call. In
+ * that case this function will return an illegal syscall number, and will put
+ * an appropriate return value in regs->gpr[3].
+ *
+ * Return: the (possibly changed) syscall number.
+ */
+long do_syscall_trace_enter(struct pt_regs *regs)
+{
+ u32 flags;
+
+ flags = read_thread_flags() & (_TIF_SYSCALL_EMU | _TIF_SYSCALL_TRACE);
+
+ if (flags) {
+ int rc = ptrace_report_syscall_entry(regs);
+
+ if (unlikely(flags & _TIF_SYSCALL_EMU)) {
+ /*
+ * A nonzero return code from
+ * ptrace_report_syscall_entry() tells us to prevent
+ * the syscall execution, but we are not going to
+ * execute it anyway.
+ *
+ * Returning -1 will skip the syscall execution. We want
+ * to avoid clobbering any registers, so we don't goto
+ * the skip label below.
+ */
+ return -1;
+ }
+
+ if (rc) {
+ /*
+ * The tracer decided to abort the syscall. Note that
+ * the tracer may also just change regs->gpr[0] to an
+ * invalid syscall number, that is handled below on the
+ * exit path.
+ */
+ goto skip;
+ }
+ }
+
+ /* Run seccomp after ptrace; allow it to set gpr[3]. */
+ if (do_seccomp(regs))
+ return -1;
+
+ /* Avoid trace and audit when syscall is invalid. */
+ if (regs->gpr[0] >= NR_syscalls)
+ goto skip;
+
+ if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
+ trace_sys_enter(regs, regs->gpr[0]);
+
+ if (!is_32bit_task())
+ audit_syscall_entry(regs->gpr[0], regs->gpr[3], regs->gpr[4],
+ regs->gpr[5], regs->gpr[6]);
+ else
+ audit_syscall_entry(regs->gpr[0],
+ regs->gpr[3] & 0xffffffff,
+ regs->gpr[4] & 0xffffffff,
+ regs->gpr[5] & 0xffffffff,
+ regs->gpr[6] & 0xffffffff);
+
+ /* Return the possibly modified but valid syscall number */
+ return regs->gpr[0];
+
+skip:
+ /*
+ * If we are aborting explicitly, or if the syscall number is
+ * now invalid, set the return value to -ENOSYS.
+ */
+ regs->gpr[3] = -ENOSYS;
+ return -1;
+}
+
+void do_syscall_trace_leave(struct pt_regs *regs)
+{
+ int step;
+
+ audit_syscall_exit(regs);
+
+ if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
+ trace_sys_exit(regs, regs->result);
+
+ step = test_thread_flag(TIF_SINGLESTEP);
+ if (step || test_thread_flag(TIF_SYSCALL_TRACE))
+ ptrace_report_syscall_exit(regs, step);
+}
+
+void __init pt_regs_check(void);
+
+/*
+ * Dummy function; its purpose is to break the build if struct pt_regs and
+ * struct user_pt_regs don't match.
+ */
+void __init pt_regs_check(void)
+{
+ BUILD_BUG_ON(offsetof(struct pt_regs, gpr) !=
+ offsetof(struct user_pt_regs, gpr));
+ BUILD_BUG_ON(offsetof(struct pt_regs, nip) !=
+ offsetof(struct user_pt_regs, nip));
+ BUILD_BUG_ON(offsetof(struct pt_regs, msr) !=
+ offsetof(struct user_pt_regs, msr));
+ BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
+ offsetof(struct user_pt_regs, orig_gpr3));
+ BUILD_BUG_ON(offsetof(struct pt_regs, ctr) !=
+ offsetof(struct user_pt_regs, ctr));
+ BUILD_BUG_ON(offsetof(struct pt_regs, link) !=
+ offsetof(struct user_pt_regs, link));
+ BUILD_BUG_ON(offsetof(struct pt_regs, xer) !=
+ offsetof(struct user_pt_regs, xer));
+ BUILD_BUG_ON(offsetof(struct pt_regs, ccr) !=
+ offsetof(struct user_pt_regs, ccr));
+#ifdef __powerpc64__
+ BUILD_BUG_ON(offsetof(struct pt_regs, softe) !=
+ offsetof(struct user_pt_regs, softe));
+#else
+ BUILD_BUG_ON(offsetof(struct pt_regs, mq) !=
+ offsetof(struct user_pt_regs, mq));
+#endif
+ BUILD_BUG_ON(offsetof(struct pt_regs, trap) !=
+ offsetof(struct user_pt_regs, trap));
+ BUILD_BUG_ON(offsetof(struct pt_regs, dar) !=
+ offsetof(struct user_pt_regs, dar));
+ BUILD_BUG_ON(offsetof(struct pt_regs, dear) !=
+ offsetof(struct user_pt_regs, dar));
+ BUILD_BUG_ON(offsetof(struct pt_regs, dsisr) !=
+ offsetof(struct user_pt_regs, dsisr));
+ BUILD_BUG_ON(offsetof(struct pt_regs, esr) !=
+ offsetof(struct user_pt_regs, dsisr));
+ BUILD_BUG_ON(offsetof(struct pt_regs, result) !=
+ offsetof(struct user_pt_regs, result));
+
+ BUILD_BUG_ON(sizeof(struct user_pt_regs) > sizeof(struct pt_regs));
+
+ // Now check that the pt_regs offsets match the uapi #defines
+ #define CHECK_REG(_pt, _reg) \
+ BUILD_BUG_ON(_pt != (offsetof(struct user_pt_regs, _reg) / \
+ sizeof(unsigned long)));
+
+ CHECK_REG(PT_R0, gpr[0]);
+ CHECK_REG(PT_R1, gpr[1]);
+ CHECK_REG(PT_R2, gpr[2]);
+ CHECK_REG(PT_R3, gpr[3]);
+ CHECK_REG(PT_R4, gpr[4]);
+ CHECK_REG(PT_R5, gpr[5]);
+ CHECK_REG(PT_R6, gpr[6]);
+ CHECK_REG(PT_R7, gpr[7]);
+ CHECK_REG(PT_R8, gpr[8]);
+ CHECK_REG(PT_R9, gpr[9]);
+ CHECK_REG(PT_R10, gpr[10]);
+ CHECK_REG(PT_R11, gpr[11]);
+ CHECK_REG(PT_R12, gpr[12]);
+ CHECK_REG(PT_R13, gpr[13]);
+ CHECK_REG(PT_R14, gpr[14]);
+ CHECK_REG(PT_R15, gpr[15]);
+ CHECK_REG(PT_R16, gpr[16]);
+ CHECK_REG(PT_R17, gpr[17]);
+ CHECK_REG(PT_R18, gpr[18]);
+ CHECK_REG(PT_R19, gpr[19]);
+ CHECK_REG(PT_R20, gpr[20]);
+ CHECK_REG(PT_R21, gpr[21]);
+ CHECK_REG(PT_R22, gpr[22]);
+ CHECK_REG(PT_R23, gpr[23]);
+ CHECK_REG(PT_R24, gpr[24]);
+ CHECK_REG(PT_R25, gpr[25]);
+ CHECK_REG(PT_R26, gpr[26]);
+ CHECK_REG(PT_R27, gpr[27]);
+ CHECK_REG(PT_R28, gpr[28]);
+ CHECK_REG(PT_R29, gpr[29]);
+ CHECK_REG(PT_R30, gpr[30]);
+ CHECK_REG(PT_R31, gpr[31]);
+ CHECK_REG(PT_NIP, nip);
+ CHECK_REG(PT_MSR, msr);
+ CHECK_REG(PT_ORIG_R3, orig_gpr3);
+ CHECK_REG(PT_CTR, ctr);
+ CHECK_REG(PT_LNK, link);
+ CHECK_REG(PT_XER, xer);
+ CHECK_REG(PT_CCR, ccr);
+#ifdef CONFIG_PPC64
+ CHECK_REG(PT_SOFTE, softe);
+#else
+ CHECK_REG(PT_MQ, mq);
+#endif
+ CHECK_REG(PT_TRAP, trap);
+ CHECK_REG(PT_DAR, dar);
+ CHECK_REG(PT_DSISR, dsisr);
+ CHECK_REG(PT_RESULT, result);
+ #undef CHECK_REG
+
+ BUILD_BUG_ON(PT_REGS_COUNT != sizeof(struct user_pt_regs) / sizeof(unsigned long));
+
+ /*
+ * PT_DSCR isn't a real reg, but it's important that it doesn't overlap the
+ * real registers.
+ */
+ BUILD_BUG_ON(PT_DSCR < sizeof(struct user_pt_regs) / sizeof(unsigned long));
+
+ // ptrace_get/put_fpr() rely on PPC32 and VSX being incompatible
+ BUILD_BUG_ON(IS_ENABLED(CONFIG_PPC32) && IS_ENABLED(CONFIG_VSX));
+}
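
Editor's note: the PT_* indexes validated above are the offsets a debugger passes to PTRACE_PEEKUSER/PTRACE_POKEUSER (see arch_ptrace() earlier in this file). A user-space sketch, illustrative only and not part of the patch; the exact header choices may need adjusting for a given libc.

#include <errno.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <asm/ptrace.h>		/* PT_MSR and friends */

/* Read the traced task's MSR; assumes the target is ptrace-stopped. */
static int peek_msr(pid_t pid, unsigned long *msr)
{
	errno = 0;
	*msr = ptrace(PTRACE_PEEKUSER, pid,
		      (void *)(PT_MSR * sizeof(unsigned long)), NULL);
	if (errno) {
		perror("PTRACE_PEEKUSER");
		return -1;
	}
	return 0;
}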
diff --git a/arch/powerpc/kernel/ptrace32.c b/arch/powerpc/kernel/ptrace/ptrace32.c
index f37eb53de1a1..19c224808982 100644
--- a/arch/powerpc/kernel/ptrace32.c
+++ b/arch/powerpc/kernel/ptrace/ptrace32.c
@@ -17,23 +17,14 @@
* this archive for more details.
*/
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/smp.h>
-#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/regset.h>
-#include <linux/user.h>
-#include <linux/security.h>
-#include <linux/signal.h>
#include <linux/compat.h>
-#include <linux/uaccess.h>
-#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/switch_to.h>
+#include "ptrace-decl.h"
+
/*
* does not yet catch signals sent when the child dies.
* in exit.c or in signal.c.
@@ -92,7 +83,6 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
if ((addr & 3) || (index > PT_FPSCR32))
break;
- CHECK_FULL_REGS(child->thread.regs);
if (index < PT_FPR0) {
ret = ptrace_get_reg(child, index, &tmp);
if (ret)
@@ -142,7 +132,6 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
if ((addr & 3) || numReg > PT_FPSCR)
break;
- CHECK_FULL_REGS(child->thread.regs);
if (numReg >= PT_FPR0) {
flush_fp_to_thread(child);
/* get 64 bit FPR */
@@ -196,7 +185,6 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
if ((addr & 3) || (index > PT_FPSCR32))
break;
- CHECK_FULL_REGS(child->thread.regs);
if (index < PT_FPR0) {
ret = ptrace_put_reg(child, index, data);
} else {
@@ -235,7 +223,6 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
*/
if ((addr & 3) || (numReg > PT_FPSCR))
break;
- CHECK_FULL_REGS(child->thread.regs);
if (numReg < PT_FPR0) {
unsigned long freg;
ret = ptrace_get_reg(child, numReg, &freg);
@@ -270,8 +257,8 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
ret = put_user(child->thread.debug.dac1, (u32 __user *)data);
#else
dabr_fake = (
- (child->thread.hw_brk.address & (~HW_BRK_TYPE_DABR)) |
- (child->thread.hw_brk.type & HW_BRK_TYPE_DABR));
+ (child->thread.hw_brk[0].address & (~HW_BRK_TYPE_DABR)) |
+ (child->thread.hw_brk[0].type & HW_BRK_TYPE_DABR));
ret = put_user(dabr_fake, (u32 __user *)data);
#endif
break;
diff --git a/arch/powerpc/kernel/reloc_32.S b/arch/powerpc/kernel/reloc_32.S
index 10e96f3e22fe..0508c14b4c28 100644
--- a/arch/powerpc/kernel/reloc_32.S
+++ b/arch/powerpc/kernel/reloc_32.S
@@ -30,7 +30,7 @@ R_PPC_RELATIVE = 22
_GLOBAL(relocate)
mflr r0 /* Save our LR */
- bl 0f /* Find our current runtime address */
+ bcl 20,31,$+4 /* Find our current runtime address */
0: mflr r12 /* Make it accessible */
mtlr r0
diff --git a/arch/powerpc/kernel/reloc_64.S b/arch/powerpc/kernel/reloc_64.S
index 02d4719bf43a..efd52f2e7033 100644
--- a/arch/powerpc/kernel/reloc_64.S
+++ b/arch/powerpc/kernel/reloc_64.S
@@ -8,8 +8,10 @@
#include <asm/ppc_asm.h>
RELA = 7
-RELACOUNT = 0x6ffffff9
+RELASZ = 8
+RELAENT = 9
R_PPC64_RELATIVE = 22
+R_PPC64_UADDR64 = 43
/*
* r3 = desired final address of kernel
@@ -25,29 +27,38 @@ _GLOBAL(relocate)
add r9,r9,r12 /* r9 has runtime addr of .rela.dyn section */
ld r10,(p_st - 0b)(r12)
add r10,r10,r12 /* r10 has runtime addr of _stext */
+ ld r4,(p_sym - 0b)(r12)
+ add r4,r4,r12 /* r4 has runtime addr of .dynsym */
/*
- * Scan the dynamic section for the RELA and RELACOUNT entries.
+ * Scan the dynamic section for the RELA, RELASZ and RELAENT entries.
*/
li r7,0
li r8,0
-1: ld r6,0(r11) /* get tag */
+.Ltags:
+ ld r6,0(r11) /* get tag */
cmpdi r6,0
- beq 4f /* end of list */
+ beq .Lend_of_list /* end of list */
cmpdi r6,RELA
bne 2f
ld r7,8(r11) /* get RELA pointer in r7 */
- b 3f
-2: addis r6,r6,(-RELACOUNT)@ha
- cmpdi r6,RELACOUNT@l
+ b 4f
+2: cmpdi r6,RELASZ
bne 3f
- ld r8,8(r11) /* get RELACOUNT value in r8 */
-3: addi r11,r11,16
- b 1b
-4: cmpdi r7,0 /* check we have both RELA and RELACOUNT */
+ ld r8,8(r11) /* get RELASZ value in r8 */
+ b 4f
+3: cmpdi r6,RELAENT
+ bne 4f
+ ld r12,8(r11) /* get RELAENT value in r12 */
+4: addi r11,r11,16
+ b .Ltags
+.Lend_of_list:
+ cmpdi r7,0 /* check we have RELA, RELASZ, RELAENT */
cmpdi cr1,r8,0
- beq 6f
- beq cr1,6f
+ beq .Lout
+ beq cr1,.Lout
+ cmpdi r12,0
+ beq .Lout
/*
* Work out linktime address of _stext and hence the
@@ -62,23 +73,39 @@ _GLOBAL(relocate)
/*
* Run through the list of relocations and process the
- * R_PPC64_RELATIVE ones.
+ * R_PPC64_RELATIVE and R_PPC64_UADDR64 ones.
*/
+ divd r8,r8,r12 /* RELASZ / RELAENT */
mtctr r8
-5: ld r0,8(9) /* ELF64_R_TYPE(reloc->r_info) */
+.Lrels: ld r0,8(r9) /* ELF64_R_TYPE(reloc->r_info) */
cmpdi r0,R_PPC64_RELATIVE
- bne 6f
+ bne .Luaddr64
ld r6,0(r9) /* reloc->r_offset */
ld r0,16(r9) /* reloc->r_addend */
+ b .Lstore
+.Luaddr64:
+ srdi r5,r0,32 /* ELF64_R_SYM(reloc->r_info) */
+ clrldi r0,r0,32
+ cmpdi r0,R_PPC64_UADDR64
+ bne .Lnext
+ ld r6,0(r9)
+ ld r0,16(r9)
+ mulli r5,r5,24 /* 24 == sizeof(elf64_sym) */
+ add r5,r5,r4 /* elf64_sym[ELF64_R_SYM] */
+ ld r5,8(r5)
+ add r0,r0,r5
+.Lstore:
add r0,r0,r3
stdx r0,r7,r6
- addi r9,r9,24
- bdnz 5b
-
-6: blr
+.Lnext:
+ add r9,r9,r12
+ bdnz .Lrels
+.Lout:
+ blr
.balign 8
p_dyn: .8byte __dynamic_start - 0b
p_rela: .8byte __rela_dyn_start - 0b
+p_sym: .8byte __dynamic_symtab - 0b
p_st: .8byte _stext - 0b
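
Editor's note: for readers who don't speak PowerPC assembly, the relocation loop added above is roughly equivalent to the C sketch below (simplified types, no error handling; the register-to-parameter mapping is my reading of the code, and the sketch is not part of the patch).

#include <stdint.h>

#define R_PPC64_RELATIVE	22
#define R_PPC64_UADDR64		43

struct rela { uint64_t r_offset, r_info; int64_t r_addend; };
struct dsym { uint64_t other_fields; uint64_t st_value; uint64_t st_size; }; /* 24-byte Elf64_Sym, simplified */

/*
 * final_offset: desired final address minus link-time address (r3, once adjusted)
 * cur_offset:   current runtime address minus link-time address (r7)
 */
static void apply_relocs(uint64_t final_offset, uint64_t cur_offset,
			 const struct rela *rela, uint64_t relasz, uint64_t relaent,
			 const struct dsym *dynsym)
{
	for (uint64_t i = 0; i < relasz / relaent; i++) {
		uint32_t type = (uint32_t)rela[i].r_info;		/* ELF64_R_TYPE() */
		uint32_t sym  = (uint32_t)(rela[i].r_info >> 32);	/* ELF64_R_SYM()  */
		uint64_t val;

		if (type == R_PPC64_RELATIVE)
			val = rela[i].r_addend;
		else if (type == R_PPC64_UADDR64)
			val = rela[i].r_addend + dynsym[sym].st_value;
		else
			continue;

		/* stdx r0,r7,r6: write the rebased value at the run-time location */
		*(uint64_t *)(uintptr_t)(rela[i].r_offset + cur_offset) = val + final_offset;
	}
}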
diff --git a/arch/powerpc/kernel/rtas-proc.c b/arch/powerpc/kernel/rtas-proc.c
index 2d33f342a293..081b2b741a8c 100644
--- a/arch/powerpc/kernel/rtas-proc.c
+++ b/arch/powerpc/kernel/rtas-proc.c
@@ -24,11 +24,11 @@
#include <linux/seq_file.h>
#include <linux/bitops.h>
#include <linux/rtc.h>
+#include <linux/of.h>
#include <linux/uaccess.h>
#include <asm/processor.h>
#include <asm/io.h>
-#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/machdep.h> /* for ppc_md */
#include <asm/time.h>
@@ -259,7 +259,6 @@ __initcall(proc_rtas_init);
static int parse_number(const char __user *p, size_t count, u64 *val)
{
char buf[40];
- char *end;
if (count > 39)
return -EINVAL;
@@ -269,11 +268,7 @@ static int parse_number(const char __user *p, size_t count, u64 *val)
buf[count] = 0;
- *val = simple_strtoull(buf, &end, 10);
- if (*end && *end != '\n')
- return -EINVAL;
-
- return 0;
+ return kstrtoull(buf, 10, val);
}
/* ****************************************************************** */
@@ -755,11 +750,18 @@ static int ppc_rtas_tone_volume_show(struct seq_file *m, void *v)
return 0;
}
-#define RMO_READ_BUF_MAX 30
-
-/* RTAS Userspace access */
+/**
+ * ppc_rtas_rmo_buf_show() - Describe RTAS-addressable region for user space.
+ *
+ * Base + size description of a range of RTAS-addressable memory set
+ * aside for user space to use as work area(s) for certain RTAS
+ * functions. User space accesses this region via /dev/mem. Apart from
+ * security policies, the kernel does not arbitrate or serialize
+ * access to this region, and user space must ensure that concurrent
+ * users do not interfere with each other.
+ */
static int ppc_rtas_rmo_buf_show(struct seq_file *m, void *v)
{
- seq_printf(m, "%016lx %x\n", rtas_rmo_buf, RTAS_RMOBUF_MAX);
+ seq_printf(m, "%016lx %x\n", rtas_rmo_buf, RTAS_USER_REGION_SIZE);
return 0;
}
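
Editor's note: a user-space view of the interface documented above. The stand-alone sketch below is illustrative only (not part of the patch); the procfs path is an assumption based on where rtas-proc.c registers this file.

#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	unsigned long base;
	unsigned int size;
	FILE *f = fopen("/proc/powerpc/rtas/rmo_buffer", "r");

	if (!f || fscanf(f, "%lx %x", &base, &size) != 2)
		return 1;
	fclose(f);

	int fd = open("/dev/mem", O_RDWR | O_SYNC);
	if (fd < 0)
		return 1;

	/* Map the RTAS work area; callers must coordinate concurrent use themselves. */
	void *buf = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, base);
	if (buf == MAP_FAILED)
		return 1;

	printf("RTAS user region: 0x%x bytes at 0x%lx\n", size, base);
	munmap(buf, size);
	close(fd);
	return 0;
}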
diff --git a/arch/powerpc/kernel/rtas-rtc.c b/arch/powerpc/kernel/rtas-rtc.c
index a28239b8b0c0..5a31d1829bca 100644
--- a/arch/powerpc/kernel/rtas-rtc.c
+++ b/arch/powerpc/kernel/rtas-rtc.c
@@ -6,13 +6,12 @@
#include <linux/rtc.h>
#include <linux/delay.h>
#include <linux/ratelimit.h>
-#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/time.h>
#define MAX_RTC_WAIT 5000 /* 5 sec */
-#define RTAS_CLOCK_BUSY (-2)
+
time64_t __init rtas_get_boot_time(void)
{
int ret[8];
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index c5fa251b8950..e847f9b1c5b9 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -7,7 +7,7 @@
* Copyright (C) 2001 IBM.
*/
-#include <stdarg.h>
+#include <linux/stdarg.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/spinlock.h>
@@ -23,9 +23,12 @@
#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/reboot.h>
+#include <linux/security.h>
#include <linux/syscalls.h>
+#include <linux/of.h>
+#include <linux/of_fdt.h>
-#include <asm/prom.h>
+#include <asm/interrupt.h>
#include <asm/rtas.h>
#include <asm/hvcall.h>
#include <asm/machdep.h>
@@ -45,6 +48,26 @@
/* This is here deliberately so it's only used in this file */
void enter_rtas(unsigned long);
+static inline void do_enter_rtas(unsigned long args)
+{
+ unsigned long msr;
+
+ /*
+ * Make sure MSR[RI] is currently enabled as it will be forced later
+ * in enter_rtas.
+ */
+ msr = mfmsr();
+ BUG_ON(!(msr & MSR_RI));
+
+ BUG_ON(!irqs_disabled());
+
+ hard_irq_disable(); /* Ensure MSR[EE] is disabled on PPC64 */
+
+ enter_rtas(args);
+
+ srr_regs_clobbered(); /* rtas uses SRRs, invalidate */
+}
+
struct rtas_t rtas = {
.lock = __ARCH_SPIN_LOCK_UNLOCKED
};
@@ -383,7 +406,7 @@ static char *__fetch_rtas_last_error(char *altbuf)
save_args = rtas.args;
rtas.args = err_args;
- enter_rtas(__pa(&rtas.args));
+ do_enter_rtas(__pa(&rtas.args));
err_args = rtas.args;
rtas.args = save_args;
@@ -429,7 +452,7 @@ va_rtas_call_unlocked(struct rtas_args *args, int token, int nargs, int nret,
for (i = 0; i < nret; ++i)
args->rets[i] = 0;
- enter_rtas(__pa(args));
+ do_enter_rtas(__pa(args));
}
void rtas_call_unlocked(struct rtas_args *args, int token, int nargs, int nret, ...)
@@ -441,6 +464,9 @@ void rtas_call_unlocked(struct rtas_args *args, int token, int nargs, int nret,
va_end(list);
}
+static int ibm_open_errinjct_token;
+static int ibm_errinjct_token;
+
int rtas_call(int token, int nargs, int nret, int *outputs, ...)
{
va_list list;
@@ -453,6 +479,21 @@ int rtas_call(int token, int nargs, int nret, int *outputs, ...)
if (!rtas.entry || token == RTAS_UNKNOWN_SERVICE)
return -1;
+ if (token == ibm_open_errinjct_token || token == ibm_errinjct_token) {
+ /*
+ * It would be nicer to not discard the error value
+ * from security_locked_down(), but callers expect an
+ * RTAS status, not an errno.
+ */
+ if (security_locked_down(LOCKDOWN_RTAS_ERROR_INJECTION))
+ return -1;
+ }
+
+ if ((mfmsr() & (MSR_IR|MSR_DR)) != (MSR_IR|MSR_DR)) {
+ WARN_ON_ONCE(1);
+ return -1;
+ }
+
s = lock_rtas();
/* We use the global rtas args buffer */
@@ -483,8 +524,25 @@ int rtas_call(int token, int nargs, int nret, int *outputs, ...)
}
EXPORT_SYMBOL(rtas_call);
-/* For RTAS_BUSY (-2), delay for 1 millisecond. For an extended busy status
- * code of 990n, perform the hinted delay of 10^n (last digit) milliseconds.
+/**
+ * rtas_busy_delay_time() - From an RTAS status value, calculate the
+ * suggested delay time in milliseconds.
+ *
+ * @status: a value returned from rtas_call() or similar APIs which return
+ * the status of a RTAS function call.
+ *
+ * Context: Any context.
+ *
+ * Return:
+ * * 100000 - If @status is 9905.
+ * * 10000 - If @status is 9904.
+ * * 1000 - If @status is 9903.
+ * * 100 - If @status is 9902.
+ * * 10 - If @status is 9901.
+ * * 1 - If @status is either 9900 or -2. This is "wrong" for -2, but
+ * some callers depend on this behavior, and the worst outcome
+ * is that they will delay for longer than necessary.
+ * * 0 - If @status is not a busy or extended delay value.
*/
unsigned int rtas_busy_delay_time(int status)
{
@@ -504,17 +562,77 @@ unsigned int rtas_busy_delay_time(int status)
}
EXPORT_SYMBOL(rtas_busy_delay_time);
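
Editor's note: the usual consumption pattern for these hints, sketched here for illustration (not from this patch). Process context should simply loop on rtas_busy_delay() below; contexts that cannot sleep can combine rtas_busy_delay_time() with mdelay():

/* Illustrative only; needs <linux/delay.h>, and 'token'/argument counts are placeholders. */
static int example_rtas_call_atomic(int token)
{
	unsigned int ms;
	int rc;

	do {
		rc = rtas_call(token, 0, 1, NULL);
		ms = rtas_busy_delay_time(rc);	/* 0 unless rc is a busy/extended-delay status */
		if (ms)
			mdelay(ms);		/* busy-wait: only sensible for short hints */
	} while (ms);

	return rc;
}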
-/* For an RTAS busy status code, perform the hinted delay. */
-unsigned int rtas_busy_delay(int status)
+/**
+ * rtas_busy_delay() - helper for RTAS busy and extended delay statuses
+ *
+ * @status: a value returned from rtas_call() or similar APIs which return
+ * the status of a RTAS function call.
+ *
+ * Context: Process context. May sleep or schedule.
+ *
+ * Return:
+ * * true - @status is RTAS_BUSY or an extended delay hint. The
+ * caller may assume that the CPU has been yielded if necessary,
+ * and that an appropriate delay for @status has elapsed.
+ * Generally the caller should reattempt the RTAS call which
+ * yielded @status.
+ *
+ * * false - @status is neither RTAS_BUSY nor an extended delay hint. The
+ * caller is responsible for handling @status.
+ */
+bool rtas_busy_delay(int status)
{
unsigned int ms;
+ bool ret;
- might_sleep();
- ms = rtas_busy_delay_time(status);
- if (ms && need_resched())
- msleep(ms);
+ switch (status) {
+ case RTAS_EXTENDED_DELAY_MIN...RTAS_EXTENDED_DELAY_MAX:
+ ret = true;
+ ms = rtas_busy_delay_time(status);
+ /*
+ * The extended delay hint can be as high as 100 seconds.
+ * Surely any function returning such a status is either
+ * buggy or isn't going to be significantly slowed by us
+ * polling at 1HZ. Clamp the sleep time to one second.
+ */
+ ms = clamp(ms, 1U, 1000U);
+ /*
+ * The delay hint is an order-of-magnitude suggestion, not
+ * a minimum. It is fine, possibly even advantageous, for
+ * us to pause for less time than hinted. For small values,
+ * use usleep_range() to ensure we don't sleep much longer
+ * than actually needed.
+ *
+ * See Documentation/timers/timers-howto.rst for
+ * explanation of the threshold used here. In effect we use
+ * usleep_range() for 9900 and 9901, msleep() for
+ * 9902-9905.
+ */
+ if (ms <= 20)
+ usleep_range(ms * 100, ms * 1000);
+ else
+ msleep(ms);
+ break;
+ case RTAS_BUSY:
+ ret = true;
+ /*
+ * We should call again immediately if there's no other
+ * work to do.
+ */
+ cond_resched();
+ break;
+ default:
+ ret = false;
+ /*
+ * Not a busy or extended delay status; the caller should
+ * handle @status itself. Ensure we warn on misuses in
+ * atomic context regardless.
+ */
+ might_sleep();
+ break;
+ }
- return ms;
+ return ret;
}
EXPORT_SYMBOL(rtas_busy_delay);
@@ -683,6 +801,63 @@ int rtas_set_indicator_fast(int indicator, int index, int new_value)
return rc;
}
+/**
+ * rtas_ibm_suspend_me() - Call ibm,suspend-me to suspend the LPAR.
+ *
+ * @fw_status: RTAS call status will be placed here if not NULL.
+ *
+ * rtas_ibm_suspend_me() should be called only on a CPU which has
+ * received H_CONTINUE from the H_JOIN hcall. All other active CPUs
+ * should be waiting to return from H_JOIN.
+ *
+ * rtas_ibm_suspend_me() may suspend execution of the OS
+ * indefinitely. Callers should take appropriate measures upon return, such as
+ * resetting watchdog facilities.
+ *
+ * Callers may choose to retry this call if @fw_status is
+ * %RTAS_THREADS_ACTIVE.
+ *
+ * Return:
+ * 0 - The partition has resumed from suspend, possibly after
+ * migration to a different host.
+ * -ECANCELED - The operation was aborted.
+ * -EAGAIN - There were other CPUs not in H_JOIN at the time of the call.
+ * -EBUSY - Some other condition prevented the suspend from succeeding.
+ * -EIO - Hardware/platform error.
+ */
+int rtas_ibm_suspend_me(int *fw_status)
+{
+ int fwrc;
+ int ret;
+
+ fwrc = rtas_call(rtas_token("ibm,suspend-me"), 0, 1, NULL);
+
+ switch (fwrc) {
+ case 0:
+ ret = 0;
+ break;
+ case RTAS_SUSPEND_ABORTED:
+ ret = -ECANCELED;
+ break;
+ case RTAS_THREADS_ACTIVE:
+ ret = -EAGAIN;
+ break;
+ case RTAS_NOT_SUSPENDABLE:
+ case RTAS_OUTSTANDING_COPROC:
+ ret = -EBUSY;
+ break;
+ case -1:
+ default:
+ ret = -EIO;
+ break;
+ }
+
+ if (fw_status)
+ *fw_status = fwrc;
+
+ return ret;
+}
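
Editor's note: an illustrative caller for the retry case mentioned in the kernel-doc above (not from the patch; it omits the H_JOIN/H_CONTINUE choreography that the real suspend path performs before reaching this point):

static int example_suspend_retry(void)
{
	int fw_status;
	int ret;

	do {
		ret = rtas_ibm_suspend_me(&fw_status);
	} while (ret == -EAGAIN && fw_status == RTAS_THREADS_ACTIVE);

	return ret;
}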
+
void __noreturn rtas_restart(char *cmd)
{
if (rtas_flash_term_hook)
@@ -740,324 +915,238 @@ void rtas_os_term(char *str)
printk(KERN_EMERG "ibm,os-term call failed %d\n", status);
}
-static int ibm_suspend_me_token = RTAS_UNKNOWN_SERVICE;
-#ifdef CONFIG_PPC_PSERIES
-static int __rtas_suspend_last_cpu(struct rtas_suspend_me_data *data, int wake_when_done)
+/**
+ * rtas_activate_firmware() - Activate a new version of firmware.
+ *
+ * Context: This function may sleep.
+ *
+ * Activate a new version of partition firmware. The OS must call this
+ * after resuming from a partition hibernation or migration in order
+ * to maintain the ability to perform live firmware updates. It's not
+ * catastrophic for this method to be absent or to fail; just log the
+ * condition in that case.
+ */
+void rtas_activate_firmware(void)
{
- u16 slb_size = mmu_slb_size;
- int rc = H_MULTI_THREADS_ACTIVE;
- int cpu;
-
- slb_set_size(SLB_MIN_SIZE);
- printk(KERN_DEBUG "calling ibm,suspend-me on cpu %i\n", smp_processor_id());
-
- while (rc == H_MULTI_THREADS_ACTIVE && !atomic_read(&data->done) &&
- !atomic_read(&data->error))
- rc = rtas_call(data->token, 0, 1, NULL);
-
- if (rc || atomic_read(&data->error)) {
- printk(KERN_DEBUG "ibm,suspend-me returned %d\n", rc);
- slb_set_size(slb_size);
- }
+ int token;
+ int fwrc;
- if (atomic_read(&data->error))
- rc = atomic_read(&data->error);
-
- atomic_set(&data->error, rc);
- pSeries_coalesce_init();
-
- if (wake_when_done) {
- atomic_set(&data->done, 1);
-
- for_each_online_cpu(cpu)
- plpar_hcall_norets(H_PROD, get_hard_smp_processor_id(cpu));
+ token = rtas_token("ibm,activate-firmware");
+ if (token == RTAS_UNKNOWN_SERVICE) {
+ pr_notice("ibm,activate-firmware method unavailable\n");
+ return;
}
- if (atomic_dec_return(&data->working) == 0)
- complete(data->complete);
-
- return rc;
-}
+ do {
+ fwrc = rtas_call(token, 0, 1, NULL);
+ } while (rtas_busy_delay(fwrc));
-int rtas_suspend_last_cpu(struct rtas_suspend_me_data *data)
-{
- atomic_inc(&data->working);
- return __rtas_suspend_last_cpu(data, 0);
+ if (fwrc)
+ pr_err("ibm,activate-firmware failed (%i)\n", fwrc);
}
-static int __rtas_suspend_cpu(struct rtas_suspend_me_data *data, int wake_when_done)
+/**
+ * get_pseries_errorlog() - Find a specific pseries error log in an RTAS
+ * extended event log.
+ * @log: RTAS error/event log
+ * @section_id: two character section identifier
+ *
+ * Return: A pointer to the specified errorlog or NULL if not found.
+ */
+noinstr struct pseries_errorlog *get_pseries_errorlog(struct rtas_error_log *log,
+ uint16_t section_id)
{
- long rc = H_SUCCESS;
- unsigned long msr_save;
- int cpu;
-
- atomic_inc(&data->working);
-
- /* really need to ensure MSR.EE is off for H_JOIN */
- msr_save = mfmsr();
- mtmsr(msr_save & ~(MSR_EE));
+ struct rtas_ext_event_log_v6 *ext_log =
+ (struct rtas_ext_event_log_v6 *)log->buffer;
+ struct pseries_errorlog *sect;
+ unsigned char *p, *log_end;
+ uint32_t ext_log_length = rtas_error_extended_log_length(log);
+ uint8_t log_format = rtas_ext_event_log_format(ext_log);
+ uint32_t company_id = rtas_ext_event_company_id(ext_log);
- while (rc == H_SUCCESS && !atomic_read(&data->done) && !atomic_read(&data->error))
- rc = plpar_hcall_norets(H_JOIN);
+ /* Check that we understand the format */
+ if (ext_log_length < sizeof(struct rtas_ext_event_log_v6) ||
+ log_format != RTAS_V6EXT_LOG_FORMAT_EVENT_LOG ||
+ company_id != RTAS_V6EXT_COMPANY_ID_IBM)
+ return NULL;
- mtmsr(msr_save);
+ log_end = log->buffer + ext_log_length;
+ p = ext_log->vendor_log;
- if (rc == H_SUCCESS) {
- /* This cpu was prodded and the suspend is complete. */
- goto out;
- } else if (rc == H_CONTINUE) {
- /* All other cpus are in H_JOIN, this cpu does
- * the suspend.
- */
- return __rtas_suspend_last_cpu(data, wake_when_done);
- } else {
- printk(KERN_ERR "H_JOIN on cpu %i failed with rc = %ld\n",
- smp_processor_id(), rc);
- atomic_set(&data->error, rc);
+ while (p < log_end) {
+ sect = (struct pseries_errorlog *)p;
+ if (pseries_errorlog_id(sect) == section_id)
+ return sect;
+ p += pseries_errorlog_length(sect);
}
- if (wake_when_done) {
- atomic_set(&data->done, 1);
-
- /* This cpu did the suspend or got an error; in either case,
- * we need to prod all other other cpus out of join state.
- * Extra prods are harmless.
- */
- for_each_online_cpu(cpu)
- plpar_hcall_norets(H_PROD, get_hard_smp_processor_id(cpu));
- }
-out:
- if (atomic_dec_return(&data->working) == 0)
- complete(data->complete);
- return rc;
+ return NULL;
}
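
Editor's note: a usage sketch for the lookup helper above (illustrative only; PSERIES_ELOG_SECT_ID_HOTPLUG and pseries_errorlog_length() are assumed to come from asm/rtas.h):

static void example_find_hotplug_section(struct rtas_error_log *log)
{
	struct pseries_errorlog *sect;

	sect = get_pseries_errorlog(log, PSERIES_ELOG_SECT_ID_HOTPLUG);
	if (!sect) {
		pr_debug("no hotplug section in this error log\n");
		return;
	}

	pr_info("hotplug section found, length %u\n",
		pseries_errorlog_length(sect));
}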
-int rtas_suspend_cpu(struct rtas_suspend_me_data *data)
-{
- return __rtas_suspend_cpu(data, 0);
-}
+#ifdef CONFIG_PPC_RTAS_FILTER
-static void rtas_percpu_suspend_me(void *info)
-{
- __rtas_suspend_cpu((struct rtas_suspend_me_data *)info, 1);
-}
+/*
+ * The sys_rtas syscall, as originally designed, allows root to pass
+ * arbitrary physical addresses to RTAS calls. A number of RTAS calls
+ * can be abused to write to arbitrary memory and do other things that
+ * are potentially harmful to system integrity, and thus should only
+ * be used inside the kernel and not exposed to userspace.
+ *
+ * All known legitimate users of the sys_rtas syscall will only ever
+ * pass addresses that fall within the RMO buffer, and use a known
+ * subset of RTAS calls.
+ *
+ * Accordingly, we filter RTAS requests to check that the call is
+ * permitted, and that provided pointers fall within the RMO buffer.
+ * The rtas_filters list contains an entry for each permitted call,
+ * with the indexes of the parameters which are expected to contain
+ * addresses and sizes of buffers allocated inside the RMO buffer.
+ */
+struct rtas_filter {
+ const char *name;
+ int token;
+ /* Indexes into the args buffer, -1 if not used */
+ int buf_idx1;
+ int size_idx1;
+ int buf_idx2;
+ int size_idx2;
+
+ int fixed_size;
+};
-enum rtas_cpu_state {
- DOWN,
- UP,
+static struct rtas_filter rtas_filters[] __ro_after_init = {
+ { "ibm,activate-firmware", -1, -1, -1, -1, -1 },
+ { "ibm,configure-connector", -1, 0, -1, 1, -1, 4096 }, /* Special cased */
+ { "display-character", -1, -1, -1, -1, -1 },
+ { "ibm,display-message", -1, 0, -1, -1, -1 },
+ { "ibm,errinjct", -1, 2, -1, -1, -1, 1024 },
+ { "ibm,close-errinjct", -1, -1, -1, -1, -1 },
+ { "ibm,open-errinjct", -1, -1, -1, -1, -1 },
+ { "ibm,get-config-addr-info2", -1, -1, -1, -1, -1 },
+ { "ibm,get-dynamic-sensor-state", -1, 1, -1, -1, -1 },
+ { "ibm,get-indices", -1, 2, 3, -1, -1 },
+ { "get-power-level", -1, -1, -1, -1, -1 },
+ { "get-sensor-state", -1, -1, -1, -1, -1 },
+ { "ibm,get-system-parameter", -1, 1, 2, -1, -1 },
+ { "get-time-of-day", -1, -1, -1, -1, -1 },
+ { "ibm,get-vpd", -1, 0, -1, 1, 2 },
+ { "ibm,lpar-perftools", -1, 2, 3, -1, -1 },
+ { "ibm,platform-dump", -1, 4, 5, -1, -1 }, /* Special cased */
+ { "ibm,read-slot-reset-state", -1, -1, -1, -1, -1 },
+ { "ibm,scan-log-dump", -1, 0, 1, -1, -1 },
+ { "ibm,set-dynamic-indicator", -1, 2, -1, -1, -1 },
+ { "ibm,set-eeh-option", -1, -1, -1, -1, -1 },
+ { "set-indicator", -1, -1, -1, -1, -1 },
+ { "set-power-level", -1, -1, -1, -1, -1 },
+ { "set-time-for-power-on", -1, -1, -1, -1, -1 },
+ { "ibm,set-system-parameter", -1, 1, -1, -1, -1 },
+ { "set-time-of-day", -1, -1, -1, -1, -1 },
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ { "ibm,suspend-me", -1, -1, -1, -1, -1 },
+ { "ibm,update-nodes", -1, 0, -1, -1, -1, 4096 },
+ { "ibm,update-properties", -1, 0, -1, -1, -1, 4096 },
+#endif
+ { "ibm,physical-attestation", -1, 0, 1, -1, -1 },
};
-#ifndef CONFIG_SMP
-static int rtas_cpu_state_change_mask(enum rtas_cpu_state state,
- cpumask_var_t cpus)
+static bool in_rmo_buf(u32 base, u32 end)
{
- if (!cpumask_empty(cpus)) {
- cpumask_clear(cpus);
- return -EINVAL;
- } else
- return 0;
-}
-#else
-/* On return cpumask will be altered to indicate CPUs changed.
- * CPUs with states changed will be set in the mask,
- * CPUs with status unchanged will be unset in the mask. */
-static int rtas_cpu_state_change_mask(enum rtas_cpu_state state,
- cpumask_var_t cpus)
-{
- int cpu;
- int cpuret = 0;
- int ret = 0;
-
- if (cpumask_empty(cpus))
- return 0;
-
- for_each_cpu(cpu, cpus) {
- struct device *dev = get_cpu_device(cpu);
-
- switch (state) {
- case DOWN:
- cpuret = device_offline(dev);
- break;
- case UP:
- cpuret = device_online(dev);
- break;
- }
- if (cpuret < 0) {
- pr_debug("%s: cpu_%s for cpu#%d returned %d.\n",
- __func__,
- ((state == UP) ? "up" : "down"),
- cpu, cpuret);
- if (!ret)
- ret = cpuret;
- if (state == UP) {
- /* clear bits for unchanged cpus, return */
- cpumask_shift_right(cpus, cpus, cpu);
- cpumask_shift_left(cpus, cpus, cpu);
- break;
- } else {
- /* clear bit for unchanged cpu, continue */
- cpumask_clear_cpu(cpu, cpus);
- }
- }
- cond_resched();
- }
-
- return ret;
+ return base >= rtas_rmo_buf &&
+ base < (rtas_rmo_buf + RTAS_USER_REGION_SIZE) &&
+ base <= end &&
+ end >= rtas_rmo_buf &&
+ end < (rtas_rmo_buf + RTAS_USER_REGION_SIZE);
}
-#endif
-int rtas_online_cpus_mask(cpumask_var_t cpus)
+static bool block_rtas_call(int token, int nargs,
+ struct rtas_args *args)
{
- int ret;
-
- ret = rtas_cpu_state_change_mask(UP, cpus);
-
- if (ret) {
- cpumask_var_t tmp_mask;
-
- if (!alloc_cpumask_var(&tmp_mask, GFP_KERNEL))
- return ret;
+ int i;
- /* Use tmp_mask to preserve cpus mask from first failure */
- cpumask_copy(tmp_mask, cpus);
- rtas_offline_cpus_mask(tmp_mask);
- free_cpumask_var(tmp_mask);
- }
+ for (i = 0; i < ARRAY_SIZE(rtas_filters); i++) {
+ struct rtas_filter *f = &rtas_filters[i];
+ u32 base, size, end;
- return ret;
-}
+ if (token != f->token)
+ continue;
-int rtas_offline_cpus_mask(cpumask_var_t cpus)
-{
- return rtas_cpu_state_change_mask(DOWN, cpus);
-}
+ if (f->buf_idx1 != -1) {
+ base = be32_to_cpu(args->args[f->buf_idx1]);
+ if (f->size_idx1 != -1)
+ size = be32_to_cpu(args->args[f->size_idx1]);
+ else if (f->fixed_size)
+ size = f->fixed_size;
+ else
+ size = 1;
-int rtas_ibm_suspend_me(u64 handle)
-{
- long state;
- long rc;
- unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
- struct rtas_suspend_me_data data;
- DECLARE_COMPLETION_ONSTACK(done);
- cpumask_var_t offline_mask;
- int cpuret;
-
- if (!rtas_service_present("ibm,suspend-me"))
- return -ENOSYS;
-
- /* Make sure the state is valid */
- rc = plpar_hcall(H_VASI_STATE, retbuf, handle);
-
- state = retbuf[0];
-
- if (rc) {
- printk(KERN_ERR "rtas_ibm_suspend_me: vasi_state returned %ld\n",rc);
- return rc;
- } else if (state == H_VASI_ENABLED) {
- return -EAGAIN;
- } else if (state != H_VASI_SUSPENDING) {
- printk(KERN_ERR "rtas_ibm_suspend_me: vasi_state returned state %ld\n",
- state);
- return -EIO;
- }
+ end = base + size - 1;
- if (!alloc_cpumask_var(&offline_mask, GFP_KERNEL))
- return -ENOMEM;
+ /*
+ * Special case for ibm,platform-dump - NULL buffer
+ * address is used to indicate end of dump processing
+ */
+ if (!strcmp(f->name, "ibm,platform-dump") &&
+ base == 0)
+ return false;
- atomic_set(&data.working, 0);
- atomic_set(&data.done, 0);
- atomic_set(&data.error, 0);
- data.token = rtas_token("ibm,suspend-me");
- data.complete = &done;
+ if (!in_rmo_buf(base, end))
+ goto err;
+ }
- lock_device_hotplug();
+ if (f->buf_idx2 != -1) {
+ base = be32_to_cpu(args->args[f->buf_idx2]);
+ if (f->size_idx2 != -1)
+ size = be32_to_cpu(args->args[f->size_idx2]);
+ else if (f->fixed_size)
+ size = f->fixed_size;
+ else
+ size = 1;
+ end = base + size - 1;
- /* All present CPUs must be online */
- cpumask_andnot(offline_mask, cpu_present_mask, cpu_online_mask);
- cpuret = rtas_online_cpus_mask(offline_mask);
- if (cpuret) {
- pr_err("%s: Could not bring present CPUs online.\n", __func__);
- atomic_set(&data.error, cpuret);
- goto out;
- }
+ /*
+ * Special case for ibm,configure-connector where the
+ * address can be 0
+ */
+ if (!strcmp(f->name, "ibm,configure-connector") &&
+ base == 0)
+ return false;
- cpu_hotplug_disable();
+ if (!in_rmo_buf(base, end))
+ goto err;
+ }
- /* Check if we raced with a CPU-Offline Operation */
- if (!cpumask_equal(cpu_present_mask, cpu_online_mask)) {
- pr_info("%s: Raced against a concurrent CPU-Offline\n", __func__);
- atomic_set(&data.error, -EAGAIN);
- goto out_hotplug_enable;
+ return false;
}
- /* Call function on all CPUs. One of us will make the
- * rtas call
- */
- on_each_cpu(rtas_percpu_suspend_me, &data, 0);
-
- wait_for_completion(&done);
+err:
+ pr_err_ratelimited("sys_rtas: RTAS call blocked - exploit attempt?\n");
+ pr_err_ratelimited("sys_rtas: token=0x%x, nargs=%d (called by %s)\n",
+ token, nargs, current->comm);
+ return true;
+}
- if (atomic_read(&data.error) != 0)
- printk(KERN_ERR "Error doing global join\n");
+static void __init rtas_syscall_filter_init(void)
+{
+ unsigned int i;
-out_hotplug_enable:
- cpu_hotplug_enable();
+ for (i = 0; i < ARRAY_SIZE(rtas_filters); i++)
+ rtas_filters[i].token = rtas_token(rtas_filters[i].name);
+}
- /* Take down CPUs not online prior to suspend */
- cpuret = rtas_offline_cpus_mask(offline_mask);
- if (cpuret)
- pr_warn("%s: Could not restore CPUs to offline state.\n",
- __func__);
+#else
-out:
- unlock_device_hotplug();
- free_cpumask_var(offline_mask);
- return atomic_read(&data.error);
-}
-#else /* CONFIG_PPC_PSERIES */
-int rtas_ibm_suspend_me(u64 handle)
+static bool block_rtas_call(int token, int nargs,
+ struct rtas_args *args)
{
- return -ENOSYS;
+ return false;
}
-#endif
-/**
- * Find a specific pseries error log in an RTAS extended event log.
- * @log: RTAS error/event log
- * @section_id: two character section identifier
- *
- * Returns a pointer to the specified errorlog or NULL if not found.
- */
-struct pseries_errorlog *get_pseries_errorlog(struct rtas_error_log *log,
- uint16_t section_id)
+static void __init rtas_syscall_filter_init(void)
{
- struct rtas_ext_event_log_v6 *ext_log =
- (struct rtas_ext_event_log_v6 *)log->buffer;
- struct pseries_errorlog *sect;
- unsigned char *p, *log_end;
- uint32_t ext_log_length = rtas_error_extended_log_length(log);
- uint8_t log_format = rtas_ext_event_log_format(ext_log);
- uint32_t company_id = rtas_ext_event_company_id(ext_log);
-
- /* Check that we understand the format */
- if (ext_log_length < sizeof(struct rtas_ext_event_log_v6) ||
- log_format != RTAS_V6EXT_LOG_FORMAT_EVENT_LOG ||
- company_id != RTAS_V6EXT_COMPANY_ID_IBM)
- return NULL;
-
- log_end = log->buffer + ext_log_length;
- p = ext_log->vendor_log;
-
- while (p < log_end) {
- sect = (struct pseries_errorlog *)p;
- if (pseries_errorlog_id(sect) == section_id)
- return sect;
- p += pseries_errorlog_length(sect);
- }
-
- return NULL;
}
+#endif /* CONFIG_PPC_RTAS_FILTER */
+
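For readers following the filter table above: each entry names which RTAS call arguments carry a user buffer address and its length, and block_rtas_call() verifies the resulting range lies inside the RTAS user region. Below is a minimal illustrative sketch (not part of the patch) of how one buffer/size pair from a filter entry is interpreted, reusing the struct rtas_args, be32_to_cpu() and in_rmo_buf() shown above; the _example helper name is hypothetical.

static bool check_one_buffer_example(const struct rtas_args *args,
				     int buf_idx, int size_idx, u32 fixed_size)
{
	/* Arguments arrive big-endian from user space. */
	u32 base = be32_to_cpu(args->args[buf_idx]);
	u32 size;

	if (size_idx != -1)
		size = be32_to_cpu(args->args[size_idx]);
	else if (fixed_size)
		size = fixed_size;
	else
		size = 1;

	/*
	 * e.g. { "ibm,get-system-parameter", -1, 1, 2, -1, -1 }:
	 * buffer address in args[1], length in args[2].
	 */
	return in_rmo_buf(base, base + size - 1);
}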
/* We assume to be passed big endian arguments */
SYSCALL_DEFINE1(rtas, struct rtas_args __user *, uargs)
{
@@ -1095,8 +1184,19 @@ SYSCALL_DEFINE1(rtas, struct rtas_args __user *, uargs)
args.rets = &args.args[nargs];
memset(args.rets, 0, nret * sizeof(rtas_arg_t));
+ if (block_rtas_call(token, nargs, &args))
+ return -EINVAL;
+
+ if (token == ibm_open_errinjct_token || token == ibm_errinjct_token) {
+ int err;
+
+ err = security_locked_down(LOCKDOWN_RTAS_ERROR_INJECTION);
+ if (err)
+ return err;
+ }
+
/* Need to handle ibm,suspend_me call specially */
- if (token == ibm_suspend_me_token) {
+ if (token == rtas_token("ibm,suspend-me")) {
/*
* rtas_ibm_suspend_me assumes the streamid handle is in cpu
@@ -1105,7 +1205,7 @@ SYSCALL_DEFINE1(rtas, struct rtas_args __user *, uargs)
int rc = 0;
u64 handle = ((u64)be32_to_cpu(args.args[0]) << 32)
| be32_to_cpu(args.args[1]);
- rc = rtas_ibm_suspend_me(handle);
+ rc = rtas_syscall_dispatch_ibm_suspend_me(handle);
if (rc == -EAGAIN)
args.rets[0] = cpu_to_be32(RTAS_NOT_SUSPENDABLE);
else if (rc == -EIO)
@@ -1120,7 +1220,7 @@ SYSCALL_DEFINE1(rtas, struct rtas_args __user *, uargs)
flags = lock_rtas();
rtas.args = args;
- enter_rtas(__pa(&rtas.args));
+ do_enter_rtas(__pa(&rtas.args));
args = rtas.args;
/* A -1 return code indicates that the last command couldn't
@@ -1181,12 +1281,10 @@ void __init rtas_initialize(void)
* the stop-self token if any
*/
#ifdef CONFIG_PPC64
- if (firmware_has_feature(FW_FEATURE_LPAR)) {
+ if (firmware_has_feature(FW_FEATURE_LPAR))
rtas_region = min(ppc64_rma_size, RTAS_INSTANTIATE_MAX);
- ibm_suspend_me_token = rtas_token("ibm,suspend-me");
- }
#endif
- rtas_rmo_buf = memblock_phys_alloc_range(RTAS_RMOBUF_MAX, PAGE_SIZE,
+ rtas_rmo_buf = memblock_phys_alloc_range(RTAS_USER_REGION_SIZE, PAGE_SIZE,
0, rtas_region);
if (!rtas_rmo_buf)
panic("ERROR: RTAS: Failed to allocate %lx bytes below %pa\n",
@@ -1195,6 +1293,9 @@ void __init rtas_initialize(void)
#ifdef CONFIG_RTAS_ERROR_LOGGING
rtas_last_error_token = rtas_token("rtas-last-error");
#endif
+ ibm_open_errinjct_token = rtas_token("ibm,open-errinjct");
+ ibm_errinjct_token = rtas_token("ibm,errinjct");
+ rtas_syscall_filter_init();
}
int __init early_init_dt_scan_rtas(unsigned long node,
@@ -1209,6 +1310,12 @@ int __init early_init_dt_scan_rtas(unsigned long node,
entryp = of_get_flat_dt_prop(node, "linux,rtas-entry", NULL);
sizep = of_get_flat_dt_prop(node, "rtas-size", NULL);
+#ifdef CONFIG_PPC64
+ /* need this feature to decide the crashkernel offset */
+ if (of_get_flat_dt_prop(node, "ibm,hypertas-functions", NULL))
+ powerpc_firmware_features |= FW_FEATURE_LPAR;
+#endif
+
if (basep && entryp && sizep) {
rtas.base = *basep;
rtas.entry = *entryp;
diff --git a/arch/powerpc/kernel/rtas_entry.S b/arch/powerpc/kernel/rtas_entry.S
new file mode 100644
index 000000000000..6ce95ddadbcd
--- /dev/null
+++ b/arch/powerpc/kernel/rtas_entry.S
@@ -0,0 +1,176 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#include <asm/asm-offsets.h>
+#include <asm/bug.h>
+#include <asm/page.h>
+#include <asm/ppc_asm.h>
+
+/*
+ * RTAS is called with MSR IR, DR, EE disabled, and LR in the return address.
+ *
+ * Note: r3 is an input parameter to rtas, so don't trash it...
+ */
+
+#ifdef CONFIG_PPC32
+_GLOBAL(enter_rtas)
+ stwu r1,-INT_FRAME_SIZE(r1)
+ mflr r0
+ stw r0,INT_FRAME_SIZE+4(r1)
+ LOAD_REG_ADDR(r4, rtas)
+ lis r6,1f@ha /* physical return address for rtas */
+ addi r6,r6,1f@l
+ tophys(r6,r6)
+ lwz r8,RTASENTRY(r4)
+ lwz r4,RTASBASE(r4)
+ mfmsr r9
+ stw r9,8(r1)
+ li r9,MSR_KERNEL & ~(MSR_IR|MSR_DR)
+ mtlr r6
+ stw r1, THREAD + RTAS_SP(r2)
+ mtspr SPRN_SRR0,r8
+ mtspr SPRN_SRR1,r9
+ rfi
+1:
+ lis r8, 1f@h
+ ori r8, r8, 1f@l
+ LOAD_REG_IMMEDIATE(r9,MSR_KERNEL)
+ mtspr SPRN_SRR0,r8
+ mtspr SPRN_SRR1,r9
+ rfi /* Reactivate MMU translation */
+1:
+ lwz r8,INT_FRAME_SIZE+4(r1) /* get return address */
+ lwz r9,8(r1) /* original msr value */
+ addi r1,r1,INT_FRAME_SIZE
+ li r0,0
+ stw r0, THREAD + RTAS_SP(r2)
+ mtlr r8
+ mtmsr r9
+ blr /* return to caller */
+_ASM_NOKPROBE_SYMBOL(enter_rtas)
+
+#else /* CONFIG_PPC32 */
+#include <asm/exception-64s.h>
+
+/*
+ * 32-bit rtas on 64-bit machines has the additional problem that RTAS may
+ * not preserve the upper parts of registers it uses.
+ */
+_GLOBAL(enter_rtas)
+ mflr r0
+ std r0,16(r1)
+ stdu r1,-SWITCH_FRAME_SIZE(r1) /* Save SP and create stack space. */
+
+ /* Because RTAS is running in 32b mode, it clobbers the high order half
+ * of all registers that it saves. We therefore save those registers
+ * RTAS might touch to the stack. (r0, r3-r12 are caller saved)
+ */
+ SAVE_GPR(2, r1) /* Save the TOC */
+ SAVE_NVGPRS(r1) /* Save the non-volatiles */
+
+ mfcr r4
+ std r4,_CCR(r1)
+ mfctr r5
+ std r5,_CTR(r1)
+ mfspr r6,SPRN_XER
+ std r6,_XER(r1)
+ mfdar r7
+ std r7,_DAR(r1)
+ mfdsisr r8
+ std r8,_DSISR(r1)
+
+ /* Temporary workaround to clear CR until RTAS can be modified to
+ * ignore all bits.
+ */
+ li r0,0
+ mtcr r0
+
+ mfmsr r6
+
+ /* Unfortunately, the stack pointer and the MSR are also clobbered,
+ * so they are saved in the PACA which allows us to restore
+ * our original state after RTAS returns.
+ */
+ std r1,PACAR1(r13)
+ std r6,PACASAVEDMSR(r13)
+
+ /* Setup our real return addr */
+ LOAD_REG_ADDR(r4,rtas_return_loc)
+ clrldi r4,r4,2 /* convert to realmode address */
+ mtlr r4
+
+__enter_rtas:
+ LOAD_REG_ADDR(r4, rtas)
+ ld r5,RTASENTRY(r4) /* get the rtas->entry value */
+ ld r4,RTASBASE(r4) /* get the rtas->base value */
+
+ /*
+ * RTAS runs in 32-bit big endian real mode, but leave MSR[RI] on as we
+ * may hit NMI (SRESET or MCE) while in RTAS. RTAS should disable RI in
+ * its critical regions (as specified in PAPR+ section 7.2.1). MSR[S]
+ * is not impacted by RFI_TO_KERNEL (only urfid can unset it). So if
+ * MSR[S] is set, it will remain when entering RTAS.
+ * If we're in HV mode, RTAS must also run in HV mode, so extract MSR_HV
+ * from the saved MSR value and insert into the value RTAS will use.
+ */
+ extrdi r0, r6, 1, 63 - MSR_HV_LG
+ LOAD_REG_IMMEDIATE(r6, MSR_ME | MSR_RI)
+ insrdi r6, r0, 1, 63 - MSR_HV_LG
+
+ li r0,0
+ mtmsrd r0,1 /* disable RI before using SRR0/1 */
+
+ mtspr SPRN_SRR0,r5
+ mtspr SPRN_SRR1,r6
+ RFI_TO_KERNEL
+ b . /* prevent speculative execution */
+rtas_return_loc:
+ FIXUP_ENDIAN
+
+ /* Set SF before anything. */
+ LOAD_REG_IMMEDIATE(r6, MSR_KERNEL & ~(MSR_IR|MSR_DR))
+ mtmsrd r6
+
+ /* relocation is off at this point */
+ GET_PACA(r13)
+
+ bcl 20,31,$+4
+0: mflr r3
+ ld r3,(1f-0b)(r3) /* get &rtas_restore_regs */
+
+ ld r1,PACAR1(r13) /* Restore our SP */
+ ld r4,PACASAVEDMSR(r13) /* Restore our MSR */
+
+ mtspr SPRN_SRR0,r3
+ mtspr SPRN_SRR1,r4
+ RFI_TO_KERNEL
+ b . /* prevent speculative execution */
+_ASM_NOKPROBE_SYMBOL(enter_rtas)
+_ASM_NOKPROBE_SYMBOL(__enter_rtas)
+_ASM_NOKPROBE_SYMBOL(rtas_return_loc)
+
+ .align 3
+1: .8byte rtas_restore_regs
+
+rtas_restore_regs:
+ /* relocation is on at this point */
+ REST_GPR(2, r1) /* Restore the TOC */
+ REST_NVGPRS(r1) /* Restore the non-volatiles */
+
+ ld r4,_CCR(r1)
+ mtcr r4
+ ld r5,_CTR(r1)
+ mtctr r5
+ ld r6,_XER(r1)
+ mtspr SPRN_XER,r6
+ ld r7,_DAR(r1)
+ mtdar r7
+ ld r8,_DSISR(r1)
+ mtdsisr r8
+
+ addi r1,r1,SWITCH_FRAME_SIZE /* Unstack our frame */
+ ld r0,16(r1) /* get return address */
+
+ mtlr r0
+ blr /* return to caller */
+
+#endif /* CONFIG_PPC32 */
diff --git a/arch/powerpc/kernel/rtas_flash.c b/arch/powerpc/kernel/rtas_flash.c
index a99179d83538..bc817a5619d6 100644
--- a/arch/powerpc/kernel/rtas_flash.c
+++ b/arch/powerpc/kernel/rtas_flash.c
@@ -120,7 +120,7 @@ static struct kmem_cache *flash_block_cache = NULL;
/*
* Local copy of the flash block list.
*
- * The rtas_firmware_flash_list varable will be
+ * The rtas_firmware_flash_list variable will be
* set once the data is fully read.
*
* For convenience as we build the list we use virtual addrs,
diff --git a/arch/powerpc/kernel/rtas_pci.c b/arch/powerpc/kernel/rtas_pci.c
index ae5e43eaca48..5a2f5ea3b054 100644
--- a/arch/powerpc/kernel/rtas_pci.c
+++ b/arch/powerpc/kernel/rtas_pci.c
@@ -13,11 +13,12 @@
#include <linux/pci.h>
#include <linux/string.h>
#include <linux/init.h>
+#include <linux/pgtable.h>
+#include <linux/of_address.h>
+#include <linux/of_fdt.h>
#include <asm/io.h>
-#include <asm/pgtable.h>
#include <asm/irq.h>
-#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/pci-bridge.h>
#include <asm/iommu.h>
diff --git a/arch/powerpc/kernel/rtasd.c b/arch/powerpc/kernel/rtasd.c
index 89b798f8f656..5270b450bbde 100644
--- a/arch/powerpc/kernel/rtasd.c
+++ b/arch/powerpc/kernel/rtasd.c
@@ -22,7 +22,6 @@
#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/rtas.h>
-#include <asm/prom.h>
#include <asm/nvram.h>
#include <linux/atomic.h>
#include <asm/machdep.h>
@@ -273,37 +272,15 @@ void pSeries_log_error(char *buf, unsigned int err_type, int fatal)
}
}
-#ifdef CONFIG_PPC_PSERIES
-static void handle_prrn_event(s32 scope)
-{
- /*
- * For PRRN, we must pass the negative of the scope value in
- * the RTAS event.
- */
- pseries_devicetree_update(-scope);
- numa_update_cpu_topology(false);
-}
-
static void handle_rtas_event(const struct rtas_error_log *log)
{
- if (rtas_error_type(log) != RTAS_TYPE_PRRN || !prrn_is_enabled())
+ if (!machine_is(pseries))
return;
- /* For PRRN Events the extended log length is used to denote
- * the scope for calling rtas update-nodes.
- */
- handle_prrn_event(rtas_error_extended_log_length(log));
+ if (rtas_error_type(log) == RTAS_TYPE_PRRN)
+ pr_info_ratelimited("Platform resource reassignment ignored.\n");
}
-#else
-
-static void handle_rtas_event(const struct rtas_error_log *log)
-{
- return;
-}
-
-#endif
-
static int rtas_log_open(struct inode * inode, struct file * file)
{
return 0;
@@ -451,7 +428,7 @@ static void rtas_event_scan(struct work_struct *w)
do_event_scan();
- get_online_cpus();
+ cpus_read_lock();
/* raw_ OK because just using CPU as starting point. */
cpu = cpumask_next(raw_smp_processor_id(), cpu_online_mask);
@@ -473,11 +450,11 @@ static void rtas_event_scan(struct work_struct *w)
schedule_delayed_work_on(cpu, &event_scan_work,
__round_jiffies_relative(event_scan_delay, cpu));
- put_online_cpus();
+ cpus_read_unlock();
}
#ifdef CONFIG_PPC64
-static void retrieve_nvram_error_log(void)
+static void __init retrieve_nvram_error_log(void)
{
unsigned int err_type ;
int rc ;
@@ -495,12 +472,12 @@ static void retrieve_nvram_error_log(void)
}
}
#else /* CONFIG_PPC64 */
-static void retrieve_nvram_error_log(void)
+static void __init retrieve_nvram_error_log(void)
{
}
#endif /* CONFIG_PPC64 */
-static void start_event_scan(void)
+static void __init start_event_scan(void)
{
printk(KERN_DEBUG "RTAS daemon started\n");
pr_debug("rtasd: will sleep for %d milliseconds\n",
diff --git a/arch/powerpc/kernel/secure_boot.c b/arch/powerpc/kernel/secure_boot.c
index 4b982324d368..f9af305d9579 100644
--- a/arch/powerpc/kernel/secure_boot.c
+++ b/arch/powerpc/kernel/secure_boot.c
@@ -23,12 +23,19 @@ bool is_ppc_secureboot_enabled(void)
{
struct device_node *node;
bool enabled = false;
+ u32 secureboot;
node = get_ppc_fw_sb_node();
enabled = of_property_read_bool(node, "os-secureboot-enforcing");
-
of_node_put(node);
+ if (enabled)
+ goto out;
+
+ if (!of_property_read_u32(of_root, "ibm,secure-boot", &secureboot))
+ enabled = (secureboot > 1);
+
+out:
pr_info("Secure boot mode %s\n", enabled ? "enabled" : "disabled");
return enabled;
@@ -38,12 +45,19 @@ bool is_ppc_trustedboot_enabled(void)
{
struct device_node *node;
bool enabled = false;
+ u32 trustedboot;
node = get_ppc_fw_sb_node();
enabled = of_property_read_bool(node, "trusted-enabled");
-
of_node_put(node);
+ if (enabled)
+ goto out;
+
+ if (!of_property_read_u32(of_root, "ibm,trusted-boot", &trustedboot))
+ enabled = (trustedboot > 0);
+
+out:
pr_info("Trusted boot mode %s\n", enabled ? "enabled" : "disabled");
return enabled;
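As an aside on the fallback checks just added: the numeric comparisons reflect the assumed PAPR encoding of these root-node properties, where ibm,secure-boot uses 0 = disabled, 1 = enabled but not enforcing, and 2 or above = enforcing, while any non-zero ibm,trusted-boot value indicates trusted boot. A minimal sketch under that assumption; the _example names are hypothetical.

static bool secureboot_enforcing_example(u32 ibm_secure_boot)
{
	/* 2 or above: the secure boot policy is being enforced. */
	return ibm_secure_boot > 1;
}

static bool trustedboot_enabled_example(u32 ibm_trusted_boot)
{
	/* Any non-zero value: trusted boot (measurement) is enabled. */
	return ibm_trusted_boot > 0;
}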
diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c
index bd70f5be1c27..206475e3e0b4 100644
--- a/arch/powerpc/kernel/security.c
+++ b/arch/powerpc/kernel/security.c
@@ -7,29 +7,35 @@
#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/device.h>
+#include <linux/memblock.h>
+#include <linux/nospec.h>
+#include <linux/prctl.h>
#include <linux/seq_buf.h>
+#include <linux/debugfs.h>
#include <asm/asm-prototypes.h>
#include <asm/code-patching.h>
-#include <asm/debugfs.h>
#include <asm/security_features.h>
+#include <asm/sections.h>
#include <asm/setup.h>
+#include <asm/inst.h>
+#include "setup.h"
u64 powerpc_security_features __read_mostly = SEC_FTR_DEFAULT;
-enum count_cache_flush_type {
- COUNT_CACHE_FLUSH_NONE = 0x1,
- COUNT_CACHE_FLUSH_SW = 0x2,
- COUNT_CACHE_FLUSH_HW = 0x4,
+enum branch_cache_flush_type {
+ BRANCH_CACHE_FLUSH_NONE = 0x1,
+ BRANCH_CACHE_FLUSH_SW = 0x2,
+ BRANCH_CACHE_FLUSH_HW = 0x4,
};
-static enum count_cache_flush_type count_cache_flush_type = COUNT_CACHE_FLUSH_NONE;
-static bool link_stack_flush_enabled;
+static enum branch_cache_flush_type count_cache_flush_type = BRANCH_CACHE_FLUSH_NONE;
+static enum branch_cache_flush_type link_stack_flush_type = BRANCH_CACHE_FLUSH_NONE;
bool barrier_nospec_enabled;
static bool no_nospec;
static bool btb_flush_enabled;
-#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_BOOK3S_64)
+#if defined(CONFIG_PPC_E500) || defined(CONFIG_PPC_BOOK3S_64)
static bool no_spectrev2;
#endif
@@ -39,7 +45,7 @@ static void enable_barrier_nospec(bool enable)
do_barrier_nospec_fixups(enable);
}
-void setup_barrier_nospec(void)
+void __init setup_barrier_nospec(void)
{
bool enable;
@@ -101,7 +107,7 @@ DEFINE_DEBUGFS_ATTRIBUTE(fops_barrier_nospec, barrier_nospec_get,
static __init int barrier_nospec_debugfs_init(void)
{
debugfs_create_file_unsafe("barrier_nospec", 0600,
- powerpc_debugfs_root, NULL,
+ arch_debugfs_dir, NULL,
&fops_barrier_nospec);
return 0;
}
@@ -109,14 +115,14 @@ device_initcall(barrier_nospec_debugfs_init);
static __init int security_feature_debugfs_init(void)
{
- debugfs_create_x64("security_features", 0400, powerpc_debugfs_root,
+ debugfs_create_x64("security_features", 0400, arch_debugfs_dir,
&powerpc_security_features);
return 0;
}
device_initcall(security_feature_debugfs_init);
#endif /* CONFIG_DEBUG_FS */
-#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_BOOK3S_64)
+#if defined(CONFIG_PPC_E500) || defined(CONFIG_PPC_BOOK3S_64)
static int __init handle_nospectre_v2(char *p)
{
no_spectrev2 = true;
@@ -124,17 +130,17 @@ static int __init handle_nospectre_v2(char *p)
return 0;
}
early_param("nospectre_v2", handle_nospectre_v2);
-#endif /* CONFIG_PPC_FSL_BOOK3E || CONFIG_PPC_BOOK3S_64 */
+#endif /* CONFIG_PPC_E500 || CONFIG_PPC_BOOK3S_64 */
-#ifdef CONFIG_PPC_FSL_BOOK3E
-void setup_spectre_v2(void)
+#ifdef CONFIG_PPC_E500
+void __init setup_spectre_v2(void)
{
if (no_spectrev2 || cpu_mitigations_off())
do_btb_flush_fixups();
else
btb_flush_enabled = true;
}
-#endif /* CONFIG_PPC_FSL_BOOK3E */
+#endif /* CONFIG_PPC_E500 */
#ifdef CONFIG_PPC_BOOK3S_64
ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
@@ -216,24 +222,25 @@ ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, c
if (ccd)
seq_buf_printf(&s, "Indirect branch cache disabled");
- if (link_stack_flush_enabled)
- seq_buf_printf(&s, ", Software link stack flush");
-
- } else if (count_cache_flush_type != COUNT_CACHE_FLUSH_NONE) {
+ } else if (count_cache_flush_type != BRANCH_CACHE_FLUSH_NONE) {
seq_buf_printf(&s, "Mitigation: Software count cache flush");
- if (count_cache_flush_type == COUNT_CACHE_FLUSH_HW)
+ if (count_cache_flush_type == BRANCH_CACHE_FLUSH_HW)
seq_buf_printf(&s, " (hardware accelerated)");
- if (link_stack_flush_enabled)
- seq_buf_printf(&s, ", Software link stack flush");
-
} else if (btb_flush_enabled) {
seq_buf_printf(&s, "Mitigation: Branch predictor state flush");
} else {
seq_buf_printf(&s, "Vulnerable");
}
+ if (bcs || ccd || count_cache_flush_type != BRANCH_CACHE_FLUSH_NONE) {
+ if (link_stack_flush_type != BRANCH_CACHE_FLUSH_NONE)
+ seq_buf_printf(&s, ", Software link stack flush");
+ if (link_stack_flush_type == BRANCH_CACHE_FLUSH_HW)
+ seq_buf_printf(&s, " (hardware accelerated)");
+ }
+
seq_buf_printf(&s, "\n");
return s.len;
@@ -246,7 +253,7 @@ ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, c
static enum stf_barrier_type stf_enabled_flush_types;
static bool no_stf_barrier;
-bool stf_barrier;
+static bool stf_barrier;
static int __init handle_no_stf_barrier(char *p)
{
@@ -257,6 +264,11 @@ static int __init handle_no_stf_barrier(char *p)
early_param("no_stf_barrier", handle_no_stf_barrier);
+enum stf_barrier_type stf_barrier_type_get(void)
+{
+ return stf_enabled_flush_types;
+}
+
/* This is the generic flag used by other architectures */
static int __init handle_ssbd(char *p)
{
@@ -294,9 +306,7 @@ static void stf_barrier_enable(bool enable)
void setup_stf_barrier(void)
{
enum stf_barrier_type type;
- bool enable, hv;
-
- hv = cpu_has_feature(CPU_FTR_HVMODE);
+ bool enable;
/* Default to fallback in case fw-features are not available */
if (cpu_has_feature(CPU_FTR_ARCH_300))
@@ -309,8 +319,7 @@ void setup_stf_barrier(void)
type = STF_BARRIER_NONE;
enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
- (security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR) ||
- (security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV) && hv));
+ security_ftr_enabled(SEC_FTR_STF_BARRIER);
if (type == STF_BARRIER_FALLBACK) {
pr_info("stf-barrier: fallback barrier available\n");
@@ -353,6 +362,40 @@ ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *
return sprintf(buf, "Vulnerable\n");
}
+static int ssb_prctl_get(struct task_struct *task)
+{
+ if (stf_enabled_flush_types == STF_BARRIER_NONE)
+ /*
+ * We don't have an explicit signal from firmware that we're
+ * vulnerable or not, we only have certain CPU revisions that
+ * are known to be vulnerable.
+ *
+ * We assume that if we're on another CPU, where the barrier is
+ * NONE, then we are not vulnerable.
+ */
+ return PR_SPEC_NOT_AFFECTED;
+ else
+ /*
+ * If we do have a barrier type then we are vulnerable. The
+ * barrier is not a global or per-process mitigation, so the
+ * only value we can report here is PR_SPEC_ENABLE, which
+ * appears as "vulnerable" in /proc.
+ */
+ return PR_SPEC_ENABLE;
+
+ return -EINVAL;
+}
+
+int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
+{
+ switch (which) {
+ case PR_SPEC_STORE_BYPASS:
+ return ssb_prctl_get(task);
+ default:
+ return -ENODEV;
+ }
+}
+
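A minimal user-space sketch (illustrative, not part of the patch) of how the value reported by ssb_prctl_get() is surfaced: the generic PR_GET_SPECULATION_CTRL prctl queries the store-bypass status of the calling task.

#include <stdio.h>
#include <sys/prctl.h>
#include <linux/prctl.h>

int main(void)
{
	/* Returns PR_SPEC_NOT_AFFECTED or PR_SPEC_ENABLE on powerpc. */
	int ret = prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0);

	if (ret < 0) {
		perror("prctl");
		return 1;
	}
	printf("spec_store_bypass: 0x%x\n", ret);
	return 0;
}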
#ifdef CONFIG_DEBUG_FS
static int stf_barrier_set(void *data, u64 val)
{
@@ -383,65 +426,103 @@ DEFINE_DEBUGFS_ATTRIBUTE(fops_stf_barrier, stf_barrier_get, stf_barrier_set,
static __init int stf_barrier_debugfs_init(void)
{
- debugfs_create_file_unsafe("stf_barrier", 0600, powerpc_debugfs_root,
+ debugfs_create_file_unsafe("stf_barrier", 0600, arch_debugfs_dir,
NULL, &fops_stf_barrier);
return 0;
}
device_initcall(stf_barrier_debugfs_init);
#endif /* CONFIG_DEBUG_FS */
-static void no_count_cache_flush(void)
-{
- count_cache_flush_type = COUNT_CACHE_FLUSH_NONE;
- pr_info("count-cache-flush: software flush disabled.\n");
-}
-
-static void toggle_count_cache_flush(bool enable)
+static void update_branch_cache_flush(void)
{
- if (!security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE) &&
- !security_ftr_enabled(SEC_FTR_FLUSH_LINK_STACK))
- enable = false;
+ u32 *site, __maybe_unused *site2;
- if (!enable) {
- patch_instruction_site(&patch__call_flush_count_cache, PPC_INST_NOP);
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
- patch_instruction_site(&patch__call_kvm_flush_link_stack, PPC_INST_NOP);
-#endif
- pr_info("link-stack-flush: software flush disabled.\n");
- link_stack_flush_enabled = false;
- no_count_cache_flush();
- return;
+ site = &patch__call_kvm_flush_link_stack;
+ site2 = &patch__call_kvm_flush_link_stack_p9;
+ // This controls the branch from guest_exit_cont to kvm_flush_link_stack
+ if (link_stack_flush_type == BRANCH_CACHE_FLUSH_NONE) {
+ patch_instruction_site(site, ppc_inst(PPC_RAW_NOP()));
+ patch_instruction_site(site2, ppc_inst(PPC_RAW_NOP()));
+ } else {
+ // Could use HW flush, but that could also flush count cache
+ patch_branch_site(site, (u64)&kvm_flush_link_stack, BRANCH_SET_LINK);
+ patch_branch_site(site2, (u64)&kvm_flush_link_stack, BRANCH_SET_LINK);
}
+#endif
- // This enables the branch from _switch to flush_count_cache
- patch_branch_site(&patch__call_flush_count_cache,
- (u64)&flush_count_cache, BRANCH_SET_LINK);
+ // Patch out the bcctr first, then nop the rest
+ site = &patch__call_flush_branch_caches3;
+ patch_instruction_site(site, ppc_inst(PPC_RAW_NOP()));
+ site = &patch__call_flush_branch_caches2;
+ patch_instruction_site(site, ppc_inst(PPC_RAW_NOP()));
+ site = &patch__call_flush_branch_caches1;
+ patch_instruction_site(site, ppc_inst(PPC_RAW_NOP()));
+
+ // This controls the branch from _switch to flush_branch_caches
+ if (count_cache_flush_type == BRANCH_CACHE_FLUSH_NONE &&
+ link_stack_flush_type == BRANCH_CACHE_FLUSH_NONE) {
+ // Nothing to be done
+
+ } else if (count_cache_flush_type == BRANCH_CACHE_FLUSH_HW &&
+ link_stack_flush_type == BRANCH_CACHE_FLUSH_HW) {
+ // Patch in the bcctr last
+ site = &patch__call_flush_branch_caches1;
+ patch_instruction_site(site, ppc_inst(0x39207fff)); // li r9,0x7fff
+ site = &patch__call_flush_branch_caches2;
+ patch_instruction_site(site, ppc_inst(0x7d2903a6)); // mtctr r9
+ site = &patch__call_flush_branch_caches3;
+ patch_instruction_site(site, ppc_inst(PPC_INST_BCCTR_FLUSH));
-#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
- // This enables the branch from guest_exit_cont to kvm_flush_link_stack
- patch_branch_site(&patch__call_kvm_flush_link_stack,
- (u64)&kvm_flush_link_stack, BRANCH_SET_LINK);
-#endif
+ } else {
+ patch_branch_site(site, (u64)&flush_branch_caches, BRANCH_SET_LINK);
- pr_info("link-stack-flush: software flush enabled.\n");
- link_stack_flush_enabled = true;
+ // If we just need to flush the link stack, early return
+ if (count_cache_flush_type == BRANCH_CACHE_FLUSH_NONE) {
+ patch_instruction_site(&patch__flush_link_stack_return,
+ ppc_inst(PPC_RAW_BLR()));
- // If we just need to flush the link stack, patch an early return
- if (!security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE)) {
- patch_instruction_site(&patch__flush_link_stack_return, PPC_INST_BLR);
- no_count_cache_flush();
- return;
+ // If we have flush instruction, early return
+ } else if (count_cache_flush_type == BRANCH_CACHE_FLUSH_HW) {
+ patch_instruction_site(&patch__flush_count_cache_return,
+ ppc_inst(PPC_RAW_BLR()));
+ }
+ }
+}
+
+static void toggle_branch_cache_flush(bool enable)
+{
+ if (!enable || !security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE)) {
+ if (count_cache_flush_type != BRANCH_CACHE_FLUSH_NONE)
+ count_cache_flush_type = BRANCH_CACHE_FLUSH_NONE;
+
+ pr_info("count-cache-flush: flush disabled.\n");
+ } else {
+ if (security_ftr_enabled(SEC_FTR_BCCTR_FLUSH_ASSIST)) {
+ count_cache_flush_type = BRANCH_CACHE_FLUSH_HW;
+ pr_info("count-cache-flush: hardware flush enabled.\n");
+ } else {
+ count_cache_flush_type = BRANCH_CACHE_FLUSH_SW;
+ pr_info("count-cache-flush: software flush enabled.\n");
+ }
}
- if (!security_ftr_enabled(SEC_FTR_BCCTR_FLUSH_ASSIST)) {
- count_cache_flush_type = COUNT_CACHE_FLUSH_SW;
- pr_info("count-cache-flush: full software flush sequence enabled.\n");
- return;
+ if (!enable || !security_ftr_enabled(SEC_FTR_FLUSH_LINK_STACK)) {
+ if (link_stack_flush_type != BRANCH_CACHE_FLUSH_NONE)
+ link_stack_flush_type = BRANCH_CACHE_FLUSH_NONE;
+
+ pr_info("link-stack-flush: flush disabled.\n");
+ } else {
+ if (security_ftr_enabled(SEC_FTR_BCCTR_LINK_FLUSH_ASSIST)) {
+ link_stack_flush_type = BRANCH_CACHE_FLUSH_HW;
+ pr_info("link-stack-flush: hardware flush enabled.\n");
+ } else {
+ link_stack_flush_type = BRANCH_CACHE_FLUSH_SW;
+ pr_info("link-stack-flush: software flush enabled.\n");
+ }
}
- patch_instruction_site(&patch__flush_count_cache_return, PPC_INST_BLR);
- count_cache_flush_type = COUNT_CACHE_FLUSH_HW;
- pr_info("count-cache-flush: hardware assisted flush sequence enabled\n");
+ update_branch_cache_flush();
}
void setup_count_cache_flush(void)
@@ -465,7 +546,179 @@ void setup_count_cache_flush(void)
security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE))
security_ftr_set(SEC_FTR_FLUSH_LINK_STACK);
- toggle_count_cache_flush(enable);
+ toggle_branch_cache_flush(enable);
+}
+
+static enum l1d_flush_type enabled_flush_types;
+static void *l1d_flush_fallback_area;
+static bool no_rfi_flush;
+static bool no_entry_flush;
+static bool no_uaccess_flush;
+bool rfi_flush;
+static bool entry_flush;
+static bool uaccess_flush;
+DEFINE_STATIC_KEY_FALSE(uaccess_flush_key);
+EXPORT_SYMBOL(uaccess_flush_key);
+
+static int __init handle_no_rfi_flush(char *p)
+{
+ pr_info("rfi-flush: disabled on command line.");
+ no_rfi_flush = true;
+ return 0;
+}
+early_param("no_rfi_flush", handle_no_rfi_flush);
+
+static int __init handle_no_entry_flush(char *p)
+{
+ pr_info("entry-flush: disabled on command line.");
+ no_entry_flush = true;
+ return 0;
+}
+early_param("no_entry_flush", handle_no_entry_flush);
+
+static int __init handle_no_uaccess_flush(char *p)
+{
+ pr_info("uaccess-flush: disabled on command line.");
+ no_uaccess_flush = true;
+ return 0;
+}
+early_param("no_uaccess_flush", handle_no_uaccess_flush);
+
+/*
+ * The RFI flush is not KPTI, but because users will see doco that says to use
+ * nopti we hijack that option here to also disable the RFI flush.
+ */
+static int __init handle_no_pti(char *p)
+{
+ pr_info("rfi-flush: disabling due to 'nopti' on command line.\n");
+ handle_no_rfi_flush(NULL);
+ return 0;
+}
+early_param("nopti", handle_no_pti);
+
+static void do_nothing(void *unused)
+{
+ /*
+ * We don't need to do the flush explicitly, just enter+exit kernel is
+ * sufficient, the RFI exit handlers will do the right thing.
+ */
+}
+
+void rfi_flush_enable(bool enable)
+{
+ if (enable) {
+ do_rfi_flush_fixups(enabled_flush_types);
+ on_each_cpu(do_nothing, NULL, 1);
+ } else
+ do_rfi_flush_fixups(L1D_FLUSH_NONE);
+
+ rfi_flush = enable;
+}
+
+static void entry_flush_enable(bool enable)
+{
+ if (enable) {
+ do_entry_flush_fixups(enabled_flush_types);
+ on_each_cpu(do_nothing, NULL, 1);
+ } else {
+ do_entry_flush_fixups(L1D_FLUSH_NONE);
+ }
+
+ entry_flush = enable;
+}
+
+static void uaccess_flush_enable(bool enable)
+{
+ if (enable) {
+ do_uaccess_flush_fixups(enabled_flush_types);
+ static_branch_enable(&uaccess_flush_key);
+ on_each_cpu(do_nothing, NULL, 1);
+ } else {
+ static_branch_disable(&uaccess_flush_key);
+ do_uaccess_flush_fixups(L1D_FLUSH_NONE);
+ }
+
+ uaccess_flush = enable;
+}
+
+static void __ref init_fallback_flush(void)
+{
+ u64 l1d_size, limit;
+ int cpu;
+
+ /* Only allocate the fallback flush area once (at boot time). */
+ if (l1d_flush_fallback_area)
+ return;
+
+ l1d_size = ppc64_caches.l1d.size;
+
+ /*
+ * If there is no d-cache-size property in the device tree, l1d_size
+ * could be zero. That leads to the loop in the asm wrapping around to
+ * 2^64-1, and then walking off the end of the fallback area and
+ * eventually causing a page fault which is fatal. Just default to
+ * something vaguely sane.
+ */
+ if (!l1d_size)
+ l1d_size = (64 * 1024);
+
+ limit = min(ppc64_bolted_size(), ppc64_rma_size);
+
+ /*
+ * Align to L1d size, and size it at 2x L1d size, to catch possible
+ * hardware prefetch runoff. We don't have a recipe for load patterns to
+ * reliably avoid the prefetcher.
+ */
+ l1d_flush_fallback_area = memblock_alloc_try_nid(l1d_size * 2,
+ l1d_size, MEMBLOCK_LOW_LIMIT,
+ limit, NUMA_NO_NODE);
+ if (!l1d_flush_fallback_area)
+ panic("%s: Failed to allocate %llu bytes align=0x%llx max_addr=%pa\n",
+ __func__, l1d_size * 2, l1d_size, &limit);
+
+
+ for_each_possible_cpu(cpu) {
+ struct paca_struct *paca = paca_ptrs[cpu];
+ paca->rfi_flush_fallback_area = l1d_flush_fallback_area;
+ paca->l1d_flush_size = l1d_size;
+ }
+}
+
+void setup_rfi_flush(enum l1d_flush_type types, bool enable)
+{
+ if (types & L1D_FLUSH_FALLBACK) {
+ pr_info("rfi-flush: fallback displacement flush available\n");
+ init_fallback_flush();
+ }
+
+ if (types & L1D_FLUSH_ORI)
+ pr_info("rfi-flush: ori type flush available\n");
+
+ if (types & L1D_FLUSH_MTTRIG)
+ pr_info("rfi-flush: mttrig type flush available\n");
+
+ enabled_flush_types = types;
+
+ if (!cpu_mitigations_off() && !no_rfi_flush)
+ rfi_flush_enable(enable);
+}
+
+void setup_entry_flush(bool enable)
+{
+ if (cpu_mitigations_off())
+ return;
+
+ if (!no_entry_flush)
+ entry_flush_enable(enable);
+}
+
+void setup_uaccess_flush(bool enable)
+{
+ if (cpu_mitigations_off())
+ return;
+
+ if (!no_uaccess_flush)
+ uaccess_flush_enable(enable);
}
#ifdef CONFIG_DEBUG_FS
@@ -480,14 +733,24 @@ static int count_cache_flush_set(void *data, u64 val)
else
return -EINVAL;
- toggle_count_cache_flush(enable);
+ toggle_branch_cache_flush(enable);
return 0;
}
static int count_cache_flush_get(void *data, u64 *val)
{
- if (count_cache_flush_type == COUNT_CACHE_FLUSH_NONE)
+ if (count_cache_flush_type == BRANCH_CACHE_FLUSH_NONE)
+ *val = 0;
+ else
+ *val = 1;
+
+ return 0;
+}
+
+static int link_stack_flush_get(void *data, u64 *val)
+{
+ if (link_stack_flush_type == BRANCH_CACHE_FLUSH_NONE)
*val = 0;
else
*val = 1;
@@ -497,14 +760,106 @@ static int count_cache_flush_get(void *data, u64 *val)
DEFINE_DEBUGFS_ATTRIBUTE(fops_count_cache_flush, count_cache_flush_get,
count_cache_flush_set, "%llu\n");
+DEFINE_DEBUGFS_ATTRIBUTE(fops_link_stack_flush, link_stack_flush_get,
+ count_cache_flush_set, "%llu\n");
static __init int count_cache_flush_debugfs_init(void)
{
debugfs_create_file_unsafe("count_cache_flush", 0600,
- powerpc_debugfs_root, NULL,
+ arch_debugfs_dir, NULL,
&fops_count_cache_flush);
+ debugfs_create_file_unsafe("link_stack_flush", 0600,
+ arch_debugfs_dir, NULL,
+ &fops_link_stack_flush);
return 0;
}
device_initcall(count_cache_flush_debugfs_init);
+
+static int rfi_flush_set(void *data, u64 val)
+{
+ bool enable;
+
+ if (val == 1)
+ enable = true;
+ else if (val == 0)
+ enable = false;
+ else
+ return -EINVAL;
+
+ /* Only do anything if we're changing state */
+ if (enable != rfi_flush)
+ rfi_flush_enable(enable);
+
+ return 0;
+}
+
+static int rfi_flush_get(void *data, u64 *val)
+{
+ *val = rfi_flush ? 1 : 0;
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(fops_rfi_flush, rfi_flush_get, rfi_flush_set, "%llu\n");
+
+static int entry_flush_set(void *data, u64 val)
+{
+ bool enable;
+
+ if (val == 1)
+ enable = true;
+ else if (val == 0)
+ enable = false;
+ else
+ return -EINVAL;
+
+ /* Only do anything if we're changing state */
+ if (enable != entry_flush)
+ entry_flush_enable(enable);
+
+ return 0;
+}
+
+static int entry_flush_get(void *data, u64 *val)
+{
+ *val = entry_flush ? 1 : 0;
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(fops_entry_flush, entry_flush_get, entry_flush_set, "%llu\n");
+
+static int uaccess_flush_set(void *data, u64 val)
+{
+ bool enable;
+
+ if (val == 1)
+ enable = true;
+ else if (val == 0)
+ enable = false;
+ else
+ return -EINVAL;
+
+ /* Only do anything if we're changing state */
+ if (enable != uaccess_flush)
+ uaccess_flush_enable(enable);
+
+ return 0;
+}
+
+static int uaccess_flush_get(void *data, u64 *val)
+{
+ *val = uaccess_flush ? 1 : 0;
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(fops_uaccess_flush, uaccess_flush_get, uaccess_flush_set, "%llu\n");
+
+static __init int rfi_flush_debugfs_init(void)
+{
+ debugfs_create_file("rfi_flush", 0600, arch_debugfs_dir, NULL, &fops_rfi_flush);
+ debugfs_create_file("entry_flush", 0600, arch_debugfs_dir, NULL, &fops_entry_flush);
+ debugfs_create_file("uaccess_flush", 0600, arch_debugfs_dir, NULL, &fops_uaccess_flush);
+ return 0;
+}
+device_initcall(rfi_flush_debugfs_init);
#endif /* CONFIG_DEBUG_FS */
#endif /* CONFIG_PPC_BOOK3S_64 */
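The debugfs files registered above (rfi_flush, entry_flush, uaccess_flush, count_cache_flush, link_stack_flush) accept 0/1 writes. A minimal sketch of toggling one of them, assuming debugfs is mounted at /sys/kernel/debug and that arch_debugfs_dir corresponds to a "powerpc" subdirectory; the path is an assumption, not taken from the patch.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Hypothetical path; depends on where debugfs is mounted. */
	int fd = open("/sys/kernel/debug/powerpc/rfi_flush", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, "1", 1) != 1)
		perror("write");
	close(fd);
	return 0;
}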
diff --git a/arch/powerpc/kernel/secvar-sysfs.c b/arch/powerpc/kernel/secvar-sysfs.c
index a0a78aba2083..1ee4640a2641 100644
--- a/arch/powerpc/kernel/secvar-sysfs.c
+++ b/arch/powerpc/kernel/secvar-sysfs.c
@@ -26,15 +26,18 @@ static ssize_t format_show(struct kobject *kobj, struct kobj_attribute *attr,
const char *format;
node = of_find_compatible_node(NULL, NULL, "ibm,secvar-backend");
- if (!of_device_is_available(node))
- return -ENODEV;
+ if (!of_device_is_available(node)) {
+ rc = -ENODEV;
+ goto out;
+ }
rc = of_property_read_string(node, "format", &format);
if (rc)
- return rc;
+ goto out;
rc = sprintf(buf, "%s\n", format);
+out:
of_node_put(node);
return rc;
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 7f8c890360fe..6d041993a45d 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -9,6 +9,7 @@
#undef DEBUG
#include <linux/export.h>
+#include <linux/panic_notifier.h>
#include <linux/string.h>
#include <linux/sched.h>
#include <linux/init.h>
@@ -17,27 +18,28 @@
#include <linux/delay.h>
#include <linux/initrd.h>
#include <linux/platform_device.h>
+#include <linux/printk.h>
#include <linux/seq_file.h>
#include <linux/ioport.h>
#include <linux/console.h>
#include <linux/screen_info.h>
#include <linux/root_dev.h>
-#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/unistd.h>
+#include <linux/seq_buf.h>
#include <linux/serial.h>
#include <linux/serial_8250.h>
#include <linux/percpu.h>
#include <linux/memblock.h>
+#include <linux/of_irq.h>
+#include <linux/of_fdt.h>
#include <linux/of_platform.h>
#include <linux/hugetlb.h>
-#include <asm/debugfs.h>
+#include <linux/pgtable.h>
#include <asm/io.h>
#include <asm/paca.h>
-#include <asm/prom.h>
#include <asm/processor.h>
#include <asm/vdso_datapage.h>
-#include <asm/pgtable.h>
#include <asm/smp.h>
#include <asm/elf.h>
#include <asm/machdep.h>
@@ -64,11 +66,11 @@
#include <asm/mmu_context.h>
#include <asm/cpu_has_feature.h>
#include <asm/kasan.h>
+#include <asm/mce.h>
#include "setup.h"
#ifdef DEBUG
-#include <asm/udbg.h>
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
@@ -90,10 +92,6 @@ EXPORT_SYMBOL_GPL(boot_cpuid);
*/
int dcache_bsize;
int icache_bsize;
-int ucache_bsize;
-
-
-unsigned long klimit = (unsigned long) _end;
/*
* This still seems to be needed... -- paulus
@@ -165,9 +163,7 @@ void machine_restart(char *cmd)
void machine_power_off(void)
{
machine_shutdown();
- if (pm_power_off)
- pm_power_off();
-
+ do_kernel_power_off();
smp_send_stop();
machine_hang();
}
@@ -177,6 +173,14 @@ EXPORT_SYMBOL_GPL(machine_power_off);
void (*pm_power_off)(void);
EXPORT_SYMBOL_GPL(pm_power_off);
+size_t __must_check arch_get_random_seed_longs(unsigned long *v, size_t max_longs)
+{
+ if (max_longs && ppc_md.get_random_seed && ppc_md.get_random_seed(v))
+ return 1;
+ return 0;
+}
+EXPORT_SYMBOL(arch_get_random_seed_longs);
+
void machine_halt(void)
{
machine_shutdown();
@@ -239,18 +243,17 @@ static int show_cpuinfo(struct seq_file *m, void *v)
maj = (pvr >> 8) & 0xFF;
min = pvr & 0xFF;
- seq_printf(m, "processor\t: %lu\n", cpu_id);
- seq_printf(m, "cpu\t\t: ");
+ seq_printf(m, "processor\t: %lu\ncpu\t\t: ", cpu_id);
if (cur_cpu_spec->pvr_mask && cur_cpu_spec->cpu_name)
- seq_printf(m, "%s", cur_cpu_spec->cpu_name);
+ seq_puts(m, cur_cpu_spec->cpu_name);
else
seq_printf(m, "unknown (%08x)", pvr);
if (cpu_has_feature(CPU_FTR_ALTIVEC))
- seq_printf(m, ", altivec supported");
+ seq_puts(m, ", altivec supported");
- seq_printf(m, "\n");
+ seq_putc(m, '\n');
#ifdef CONFIG_TAU
if (cpu_has_feature(CPU_FTR_TAU)) {
@@ -283,11 +286,8 @@ static int show_cpuinfo(struct seq_file *m, void *v)
seq_printf(m, "clock\t\t: %lu.%06luMHz\n",
proc_freq / 1000000, proc_freq % 1000000);
- if (ppc_md.show_percpuinfo != NULL)
- ppc_md.show_percpuinfo(m, cpu_id);
-
/* If we are a Freescale core do a simple check so
- * we dont have to keep adding cases in the future */
+ * we don't have to keep adding cases in the future */
if (PVR_VER(pvr) & 0x8000) {
switch (PVR_VER(pvr)) {
case 0x8000: /* 7441/7450/7451, Voyager */
@@ -306,15 +306,12 @@ static int show_cpuinfo(struct seq_file *m, void *v)
}
} else {
switch (PVR_VER(pvr)) {
- case 0x0020: /* 403 family */
- maj = PVR_MAJ(pvr) + 1;
- min = PVR_MIN(pvr);
- break;
case 0x1008: /* 740P/750P ?? */
maj = ((pvr >> 8) & 0xFF) - 1;
min = pvr & 0xFF;
break;
case 0x004e: /* POWER9 bits 12-15 give chip type */
+ case 0x0080: /* POWER10 bit 12 gives SMT8/4 */
maj = (pvr >> 8) & 0x0F;
min = pvr & 0xFF;
break;
@@ -332,7 +329,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
seq_printf(m, "bogomips\t: %lu.%02lu\n", loops_per_jiffy / (500000 / HZ),
(loops_per_jiffy / (5000 / HZ)) % 100);
- seq_printf(m, "\n");
+ seq_putc(m, '\n');
/* If this is the last cpu, print the summary */
if (cpumask_next(cpu_id, cpu_online_mask) >= nr_cpu_ids)
@@ -467,8 +464,8 @@ void __init smp_setup_cpu_maps(void)
intserv = of_get_property(dn, "ibm,ppc-interrupt-server#s",
&len);
if (intserv) {
- DBG(" ibm,ppc-interrupt-server#s -> %d threads\n",
- nthreads);
+ DBG(" ibm,ppc-interrupt-server#s -> %lu threads\n",
+ (len / sizeof(int)));
} else {
DBG(" no ibm,ppc-interrupt-server#s -> 1 thread\n");
intserv = of_get_property(dn, "reg", &len);
@@ -593,7 +590,16 @@ static __init int add_pcspkr(void)
device_initcall(add_pcspkr);
#endif /* CONFIG_PCSPKR_PLATFORM */
-void probe_machine(void)
+static char ppc_hw_desc_buf[128] __initdata;
+
+struct seq_buf ppc_hw_desc __initdata = {
+ .buffer = ppc_hw_desc_buf,
+ .size = sizeof(ppc_hw_desc_buf),
+ .len = 0,
+ .readpos = 0,
+};
+
+static __init void probe_machine(void)
{
extern struct machdep_calls __machine_desc_start;
extern struct machdep_calls __machine_desc_end;
@@ -633,7 +639,13 @@ void probe_machine(void)
for (;;);
}
- printk(KERN_INFO "Using %s machine description\n", ppc_md.name);
+ // Append the machine name to other info we've gathered
+ seq_buf_puts(&ppc_hw_desc, ppc_md.name);
+
+ // Set the generic hardware description shown in oopses
+ dump_stack_set_arch_desc(ppc_hw_desc.buffer);
+
+ pr_info("Hardware name: %s\n", ppc_hw_desc.buffer);
}
/* Match a class of boards, not a specific device configuration. */
@@ -691,8 +703,25 @@ int check_legacy_ioport(unsigned long base_port)
}
EXPORT_SYMBOL(check_legacy_ioport);
-static int ppc_panic_event(struct notifier_block *this,
- unsigned long event, void *ptr)
+/*
+ * Panic notifiers setup
+ *
+ * We have 3 notifiers for powerpc, each one of a different nature:
+ *
+ * - ppc_panic_fadump_handler() is a hypervisor notifier, which hard-disables
+ * IRQs and deals with the Firmware-Assisted dump, when it is configured;
+ * should run early in the panic path.
+ *
+ * - dump_kernel_offset() is an informative notifier, just showing the KASLR
+ * offset if we have RANDOMIZE_BASE set.
+ *
+ * - ppc_panic_platform_handler() is a low-level handler that's registered
+ * only if the platform wishes to perform final actions in the panic path,
+ * hence it should run late and might not even return. Currently, only
+ * pseries and ps3 platforms register callbacks.
+ */
+static int ppc_panic_fadump_handler(struct notifier_block *this,
+ unsigned long event, void *ptr)
{
/*
* panic does a local_irq_disable, but we really
@@ -702,45 +731,63 @@ static int ppc_panic_event(struct notifier_block *this,
/*
* If firmware-assisted dump has been registered then trigger
- * firmware-assisted dump and let firmware handle everything else.
+ * its callback and let the firmware handle everything else.
*/
crash_fadump(NULL, ptr);
- if (ppc_md.panic)
- ppc_md.panic(ptr); /* May not return */
+
return NOTIFY_DONE;
}
-static struct notifier_block ppc_panic_block = {
- .notifier_call = ppc_panic_event,
- .priority = INT_MIN /* may not return; must be done last */
-};
-
-/*
- * Dump out kernel offset information on panic.
- */
static int dump_kernel_offset(struct notifier_block *self, unsigned long v,
void *p)
{
pr_emerg("Kernel Offset: 0x%lx from 0x%lx\n",
kaslr_offset(), KERNELBASE);
- return 0;
+ return NOTIFY_DONE;
}
+static int ppc_panic_platform_handler(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ /*
+ * This handler is only registered if we have a panic callback
+ * on ppc_md, hence a NULL check is not needed.
+ * Also, it may not return, so it runs really late in the panic path.
+ */
+ ppc_md.panic(ptr);
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block ppc_fadump_block = {
+ .notifier_call = ppc_panic_fadump_handler,
+ .priority = INT_MAX, /* run early, to notify the firmware ASAP */
+};
+
static struct notifier_block kernel_offset_notifier = {
- .notifier_call = dump_kernel_offset
+ .notifier_call = dump_kernel_offset,
+};
+
+static struct notifier_block ppc_panic_block = {
+ .notifier_call = ppc_panic_platform_handler,
+ .priority = INT_MIN, /* may not return; must be done last */
};
void __init setup_panic(void)
{
+ /* Hard-disables IRQs + deal with FW-assisted dump (fadump) */
+ atomic_notifier_chain_register(&panic_notifier_list,
+ &ppc_fadump_block);
+
if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && kaslr_offset() > 0)
atomic_notifier_chain_register(&panic_notifier_list,
&kernel_offset_notifier);
- /* PPC64 always does a hard irq disable in its panic handler */
- if (!IS_ENABLED(CONFIG_PPC64) && !ppc_md.panic)
- return;
- atomic_notifier_chain_register(&panic_notifier_list, &ppc_panic_block);
+ /* Low-level platform-specific routines that should run on panic */
+ if (ppc_md.panic)
+ atomic_notifier_chain_register(&panic_notifier_list,
+ &ppc_panic_block);
}
#ifdef CONFIG_CHECK_CACHE_COHERENCY
@@ -780,19 +827,6 @@ static int __init check_cache_coherency(void)
late_initcall(check_cache_coherency);
#endif /* CONFIG_CHECK_CACHE_COHERENCY */
-#ifdef CONFIG_DEBUG_FS
-struct dentry *powerpc_debugfs_root;
-EXPORT_SYMBOL(powerpc_debugfs_root);
-
-static int powerpc_debugfs_init(void)
-{
- powerpc_debugfs_root = debugfs_create_dir("powerpc", NULL);
-
- return powerpc_debugfs_root == NULL;
-}
-arch_initcall(powerpc_debugfs_init);
-#endif
-
void ppc_printk_progress(char *s, unsigned short hex)
{
pr_info("%s\n", s);
@@ -806,8 +840,6 @@ static __init void print_system_info(void)
pr_info("dcache_bsize = 0x%x\n", dcache_bsize);
pr_info("icache_bsize = 0x%x\n", icache_bsize);
- if (ucache_bsize != 0)
- pr_info("ucache_bsize = 0x%x\n", ucache_bsize);
pr_info("cpu_features = 0x%016lx\n", cur_cpu_spec->cpu_features);
pr_info(" possible = 0x%016lx\n",
@@ -837,7 +869,7 @@ static __init void print_system_info(void)
}
#ifdef CONFIG_SMP
-static void smp_setup_pacas(void)
+static void __init smp_setup_pacas(void)
{
int cpu;
@@ -848,7 +880,7 @@ static void smp_setup_pacas(void)
set_hard_smp_processor_id(cpu, cpu_to_phys_id[cpu]);
}
- memblock_free(__pa(cpu_to_phys_id), nr_cpu_ids * sizeof(u32));
+ memblock_free(cpu_to_phys_id, nr_cpu_ids * sizeof(u32));
cpu_to_phys_id = NULL;
}
#endif
@@ -923,30 +955,32 @@ void __init setup_arch(char **cmdline_p)
/* On BookE, setup per-core TLB data structures. */
setup_tlb_core_data();
-
- smp_release_cpus();
#endif
/* Print various info about the machine that has been gathered so far. */
print_system_info();
- /* Reserve large chunks of memory for use by CMA for KVM. */
- kvm_cma_reserve();
-
klp_init_thread_info(&init_task);
- init_mm.start_code = (unsigned long)_stext;
- init_mm.end_code = (unsigned long) _etext;
- init_mm.end_data = (unsigned long) _edata;
- init_mm.brk = klimit;
+ setup_initial_init_mm(_stext, _etext, _edata, _end);
mm_iommu_init(&init_mm);
irqstack_early_init();
exc_lvl_early_init();
emergency_stack_init();
+ mce_init();
+ smp_release_cpus();
+
initmem_init();
+ /*
+ * Reserve large chunks of memory for use by CMA for KVM and hugetlb. These must
+ * be called after initmem_init(), so that pageblock_order is initialised.
+ */
+ kvm_cma_reserve();
+ gigantic_hugetlb_cma_reserve();
+
early_memtest(min_low_pfn << PAGE_SHIFT, max_low_pfn << PAGE_SHIFT);
if (ppc_md.setup_arch)
diff --git a/arch/powerpc/kernel/setup.h b/arch/powerpc/kernel/setup.h
index 2dd0d9cb5a20..7912bb50a7cb 100644
--- a/arch/powerpc/kernel/setup.h
+++ b/arch/powerpc/kernel/setup.h
@@ -14,31 +14,31 @@ void irqstack_early_init(void);
#ifdef CONFIG_PPC32
void setup_power_save(void);
#else
-static inline void setup_power_save(void) { };
+static inline void setup_power_save(void) { }
#endif
#if defined(CONFIG_PPC64) && defined(CONFIG_SMP)
void check_smt_enabled(void);
#else
-static inline void check_smt_enabled(void) { };
+static inline void check_smt_enabled(void) { }
#endif
-#if defined(CONFIG_PPC_BOOK3E) && defined(CONFIG_SMP)
+#if defined(CONFIG_PPC_BOOK3E_64) && defined(CONFIG_SMP)
void setup_tlb_core_data(void);
#else
-static inline void setup_tlb_core_data(void) { };
+static inline void setup_tlb_core_data(void) { }
#endif
-#if defined(CONFIG_PPC_BOOK3E) || defined(CONFIG_BOOKE) || defined(CONFIG_40x)
+#ifdef CONFIG_BOOKE_OR_40x
void exc_lvl_early_init(void);
#else
-static inline void exc_lvl_early_init(void) { };
+static inline void exc_lvl_early_init(void) { }
#endif
#if defined(CONFIG_PPC64) || defined(CONFIG_VMAP_STACK)
void emergency_stack_init(void);
#else
-static inline void emergency_stack_init(void) { };
+static inline void emergency_stack_init(void) { }
#endif
#ifdef CONFIG_PPC64
@@ -55,7 +55,7 @@ extern unsigned long spr_default_dscr;
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
void kvm_cma_reserve(void);
#else
-static inline void kvm_cma_reserve(void) { };
+static inline void kvm_cma_reserve(void) { }
#endif
#ifdef CONFIG_TAU
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index 5b49b26eb154..b761cc1a403c 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -19,11 +19,12 @@
#include <linux/memblock.h>
#include <linux/export.h>
#include <linux/nvram.h>
+#include <linux/pgtable.h>
+#include <linux/of_fdt.h>
+#include <linux/irq.h>
#include <asm/io.h>
-#include <asm/prom.h>
#include <asm/processor.h>
-#include <asm/pgtable.h>
#include <asm/setup.h>
#include <asm/smp.h>
#include <asm/elf.h>
@@ -58,7 +59,6 @@ EXPORT_SYMBOL_GPL(boot_cpuid_phys);
int smp_hw_index[NR_CPUS];
EXPORT_SYMBOL(smp_hw_index);
-unsigned long ISA_DMA_THRESHOLD;
unsigned int DMA_MODE_READ;
unsigned int DMA_MODE_WRITE;
@@ -75,20 +75,20 @@ EXPORT_SYMBOL(DMA_MODE_WRITE);
*/
notrace void __init machine_init(u64 dt_ptr)
{
- unsigned int *addr = (unsigned int *)patch_site_addr(&patch__memset_nocache);
- unsigned long insn;
+ u32 *addr = (u32 *)patch_site_addr(&patch__memset_nocache);
+ ppc_inst_t insn;
/* Configure static keys first, now that we're relocated. */
setup_feature_keys();
- early_ioremap_setup();
+ early_ioremap_init();
/* Enable early debugging if any specified (see udbg.h) */
udbg_early_init();
- patch_instruction_site(&patch__memcpy_nocache, PPC_INST_NOP);
+ patch_instruction_site(&patch__memcpy_nocache, ppc_inst(PPC_RAW_NOP()));
- insn = create_cond_branch(addr, branch_target(addr), 0x820000);
+ create_cond_branch(&insn, addr, branch_target(addr), 0x820000);
patch_instruction(addr, insn); /* replace b by bne cr0 */
/* Do some early initialization based on the flat device tree */
@@ -165,7 +165,7 @@ void __init irqstack_early_init(void)
}
#ifdef CONFIG_VMAP_STACK
-void *emergency_ctx[NR_CPUS] __ro_after_init;
+void *emergency_ctx[NR_CPUS] __ro_after_init = {[0] = &init_stack};
void __init emergency_stack_init(void)
{
@@ -176,7 +176,7 @@ void __init emergency_stack_init(void)
}
#endif
-#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
+#ifdef CONFIG_BOOKE_OR_40x
void __init exc_lvl_early_init(void)
{
unsigned int i, hw_cpu;
@@ -207,7 +207,7 @@ void __init setup_power_save(void)
ppc_md.power_save = ppc6xx_idle;
#endif
-#ifdef CONFIG_E500
+#ifdef CONFIG_PPC_E500
if (cpu_has_feature(CPU_FTR_CAN_DOZE) ||
cpu_has_feature(CPU_FTR_CAN_NAP))
ppc_md.power_save = e500_idle;
@@ -223,7 +223,4 @@ __init void initialize_cache_info(void)
*/
dcache_bsize = cur_cpu_spec->dcache_bsize;
icache_bsize = cur_cpu_spec->icache_bsize;
- ucache_bsize = 0;
- if (IS_ENABLED(CONFIG_PPC_BOOK3S_601) || IS_ENABLED(CONFIG_E200))
- ucache_bsize = icache_bsize = dcache_bsize;
}
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index e05e6dd67ae6..a0dee7354fe6 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -30,13 +30,15 @@
#include <linux/lockdep.h>
#include <linux/memory.h>
#include <linux/nmi.h>
+#include <linux/pgtable.h>
+#include <linux/of.h>
+#include <linux/of_fdt.h>
-#include <asm/debugfs.h>
+#include <asm/asm-prototypes.h>
+#include <asm/kvm_guest.h>
#include <asm/io.h>
#include <asm/kdump.h>
-#include <asm/prom.h>
#include <asm/processor.h>
-#include <asm/pgtable.h>
#include <asm/smp.h>
#include <asm/elf.h>
#include <asm/machdep.h>
@@ -59,13 +61,14 @@
#include <asm/udbg.h>
#include <asm/kexec.h>
#include <asm/code-patching.h>
-#include <asm/livepatch.h>
+#include <asm/ftrace.h>
#include <asm/opal.h>
#include <asm/cputhreads.h>
#include <asm/hw_irq.h>
#include <asm/feature-fixups.h>
#include <asm/kup.h>
#include <asm/early_ioremap.h>
+#include <asm/pgalloc.h>
#include "setup.h"
@@ -84,7 +87,7 @@ struct ppc64_caches ppc64_caches = {
};
EXPORT_SYMBOL_GPL(ppc64_caches);
-#if defined(CONFIG_PPC_BOOK3E) && defined(CONFIG_SMP)
+#if defined(CONFIG_PPC_BOOK3E_64) && defined(CONFIG_SMP)
void __init setup_tlb_core_data(void)
{
int cpu;
@@ -111,7 +114,6 @@ void __init setup_tlb_core_data(void)
* Should we panic instead?
*/
WARN_ONCE(smt_enabled_at_boot >= 2 &&
- !mmu_has_feature(MMU_FTR_USE_TLBRSRV) &&
book3e_htw_mode != PPC_HTW_E6500,
"%s: unsupported MMU configuration\n", __func__);
}
@@ -175,14 +177,26 @@ early_param("smt-enabled", early_smt_enabled);
#endif /* CONFIG_SMP */
/** Fix up paca fields required for the boot cpu */
-static void __init fixup_boot_paca(void)
+static void __init fixup_boot_paca(struct paca_struct *boot_paca)
{
/* The boot cpu is started */
- get_paca()->cpu_start = 1;
+ boot_paca->cpu_start = 1;
+#ifdef CONFIG_PPC_BOOK3S_64
+ /*
+ * Give the early boot machine check stack somewhere to use, use
+ * half of the init stack. This is a bit hacky but there should not be
+ * deep stack usage in early init, so it shouldn't overflow or overwrite
+ * things.
+ */
+ boot_paca->mc_emergency_sp = (void *)&init_thread_union +
+ (THREAD_SIZE/2);
+#endif
/* Allow percpu accesses to work until we setup percpu data */
- get_paca()->data_offset = 0;
- /* Mark interrupts disabled in PACA */
- irq_soft_mask_set(IRQS_DISABLED);
+ boot_paca->data_offset = 0;
+ /* Mark interrupts soft and hard disabled in PACA */
+ boot_paca->irq_soft_mask = IRQS_DISABLED;
+ boot_paca->irq_happened = PACA_IRQ_HARD_DIS;
+ WARN_ON(mfmsr() & MSR_EE);
}
static void __init configure_exceptions(void)
@@ -195,8 +209,39 @@ static void __init configure_exceptions(void)
/* Under a PAPR hypervisor, we need hypercalls */
if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
+ /*
+ * - PR KVM does not support AIL mode interrupts in the host
+ * while a PR guest is running.
+ *
+ * - SCV system call interrupt vectors are only implemented for
+ * AIL mode interrupts.
+ *
+ * - On pseries, AIL mode can only be enabled and disabled
+ * system-wide so when a PR VM is created on a pseries host,
+ * all CPUs of the host are set to AIL=0 mode.
+ *
+ * - Therefore host CPUs must not execute scv while a PR VM
+ * exists.
+ *
+ * - SCV support can not be disabled dynamically because the
+ * feature is advertised to host userspace. Disabling the
+ * facility and emulating it would be possible but is not
+ * implemented.
+ *
+ * - So SCV support is blanket disabled if PR KVM could possibly
+ * run. That is, PR support compiled in, booting on pseries
+ * with hash MMU.
+ */
+ if (IS_ENABLED(CONFIG_KVM_BOOK3S_PR_POSSIBLE) && !radix_enabled()) {
+ init_task.thread.fscr &= ~FSCR_SCV;
+ cur_cpu_spec->cpu_user_features2 &= ~PPC_FEATURE2_SCV;
+ }
+
/* Enable AIL if possible */
- pseries_enable_reloc_on_exc();
+ if (!pseries_enable_reloc_on_exc()) {
+ init_task.thread.fscr &= ~FSCR_SCV;
+ cur_cpu_spec->cpu_user_features2 &= ~PPC_FEATURE2_SCV;
+ }
/*
* Tell the hypervisor that we want our exceptions to
@@ -227,10 +272,23 @@ static void cpu_ready_for_interrupts(void)
* If we are not in hypervisor mode the job is done once for
* the whole partition in configure_exceptions().
*/
- if (cpu_has_feature(CPU_FTR_HVMODE) &&
- cpu_has_feature(CPU_FTR_ARCH_207S)) {
+ if (cpu_has_feature(CPU_FTR_HVMODE)) {
unsigned long lpcr = mfspr(SPRN_LPCR);
- mtspr(SPRN_LPCR, lpcr | LPCR_AIL_3);
+ unsigned long new_lpcr = lpcr;
+
+ if (cpu_has_feature(CPU_FTR_ARCH_31)) {
+ /* P10 DD1 does not have HAIL */
+ if (pvr_version_is(PVR_POWER10) &&
+ (mfspr(SPRN_PVR) & 0xf00) == 0x100)
+ new_lpcr |= LPCR_AIL_3;
+ else
+ new_lpcr |= LPCR_HAIL;
+ } else if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
+ new_lpcr |= LPCR_AIL_3;
+ }
+
+ if (new_lpcr != lpcr)
+ mtspr(SPRN_LPCR, new_lpcr);
}
/*
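For reference, the DD1 test in the hunk above relies on the Power PVR convention that the processor version sits in the top 16 bits and the design revision (DD level) in the bottom 16, with bits 8-11 carrying the major revision. A minimal sketch of the same check on a raw PVR value; the POWER10 version constant is assumed to match the kernel's PVR_POWER10 definition:

    #include <stdbool.h>
    #include <stdint.h>

    #define PVR_VER_POWER10  0x0080  /* assumed to match PVR_POWER10 in asm/reg.h */

    /* Returns true for POWER10 DD1.x parts, mirroring the LPCR_HAIL fallback above. */
    static bool pvr_is_power10_dd1(uint32_t pvr)
    {
            if ((pvr >> 16) != PVR_VER_POWER10)     /* not a POWER10 part */
                    return false;
            return (pvr & 0xf00) == 0x100;          /* major DD level == 1 */
    }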
@@ -254,7 +312,7 @@ static void cpu_ready_for_interrupts(void)
unsigned long spr_default_dscr = 0;
-void __init record_spr_defaults(void)
+static void __init record_spr_defaults(void)
{
if (early_cpu_has_feature(CPU_FTR_DSCR))
spr_default_dscr = mfspr(SPRN_DSCR);
@@ -285,18 +343,40 @@ void __init early_setup(unsigned long dt_ptr)
/* -------- printk is _NOT_ safe to use here ! ------- */
+ /*
+ * Assume we're on cpu 0 for now.
+ *
+ * We need to load a PACA very early for a few reasons.
+ *
+ * The stack protector canary is stored in the paca, so as soon as we
+ * call any stack protected code we need r13 pointing somewhere valid.
+ *
+ * If we are using kcov it will call in_task() in its instrumentation,
+ * which relies on the current task from the PACA.
+ *
+ * dt_cpu_ftrs_init() calls into generic OF/fdt code, as well as
+ * printk(), which can trigger both stack protector and kcov.
+ *
+ * percpu variables and spin locks also use the paca.
+ *
+ * So set up a temporary paca. It will be replaced below once we know
+ * what CPU we are on.
+ */
+ initialise_paca(&boot_paca, 0);
+ fixup_boot_paca(&boot_paca);
+ WARN_ON(local_paca != 0);
+ setup_paca(&boot_paca); /* install the paca into registers */
+
+ /* -------- printk is now safe to use ------- */
+
+ if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && (mfmsr() & MSR_HV))
+ enable_machine_check();
+
/* Try new device tree based feature discovery ... */
if (!dt_cpu_ftrs_init(__va(dt_ptr)))
/* Otherwise use the old style CPU table */
identify_cpu(0, mfspr(SPRN_PVR));
- /* Assume we're on cpu 0 for now. Don't write to the paca yet! */
- initialise_paca(&boot_paca, 0);
- setup_paca(&boot_paca);
- fixup_boot_paca();
-
- /* -------- printk is now safe to use ------- */
-
/* Enable early debugging if any specified (see udbg.h) */
udbg_early_init();
@@ -314,8 +394,8 @@ void __init early_setup(unsigned long dt_ptr)
/* Poison paca_ptrs[0] again if it's not the boot cpu */
memset(&paca_ptrs[0], 0x88, sizeof(paca_ptrs[0]));
}
- setup_paca(paca_ptrs[boot_cpuid]);
- fixup_boot_paca();
+ fixup_boot_paca(paca_ptrs[boot_cpuid]);
+ setup_paca(paca_ptrs[boot_cpuid]); /* install the paca into registers */
/*
* Configure exception handlers. This include setting up trampolines
@@ -333,11 +413,11 @@ void __init early_setup(unsigned long dt_ptr)
apply_feature_fixups();
setup_feature_keys();
- early_ioremap_setup();
-
/* Initialize the hash table or TLB handling */
early_init_mmu();
+ early_ioremap_setup();
+
/*
* After firmware and early platform setup code has set things up,
* we note the SPR values for configurable control/performance
@@ -463,7 +543,7 @@ void smp_release_cpus(void)
* routines and/or provided to userland
*/
-static void init_cache_info(struct ppc_cache_info *info, u32 size, u32 lsize,
+static void __init init_cache_info(struct ppc_cache_info *info, u32 size, u32 lsize,
u32 bsize, u32 sets)
{
info->size = size;
@@ -516,6 +596,8 @@ static bool __init parse_cache_info(struct device_node *np,
lsizep = of_get_property(np, propnames[3], NULL);
if (bsizep == NULL)
bsizep = lsizep;
+ if (lsizep == NULL)
+ lsizep = bsizep;
if (lsizep != NULL)
lsize = be32_to_cpu(*lsizep);
if (bsizep != NULL)
@@ -608,7 +690,7 @@ void __init initialize_cache_info(void)
*/
__init u64 ppc64_bolted_size(void)
{
-#ifdef CONFIG_PPC_BOOK3E
+#ifdef CONFIG_PPC_BOOK3E_64
/* Freescale BookE bolts the entire linear mapping */
/* XXX: BookE ppc64_rma_limit setup seems to disagree? */
if (early_mmu_has_feature(MMU_FTR_TYPE_FSL_E))
@@ -658,7 +740,7 @@ void __init irqstack_early_init(void)
}
}
-#ifdef CONFIG_PPC_BOOK3E
+#ifdef CONFIG_PPC_BOOK3E_64
void __init exc_lvl_early_init(void)
{
unsigned int i;
@@ -691,7 +773,7 @@ void __init exc_lvl_early_init(void)
*/
void __init emergency_stack_init(void)
{
- u64 limit;
+ u64 limit, mce_limit;
unsigned int i;
/*
@@ -708,7 +790,16 @@ void __init emergency_stack_init(void)
* initialized in kernel/irq.c. These are initialized here in order
* to have emergency stacks available as early as possible.
*/
- limit = min(ppc64_bolted_size(), ppc64_rma_size);
+ limit = mce_limit = min(ppc64_bolted_size(), ppc64_rma_size);
+
+ /*
+ * Machine check on pseries calls rtas, but can't use the static
+ * rtas_args due to a machine check hitting while the lock is held.
+	 * RTAS args have to be below 4GB, so the machine check stack is
+	 * limited to 4GB so that the args can be placed on the stack.
+ */
+ if (firmware_has_feature(FW_FEATURE_LPAR) && mce_limit > SZ_4G)
+ mce_limit = SZ_4G;
for_each_possible_cpu(i) {
paca_ptrs[i]->emergency_sp = alloc_stack(limit, i) + THREAD_SIZE;
@@ -718,27 +809,12 @@ void __init emergency_stack_init(void)
paca_ptrs[i]->nmi_emergency_sp = alloc_stack(limit, i) + THREAD_SIZE;
/* emergency stack for machine check exception handling. */
- paca_ptrs[i]->mc_emergency_sp = alloc_stack(limit, i) + THREAD_SIZE;
+ paca_ptrs[i]->mc_emergency_sp = alloc_stack(mce_limit, i) + THREAD_SIZE;
#endif
}
}
#ifdef CONFIG_SMP
-#define PCPU_DYN_SIZE ()
-
-static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
-{
- return memblock_alloc_try_nid(size, align, __pa(MAX_DMA_ADDRESS),
- MEMBLOCK_ALLOC_ACCESSIBLE,
- early_cpu_to_node(cpu));
-
-}
-
-static void __init pcpu_fc_free(void *ptr, size_t size)
-{
- memblock_free(__pa(ptr), size);
-}
-
static int pcpu_cpu_distance(unsigned int from, unsigned int to)
{
if (early_cpu_to_node(from) == early_cpu_to_node(to))
@@ -747,6 +823,11 @@ static int pcpu_cpu_distance(unsigned int from, unsigned int to)
return REMOTE_DISTANCE;
}
+static __init int pcpu_cpu_to_node(int cpu)
+{
+ return early_cpu_to_node(cpu);
+}
+
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);
@@ -756,20 +837,38 @@ void __init setup_per_cpu_areas(void)
size_t atom_size;
unsigned long delta;
unsigned int cpu;
- int rc;
+ int rc = -EINVAL;
/*
- * Linear mapping is one of 4K, 1M and 16M. For 4K, no need
- * to group units. For larger mappings, use 1M atom which
- * should be large enough to contain a number of units.
+ * BookE and BookS radix are historical values and should be revisited.
*/
- if (mmu_linear_psize == MMU_PAGE_4K)
+ if (IS_ENABLED(CONFIG_PPC_BOOK3E_64)) {
+ atom_size = SZ_1M;
+ } else if (radix_enabled()) {
atom_size = PAGE_SIZE;
- else
- atom_size = 1 << 20;
+ } else if (IS_ENABLED(CONFIG_PPC_64S_HASH_MMU)) {
+ /*
+ * Linear mapping is one of 4K, 1M and 16M. For 4K, no need
+ * to group units. For larger mappings, use 1M atom which
+ * should be large enough to contain a number of units.
+ */
+ if (mmu_linear_psize == MMU_PAGE_4K)
+ atom_size = PAGE_SIZE;
+ else
+ atom_size = SZ_1M;
+ }
+
+ if (pcpu_chosen_fc != PCPU_FC_PAGE) {
+ rc = pcpu_embed_first_chunk(0, dyn_size, atom_size, pcpu_cpu_distance,
+ pcpu_cpu_to_node);
+ if (rc)
+ pr_warn("PERCPU: %s allocator failed (%d), "
+ "falling back to page size\n",
+ pcpu_fc_names[pcpu_chosen_fc], rc);
+ }
- rc = pcpu_embed_first_chunk(0, dyn_size, atom_size, pcpu_cpu_distance,
- pcpu_fc_alloc, pcpu_fc_free);
+ if (rc < 0)
+ rc = pcpu_page_first_chunk(0, pcpu_cpu_to_node);
if (rc < 0)
panic("cannot initialize percpu area (err=%d)", rc);
@@ -781,7 +880,7 @@ void __init setup_per_cpu_areas(void)
}
#endif
-#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
+#ifdef CONFIG_MEMORY_HOTPLUG
unsigned long memory_block_size_bytes(void)
{
if (ppc_md.memory_block_size)
@@ -808,161 +907,22 @@ u64 hw_nmi_get_sample_period(int watchdog_thresh)
* disable it by default. Book3S has a soft-nmi hardlockup detector based
* on the decrementer interrupt, so it does not suffer from this problem.
*
- * It is likely to get false positives in VM guests, so disable it there
- * by default too.
+ * It is likely to get false positives in KVM guests, so disable it there
+ * by default too. PowerVM will not stop or arbitrarily oversubscribe
+ * CPUs, but gives a minimum regular allotment even with SPLPAR, so enable
+ * the detector for non-KVM guests, which are assumed to be PowerVM.
*/
static int __init disable_hardlockup_detector(void)
{
#ifdef CONFIG_HARDLOCKUP_DETECTOR_PERF
hardlockup_detector_disable();
#else
- if (firmware_has_feature(FW_FEATURE_LPAR))
- hardlockup_detector_disable();
+ if (firmware_has_feature(FW_FEATURE_LPAR)) {
+ if (is_kvm_guest())
+ hardlockup_detector_disable();
+ }
#endif
return 0;
}
early_initcall(disable_hardlockup_detector);
-
-#ifdef CONFIG_PPC_BOOK3S_64
-static enum l1d_flush_type enabled_flush_types;
-static void *l1d_flush_fallback_area;
-static bool no_rfi_flush;
-bool rfi_flush;
-
-static int __init handle_no_rfi_flush(char *p)
-{
- pr_info("rfi-flush: disabled on command line.");
- no_rfi_flush = true;
- return 0;
-}
-early_param("no_rfi_flush", handle_no_rfi_flush);
-
-/*
- * The RFI flush is not KPTI, but because users will see doco that says to use
- * nopti we hijack that option here to also disable the RFI flush.
- */
-static int __init handle_no_pti(char *p)
-{
- pr_info("rfi-flush: disabling due to 'nopti' on command line.\n");
- handle_no_rfi_flush(NULL);
- return 0;
-}
-early_param("nopti", handle_no_pti);
-
-static void do_nothing(void *unused)
-{
- /*
- * We don't need to do the flush explicitly, just enter+exit kernel is
- * sufficient, the RFI exit handlers will do the right thing.
- */
-}
-
-void rfi_flush_enable(bool enable)
-{
- if (enable) {
- do_rfi_flush_fixups(enabled_flush_types);
- on_each_cpu(do_nothing, NULL, 1);
- } else
- do_rfi_flush_fixups(L1D_FLUSH_NONE);
-
- rfi_flush = enable;
-}
-
-static void __ref init_fallback_flush(void)
-{
- u64 l1d_size, limit;
- int cpu;
-
- /* Only allocate the fallback flush area once (at boot time). */
- if (l1d_flush_fallback_area)
- return;
-
- l1d_size = ppc64_caches.l1d.size;
-
- /*
- * If there is no d-cache-size property in the device tree, l1d_size
- * could be zero. That leads to the loop in the asm wrapping around to
- * 2^64-1, and then walking off the end of the fallback area and
- * eventually causing a page fault which is fatal. Just default to
- * something vaguely sane.
- */
- if (!l1d_size)
- l1d_size = (64 * 1024);
-
- limit = min(ppc64_bolted_size(), ppc64_rma_size);
-
- /*
- * Align to L1d size, and size it at 2x L1d size, to catch possible
- * hardware prefetch runoff. We don't have a recipe for load patterns to
- * reliably avoid the prefetcher.
- */
- l1d_flush_fallback_area = memblock_alloc_try_nid(l1d_size * 2,
- l1d_size, MEMBLOCK_LOW_LIMIT,
- limit, NUMA_NO_NODE);
- if (!l1d_flush_fallback_area)
- panic("%s: Failed to allocate %llu bytes align=0x%llx max_addr=%pa\n",
- __func__, l1d_size * 2, l1d_size, &limit);
-
-
- for_each_possible_cpu(cpu) {
- struct paca_struct *paca = paca_ptrs[cpu];
- paca->rfi_flush_fallback_area = l1d_flush_fallback_area;
- paca->l1d_flush_size = l1d_size;
- }
-}
-
-void setup_rfi_flush(enum l1d_flush_type types, bool enable)
-{
- if (types & L1D_FLUSH_FALLBACK) {
- pr_info("rfi-flush: fallback displacement flush available\n");
- init_fallback_flush();
- }
-
- if (types & L1D_FLUSH_ORI)
- pr_info("rfi-flush: ori type flush available\n");
-
- if (types & L1D_FLUSH_MTTRIG)
- pr_info("rfi-flush: mttrig type flush available\n");
-
- enabled_flush_types = types;
-
- if (!no_rfi_flush && !cpu_mitigations_off())
- rfi_flush_enable(enable);
-}
-
-#ifdef CONFIG_DEBUG_FS
-static int rfi_flush_set(void *data, u64 val)
-{
- bool enable;
-
- if (val == 1)
- enable = true;
- else if (val == 0)
- enable = false;
- else
- return -EINVAL;
-
- /* Only do anything if we're changing state */
- if (enable != rfi_flush)
- rfi_flush_enable(enable);
-
- return 0;
-}
-
-static int rfi_flush_get(void *data, u64 *val)
-{
- *val = rfi_flush ? 1 : 0;
- return 0;
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(fops_rfi_flush, rfi_flush_get, rfi_flush_set, "%llu\n");
-
-static __init int rfi_flush_debugfs_init(void)
-{
- debugfs_create_file("rfi_flush", 0600, powerpc_debugfs_root, NULL, &fops_rfi_flush);
- return 0;
-}
-device_initcall(rfi_flush_debugfs_init);
-#endif
-#endif /* CONFIG_PPC_BOOK3S_64 */
diff --git a/arch/powerpc/kernel/signal.c b/arch/powerpc/kernel/signal.c
index d215f9554553..68a91e553e14 100644
--- a/arch/powerpc/kernel/signal.c
+++ b/arch/powerpc/kernel/signal.c
@@ -9,7 +9,7 @@
* this archive for more details.
*/
-#include <linux/tracehook.h>
+#include <linux/resume_user_mode.h>
#include <linux/signal.h>
#include <linux/uprobes.h>
#include <linux/key.h>
@@ -18,35 +18,163 @@
#include <linux/syscalls.h>
#include <asm/hw_breakpoint.h>
#include <linux/uaccess.h>
+#include <asm/switch_to.h>
#include <asm/unistd.h>
#include <asm/debug.h>
#include <asm/tm.h>
#include "signal.h"
+#ifdef CONFIG_VSX
+unsigned long copy_fpr_to_user(void __user *to,
+ struct task_struct *task)
+{
+ u64 buf[ELF_NFPREG];
+ int i;
+
+ /* save FPR copy to local buffer then write to the thread_struct */
+ for (i = 0; i < (ELF_NFPREG - 1) ; i++)
+ buf[i] = task->thread.TS_FPR(i);
+ buf[i] = task->thread.fp_state.fpscr;
+ return __copy_to_user(to, buf, ELF_NFPREG * sizeof(double));
+}
+
+unsigned long copy_fpr_from_user(struct task_struct *task,
+ void __user *from)
+{
+ u64 buf[ELF_NFPREG];
+ int i;
+
+ if (__copy_from_user(buf, from, ELF_NFPREG * sizeof(double)))
+ return 1;
+ for (i = 0; i < (ELF_NFPREG - 1) ; i++)
+ task->thread.TS_FPR(i) = buf[i];
+ task->thread.fp_state.fpscr = buf[i];
+
+ return 0;
+}
+
+unsigned long copy_vsx_to_user(void __user *to,
+ struct task_struct *task)
+{
+ u64 buf[ELF_NVSRHALFREG];
+ int i;
+
+ /* save FPR copy to local buffer then write to the thread_struct */
+ for (i = 0; i < ELF_NVSRHALFREG; i++)
+ buf[i] = task->thread.fp_state.fpr[i][TS_VSRLOWOFFSET];
+ return __copy_to_user(to, buf, ELF_NVSRHALFREG * sizeof(double));
+}
+
+unsigned long copy_vsx_from_user(struct task_struct *task,
+ void __user *from)
+{
+ u64 buf[ELF_NVSRHALFREG];
+ int i;
+
+ if (__copy_from_user(buf, from, ELF_NVSRHALFREG * sizeof(double)))
+ return 1;
+ for (i = 0; i < ELF_NVSRHALFREG ; i++)
+ task->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];
+ return 0;
+}
+
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+unsigned long copy_ckfpr_to_user(void __user *to,
+ struct task_struct *task)
+{
+ u64 buf[ELF_NFPREG];
+ int i;
+
+ /* save FPR copy to local buffer then write to the thread_struct */
+ for (i = 0; i < (ELF_NFPREG - 1) ; i++)
+ buf[i] = task->thread.TS_CKFPR(i);
+ buf[i] = task->thread.ckfp_state.fpscr;
+ return __copy_to_user(to, buf, ELF_NFPREG * sizeof(double));
+}
+
+unsigned long copy_ckfpr_from_user(struct task_struct *task,
+ void __user *from)
+{
+ u64 buf[ELF_NFPREG];
+ int i;
+
+ if (__copy_from_user(buf, from, ELF_NFPREG * sizeof(double)))
+ return 1;
+ for (i = 0; i < (ELF_NFPREG - 1) ; i++)
+ task->thread.TS_CKFPR(i) = buf[i];
+ task->thread.ckfp_state.fpscr = buf[i];
+
+ return 0;
+}
+
+unsigned long copy_ckvsx_to_user(void __user *to,
+ struct task_struct *task)
+{
+ u64 buf[ELF_NVSRHALFREG];
+ int i;
+
+ /* save FPR copy to local buffer then write to the thread_struct */
+ for (i = 0; i < ELF_NVSRHALFREG; i++)
+ buf[i] = task->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET];
+ return __copy_to_user(to, buf, ELF_NVSRHALFREG * sizeof(double));
+}
+
+unsigned long copy_ckvsx_from_user(struct task_struct *task,
+ void __user *from)
+{
+ u64 buf[ELF_NVSRHALFREG];
+ int i;
+
+ if (__copy_from_user(buf, from, ELF_NVSRHALFREG * sizeof(double)))
+ return 1;
+ for (i = 0; i < ELF_NVSRHALFREG ; i++)
+ task->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];
+ return 0;
+}
+#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
+#endif
+
/* Log an error when sending an unhandled signal to a process. Controlled
* through debug.exception-trace sysctl.
*/
int show_unhandled_signals = 1;
+unsigned long get_min_sigframe_size(void)
+{
+ if (IS_ENABLED(CONFIG_PPC64))
+ return get_min_sigframe_size_64();
+ else
+ return get_min_sigframe_size_32();
+}
+
+#ifdef CONFIG_COMPAT
+unsigned long get_min_sigframe_size_compat(void)
+{
+ return get_min_sigframe_size_32();
+}
+#endif
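get_min_sigframe_size() and its 32-bit/compat variants report the smallest stack area a signal frame can occupy. Where the kernel exposes this value to userspace through the AT_MINSIGSTKSZ auxiliary vector entry (an assumption here, not shown in this hunk), a program can size its alternate signal stack from it instead of hard-coding SIGSTKSZ. A hedged userspace sketch:

    #include <signal.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/auxv.h>

    #ifndef AT_MINSIGSTKSZ
    #define AT_MINSIGSTKSZ 51          /* older libc headers may not define it */
    #endif

    int main(void)
    {
            /* getauxval() returns 0 if the running kernel does not provide the entry. */
            unsigned long min = getauxval(AT_MINSIGSTKSZ);
            stack_t ss = { 0 };

            if (min < MINSIGSTKSZ)
                    min = MINSIGSTKSZ;

            ss.ss_size = (min > SIGSTKSZ) ? min : SIGSTKSZ;
            ss.ss_sp = malloc(ss.ss_size);
            if (!ss.ss_sp || sigaltstack(&ss, NULL))
                    return 1;

            printf("alternate signal stack: %zu bytes\n", ss.ss_size);
            return 0;
    }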
+
/*
* Allocate space for the signal frame
*/
-void __user *get_sigframe(struct ksignal *ksig, unsigned long sp,
- size_t frame_size, int is_32)
+static unsigned long get_tm_stackpointer(struct task_struct *tsk);
+
+void __user *get_sigframe(struct ksignal *ksig, struct task_struct *tsk,
+ size_t frame_size, int is_32)
{
unsigned long oldsp, newsp;
+ unsigned long sp = get_tm_stackpointer(tsk);
/* Default to using normal stack */
- oldsp = get_clean_sp(sp, is_32);
+ if (is_32)
+ oldsp = sp & 0x0ffffffffUL;
+ else
+ oldsp = sp;
oldsp = sigsp(oldsp, ksig);
newsp = (oldsp - frame_size) & ~0xFUL;
- /* Check access */
- if (!access_ok((void __user *)newsp, oldsp - newsp))
- return NULL;
-
return (void __user *)newsp;
}
@@ -57,12 +185,21 @@ static void check_syscall_restart(struct pt_regs *regs, struct k_sigaction *ka,
int restart = 1;
/* syscall ? */
- if (TRAP(regs) != 0x0C00)
+ if (!trap_is_syscall(regs))
+ return;
+
+ if (trap_norestart(regs))
return;
/* error signalled ? */
- if (!(regs->ccr & 0x10000000))
+ if (trap_is_scv(regs)) {
+ /* 32-bit compat mode sign extend? */
+ if (!IS_ERR_VALUE(ret))
+ return;
+ ret = -ret;
+ } else if (!(regs->ccr & 0x10000000)) {
return;
+ }
switch (ret) {
case ERESTART_RESTARTBLOCK:
@@ -92,12 +229,17 @@ static void check_syscall_restart(struct pt_regs *regs, struct k_sigaction *ka,
regs->gpr[0] = __NR_restart_syscall;
else
regs->gpr[3] = regs->orig_gpr3;
- regs->nip -= 4;
+ regs_add_return_ip(regs, -4);
regs->result = 0;
} else {
- regs->result = -EINTR;
- regs->gpr[3] = EINTR;
- regs->ccr |= 0x10000000;
+ if (trap_is_scv(regs)) {
+ regs->result = -EINTR;
+ regs->gpr[3] = -EINTR;
+ } else {
+ regs->result = -EINTR;
+ regs->gpr[3] = EINTR;
+ regs->ccr |= 0x10000000;
+ }
}
}
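The two branches above reflect the two PowerPC system call ABIs: the legacy 'sc' instruction reports failure by setting the summary-overflow bit of CR0 (the 0x10000000 bit of ccr) and returning a positive error number in r3, while 'scv' follows the common convention of returning a negative errno directly. A small sketch of how a raw wrapper might normalise both forms to the usual negative-errno style; the r3 value and CR0.SO flag are assumed to come from the wrapper's inline assembly, which is omitted here:

    #include <stdbool.h>

    /* Convert a raw PowerPC syscall result to the -errno convention. */
    static long ppc_syscall_ret(long r3, bool cr0_so, bool used_scv)
    {
            if (used_scv)
                    return r3;              /* scv: already -errno on failure */
            return cr0_so ? -r3 : r3;       /* sc: positive errno plus CR0.SO */
    }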
@@ -106,7 +248,6 @@ static void do_signal(struct task_struct *tsk)
sigset_t *oldset = sigmask_to_save();
struct ksignal ksig = { .sig = 0 };
int ret;
- int is32 = is_32bit_task();
BUG_ON(tsk != current);
@@ -118,25 +259,30 @@ static void do_signal(struct task_struct *tsk)
if (ksig.sig <= 0) {
/* No signal to deliver -- put the saved sigmask back */
restore_saved_sigmask();
- tsk->thread.regs->trap = 0;
+ set_trap_norestart(tsk->thread.regs);
return; /* no signals delivered */
}
-#ifndef CONFIG_PPC_ADV_DEBUG_REGS
/*
* Reenable the DABR before delivering the signal to
* user space. The DABR will have been cleared if it
* triggered inside the kernel.
*/
- if (tsk->thread.hw_brk.address && tsk->thread.hw_brk.type)
- __set_breakpoint(&tsk->thread.hw_brk);
-#endif
+ if (!IS_ENABLED(CONFIG_PPC_ADV_DEBUG_REGS)) {
+ int i;
+
+ for (i = 0; i < nr_wp_slots(); i++) {
+ if (tsk->thread.hw_brk[i].address && tsk->thread.hw_brk[i].type)
+ __set_breakpoint(i, &tsk->thread.hw_brk[i]);
+ }
+ }
+
/* Re-enable the breakpoints for the signal stack */
thread_change_pc(tsk, tsk->thread.regs);
rseq_signal_deliver(&ksig, tsk->thread.regs);
- if (is32) {
+ if (is_32bit_task()) {
if (ksig.ka.sa.sa_flags & SA_SIGINFO)
ret = handle_rt_signal32(&ksig, oldset, tsk);
else
@@ -145,38 +291,28 @@ static void do_signal(struct task_struct *tsk)
ret = handle_rt_signal64(&ksig, oldset, tsk);
}
- tsk->thread.regs->trap = 0;
+ set_trap_norestart(tsk->thread.regs);
signal_setup_done(ret, &ksig, test_thread_flag(TIF_SINGLESTEP));
}
void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags)
{
- user_exit();
-
- /* Check valid addr_limit, TIF check is done there */
- addr_limit_user_check();
-
if (thread_info_flags & _TIF_UPROBE)
uprobe_notify_resume(regs);
if (thread_info_flags & _TIF_PATCH_PENDING)
klp_update_patch_state(current);
- if (thread_info_flags & _TIF_SIGPENDING) {
+ if (thread_info_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL)) {
BUG_ON(regs != current->thread.regs);
do_signal(current);
}
- if (thread_info_flags & _TIF_NOTIFY_RESUME) {
- clear_thread_flag(TIF_NOTIFY_RESUME);
- tracehook_notify_resume(regs);
- rseq_handle_notify_resume(NULL, regs);
- }
-
- user_enter();
+ if (thread_info_flags & _TIF_NOTIFY_RESUME)
+ resume_user_mode_work(regs);
}
-unsigned long get_tm_stackpointer(struct task_struct *tsk)
+static unsigned long get_tm_stackpointer(struct task_struct *tsk)
{
/* When in an active transaction that takes a signal, we need to be
* careful with the stack. It's possible that the stack has moved back
@@ -199,16 +335,16 @@ unsigned long get_tm_stackpointer(struct task_struct *tsk)
* For signals taken in non-TM or suspended mode, we use the
* normal/non-checkpointed stack pointer.
*/
-
- unsigned long ret = tsk->thread.regs->gpr[1];
+ struct pt_regs *regs = tsk->thread.regs;
+ unsigned long ret = regs->gpr[1];
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BUG_ON(tsk != current);
- if (MSR_TM_ACTIVE(tsk->thread.regs->msr)) {
+ if (MSR_TM_ACTIVE(regs->msr)) {
preempt_disable();
tm_reclaim_current(TM_CAUSE_SIGNAL);
- if (MSR_TM_TRANSACTIONAL(tsk->thread.regs->msr))
+ if (MSR_TM_TRANSACTIONAL(regs->msr))
ret = tsk->thread.ckpt_regs.gpr[1];
/*
@@ -218,9 +354,20 @@ unsigned long get_tm_stackpointer(struct task_struct *tsk)
* (tm_recheckpoint_new_task() would recheckpoint). Besides, we
* enter the signal handler in non-transactional state.
*/
- tsk->thread.regs->msr &= ~MSR_TS_MASK;
+ regs_set_return_msr(regs, regs->msr & ~MSR_TS_MASK);
preempt_enable();
}
#endif
return ret;
}
+
+static const char fm32[] = KERN_INFO "%s[%d]: bad frame in %s: %p nip %08lx lr %08lx\n";
+static const char fm64[] = KERN_INFO "%s[%d]: bad frame in %s: %p nip %016lx lr %016lx\n";
+
+void signal_fault(struct task_struct *tsk, struct pt_regs *regs,
+ const char *where, void __user *ptr)
+{
+ if (show_unhandled_signals)
+ printk_ratelimited(regs->msr & MSR_64BIT ? fm64 : fm32, tsk->comm,
+ task_pid_nr(tsk), where, ptr, regs->nip, regs->link);
+}
diff --git a/arch/powerpc/kernel/signal.h b/arch/powerpc/kernel/signal.h
index 800433685888..a429c57ed433 100644
--- a/arch/powerpc/kernel/signal.h
+++ b/arch/powerpc/kernel/signal.h
@@ -10,10 +10,8 @@
#ifndef _POWERPC_ARCH_SIGNAL_H
#define _POWERPC_ARCH_SIGNAL_H
-extern void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags);
-
-extern void __user *get_sigframe(struct ksignal *ksig, unsigned long sp,
- size_t frame_size, int is_32);
+void __user *get_sigframe(struct ksignal *ksig, struct task_struct *tsk,
+ size_t frame_size, int is_32);
extern int handle_signal32(struct ksignal *ksig, sigset_t *oldset,
struct task_struct *tsk);
@@ -21,15 +19,20 @@ extern int handle_signal32(struct ksignal *ksig, sigset_t *oldset,
extern int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
struct task_struct *tsk);
-extern unsigned long copy_fpr_to_user(void __user *to,
- struct task_struct *task);
-extern unsigned long copy_ckfpr_to_user(void __user *to,
- struct task_struct *task);
-extern unsigned long copy_fpr_from_user(struct task_struct *task,
- void __user *from);
-extern unsigned long copy_ckfpr_from_user(struct task_struct *task,
- void __user *from);
-extern unsigned long get_tm_stackpointer(struct task_struct *tsk);
+static inline int __get_user_sigset(sigset_t *dst, const sigset_t __user *src)
+{
+ BUILD_BUG_ON(sizeof(sigset_t) != sizeof(u64));
+
+ return __get_user(dst->sig[0], (u64 __user *)&src->sig[0]);
+}
+#define unsafe_get_user_sigset(dst, src, label) do { \
+ sigset_t *__dst = dst; \
+ const sigset_t __user *__src = src; \
+ int i; \
+ \
+ for (i = 0; i < _NSIG_WORDS; i++) \
+ unsafe_get_user(__dst->sig[i], &__src->sig[i], label); \
+} while (0)
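unsafe_get_user_sigset() (like the other unsafe_* helpers added below) may only be used inside an open user-access window, with a local label to branch to on a fault. A condensed sketch of the calling pattern these macros expect, using the kernel's user_read_access_begin()/user_read_access_end() API; the frame structure is made up for illustration:

    struct example_frame {                  /* hypothetical layout */
            sigset_t uc_sigmask;
    };

    static int read_frame_sigset(sigset_t *set, struct example_frame __user *frame)
    {
            if (!user_read_access_begin(frame, sizeof(*frame)))
                    return -EFAULT;
            /* Any fault inside the window branches straight to the label. */
            unsafe_get_user_sigset(set, &frame->uc_sigmask, fault);
            user_read_access_end();
            return 0;

    fault:
            user_read_access_end();
            return -EFAULT;
    }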
#ifdef CONFIG_VSX
extern unsigned long copy_vsx_to_user(void __user *to,
@@ -40,6 +43,150 @@ extern unsigned long copy_vsx_from_user(struct task_struct *task,
void __user *from);
extern unsigned long copy_ckvsx_from_user(struct task_struct *task,
void __user *from);
+unsigned long copy_fpr_to_user(void __user *to, struct task_struct *task);
+unsigned long copy_ckfpr_to_user(void __user *to, struct task_struct *task);
+unsigned long copy_fpr_from_user(struct task_struct *task, void __user *from);
+unsigned long copy_ckfpr_from_user(struct task_struct *task, void __user *from);
+
+#define unsafe_copy_fpr_to_user(to, task, label) do { \
+ struct task_struct *__t = task; \
+ u64 __user *buf = (u64 __user *)to; \
+ int i; \
+ \
+ for (i = 0; i < ELF_NFPREG - 1 ; i++) \
+ unsafe_put_user(__t->thread.TS_FPR(i), &buf[i], label); \
+ unsafe_put_user(__t->thread.fp_state.fpscr, &buf[i], label); \
+} while (0)
+
+#define unsafe_copy_vsx_to_user(to, task, label) do { \
+ struct task_struct *__t = task; \
+ u64 __user *buf = (u64 __user *)to; \
+ int i; \
+ \
+ for (i = 0; i < ELF_NVSRHALFREG ; i++) \
+ unsafe_put_user(__t->thread.fp_state.fpr[i][TS_VSRLOWOFFSET], \
+ &buf[i], label);\
+} while (0)
+
+#define unsafe_copy_fpr_from_user(task, from, label) do { \
+ struct task_struct *__t = task; \
+ u64 __user *buf = (u64 __user *)from; \
+ int i; \
+ \
+ for (i = 0; i < ELF_NFPREG - 1; i++) \
+ unsafe_get_user(__t->thread.TS_FPR(i), &buf[i], label); \
+ unsafe_get_user(__t->thread.fp_state.fpscr, &buf[i], label); \
+} while (0)
+
+#define unsafe_copy_vsx_from_user(task, from, label) do { \
+ struct task_struct *__t = task; \
+ u64 __user *buf = (u64 __user *)from; \
+ int i; \
+ \
+ for (i = 0; i < ELF_NVSRHALFREG ; i++) \
+ unsafe_get_user(__t->thread.fp_state.fpr[i][TS_VSRLOWOFFSET], \
+ &buf[i], label); \
+} while (0)
+
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+#define unsafe_copy_ckfpr_to_user(to, task, label) do { \
+ struct task_struct *__t = task; \
+ u64 __user *buf = (u64 __user *)to; \
+ int i; \
+ \
+ for (i = 0; i < ELF_NFPREG - 1 ; i++) \
+ unsafe_put_user(__t->thread.TS_CKFPR(i), &buf[i], label);\
+ unsafe_put_user(__t->thread.ckfp_state.fpscr, &buf[i], label); \
+} while (0)
+
+#define unsafe_copy_ckvsx_to_user(to, task, label) do { \
+ struct task_struct *__t = task; \
+ u64 __user *buf = (u64 __user *)to; \
+ int i; \
+ \
+ for (i = 0; i < ELF_NVSRHALFREG ; i++) \
+ unsafe_put_user(__t->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET], \
+ &buf[i], label);\
+} while (0)
+
+#define unsafe_copy_ckfpr_from_user(task, from, label) do { \
+ struct task_struct *__t = task; \
+ u64 __user *buf = (u64 __user *)from; \
+ int i; \
+ \
+ for (i = 0; i < ELF_NFPREG - 1 ; i++) \
+ unsafe_get_user(__t->thread.TS_CKFPR(i), &buf[i], label);\
+	unsafe_get_user(__t->thread.ckfp_state.fpscr, &buf[i], label);	\
+} while (0)
+
+#define unsafe_copy_ckvsx_from_user(task, from, label) do { \
+ struct task_struct *__t = task; \
+ u64 __user *buf = (u64 __user *)from; \
+ int i; \
+ \
+ for (i = 0; i < ELF_NVSRHALFREG ; i++) \
+ unsafe_get_user(__t->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET], \
+ &buf[i], label); \
+} while (0)
+#endif
+#elif defined(CONFIG_PPC_FPU_REGS)
+
+#define unsafe_copy_fpr_to_user(to, task, label) \
+ unsafe_copy_to_user(to, (task)->thread.fp_state.fpr, \
+ ELF_NFPREG * sizeof(double), label)
+
+#define unsafe_copy_fpr_from_user(task, from, label) \
+ unsafe_copy_from_user((task)->thread.fp_state.fpr, from, \
+ ELF_NFPREG * sizeof(double), label)
+
+static inline unsigned long
+copy_fpr_to_user(void __user *to, struct task_struct *task)
+{
+ return __copy_to_user(to, task->thread.fp_state.fpr,
+ ELF_NFPREG * sizeof(double));
+}
+
+static inline unsigned long
+copy_fpr_from_user(struct task_struct *task, void __user *from)
+{
+ return __copy_from_user(task->thread.fp_state.fpr, from,
+ ELF_NFPREG * sizeof(double));
+}
+
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+#define unsafe_copy_ckfpr_to_user(to, task, label) \
+ unsafe_copy_to_user(to, (task)->thread.ckfp_state.fpr, \
+ ELF_NFPREG * sizeof(double), label)
+
+static inline unsigned long copy_ckfpr_to_user(void __user *to, struct task_struct *task)
+{
+ return __copy_to_user(to, task->thread.ckfp_state.fpr,
+ ELF_NFPREG * sizeof(double));
+}
+
+static inline unsigned long
+copy_ckfpr_from_user(struct task_struct *task, void __user *from)
+{
+ return __copy_from_user(task->thread.ckfp_state.fpr, from,
+ ELF_NFPREG * sizeof(double));
+}
+#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
+#else
+#define unsafe_copy_fpr_to_user(to, task, label) do { if (0) goto label;} while (0)
+
+#define unsafe_copy_fpr_from_user(task, from, label) do { if (0) goto label;} while (0)
+
+static inline unsigned long
+copy_fpr_to_user(void __user *to, struct task_struct *task)
+{
+ return 0;
+}
+
+static inline unsigned long
+copy_fpr_from_user(struct task_struct *task, void __user *from)
+{
+ return 0;
+}
#endif
#ifdef CONFIG_PPC64
@@ -49,9 +196,6 @@ extern int handle_rt_signal64(struct ksignal *ksig, sigset_t *set,
#else /* CONFIG_PPC64 */
-extern long sys_rt_sigreturn(void);
-extern long sys_sigreturn(void);
-
static inline int handle_rt_signal64(struct ksignal *ksig, sigset_t *set,
struct task_struct *tsk)
{
@@ -60,4 +204,7 @@ static inline int handle_rt_signal64(struct ksignal *ksig, sigset_t *set,
#endif /* !defined(CONFIG_PPC64) */
+void signal_fault(struct task_struct *tsk, struct pt_regs *regs,
+ const char *where, void __user *ptr);
+
#endif /* _POWERPC_ARCH_SIGNAL_H */
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index 1b090a76b444..c114c7f25645 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -43,11 +43,10 @@
#include <asm/tm.h>
#include <asm/asm-prototypes.h>
#ifdef CONFIG_PPC64
-#include "ppc32.h"
+#include <asm/syscalls_32.h>
#include <asm/unistd.h>
#else
#include <asm/ucontext.h>
-#include <asm/pgtable.h>
#endif
#include "signal.h"
@@ -59,8 +58,6 @@
#define mcontext mcontext32
#define ucontext ucontext32
-#define __save_altstack __compat_save_altstack
-
/*
* Userspace code may pass a ucontext which doesn't include VSX added
* at the end. We need to check for this case.
@@ -85,47 +82,35 @@
* Functions for flipping sigsets (thanks to brain dead generic
* implementation that makes things simple for little endian only)
*/
-static inline int put_sigset_t(compat_sigset_t __user *uset, sigset_t *set)
-{
- return put_compat_sigset(uset, set, sizeof(*uset));
-}
-
-static inline int get_sigset_t(sigset_t *set,
- const compat_sigset_t __user *uset)
-{
- return get_compat_sigset(set, uset);
-}
+#define unsafe_put_sigset_t unsafe_put_compat_sigset
+#define unsafe_get_sigset_t unsafe_get_compat_sigset
#define to_user_ptr(p) ptr_to_compat(p)
#define from_user_ptr(p) compat_ptr(p)
-static inline int save_general_regs(struct pt_regs *regs,
- struct mcontext __user *frame)
+static __always_inline int
+__unsafe_save_general_regs(struct pt_regs *regs, struct mcontext __user *frame)
{
elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
- int i;
- /* Force usr to alway see softe as 1 (interrupts enabled) */
- elf_greg_t64 softe = 0x1;
-
- WARN_ON(!FULL_REGS(regs));
+ int val, i;
for (i = 0; i <= PT_RESULT; i ++) {
- if (i == 14 && !FULL_REGS(regs))
- i = 32;
- if ( i == PT_SOFTE) {
- if(__put_user((unsigned int)softe, &frame->mc_gregs[i]))
- return -EFAULT;
- else
- continue;
- }
- if (__put_user((unsigned int)gregs[i], &frame->mc_gregs[i]))
- return -EFAULT;
+		/* Force user to always see softe as 1 (interrupts enabled) */
+ if (i == PT_SOFTE)
+ val = 1;
+ else
+ val = gregs[i];
+
+ unsafe_put_user(val, &frame->mc_gregs[i], failed);
}
return 0;
+
+failed:
+ return 1;
}
-static inline int restore_general_regs(struct pt_regs *regs,
- struct mcontext __user *sr)
+static __always_inline int
+__unsafe_restore_general_regs(struct pt_regs *regs, struct mcontext __user *sr)
{
elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
int i;
@@ -133,51 +118,67 @@ static inline int restore_general_regs(struct pt_regs *regs,
for (i = 0; i <= PT_RESULT; i++) {
if ((i == PT_MSR) || (i == PT_SOFTE))
continue;
- if (__get_user(gregs[i], &sr->mc_gregs[i]))
- return -EFAULT;
+ unsafe_get_user(gregs[i], &sr->mc_gregs[i], failed);
}
return 0;
+
+failed:
+ return 1;
}
#else /* CONFIG_PPC64 */
#define GP_REGS_SIZE min(sizeof(elf_gregset_t), sizeof(struct pt_regs))
-static inline int put_sigset_t(sigset_t __user *uset, sigset_t *set)
-{
- return copy_to_user(uset, set, sizeof(*uset));
-}
+#define unsafe_put_sigset_t(uset, set, label) do { \
+ sigset_t __user *__us = uset ; \
+ const sigset_t *__s = set; \
+ \
+ unsafe_copy_to_user(__us, __s, sizeof(*__us), label); \
+} while (0)
-static inline int get_sigset_t(sigset_t *set, const sigset_t __user *uset)
-{
- return copy_from_user(set, uset, sizeof(*uset));
-}
+#define unsafe_get_sigset_t unsafe_get_user_sigset
#define to_user_ptr(p) ((unsigned long)(p))
#define from_user_ptr(p) ((void __user *)(p))
-static inline int save_general_regs(struct pt_regs *regs,
- struct mcontext __user *frame)
+static __always_inline int
+__unsafe_save_general_regs(struct pt_regs *regs, struct mcontext __user *frame)
{
- WARN_ON(!FULL_REGS(regs));
- return __copy_to_user(&frame->mc_gregs, regs, GP_REGS_SIZE);
+ unsafe_copy_to_user(&frame->mc_gregs, regs, GP_REGS_SIZE, failed);
+ return 0;
+
+failed:
+ return 1;
}
-static inline int restore_general_regs(struct pt_regs *regs,
- struct mcontext __user *sr)
+static __always_inline
+int __unsafe_restore_general_regs(struct pt_regs *regs, struct mcontext __user *sr)
{
/* copy up to but not including MSR */
- if (__copy_from_user(regs, &sr->mc_gregs,
- PT_MSR * sizeof(elf_greg_t)))
- return -EFAULT;
+ unsafe_copy_from_user(regs, &sr->mc_gregs, PT_MSR * sizeof(elf_greg_t), failed);
+
/* copy from orig_r3 (the word after the MSR) up to the end */
- if (__copy_from_user(&regs->orig_gpr3, &sr->mc_gregs[PT_ORIG_R3],
- GP_REGS_SIZE - PT_ORIG_R3 * sizeof(elf_greg_t)))
- return -EFAULT;
+ unsafe_copy_from_user(&regs->orig_gpr3, &sr->mc_gregs[PT_ORIG_R3],
+ GP_REGS_SIZE - PT_ORIG_R3 * sizeof(elf_greg_t), failed);
+
return 0;
+
+failed:
+ return 1;
}
#endif
+#define unsafe_save_general_regs(regs, frame, label) do { \
+ if (__unsafe_save_general_regs(regs, frame)) \
+ goto label; \
+} while (0)
+
+#define unsafe_restore_general_regs(regs, frame, label) do { \
+ if (__unsafe_restore_general_regs(regs, frame)) \
+ goto label; \
+} while (0)
+
/*
* When we have signals to deliver, we set up on the
* user stack, going down from the original stack pointer:
@@ -204,9 +205,6 @@ struct sigframe {
int abigap[56];
};
-/* We use the mc_pad field for the signal return trampoline. */
-#define tramp mc_pad
-
/*
* When we have rt signals to deliver, we set up on the
* user stack, going down from the original stack pointer:
@@ -235,171 +233,50 @@ struct rt_sigframe {
int abigap[56];
};
-#ifdef CONFIG_VSX
-unsigned long copy_fpr_to_user(void __user *to,
- struct task_struct *task)
+unsigned long get_min_sigframe_size_32(void)
{
- u64 buf[ELF_NFPREG];
- int i;
-
- /* save FPR copy to local buffer then write to the thread_struct */
- for (i = 0; i < (ELF_NFPREG - 1) ; i++)
- buf[i] = task->thread.TS_FPR(i);
- buf[i] = task->thread.fp_state.fpscr;
- return __copy_to_user(to, buf, ELF_NFPREG * sizeof(double));
+ return max(sizeof(struct rt_sigframe) + __SIGNAL_FRAMESIZE + 16,
+ sizeof(struct sigframe) + __SIGNAL_FRAMESIZE);
}
-unsigned long copy_fpr_from_user(struct task_struct *task,
- void __user *from)
-{
- u64 buf[ELF_NFPREG];
- int i;
-
- if (__copy_from_user(buf, from, ELF_NFPREG * sizeof(double)))
- return 1;
- for (i = 0; i < (ELF_NFPREG - 1) ; i++)
- task->thread.TS_FPR(i) = buf[i];
- task->thread.fp_state.fpscr = buf[i];
-
- return 0;
-}
-
-unsigned long copy_vsx_to_user(void __user *to,
- struct task_struct *task)
-{
- u64 buf[ELF_NVSRHALFREG];
- int i;
-
- /* save FPR copy to local buffer then write to the thread_struct */
- for (i = 0; i < ELF_NVSRHALFREG; i++)
- buf[i] = task->thread.fp_state.fpr[i][TS_VSRLOWOFFSET];
- return __copy_to_user(to, buf, ELF_NVSRHALFREG * sizeof(double));
-}
-
-unsigned long copy_vsx_from_user(struct task_struct *task,
- void __user *from)
-{
- u64 buf[ELF_NVSRHALFREG];
- int i;
-
- if (__copy_from_user(buf, from, ELF_NVSRHALFREG * sizeof(double)))
- return 1;
- for (i = 0; i < ELF_NVSRHALFREG ; i++)
- task->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];
- return 0;
-}
-
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-unsigned long copy_ckfpr_to_user(void __user *to,
- struct task_struct *task)
-{
- u64 buf[ELF_NFPREG];
- int i;
-
- /* save FPR copy to local buffer then write to the thread_struct */
- for (i = 0; i < (ELF_NFPREG - 1) ; i++)
- buf[i] = task->thread.TS_CKFPR(i);
- buf[i] = task->thread.ckfp_state.fpscr;
- return __copy_to_user(to, buf, ELF_NFPREG * sizeof(double));
-}
-
-unsigned long copy_ckfpr_from_user(struct task_struct *task,
- void __user *from)
-{
- u64 buf[ELF_NFPREG];
- int i;
-
- if (__copy_from_user(buf, from, ELF_NFPREG * sizeof(double)))
- return 1;
- for (i = 0; i < (ELF_NFPREG - 1) ; i++)
- task->thread.TS_CKFPR(i) = buf[i];
- task->thread.ckfp_state.fpscr = buf[i];
-
- return 0;
-}
-
-unsigned long copy_ckvsx_to_user(void __user *to,
- struct task_struct *task)
-{
- u64 buf[ELF_NVSRHALFREG];
- int i;
-
- /* save FPR copy to local buffer then write to the thread_struct */
- for (i = 0; i < ELF_NVSRHALFREG; i++)
- buf[i] = task->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET];
- return __copy_to_user(to, buf, ELF_NVSRHALFREG * sizeof(double));
-}
-
-unsigned long copy_ckvsx_from_user(struct task_struct *task,
- void __user *from)
-{
- u64 buf[ELF_NVSRHALFREG];
- int i;
-
- if (__copy_from_user(buf, from, ELF_NVSRHALFREG * sizeof(double)))
- return 1;
- for (i = 0; i < ELF_NVSRHALFREG ; i++)
- task->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];
- return 0;
-}
-#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
-#else
-inline unsigned long copy_fpr_to_user(void __user *to,
- struct task_struct *task)
-{
- return __copy_to_user(to, task->thread.fp_state.fpr,
- ELF_NFPREG * sizeof(double));
-}
-
-inline unsigned long copy_fpr_from_user(struct task_struct *task,
- void __user *from)
-{
- return __copy_from_user(task->thread.fp_state.fpr, from,
- ELF_NFPREG * sizeof(double));
-}
-
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-inline unsigned long copy_ckfpr_to_user(void __user *to,
- struct task_struct *task)
-{
- return __copy_to_user(to, task->thread.ckfp_state.fpr,
- ELF_NFPREG * sizeof(double));
-}
-
-inline unsigned long copy_ckfpr_from_user(struct task_struct *task,
- void __user *from)
-{
- return __copy_from_user(task->thread.ckfp_state.fpr, from,
- ELF_NFPREG * sizeof(double));
-}
-#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
-#endif
-
/*
* Save the current user registers on the user stack.
* We only save the altivec/spe registers if the process has used
* altivec/spe instructions at some point.
*/
-static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
- struct mcontext __user *tm_frame, int sigret,
- int ctx_has_vsx_region)
+static void prepare_save_user_regs(int ctx_has_vsx_region)
{
- unsigned long msr = regs->msr;
-
/* Make sure floating point registers are stored in regs */
flush_fp_to_thread(current);
+#ifdef CONFIG_ALTIVEC
+ if (current->thread.used_vr)
+ flush_altivec_to_thread(current);
+ if (cpu_has_feature(CPU_FTR_ALTIVEC))
+ current->thread.vrsave = mfspr(SPRN_VRSAVE);
+#endif
+#ifdef CONFIG_VSX
+ if (current->thread.used_vsr && ctx_has_vsx_region)
+ flush_vsx_to_thread(current);
+#endif
+#ifdef CONFIG_SPE
+ if (current->thread.used_spe)
+ flush_spe_to_thread(current);
+#endif
+}
+
+static int __unsafe_save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
+ struct mcontext __user *tm_frame, int ctx_has_vsx_region)
+{
+ unsigned long msr = regs->msr;
/* save general registers */
- if (save_general_regs(regs, frame))
- return 1;
+ unsafe_save_general_regs(regs, frame, failed);
#ifdef CONFIG_ALTIVEC
/* save altivec registers */
if (current->thread.used_vr) {
- flush_altivec_to_thread(current);
- if (__copy_to_user(&frame->mc_vregs, &current->thread.vr_state,
- ELF_NVRREG * sizeof(vector128)))
- return 1;
+ unsafe_copy_to_user(&frame->mc_vregs, &current->thread.vr_state,
+ ELF_NVRREG * sizeof(vector128), failed);
/* set MSR_VEC in the saved MSR value to indicate that
frame->mc_vregs contains valid data */
msr |= MSR_VEC;
@@ -412,13 +289,10 @@ static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
* most significant bits of that same vector. --BenH
* Note that the current VRSAVE value is in the SPR at this point.
*/
- if (cpu_has_feature(CPU_FTR_ALTIVEC))
- current->thread.vrsave = mfspr(SPRN_VRSAVE);
- if (__put_user(current->thread.vrsave, (u32 __user *)&frame->mc_vregs[32]))
- return 1;
+ unsafe_put_user(current->thread.vrsave, (u32 __user *)&frame->mc_vregs[32],
+ failed);
#endif /* CONFIG_ALTIVEC */
- if (copy_fpr_to_user(&frame->mc_fregs, current))
- return 1;
+ unsafe_copy_fpr_to_user(&frame->mc_fregs, current, failed);
/*
* Clear the MSR VSX bit to indicate there is no valid state attached
@@ -433,19 +307,15 @@ static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
* contains valid data
*/
if (current->thread.used_vsr && ctx_has_vsx_region) {
- flush_vsx_to_thread(current);
- if (copy_vsx_to_user(&frame->mc_vsregs, current))
- return 1;
+ unsafe_copy_vsx_to_user(&frame->mc_vsregs, current, failed);
msr |= MSR_VSX;
}
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
/* save spe registers */
if (current->thread.used_spe) {
- flush_spe_to_thread(current);
- if (__copy_to_user(&frame->mc_vregs, current->thread.evr,
- ELF_NEVRREG * sizeof(u32)))
- return 1;
+ unsafe_copy_to_user(&frame->mc_vregs, current->thread.evr,
+ ELF_NEVRREG * sizeof(u32), failed);
/* set MSR_SPE in the saved MSR value to indicate that
frame->mc_vregs contains valid data */
msr |= MSR_SPE;
@@ -453,30 +323,29 @@ static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
/* else assert((regs->msr & MSR_SPE) == 0) */
/* We always copy to/from spefscr */
- if (__put_user(current->thread.spefscr, (u32 __user *)&frame->mc_vregs + ELF_NEVRREG))
- return 1;
+ unsafe_put_user(current->thread.spefscr,
+ (u32 __user *)&frame->mc_vregs + ELF_NEVRREG, failed);
#endif /* CONFIG_SPE */
- if (__put_user(msr, &frame->mc_gregs[PT_MSR]))
- return 1;
+ unsafe_put_user(msr, &frame->mc_gregs[PT_MSR], failed);
+
/* We need to write 0 the MSR top 32 bits in the tm frame so that we
* can check it on the restore to see if TM is active
*/
- if (tm_frame && __put_user(0, &tm_frame->mc_gregs[PT_MSR]))
- return 1;
-
- if (sigret) {
- /* Set up the sigreturn trampoline: li 0,sigret; sc */
- if (__put_user(PPC_INST_ADDI + sigret, &frame->tramp[0])
- || __put_user(PPC_INST_SC, &frame->tramp[1]))
- return 1;
- flush_icache_range((unsigned long) &frame->tramp[0],
- (unsigned long) &frame->tramp[2]);
- }
+ if (tm_frame)
+ unsafe_put_user(0, &tm_frame->mc_gregs[PT_MSR], failed);
return 0;
+
+failed:
+ return 1;
}
+#define unsafe_save_user_regs(regs, frame, tm_frame, has_vsx, label) do { \
+ if (__unsafe_save_user_regs(regs, frame, tm_frame, has_vsx)) \
+ goto label; \
+} while (0)
+
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
* Save the current user registers on the user stack.
@@ -485,19 +354,22 @@ static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
* We also save the transactional registers to a second ucontext in the
* frame.
*
- * See save_user_regs() and signal_64.c:setup_tm_sigcontexts().
+ * See __unsafe_save_user_regs() and signal_64.c:setup_tm_sigcontexts().
*/
-static int save_tm_user_regs(struct pt_regs *regs,
- struct mcontext __user *frame,
- struct mcontext __user *tm_frame, int sigret,
- unsigned long msr)
+static void prepare_save_tm_user_regs(void)
{
WARN_ON(tm_suspend_disabled);
+ if (cpu_has_feature(CPU_FTR_ALTIVEC))
+ current->thread.ckvrsave = mfspr(SPRN_VRSAVE);
+}
+
+static int save_tm_user_regs_unsafe(struct pt_regs *regs, struct mcontext __user *frame,
+ struct mcontext __user *tm_frame, unsigned long msr)
+{
/* Save both sets of general registers */
- if (save_general_regs(&current->thread.ckpt_regs, frame)
- || save_general_regs(regs, tm_frame))
- return 1;
+ unsafe_save_general_regs(&current->thread.ckpt_regs, frame, failed);
+ unsafe_save_general_regs(regs, tm_frame, failed);
/* Stash the top half of the 64bit MSR into the 32bit MSR word
* of the transactional mcontext. This way we have a backward-compatible
@@ -505,26 +377,20 @@ static int save_tm_user_regs(struct pt_regs *regs,
* also look at what type of transaction (T or S) was active at the
* time of the signal.
*/
- if (__put_user((msr >> 32), &tm_frame->mc_gregs[PT_MSR]))
- return 1;
+ unsafe_put_user((msr >> 32), &tm_frame->mc_gregs[PT_MSR], failed);
-#ifdef CONFIG_ALTIVEC
/* save altivec registers */
if (current->thread.used_vr) {
- if (__copy_to_user(&frame->mc_vregs, &current->thread.ckvr_state,
- ELF_NVRREG * sizeof(vector128)))
- return 1;
- if (msr & MSR_VEC) {
- if (__copy_to_user(&tm_frame->mc_vregs,
- &current->thread.vr_state,
- ELF_NVRREG * sizeof(vector128)))
- return 1;
- } else {
- if (__copy_to_user(&tm_frame->mc_vregs,
- &current->thread.ckvr_state,
- ELF_NVRREG * sizeof(vector128)))
- return 1;
- }
+ unsafe_copy_to_user(&frame->mc_vregs, &current->thread.ckvr_state,
+ ELF_NVRREG * sizeof(vector128), failed);
+ if (msr & MSR_VEC)
+ unsafe_copy_to_user(&tm_frame->mc_vregs,
+ &current->thread.vr_state,
+ ELF_NVRREG * sizeof(vector128), failed);
+ else
+ unsafe_copy_to_user(&tm_frame->mc_vregs,
+ &current->thread.ckvr_state,
+ ELF_NVRREG * sizeof(vector128), failed);
/* set MSR_VEC in the saved MSR value to indicate that
* frame->mc_vregs contains valid data
@@ -537,33 +403,21 @@ static int save_tm_user_regs(struct pt_regs *regs,
* significant bits of a vector, we "cheat" and stuff VRSAVE in the
* most significant bits of that same vector. --BenH
*/
- if (cpu_has_feature(CPU_FTR_ALTIVEC))
- current->thread.ckvrsave = mfspr(SPRN_VRSAVE);
- if (__put_user(current->thread.ckvrsave,
- (u32 __user *)&frame->mc_vregs[32]))
- return 1;
- if (msr & MSR_VEC) {
- if (__put_user(current->thread.vrsave,
- (u32 __user *)&tm_frame->mc_vregs[32]))
- return 1;
- } else {
- if (__put_user(current->thread.ckvrsave,
- (u32 __user *)&tm_frame->mc_vregs[32]))
- return 1;
- }
-#endif /* CONFIG_ALTIVEC */
+ unsafe_put_user(current->thread.ckvrsave,
+ (u32 __user *)&frame->mc_vregs[32], failed);
+ if (msr & MSR_VEC)
+ unsafe_put_user(current->thread.vrsave,
+ (u32 __user *)&tm_frame->mc_vregs[32], failed);
+ else
+ unsafe_put_user(current->thread.ckvrsave,
+ (u32 __user *)&tm_frame->mc_vregs[32], failed);
- if (copy_ckfpr_to_user(&frame->mc_fregs, current))
- return 1;
- if (msr & MSR_FP) {
- if (copy_fpr_to_user(&tm_frame->mc_fregs, current))
- return 1;
- } else {
- if (copy_ckfpr_to_user(&tm_frame->mc_fregs, current))
- return 1;
- }
+ unsafe_copy_ckfpr_to_user(&frame->mc_fregs, current, failed);
+ if (msr & MSR_FP)
+ unsafe_copy_fpr_to_user(&tm_frame->mc_fregs, current, failed);
+ else
+ unsafe_copy_ckfpr_to_user(&tm_frame->mc_fregs, current, failed);
-#ifdef CONFIG_VSX
/*
* Copy VSR 0-31 upper half from thread_struct to local
* buffer, then write that to userspace. Also set MSR_VSX in
@@ -571,54 +425,37 @@ static int save_tm_user_regs(struct pt_regs *regs,
* contains valid data
*/
if (current->thread.used_vsr) {
- if (copy_ckvsx_to_user(&frame->mc_vsregs, current))
- return 1;
- if (msr & MSR_VSX) {
- if (copy_vsx_to_user(&tm_frame->mc_vsregs,
- current))
- return 1;
- } else {
- if (copy_ckvsx_to_user(&tm_frame->mc_vsregs, current))
- return 1;
- }
+ unsafe_copy_ckvsx_to_user(&frame->mc_vsregs, current, failed);
+ if (msr & MSR_VSX)
+ unsafe_copy_vsx_to_user(&tm_frame->mc_vsregs, current, failed);
+ else
+ unsafe_copy_ckvsx_to_user(&tm_frame->mc_vsregs, current, failed);
msr |= MSR_VSX;
}
-#endif /* CONFIG_VSX */
-#ifdef CONFIG_SPE
- /* SPE regs are not checkpointed with TM, so this section is
- * simply the same as in save_user_regs().
- */
- if (current->thread.used_spe) {
- flush_spe_to_thread(current);
- if (__copy_to_user(&frame->mc_vregs, current->thread.evr,
- ELF_NEVRREG * sizeof(u32)))
- return 1;
- /* set MSR_SPE in the saved MSR value to indicate that
- * frame->mc_vregs contains valid data */
- msr |= MSR_SPE;
- }
- /* We always copy to/from spefscr */
- if (__put_user(current->thread.spefscr, (u32 __user *)&frame->mc_vregs + ELF_NEVRREG))
- return 1;
-#endif /* CONFIG_SPE */
+ unsafe_put_user(msr, &frame->mc_gregs[PT_MSR], failed);
- if (__put_user(msr, &frame->mc_gregs[PT_MSR]))
- return 1;
- if (sigret) {
- /* Set up the sigreturn trampoline: li 0,sigret; sc */
- if (__put_user(PPC_INST_ADDI + sigret, &frame->tramp[0])
- || __put_user(PPC_INST_SC, &frame->tramp[1]))
- return 1;
- flush_icache_range((unsigned long) &frame->tramp[0],
- (unsigned long) &frame->tramp[2]);
- }
+ return 0;
+
+failed:
+ return 1;
+}
+#else
+static void prepare_save_tm_user_regs(void) { }
+static int save_tm_user_regs_unsafe(struct pt_regs *regs, struct mcontext __user *frame,
+ struct mcontext __user *tm_frame, unsigned long msr)
+{
return 0;
}
#endif
+#define unsafe_save_tm_user_regs(regs, frame, tm_frame, msr, label) do { \
+ if (save_tm_user_regs_unsafe(regs, frame, tm_frame, msr)) \
+ goto label; \
+} while (0)
+
/*
* Restore the current user register values from the user stack,
* (except for MSR).
@@ -626,69 +463,64 @@ static int save_tm_user_regs(struct pt_regs *regs,
static long restore_user_regs(struct pt_regs *regs,
struct mcontext __user *sr, int sig)
{
- long err;
unsigned int save_r2 = 0;
unsigned long msr;
#ifdef CONFIG_VSX
int i;
#endif
+ if (!user_read_access_begin(sr, sizeof(*sr)))
+ return 1;
/*
* restore general registers but not including MSR or SOFTE. Also
* take care of keeping r2 (TLS) intact if not a signal
*/
if (!sig)
save_r2 = (unsigned int)regs->gpr[2];
- err = restore_general_regs(regs, sr);
- regs->trap = 0;
- err |= __get_user(msr, &sr->mc_gregs[PT_MSR]);
+ unsafe_restore_general_regs(regs, sr, failed);
+ set_trap_norestart(regs);
+ unsafe_get_user(msr, &sr->mc_gregs[PT_MSR], failed);
if (!sig)
regs->gpr[2] = (unsigned long) save_r2;
- if (err)
- return 1;
/* if doing signal return, restore the previous little-endian mode */
if (sig)
- regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);
+ regs_set_return_msr(regs, (regs->msr & ~MSR_LE) | (msr & MSR_LE));
#ifdef CONFIG_ALTIVEC
/*
* Force the process to reload the altivec registers from
* current->thread when it next does altivec instructions
*/
- regs->msr &= ~MSR_VEC;
+ regs_set_return_msr(regs, regs->msr & ~MSR_VEC);
if (msr & MSR_VEC) {
/* restore altivec registers from the stack */
- if (__copy_from_user(&current->thread.vr_state, &sr->mc_vregs,
- sizeof(sr->mc_vregs)))
- return 1;
+ unsafe_copy_from_user(&current->thread.vr_state, &sr->mc_vregs,
+ sizeof(sr->mc_vregs), failed);
current->thread.used_vr = true;
} else if (current->thread.used_vr)
memset(&current->thread.vr_state, 0,
ELF_NVRREG * sizeof(vector128));
/* Always get VRSAVE back */
- if (__get_user(current->thread.vrsave, (u32 __user *)&sr->mc_vregs[32]))
- return 1;
+ unsafe_get_user(current->thread.vrsave, (u32 __user *)&sr->mc_vregs[32], failed);
if (cpu_has_feature(CPU_FTR_ALTIVEC))
mtspr(SPRN_VRSAVE, current->thread.vrsave);
#endif /* CONFIG_ALTIVEC */
- if (copy_fpr_from_user(current, &sr->mc_fregs))
- return 1;
+ unsafe_copy_fpr_from_user(current, &sr->mc_fregs, failed);
#ifdef CONFIG_VSX
/*
* Force the process to reload the VSX registers from
* current->thread when it next does VSX instruction.
*/
- regs->msr &= ~MSR_VSX;
+ regs_set_return_msr(regs, regs->msr & ~MSR_VSX);
if (msr & MSR_VSX) {
/*
* Restore altivec registers from the stack to a local
* buffer, then write this out to the thread_struct
*/
- if (copy_vsx_from_user(current, &sr->mc_vsregs))
- return 1;
+ unsafe_copy_vsx_from_user(current, &sr->mc_vsregs, failed);
current->thread.used_vsr = true;
} else if (current->thread.used_vsr)
for (i = 0; i < 32 ; i++)
@@ -698,27 +530,34 @@ static long restore_user_regs(struct pt_regs *regs,
* force the process to reload the FP registers from
* current->thread when it next does FP instructions
*/
- regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1);
+ regs_set_return_msr(regs, regs->msr & ~(MSR_FP | MSR_FE0 | MSR_FE1));
#ifdef CONFIG_SPE
- /* force the process to reload the spe registers from
- current->thread when it next does spe instructions */
- regs->msr &= ~MSR_SPE;
+ /*
+ * Force the process to reload the spe registers from
+ * current->thread when it next does spe instructions.
+ * Since this is user ABI, we must enforce the sizing.
+ */
+ BUILD_BUG_ON(sizeof(current->thread.spe) != ELF_NEVRREG * sizeof(u32));
+ regs_set_return_msr(regs, regs->msr & ~MSR_SPE);
if (msr & MSR_SPE) {
/* restore spe registers from the stack */
- if (__copy_from_user(current->thread.evr, &sr->mc_vregs,
- ELF_NEVRREG * sizeof(u32)))
- return 1;
+ unsafe_copy_from_user(&current->thread.spe, &sr->mc_vregs,
+ sizeof(current->thread.spe), failed);
current->thread.used_spe = true;
} else if (current->thread.used_spe)
- memset(current->thread.evr, 0, ELF_NEVRREG * sizeof(u32));
+ memset(&current->thread.spe, 0, sizeof(current->thread.spe));
/* Always get SPEFSCR back */
- if (__get_user(current->thread.spefscr, (u32 __user *)&sr->mc_vregs + ELF_NEVRREG))
- return 1;
+ unsafe_get_user(current->thread.spefscr, (u32 __user *)&sr->mc_vregs + ELF_NEVRREG, failed);
#endif /* CONFIG_SPE */
+ user_read_access_end();
return 0;
+
+failed:
+ user_read_access_end();
+ return 1;
}
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
@@ -731,11 +570,8 @@ static long restore_tm_user_regs(struct pt_regs *regs,
struct mcontext __user *sr,
struct mcontext __user *tm_sr)
{
- long err;
unsigned long msr, msr_hi;
-#ifdef CONFIG_VSX
int i;
-#endif
if (tm_suspend_disabled)
return 1;
@@ -746,28 +582,21 @@ static long restore_tm_user_regs(struct pt_regs *regs,
* TFHAR is restored from the checkpointed NIP; TEXASR and TFIAR
* were set by the signal delivery.
*/
- err = restore_general_regs(regs, tm_sr);
- err |= restore_general_regs(&current->thread.ckpt_regs, sr);
-
- err |= __get_user(current->thread.tm_tfhar, &sr->mc_gregs[PT_NIP]);
-
- err |= __get_user(msr, &sr->mc_gregs[PT_MSR]);
- if (err)
+ if (!user_read_access_begin(sr, sizeof(*sr)))
return 1;
+ unsafe_restore_general_regs(&current->thread.ckpt_regs, sr, failed);
+ unsafe_get_user(current->thread.tm_tfhar, &sr->mc_gregs[PT_NIP], failed);
+ unsafe_get_user(msr, &sr->mc_gregs[PT_MSR], failed);
+
/* Restore the previous little-endian mode */
- regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);
+ regs_set_return_msr(regs, (regs->msr & ~MSR_LE) | (msr & MSR_LE));
-#ifdef CONFIG_ALTIVEC
- regs->msr &= ~MSR_VEC;
+ regs_set_return_msr(regs, regs->msr & ~MSR_VEC);
if (msr & MSR_VEC) {
/* restore altivec registers from the stack */
- if (__copy_from_user(&current->thread.ckvr_state, &sr->mc_vregs,
- sizeof(sr->mc_vregs)) ||
- __copy_from_user(&current->thread.vr_state,
- &tm_sr->mc_vregs,
- sizeof(sr->mc_vregs)))
- return 1;
+ unsafe_copy_from_user(&current->thread.ckvr_state, &sr->mc_vregs,
+ sizeof(sr->mc_vregs), failed);
current->thread.used_vr = true;
} else if (current->thread.used_vr) {
memset(&current->thread.vr_state, 0,
@@ -777,62 +606,62 @@ static long restore_tm_user_regs(struct pt_regs *regs,
}
/* Always get VRSAVE back */
- if (__get_user(current->thread.ckvrsave,
- (u32 __user *)&sr->mc_vregs[32]) ||
- __get_user(current->thread.vrsave,
- (u32 __user *)&tm_sr->mc_vregs[32]))
- return 1;
+ unsafe_get_user(current->thread.ckvrsave,
+ (u32 __user *)&sr->mc_vregs[32], failed);
if (cpu_has_feature(CPU_FTR_ALTIVEC))
mtspr(SPRN_VRSAVE, current->thread.ckvrsave);
-#endif /* CONFIG_ALTIVEC */
- regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1);
+ regs_set_return_msr(regs, regs->msr & ~(MSR_FP | MSR_FE0 | MSR_FE1));
- if (copy_fpr_from_user(current, &sr->mc_fregs) ||
- copy_ckfpr_from_user(current, &tm_sr->mc_fregs))
- return 1;
+ unsafe_copy_fpr_from_user(current, &sr->mc_fregs, failed);
-#ifdef CONFIG_VSX
- regs->msr &= ~MSR_VSX;
+ regs_set_return_msr(regs, regs->msr & ~MSR_VSX);
if (msr & MSR_VSX) {
/*
* Restore altivec registers from the stack to a local
* buffer, then write this out to the thread_struct
*/
- if (copy_vsx_from_user(current, &tm_sr->mc_vsregs) ||
- copy_ckvsx_from_user(current, &sr->mc_vsregs))
- return 1;
+ unsafe_copy_ckvsx_from_user(current, &sr->mc_vsregs, failed);
current->thread.used_vsr = true;
} else if (current->thread.used_vsr)
for (i = 0; i < 32 ; i++) {
current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
current->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
}
-#endif /* CONFIG_VSX */
-#ifdef CONFIG_SPE
- /* SPE regs are not checkpointed with TM, so this section is
- * simply the same as in restore_user_regs().
- */
- regs->msr &= ~MSR_SPE;
- if (msr & MSR_SPE) {
- if (__copy_from_user(current->thread.evr, &sr->mc_vregs,
- ELF_NEVRREG * sizeof(u32)))
- return 1;
- current->thread.used_spe = true;
- } else if (current->thread.used_spe)
- memset(current->thread.evr, 0, ELF_NEVRREG * sizeof(u32));
+ user_read_access_end();
- /* Always get SPEFSCR back */
- if (__get_user(current->thread.spefscr, (u32 __user *)&sr->mc_vregs
- + ELF_NEVRREG))
+ if (!user_read_access_begin(tm_sr, sizeof(*tm_sr)))
return 1;
-#endif /* CONFIG_SPE */
+
+ unsafe_restore_general_regs(regs, tm_sr, failed);
+
+ /* restore altivec registers from the stack */
+ if (msr & MSR_VEC)
+ unsafe_copy_from_user(&current->thread.vr_state, &tm_sr->mc_vregs,
+ sizeof(sr->mc_vregs), failed);
+
+ /* Always get VRSAVE back */
+ unsafe_get_user(current->thread.vrsave,
+ (u32 __user *)&tm_sr->mc_vregs[32], failed);
+
+ unsafe_copy_ckfpr_from_user(current, &tm_sr->mc_fregs, failed);
+
+ if (msr & MSR_VSX) {
+ /*
+ * Restore altivec registers from the stack to a local
+ * buffer, then write this out to the thread_struct
+ */
+ unsafe_copy_vsx_from_user(current, &tm_sr->mc_vsregs, failed);
+ current->thread.used_vsr = true;
+ }
/* Get the top half of the MSR from the user context */
- if (__get_user(msr_hi, &tm_sr->mc_gregs[PT_MSR]))
- return 1;
+ unsafe_get_user(msr_hi, &tm_sr->mc_gregs[PT_MSR], failed);
msr_hi <<= 32;
+
+ user_read_access_end();
+
/* If TM bits are set to the reserved value, it's an invalid context */
if (MSR_TM_RESV(msr_hi))
return 1;
@@ -853,7 +682,7 @@ static long restore_tm_user_regs(struct pt_regs *regs,
*
* Pull in the MSR TM bits from the user context
*/
- regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr_hi & MSR_TS_MASK);
+ regs_set_return_msr(regs, (regs->msr & ~MSR_TS_MASK) | (msr_hi & MSR_TS_MASK));
/* Now, recheckpoint. This loads up all of the checkpointed (older)
* registers, including FP and V[S]Rs. After recheckpointing, the
* transactional versions should be loaded.
@@ -868,18 +697,26 @@ static long restore_tm_user_regs(struct pt_regs *regs,
msr_check_and_set(msr & (MSR_FP | MSR_VEC));
if (msr & MSR_FP) {
load_fp_state(&current->thread.fp_state);
- regs->msr |= (MSR_FP | current->thread.fpexc_mode);
+ regs_set_return_msr(regs, regs->msr | (MSR_FP | current->thread.fpexc_mode));
}
-#ifdef CONFIG_ALTIVEC
if (msr & MSR_VEC) {
load_vr_state(&current->thread.vr_state);
- regs->msr |= MSR_VEC;
+ regs_set_return_msr(regs, regs->msr | MSR_VEC);
}
-#endif
preempt_enable();
return 0;
+
+failed:
+ user_read_access_end();
+ return 1;
+}
+#else
+static long restore_tm_user_regs(struct pt_regs *regs, struct mcontext __user *sr,
+ struct mcontext __user *tm_sr)
+{
+ return 0;
}
#endif
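
The #else stub above is what lets call sites drop their own #ifdef CONFIG_PPC_TRANSACTIONAL_MEM: the compiler always sees a definition of restore_tm_user_regs(), and a condition that is false at compile time (or an explicit IS_ENABLED() check, as the 64-bit side uses further down) lets dead-code elimination remove the call when TM support is not built. A minimal sketch of the idiom, with a hypothetical CONFIG_FOO and function names:

#ifdef CONFIG_FOO
long restore_foo_state(struct pt_regs *regs);		/* real version built elsewhere */
#else
static inline long restore_foo_state(struct pt_regs *regs)
{
	return 0;					/* never called when CONFIG_FOO=n */
}
#endif

static long restore_state(struct pt_regs *regs, bool foo_active)
{
	/* Still parsed and type-checked when CONFIG_FOO=n, then optimised away. */
	if (IS_ENABLED(CONFIG_FOO) && foo_active)
		return restore_foo_state(regs);
	return 0;
}
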
@@ -896,96 +733,183 @@ static long restore_tm_user_regs(struct pt_regs *regs,
int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
struct task_struct *tsk)
{
- struct rt_sigframe __user *rt_sf;
- struct mcontext __user *frame;
- struct mcontext __user *tm_frame = NULL;
- void __user *addr;
+ struct rt_sigframe __user *frame;
+ struct mcontext __user *mctx;
+ struct mcontext __user *tm_mctx = NULL;
unsigned long newsp = 0;
- int sigret;
unsigned long tramp;
struct pt_regs *regs = tsk->thread.regs;
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/* Save the thread's msr before get_tm_stackpointer() changes it */
unsigned long msr = regs->msr;
-#endif
-
- BUG_ON(tsk != current);
/* Set up Signal Frame */
- /* Put a Real Time Context onto stack */
- rt_sf = get_sigframe(ksig, get_tm_stackpointer(tsk), sizeof(*rt_sf), 1);
- addr = rt_sf;
- if (unlikely(rt_sf == NULL))
+ frame = get_sigframe(ksig, tsk, sizeof(*frame), 1);
+ mctx = &frame->uc.uc_mcontext;
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ tm_mctx = &frame->uc_transact.uc_mcontext;
+#endif
+ if (MSR_TM_ACTIVE(msr))
+ prepare_save_tm_user_regs();
+ else
+ prepare_save_user_regs(1);
+
+ if (!user_access_begin(frame, sizeof(*frame)))
goto badframe;
/* Put the siginfo & fill in most of the ucontext */
- if (copy_siginfo_to_user(&rt_sf->info, &ksig->info)
- || __put_user(0, &rt_sf->uc.uc_flags)
- || __save_altstack(&rt_sf->uc.uc_stack, regs->gpr[1])
- || __put_user(to_user_ptr(&rt_sf->uc.uc_mcontext),
- &rt_sf->uc.uc_regs)
- || put_sigset_t(&rt_sf->uc.uc_sigmask, oldset))
- goto badframe;
+ unsafe_put_user(0, &frame->uc.uc_flags, failed);
+#ifdef CONFIG_PPC64
+ unsafe_compat_save_altstack(&frame->uc.uc_stack, regs->gpr[1], failed);
+#else
+ unsafe_save_altstack(&frame->uc.uc_stack, regs->gpr[1], failed);
+#endif
+ unsafe_put_user(to_user_ptr(&frame->uc.uc_mcontext), &frame->uc.uc_regs, failed);
+
+ if (MSR_TM_ACTIVE(msr)) {
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ unsafe_put_user((unsigned long)&frame->uc_transact,
+ &frame->uc.uc_link, failed);
+ unsafe_put_user((unsigned long)tm_mctx,
+ &frame->uc_transact.uc_regs, failed);
+#endif
+ unsafe_save_tm_user_regs(regs, mctx, tm_mctx, msr, failed);
+ } else {
+ unsafe_put_user(0, &frame->uc.uc_link, failed);
+ unsafe_save_user_regs(regs, mctx, tm_mctx, 1, failed);
+ }
/* Save user registers on the stack */
- frame = &rt_sf->uc.uc_mcontext;
- addr = frame;
- if (vdso32_rt_sigtramp && tsk->mm->context.vdso_base) {
- sigret = 0;
- tramp = tsk->mm->context.vdso_base + vdso32_rt_sigtramp;
+ if (tsk->mm->context.vdso) {
+ tramp = VDSO32_SYMBOL(tsk->mm->context.vdso, sigtramp_rt32);
} else {
- sigret = __NR_rt_sigreturn;
- tramp = (unsigned long) frame->tramp;
+ tramp = (unsigned long)mctx->mc_pad;
+ unsafe_put_user(PPC_RAW_LI(_R0, __NR_rt_sigreturn), &mctx->mc_pad[0], failed);
+ unsafe_put_user(PPC_RAW_SC(), &mctx->mc_pad[1], failed);
+ asm("dcbst %y0; sync; icbi %y0; sync" :: "Z" (mctx->mc_pad[0]));
}
+ unsafe_put_sigset_t(&frame->uc.uc_sigmask, oldset, failed);
+
+ user_access_end();
+
+ if (copy_siginfo_to_user(&frame->info, &ksig->info))
+ goto badframe;
+
+ regs->link = tramp;
+
+#ifdef CONFIG_PPC_FPU_REGS
+ tsk->thread.fp_state.fpscr = 0; /* turn off all fp exceptions */
+#endif
+ /* create a stack frame for the caller of the handler */
+ newsp = ((unsigned long)frame) - (__SIGNAL_FRAMESIZE + 16);
+ if (put_user(regs->gpr[1], (u32 __user *)newsp))
+ goto badframe;
+
+ /* Fill registers for signal handler */
+ regs->gpr[1] = newsp;
+ regs->gpr[3] = ksig->sig;
+ regs->gpr[4] = (unsigned long)&frame->info;
+ regs->gpr[5] = (unsigned long)&frame->uc;
+ regs->gpr[6] = (unsigned long)frame;
+ regs_set_return_ip(regs, (unsigned long) ksig->ka.sa.sa_handler);
+ /* enter the signal handler in native-endian mode */
+ regs_set_return_msr(regs, (regs->msr & ~MSR_LE) | (MSR_KERNEL & MSR_LE));
+
+ return 0;
+
+failed:
+ user_access_end();
+
+badframe:
+ signal_fault(tsk, regs, "handle_rt_signal32", frame);
+
+ return 1;
+}
+
+/*
+ * OK, we're invoking a handler
+ */
+int handle_signal32(struct ksignal *ksig, sigset_t *oldset,
+ struct task_struct *tsk)
+{
+ struct sigcontext __user *sc;
+ struct sigframe __user *frame;
+ struct mcontext __user *mctx;
+ struct mcontext __user *tm_mctx = NULL;
+ unsigned long newsp = 0;
+ unsigned long tramp;
+ struct pt_regs *regs = tsk->thread.regs;
+ /* Save the thread's msr before get_tm_stackpointer() changes it */
+ unsigned long msr = regs->msr;
+
+ /* Set up Signal Frame */
+ frame = get_sigframe(ksig, tsk, sizeof(*frame), 1);
+ mctx = &frame->mctx;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
- tm_frame = &rt_sf->uc_transact.uc_mcontext;
- if (MSR_TM_ACTIVE(msr)) {
- if (__put_user((unsigned long)&rt_sf->uc_transact,
- &rt_sf->uc.uc_link) ||
- __put_user((unsigned long)tm_frame,
- &rt_sf->uc_transact.uc_regs))
- goto badframe;
- if (save_tm_user_regs(regs, frame, tm_frame, sigret, msr))
- goto badframe;
- }
+ tm_mctx = &frame->mctx_transact;
+#endif
+ if (MSR_TM_ACTIVE(msr))
+ prepare_save_tm_user_regs();
else
+ prepare_save_user_regs(1);
+
+ if (!user_access_begin(frame, sizeof(*frame)))
+ goto badframe;
+ sc = (struct sigcontext __user *) &frame->sctx;
+
+#if _NSIG != 64
+#error "Please adjust handle_signal()"
#endif
- {
- if (__put_user(0, &rt_sf->uc.uc_link))
- goto badframe;
- if (save_user_regs(regs, frame, tm_frame, sigret, 1))
- goto badframe;
+ unsafe_put_user(to_user_ptr(ksig->ka.sa.sa_handler), &sc->handler, failed);
+ unsafe_put_user(oldset->sig[0], &sc->oldmask, failed);
+#ifdef CONFIG_PPC64
+ unsafe_put_user((oldset->sig[0] >> 32), &sc->_unused[3], failed);
+#else
+ unsafe_put_user(oldset->sig[1], &sc->_unused[3], failed);
+#endif
+ unsafe_put_user(to_user_ptr(mctx), &sc->regs, failed);
+ unsafe_put_user(ksig->sig, &sc->signal, failed);
+
+ if (MSR_TM_ACTIVE(msr))
+ unsafe_save_tm_user_regs(regs, mctx, tm_mctx, msr, failed);
+ else
+ unsafe_save_user_regs(regs, mctx, tm_mctx, 1, failed);
+
+ if (tsk->mm->context.vdso) {
+ tramp = VDSO32_SYMBOL(tsk->mm->context.vdso, sigtramp32);
+ } else {
+ tramp = (unsigned long)mctx->mc_pad;
+ unsafe_put_user(PPC_RAW_LI(_R0, __NR_sigreturn), &mctx->mc_pad[0], failed);
+ unsafe_put_user(PPC_RAW_SC(), &mctx->mc_pad[1], failed);
+ asm("dcbst %y0; sync; icbi %y0; sync" :: "Z" (mctx->mc_pad[0]));
}
+ user_access_end();
+
regs->link = tramp;
+#ifdef CONFIG_PPC_FPU_REGS
tsk->thread.fp_state.fpscr = 0; /* turn off all fp exceptions */
+#endif
/* create a stack frame for the caller of the handler */
- newsp = ((unsigned long)rt_sf) - (__SIGNAL_FRAMESIZE + 16);
- addr = (void __user *)regs->gpr[1];
+ newsp = ((unsigned long)frame) - __SIGNAL_FRAMESIZE;
if (put_user(regs->gpr[1], (u32 __user *)newsp))
goto badframe;
- /* Fill registers for signal handler */
regs->gpr[1] = newsp;
regs->gpr[3] = ksig->sig;
- regs->gpr[4] = (unsigned long) &rt_sf->info;
- regs->gpr[5] = (unsigned long) &rt_sf->uc;
- regs->gpr[6] = (unsigned long) rt_sf;
- regs->nip = (unsigned long) ksig->ka.sa.sa_handler;
+ regs->gpr[4] = (unsigned long) sc;
+ regs_set_return_ip(regs, (unsigned long) ksig->ka.sa.sa_handler);
/* enter the signal handler in native-endian mode */
- regs->msr &= ~MSR_LE;
- regs->msr |= (MSR_KERNEL & MSR_LE);
+ regs_set_return_msr(regs, (regs->msr & ~MSR_LE) | (MSR_KERNEL & MSR_LE));
+
return 0;
+failed:
+ user_access_end();
+
badframe:
- if (show_unhandled_signals)
- printk_ratelimited(KERN_INFO
- "%s[%d]: bad frame in handle_rt_signal32: "
- "%p nip %08lx lr %08lx\n",
- tsk->comm, tsk->pid,
- addr, regs->nip, regs->link);
+ signal_fault(tsk, regs, "handle_signal32", frame);
return 1;
}
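
When no VDSO is mapped, both handle_rt_signal32() and handle_signal32() above fall back to writing a two-instruction return trampoline (li r0, __NR_[rt_]sigreturn; sc) into the frame's mc_pad words and pointing regs->link at it; the inline dcbst/sync/icbi/sync sequence then pushes the stores out of the data cache and invalidates the stale instruction-cache line so the stored instructions can be fetched and executed. A stripped-down sketch of that sequence (the helper name is hypothetical, error handling reduced to put_user() return codes; PPC_RAW_LI, PPC_RAW_SC and _R0 are the opcode helpers used by the hunks above):

static int emit_sigreturn_tramp(u32 __user *pad, unsigned long nr)
{
	if (put_user(PPC_RAW_LI(_R0, nr), &pad[0]) ||	/* li r0, nr */
	    put_user(PPC_RAW_SC(), &pad[1]))		/* sc        */
		return -EFAULT;

	/* Make the freshly stored instructions visible to instruction fetch. */
	asm volatile("dcbst %y0; sync; icbi %y0; sync" : : "Z" (pad[0]));
	return 0;
}
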
@@ -995,28 +919,31 @@ static int do_setcontext(struct ucontext __user *ucp, struct pt_regs *regs, int
sigset_t set;
struct mcontext __user *mcp;
- if (get_sigset_t(&set, &ucp->uc_sigmask))
+ if (!user_read_access_begin(ucp, sizeof(*ucp)))
return -EFAULT;
+
+ unsafe_get_sigset_t(&set, &ucp->uc_sigmask, failed);
#ifdef CONFIG_PPC64
{
u32 cmcp;
- if (__get_user(cmcp, &ucp->uc_regs))
- return -EFAULT;
+ unsafe_get_user(cmcp, &ucp->uc_regs, failed);
mcp = (struct mcontext __user *)(u64)cmcp;
- /* no need to check access_ok(mcp), since mcp < 4GB */
}
#else
- if (__get_user(mcp, &ucp->uc_regs))
- return -EFAULT;
- if (!access_ok(mcp, sizeof(*mcp)))
- return -EFAULT;
+ unsafe_get_user(mcp, &ucp->uc_regs, failed);
#endif
+ user_read_access_end();
+
set_current_blocked(&set);
if (restore_user_regs(regs, mcp, sig))
return -EFAULT;
return 0;
+
+failed:
+ user_read_access_end();
+ return -EFAULT;
}
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
@@ -1030,11 +957,15 @@ static int do_setcontext_tm(struct ucontext __user *ucp,
u32 cmcp;
u32 tm_cmcp;
- if (get_sigset_t(&set, &ucp->uc_sigmask))
+ if (!user_read_access_begin(ucp, sizeof(*ucp)))
return -EFAULT;
- if (__get_user(cmcp, &ucp->uc_regs) ||
- __get_user(tm_cmcp, &tm_ucp->uc_regs))
+ unsafe_get_sigset_t(&set, &ucp->uc_sigmask, failed);
+ unsafe_get_user(cmcp, &ucp->uc_regs, failed);
+
+ user_read_access_end();
+
+ if (__get_user(tm_cmcp, &tm_ucp->uc_regs))
return -EFAULT;
mcp = (struct mcontext __user *)(u64)cmcp;
tm_mcp = (struct mcontext __user *)(u64)tm_cmcp;
@@ -1045,6 +976,10 @@ static int do_setcontext_tm(struct ucontext __user *ucp,
return -EFAULT;
return 0;
+
+failed:
+ user_read_access_end();
+ return -EFAULT;
}
#endif
@@ -1112,16 +1047,18 @@ SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx,
*/
mctx = (struct mcontext __user *)
((unsigned long) &old_ctx->uc_mcontext & ~0xfUL);
- if (!access_ok(old_ctx, ctx_size)
- || save_user_regs(regs, mctx, NULL, 0, ctx_has_vsx_region)
- || put_sigset_t(&old_ctx->uc_sigmask, &current->blocked)
- || __put_user(to_user_ptr(mctx), &old_ctx->uc_regs))
+ prepare_save_user_regs(ctx_has_vsx_region);
+ if (!user_write_access_begin(old_ctx, ctx_size))
return -EFAULT;
+ unsafe_save_user_regs(regs, mctx, NULL, ctx_has_vsx_region, failed);
+ unsafe_put_sigset_t(&old_ctx->uc_sigmask, &current->blocked, failed);
+ unsafe_put_user(to_user_ptr(mctx), &old_ctx->uc_regs, failed);
+ user_write_access_end();
}
if (new_ctx == NULL)
return 0;
if (!access_ok(new_ctx, ctx_size) ||
- fault_in_pages_readable((u8 __user *)new_ctx, ctx_size))
+ fault_in_readable((char __user *)new_ctx, ctx_size))
return -EFAULT;
/*
@@ -1135,11 +1072,17 @@ SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx,
* or if another thread unmaps the region containing the context.
* We kill the task with a SIGSEGV in this situation.
*/
- if (do_setcontext(new_ctx, regs, 0))
- do_exit(SIGSEGV);
+ if (do_setcontext(new_ctx, regs, 0)) {
+ force_exit_sig(SIGSEGV);
+ return -EFAULT;
+ }
set_thread_flag(TIF_RESTOREALL);
return 0;
+
+failed:
+ user_write_access_end();
+ return -EFAULT;
}
#ifdef CONFIG_PPC64
@@ -1211,7 +1154,7 @@ SYSCALL_DEFINE0(rt_sigreturn)
* set, and recheckpoint was not called. This avoid
* hitting a TM Bad thing at RFID
*/
- regs->msr &= ~MSR_TS_MASK;
+ regs_set_return_msr(regs, regs->msr & ~MSR_TS_MASK);
}
/* Fall through, for non-TM restore */
#endif
@@ -1237,12 +1180,7 @@ SYSCALL_DEFINE0(rt_sigreturn)
return 0;
bad:
- if (show_unhandled_signals)
- printk_ratelimited(KERN_INFO
- "%s[%d]: bad frame in sys_rt_sigreturn: "
- "%p nip %08lx lr %08lx\n",
- current->comm, current->pid,
- rt_sf, regs->nip, regs->link);
+ signal_fault(current, regs, "sys_rt_sigreturn", rt_sf);
force_sig(SIGSEGV);
return 0;
@@ -1305,13 +1243,13 @@ SYSCALL_DEFINE3(debug_setcontext, struct ucontext __user *, ctx,
affect the contents of these registers. After this point,
failure is a problem, anyway, and it's very unlikely unless
the user is really doing something wrong. */
- regs->msr = new_msr;
+ regs_set_return_msr(regs, new_msr);
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
current->thread.debug.dbcr0 = new_dbcr0;
#endif
if (!access_ok(ctx, sizeof(*ctx)) ||
- fault_in_pages_readable((u8 __user *)ctx, sizeof(*ctx)))
+ fault_in_readable((char __user *)ctx, sizeof(*ctx)))
return -EFAULT;
/*
@@ -1326,12 +1264,7 @@ SYSCALL_DEFINE3(debug_setcontext, struct ucontext __user *, ctx,
* We kill the task with a SIGSEGV in this situation.
*/
if (do_setcontext(ctx, regs, 1)) {
- if (show_unhandled_signals)
- printk_ratelimited(KERN_INFO "%s[%d]: bad frame in "
- "sys_debug_setcontext: %p nip %08lx "
- "lr %08lx\n",
- current->comm, current->pid,
- ctx, regs->nip, regs->link);
+ signal_fault(current, regs, "sys_debug_setcontext", ctx);
force_sig(SIGSEGV);
goto out;
@@ -1353,96 +1286,6 @@ SYSCALL_DEFINE3(debug_setcontext, struct ucontext __user *, ctx,
#endif
/*
- * OK, we're invoking a handler
- */
-int handle_signal32(struct ksignal *ksig, sigset_t *oldset,
- struct task_struct *tsk)
-{
- struct sigcontext __user *sc;
- struct sigframe __user *frame;
- struct mcontext __user *tm_mctx = NULL;
- unsigned long newsp = 0;
- int sigret;
- unsigned long tramp;
- struct pt_regs *regs = tsk->thread.regs;
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
- /* Save the thread's msr before get_tm_stackpointer() changes it */
- unsigned long msr = regs->msr;
-#endif
-
- BUG_ON(tsk != current);
-
- /* Set up Signal Frame */
- frame = get_sigframe(ksig, get_tm_stackpointer(tsk), sizeof(*frame), 1);
- if (unlikely(frame == NULL))
- goto badframe;
- sc = (struct sigcontext __user *) &frame->sctx;
-
-#if _NSIG != 64
-#error "Please adjust handle_signal()"
-#endif
- if (__put_user(to_user_ptr(ksig->ka.sa.sa_handler), &sc->handler)
- || __put_user(oldset->sig[0], &sc->oldmask)
-#ifdef CONFIG_PPC64
- || __put_user((oldset->sig[0] >> 32), &sc->_unused[3])
-#else
- || __put_user(oldset->sig[1], &sc->_unused[3])
-#endif
- || __put_user(to_user_ptr(&frame->mctx), &sc->regs)
- || __put_user(ksig->sig, &sc->signal))
- goto badframe;
-
- if (vdso32_sigtramp && tsk->mm->context.vdso_base) {
- sigret = 0;
- tramp = tsk->mm->context.vdso_base + vdso32_sigtramp;
- } else {
- sigret = __NR_sigreturn;
- tramp = (unsigned long) frame->mctx.tramp;
- }
-
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
- tm_mctx = &frame->mctx_transact;
- if (MSR_TM_ACTIVE(msr)) {
- if (save_tm_user_regs(regs, &frame->mctx, &frame->mctx_transact,
- sigret, msr))
- goto badframe;
- }
- else
-#endif
- {
- if (save_user_regs(regs, &frame->mctx, tm_mctx, sigret, 1))
- goto badframe;
- }
-
- regs->link = tramp;
-
- tsk->thread.fp_state.fpscr = 0; /* turn off all fp exceptions */
-
- /* create a stack frame for the caller of the handler */
- newsp = ((unsigned long)frame) - __SIGNAL_FRAMESIZE;
- if (put_user(regs->gpr[1], (u32 __user *)newsp))
- goto badframe;
-
- regs->gpr[1] = newsp;
- regs->gpr[3] = ksig->sig;
- regs->gpr[4] = (unsigned long) sc;
- regs->nip = (unsigned long) (unsigned long)ksig->ka.sa.sa_handler;
- /* enter the signal handler in big-endian mode */
- regs->msr &= ~MSR_LE;
- return 0;
-
-badframe:
- if (show_unhandled_signals)
- printk_ratelimited(KERN_INFO
- "%s[%d]: bad frame in handle_signal32: "
- "%p nip %08lx lr %08lx\n",
- tsk->comm, tsk->pid,
- frame, regs->nip, regs->link);
-
- return 1;
-}
-
-/*
* Do a signal return; undo the signal stack.
*/
#ifdef CONFIG_PPC64
@@ -1456,19 +1299,16 @@ SYSCALL_DEFINE0(sigreturn)
struct sigcontext __user *sc;
struct sigcontext sigctx;
struct mcontext __user *sr;
- void __user *addr;
sigset_t set;
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
- struct mcontext __user *mcp, *tm_mcp;
- unsigned long msr_hi;
-#endif
+ struct mcontext __user *mcp;
+ struct mcontext __user *tm_mcp = NULL;
+ unsigned long long msr_hi = 0;
/* Always make any pending restarted system calls return -EINTR */
current->restart_block.fn = do_no_restart_syscall;
sf = (struct sigframe __user *)(regs->gpr[1] + __SIGNAL_FRAMESIZE);
sc = &sf->sctx;
- addr = sc;
if (copy_from_user(&sigctx, sc, sizeof(sigctx)))
goto badframe;
@@ -1484,36 +1324,32 @@ SYSCALL_DEFINE0(sigreturn)
#endif
set_current_blocked(&set);
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
mcp = (struct mcontext __user *)&sf->mctx;
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
tm_mcp = (struct mcontext __user *)&sf->mctx_transact;
if (__get_user(msr_hi, &tm_mcp->mc_gregs[PT_MSR]))
goto badframe;
+#endif
if (MSR_TM_ACTIVE(msr_hi<<32)) {
if (!cpu_has_feature(CPU_FTR_TM))
goto badframe;
if (restore_tm_user_regs(regs, mcp, tm_mcp))
goto badframe;
- } else
-#endif
- {
+ } else {
sr = (struct mcontext __user *)from_user_ptr(sigctx.regs);
- addr = sr;
- if (!access_ok(sr, sizeof(*sr))
- || restore_user_regs(regs, sr, 1))
- goto badframe;
+ if (restore_user_regs(regs, sr, 1)) {
+ signal_fault(current, regs, "sys_sigreturn", sr);
+
+ force_sig(SIGSEGV);
+ return 0;
+ }
}
set_thread_flag(TIF_RESTOREALL);
return 0;
badframe:
- if (show_unhandled_signals)
- printk_ratelimited(KERN_INFO
- "%s[%d]: bad frame in sys_sigreturn: "
- "%p nip %08lx lr %08lx\n",
- current->comm, current->pid,
- addr, regs->nip, regs->link);
+ signal_fault(current, regs, "sys_sigreturn", sc);
force_sig(SIGSEGV);
return 0;
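
Every conversion in this file follows the same shape: anything that can fault, sleep or be traced (prepare_save_user_regs(), copy_siginfo_to_user(), flushing live register state) runs first, then a single user_access_begin()/user_read_access_begin()/user_write_access_begin() window is opened, only unsafe_* accessors that branch to a local label on failure run inside it, and the window is closed on both the success path and the failure label. A minimal sketch of the pattern, with a hypothetical struct frame and field names:

static int write_frame(struct frame __user *frame, unsigned long val)
{
	if (!user_write_access_begin(frame, sizeof(*frame)))
		return -EFAULT;

	unsafe_put_user(val, &frame->a, failed);	/* jumps to 'failed' on a fault */
	unsafe_put_user(0, &frame->b, failed);

	user_write_access_end();
	return 0;

failed:
	user_write_access_end();	/* the window must be closed on the error path too */
	return -EFAULT;
}
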
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index 84ed2e77ef9c..86bb5bb4c143 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -21,11 +21,11 @@
#include <linux/ptrace.h>
#include <linux/ratelimit.h>
#include <linux/syscalls.h>
+#include <linux/pagemap.h>
#include <asm/sigcontext.h>
#include <asm/ucontext.h>
#include <linux/uaccess.h>
-#include <asm/pgtable.h>
#include <asm/unistd.h>
#include <asm/cacheflush.h>
#include <asm/syscalls.h>
@@ -40,8 +40,8 @@
#define GP_REGS_SIZE min(sizeof(elf_gregset_t), sizeof(struct pt_regs))
#define FP_REGS_SIZE sizeof(elf_fpregset_t)
-#define TRAMP_TRACEBACK 3
-#define TRAMP_SIZE 6
+#define TRAMP_TRACEBACK 4
+#define TRAMP_SIZE 7
/*
* When we have signals to deliver, we set up on the user stack,
@@ -66,10 +66,10 @@ struct rt_sigframe {
char abigap[USER_REDZONE_SIZE];
} __attribute__ ((aligned (16)));
-static const char fmt32[] = KERN_INFO \
- "%s[%d]: bad frame in %s: %08lx nip %08lx lr %08lx\n";
-static const char fmt64[] = KERN_INFO \
- "%s[%d]: bad frame in %s: %016lx nip %016lx lr %016lx\n";
+unsigned long get_min_sigframe_size_64(void)
+{
+ return sizeof(struct rt_sigframe) + __SIGNAL_FRAMESIZE;
+}
/*
* This computes a quad word aligned pointer inside the vmx_reserve array
@@ -84,13 +84,36 @@ static elf_vrreg_t __user *sigcontext_vmx_regs(struct sigcontext __user *sc)
}
#endif
+static void prepare_setup_sigcontext(struct task_struct *tsk)
+{
+#ifdef CONFIG_ALTIVEC
+ /* save altivec registers */
+ if (tsk->thread.used_vr)
+ flush_altivec_to_thread(tsk);
+ if (cpu_has_feature(CPU_FTR_ALTIVEC))
+ tsk->thread.vrsave = mfspr(SPRN_VRSAVE);
+#endif /* CONFIG_ALTIVEC */
+
+ flush_fp_to_thread(tsk);
+
+#ifdef CONFIG_VSX
+ if (tsk->thread.used_vsr)
+ flush_vsx_to_thread(tsk);
+#endif /* CONFIG_VSX */
+}
+
/*
* Set up the sigcontext for the signal frame.
*/
-static long setup_sigcontext(struct sigcontext __user *sc,
- struct task_struct *tsk, int signr, sigset_t *set,
- unsigned long handler, int ctx_has_vsx_region)
+#define unsafe_setup_sigcontext(sc, tsk, signr, set, handler, ctx_has_vsx_region, label)\
+do { \
+ if (__unsafe_setup_sigcontext(sc, tsk, signr, set, handler, ctx_has_vsx_region))\
+ goto label; \
+} while (0)
+static long notrace __unsafe_setup_sigcontext(struct sigcontext __user *sc,
+ struct task_struct *tsk, int signr, sigset_t *set,
+ unsigned long handler, int ctx_has_vsx_region)
{
/* When CONFIG_ALTIVEC is set, we _always_ setup v_regs even if the
* process never used altivec yet (MSR_VEC is zero in pt_regs of
@@ -102,25 +125,22 @@ static long setup_sigcontext(struct sigcontext __user *sc,
*/
#ifdef CONFIG_ALTIVEC
elf_vrreg_t __user *v_regs = sigcontext_vmx_regs(sc);
- unsigned long vrsave;
#endif
struct pt_regs *regs = tsk->thread.regs;
unsigned long msr = regs->msr;
- long err = 0;
- /* Force usr to alway see softe as 1 (interrupts enabled) */
+ /* Force usr to always see softe as 1 (interrupts enabled) */
unsigned long softe = 0x1;
BUG_ON(tsk != current);
#ifdef CONFIG_ALTIVEC
- err |= __put_user(v_regs, &sc->v_regs);
+ unsafe_put_user(v_regs, &sc->v_regs, efault_out);
/* save altivec registers */
if (tsk->thread.used_vr) {
- flush_altivec_to_thread(tsk);
/* Copy 33 vec registers (vr0..31 and vscr) to the stack */
- err |= __copy_to_user(v_regs, &tsk->thread.vr_state,
- 33 * sizeof(vector128));
+ unsafe_copy_to_user(v_regs, &tsk->thread.vr_state,
+ 33 * sizeof(vector128), efault_out);
/* set MSR_VEC in the MSR value in the frame to indicate that sc->v_reg)
* contains valid data.
*/
@@ -129,19 +149,12 @@ static long setup_sigcontext(struct sigcontext __user *sc,
/* We always copy to/from vrsave, it's 0 if we don't have or don't
* use altivec.
*/
- vrsave = 0;
- if (cpu_has_feature(CPU_FTR_ALTIVEC)) {
- vrsave = mfspr(SPRN_VRSAVE);
- tsk->thread.vrsave = vrsave;
- }
-
- err |= __put_user(vrsave, (u32 __user *)&v_regs[33]);
+ unsafe_put_user(tsk->thread.vrsave, (u32 __user *)&v_regs[33], efault_out);
#else /* CONFIG_ALTIVEC */
- err |= __put_user(0, &sc->v_regs);
+ unsafe_put_user(0, &sc->v_regs, efault_out);
#endif /* CONFIG_ALTIVEC */
- flush_fp_to_thread(tsk);
/* copy fpr regs and fpscr */
- err |= copy_fpr_to_user(&sc->fp_regs, tsk);
+ unsafe_copy_fpr_to_user(&sc->fp_regs, tsk, efault_out);
/*
* Clear the MSR VSX bit to indicate there is no valid state attached
@@ -155,26 +168,27 @@ static long setup_sigcontext(struct sigcontext __user *sc,
* VMX data.
*/
if (tsk->thread.used_vsr && ctx_has_vsx_region) {
- flush_vsx_to_thread(tsk);
v_regs += ELF_NVRREG;
- err |= copy_vsx_to_user(v_regs, tsk);
+ unsafe_copy_vsx_to_user(v_regs, tsk, efault_out);
/* set MSR_VSX in the MSR value in the frame to
* indicate that sc->vs_reg) contains valid data.
*/
msr |= MSR_VSX;
}
#endif /* CONFIG_VSX */
- err |= __put_user(&sc->gp_regs, &sc->regs);
- WARN_ON(!FULL_REGS(regs));
- err |= __copy_to_user(&sc->gp_regs, regs, GP_REGS_SIZE);
- err |= __put_user(msr, &sc->gp_regs[PT_MSR]);
- err |= __put_user(softe, &sc->gp_regs[PT_SOFTE]);
- err |= __put_user(signr, &sc->signal);
- err |= __put_user(handler, &sc->handler);
+ unsafe_put_user(&sc->gp_regs, &sc->regs, efault_out);
+ unsafe_copy_to_user(&sc->gp_regs, regs, GP_REGS_SIZE, efault_out);
+ unsafe_put_user(msr, &sc->gp_regs[PT_MSR], efault_out);
+ unsafe_put_user(softe, &sc->gp_regs[PT_SOFTE], efault_out);
+ unsafe_put_user(signr, &sc->signal, efault_out);
+ unsafe_put_user(handler, &sc->handler, efault_out);
if (set != NULL)
- err |= __put_user(set->sig[0], &sc->oldmask);
+ unsafe_put_user(set->sig[0], &sc->oldmask, efault_out);
- return err;
+ return 0;
+
+efault_out:
+ return -EFAULT;
}
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
@@ -299,7 +313,6 @@ static long setup_tm_sigcontexts(struct sigcontext __user *sc,
err |= __put_user(&sc->gp_regs, &sc->regs);
err |= __put_user(&tm_sc->gp_regs, &tm_sc->regs);
- WARN_ON(!FULL_REGS(regs));
err |= __copy_to_user(&tm_sc->gp_regs, regs, GP_REGS_SIZE);
err |= __copy_to_user(&sc->gp_regs,
&tsk->thread.ckpt_regs, GP_REGS_SIZE);
@@ -317,14 +330,16 @@ static long setup_tm_sigcontexts(struct sigcontext __user *sc,
/*
* Restore the sigcontext from the signal frame.
*/
-
-static long restore_sigcontext(struct task_struct *tsk, sigset_t *set, int sig,
- struct sigcontext __user *sc)
+#define unsafe_restore_sigcontext(tsk, set, sig, sc, label) do { \
+ if (__unsafe_restore_sigcontext(tsk, set, sig, sc)) \
+ goto label; \
+} while (0)
+static long notrace __unsafe_restore_sigcontext(struct task_struct *tsk, sigset_t *set,
+ int sig, struct sigcontext __user *sc)
{
#ifdef CONFIG_ALTIVEC
elf_vrreg_t __user *v_regs;
#endif
- unsigned long err = 0;
unsigned long save_r13 = 0;
unsigned long msr;
struct pt_regs *regs = tsk->thread.regs;
@@ -339,59 +354,60 @@ static long restore_sigcontext(struct task_struct *tsk, sigset_t *set, int sig,
save_r13 = regs->gpr[13];
/* copy the GPRs */
- err |= __copy_from_user(regs->gpr, sc->gp_regs, sizeof(regs->gpr));
- err |= __get_user(regs->nip, &sc->gp_regs[PT_NIP]);
+ unsafe_copy_from_user(regs->gpr, sc->gp_regs, sizeof(regs->gpr), efault_out);
+ unsafe_get_user(regs->nip, &sc->gp_regs[PT_NIP], efault_out);
/* get MSR separately, transfer the LE bit if doing signal return */
- err |= __get_user(msr, &sc->gp_regs[PT_MSR]);
+ unsafe_get_user(msr, &sc->gp_regs[PT_MSR], efault_out);
if (sig)
- regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);
- err |= __get_user(regs->orig_gpr3, &sc->gp_regs[PT_ORIG_R3]);
- err |= __get_user(regs->ctr, &sc->gp_regs[PT_CTR]);
- err |= __get_user(regs->link, &sc->gp_regs[PT_LNK]);
- err |= __get_user(regs->xer, &sc->gp_regs[PT_XER]);
- err |= __get_user(regs->ccr, &sc->gp_regs[PT_CCR]);
- /* skip SOFTE */
- regs->trap = 0;
- err |= __get_user(regs->dar, &sc->gp_regs[PT_DAR]);
- err |= __get_user(regs->dsisr, &sc->gp_regs[PT_DSISR]);
- err |= __get_user(regs->result, &sc->gp_regs[PT_RESULT]);
+ regs_set_return_msr(regs, (regs->msr & ~MSR_LE) | (msr & MSR_LE));
+ unsafe_get_user(regs->orig_gpr3, &sc->gp_regs[PT_ORIG_R3], efault_out);
+ unsafe_get_user(regs->ctr, &sc->gp_regs[PT_CTR], efault_out);
+ unsafe_get_user(regs->link, &sc->gp_regs[PT_LNK], efault_out);
+ unsafe_get_user(regs->xer, &sc->gp_regs[PT_XER], efault_out);
+ unsafe_get_user(regs->ccr, &sc->gp_regs[PT_CCR], efault_out);
+ /* Don't allow userspace to set SOFTE */
+ set_trap_norestart(regs);
+ unsafe_get_user(regs->dar, &sc->gp_regs[PT_DAR], efault_out);
+ unsafe_get_user(regs->dsisr, &sc->gp_regs[PT_DSISR], efault_out);
+ unsafe_get_user(regs->result, &sc->gp_regs[PT_RESULT], efault_out);
if (!sig)
regs->gpr[13] = save_r13;
if (set != NULL)
- err |= __get_user(set->sig[0], &sc->oldmask);
+ unsafe_get_user(set->sig[0], &sc->oldmask, efault_out);
/*
- * Force reload of FP/VEC.
- * This has to be done before copying stuff into tsk->thread.fpr/vr
- * for the reasons explained in the previous comment.
+ * Force reload of FP/VEC/VSX so userspace sees any changes.
+ * Clear these bits from the user process' MSR before copying into the
+ * thread struct. If we are rescheduled or preempted and another task
+ * uses FP/VEC/VSX, and this process has the MSR bits set, then the
+ * context switch code will save the current CPU state into the
+ * thread_struct - possibly overwriting the data we are updating here.
*/
- regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1 | MSR_VEC | MSR_VSX);
+ regs_set_return_msr(regs, regs->msr & ~(MSR_FP | MSR_FE0 | MSR_FE1 | MSR_VEC | MSR_VSX));
#ifdef CONFIG_ALTIVEC
- err |= __get_user(v_regs, &sc->v_regs);
- if (err)
- return err;
+ unsafe_get_user(v_regs, &sc->v_regs, efault_out);
if (v_regs && !access_ok(v_regs, 34 * sizeof(vector128)))
return -EFAULT;
/* Copy 33 vec registers (vr0..31 and vscr) from the stack */
if (v_regs != NULL && (msr & MSR_VEC) != 0) {
- err |= __copy_from_user(&tsk->thread.vr_state, v_regs,
- 33 * sizeof(vector128));
+ unsafe_copy_from_user(&tsk->thread.vr_state, v_regs,
+ 33 * sizeof(vector128), efault_out);
tsk->thread.used_vr = true;
} else if (tsk->thread.used_vr) {
memset(&tsk->thread.vr_state, 0, 33 * sizeof(vector128));
}
/* Always get VRSAVE back */
if (v_regs != NULL)
- err |= __get_user(tsk->thread.vrsave, (u32 __user *)&v_regs[33]);
+ unsafe_get_user(tsk->thread.vrsave, (u32 __user *)&v_regs[33], efault_out);
else
tsk->thread.vrsave = 0;
if (cpu_has_feature(CPU_FTR_ALTIVEC))
mtspr(SPRN_VRSAVE, tsk->thread.vrsave);
#endif /* CONFIG_ALTIVEC */
/* restore floating point */
- err |= copy_fpr_from_user(tsk, &sc->fp_regs);
+ unsafe_copy_fpr_from_user(tsk, &sc->fp_regs, efault_out);
#ifdef CONFIG_VSX
/*
* Get additional VSX data. Update v_regs to point after the
@@ -400,14 +416,17 @@ static long restore_sigcontext(struct task_struct *tsk, sigset_t *set, int sig,
*/
v_regs += ELF_NVRREG;
if ((msr & MSR_VSX) != 0) {
- err |= copy_vsx_from_user(tsk, v_regs);
+ unsafe_copy_vsx_from_user(tsk, v_regs, efault_out);
tsk->thread.used_vsr = true;
} else {
for (i = 0; i < 32 ; i++)
tsk->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
}
#endif
- return err;
+ return 0;
+
+efault_out:
+ return -EFAULT;
}
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
@@ -457,7 +476,7 @@ static long restore_tm_sigcontexts(struct task_struct *tsk,
return -EINVAL;
/* pull in MSR LE from user context */
- regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);
+ regs_set_return_msr(regs, (regs->msr & ~MSR_LE) | (msr & MSR_LE));
/* The following non-GPR non-FPR non-VR state is also checkpointed: */
err |= __get_user(regs->ctr, &tm_sc->gp_regs[PT_CTR]);
@@ -472,9 +491,9 @@ static long restore_tm_sigcontexts(struct task_struct *tsk,
&sc->gp_regs[PT_XER]);
err |= __get_user(tsk->thread.ckpt_regs.ccr,
&sc->gp_regs[PT_CCR]);
-
+ /* Don't allow userspace to set SOFTE */
+ set_trap_norestart(regs);
/* These regs are not checkpointed; they can go in 'regs'. */
- err |= __get_user(regs->trap, &sc->gp_regs[PT_TRAP]);
err |= __get_user(regs->dar, &sc->gp_regs[PT_DAR]);
err |= __get_user(regs->dsisr, &sc->gp_regs[PT_DSISR]);
err |= __get_user(regs->result, &sc->gp_regs[PT_RESULT]);
@@ -484,7 +503,7 @@ static long restore_tm_sigcontexts(struct task_struct *tsk,
* This has to be done before copying stuff into tsk->thread.fpr/vr
* for the reasons explained in the previous comment.
*/
- regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1 | MSR_VEC | MSR_VSX);
+ regs_set_return_msr(regs, regs->msr & ~(MSR_FP | MSR_FE0 | MSR_FE1 | MSR_VEC | MSR_VSX));
#ifdef CONFIG_ALTIVEC
err |= __get_user(v_regs, &sc->v_regs);
@@ -554,7 +573,7 @@ static long restore_tm_sigcontexts(struct task_struct *tsk,
preempt_disable();
/* pull in MSR TS bits from user context */
- regs->msr |= msr & MSR_TS_MASK;
+ regs_set_return_msr(regs, regs->msr | (msr & MSR_TS_MASK));
/*
* Ensure that TM is enabled in regs->msr before we leave the signal
@@ -572,7 +591,7 @@ static long restore_tm_sigcontexts(struct task_struct *tsk,
* to be de-scheduled with MSR[TS] set but without calling
* tm_recheckpoint(). This can cause a bug.
*/
- regs->msr |= MSR_TM;
+ regs_set_return_msr(regs, regs->msr | MSR_TM);
/* This loads the checkpointed FP/VEC state, if used */
tm_recheckpoint(&tsk->thread);
@@ -580,17 +599,23 @@ static long restore_tm_sigcontexts(struct task_struct *tsk,
msr_check_and_set(msr & (MSR_FP | MSR_VEC));
if (msr & MSR_FP) {
load_fp_state(&tsk->thread.fp_state);
- regs->msr |= (MSR_FP | tsk->thread.fpexc_mode);
+ regs_set_return_msr(regs, regs->msr | (MSR_FP | tsk->thread.fpexc_mode));
}
if (msr & MSR_VEC) {
load_vr_state(&tsk->thread.vr_state);
- regs->msr |= MSR_VEC;
+ regs_set_return_msr(regs, regs->msr | MSR_VEC);
}
preempt_enable();
return err;
}
+#else /* !CONFIG_PPC_TRANSACTIONAL_MEM */
+static long restore_tm_sigcontexts(struct task_struct *tsk, struct sigcontext __user *sc,
+ struct sigcontext __user *tm_sc)
+{
+ return -EINVAL;
+}
#endif
/*
@@ -601,13 +626,12 @@ static long setup_trampoline(unsigned int syscall, unsigned int __user *tramp)
int i;
long err = 0;
- /* addi r1, r1, __SIGNAL_FRAMESIZE # Pop the dummy stackframe */
- err |= __put_user(PPC_INST_ADDI | __PPC_RT(R1) | __PPC_RA(R1) |
- (__SIGNAL_FRAMESIZE & 0xffff), &tramp[0]);
- /* li r0, __NR_[rt_]sigreturn| */
- err |= __put_user(PPC_INST_ADDI | (syscall & 0xffff), &tramp[1]);
- /* sc */
- err |= __put_user(PPC_INST_SC, &tramp[2]);
+ /* Call the handler and pop the dummy stack frame */
+ err |= __put_user(PPC_RAW_BCTRL(), &tramp[0]);
+ err |= __put_user(PPC_RAW_ADDI(_R1, _R1, __SIGNAL_FRAMESIZE), &tramp[1]);
+
+ err |= __put_user(PPC_RAW_LI(_R0, syscall), &tramp[2]);
+ err |= __put_user(PPC_RAW_SC(), &tramp[3]);
/* Minimal traceback info */
for (i=TRAMP_TRACEBACK; i < TRAMP_SIZE ;i++)
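
With the added bctrl, the fallback trampoline grows to four instructions, which is why TRAMP_TRACEBACK moves from 3 to 4 and TRAMP_SIZE from 6 to 7: the handler is now entered with a call through the count register and returns to the addi, rather than being "returned" into, which keeps the hardware's link-register return-predictor stack balanced. The resulting layout of tramp[] (a sketch of what the hunk above emits; the loop that follows fills the remaining words with minimal traceback info):

/* tramp[0]: bctrl                            call the handler (address in CTR)  */
/* tramp[1]: addi r1, r1, __SIGNAL_FRAMESIZE  pop the dummy stack frame           */
/* tramp[2]: li   r0, __NR_[rt_]sigreturn     syscall number for the return path  */
/* tramp[3]: sc                               trap back into the kernel           */
/* tramp[4..6]: minimal traceback info (TRAMP_TRACEBACK = 4, TRAMP_SIZE = 7)      */
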
@@ -633,7 +657,6 @@ static long setup_trampoline(unsigned int syscall, unsigned int __user *tramp)
SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx,
struct ucontext __user *, new_ctx, long, ctx_size)
{
- unsigned char tmp;
sigset_t set;
unsigned long new_msr = 0;
int ctx_has_vsx_region = 0;
@@ -659,18 +682,21 @@ SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx,
ctx_has_vsx_region = 1;
if (old_ctx != NULL) {
- if (!access_ok(old_ctx, ctx_size)
- || setup_sigcontext(&old_ctx->uc_mcontext, current, 0, NULL, 0,
- ctx_has_vsx_region)
- || __copy_to_user(&old_ctx->uc_sigmask,
- &current->blocked, sizeof(sigset_t)))
+ prepare_setup_sigcontext(current);
+ if (!user_write_access_begin(old_ctx, ctx_size))
return -EFAULT;
+
+ unsafe_setup_sigcontext(&old_ctx->uc_mcontext, current, 0, NULL,
+ 0, ctx_has_vsx_region, efault_out);
+ unsafe_copy_to_user(&old_ctx->uc_sigmask, &current->blocked,
+ sizeof(sigset_t), efault_out);
+
+ user_write_access_end();
}
if (new_ctx == NULL)
return 0;
- if (!access_ok(new_ctx, ctx_size)
- || __get_user(tmp, (u8 __user *) new_ctx)
- || __get_user(tmp, (u8 __user *) new_ctx + ctx_size - 1))
+ if (!access_ok(new_ctx, ctx_size) ||
+ fault_in_readable((char __user *)new_ctx, ctx_size))
return -EFAULT;
/*
@@ -685,15 +711,29 @@ SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx,
* We kill the task with a SIGSEGV in this situation.
*/
- if (__copy_from_user(&set, &new_ctx->uc_sigmask, sizeof(set)))
- do_exit(SIGSEGV);
+ if (__get_user_sigset(&set, &new_ctx->uc_sigmask)) {
+ force_exit_sig(SIGSEGV);
+ return -EFAULT;
+ }
set_current_blocked(&set);
- if (restore_sigcontext(current, NULL, 0, &new_ctx->uc_mcontext))
- do_exit(SIGSEGV);
+
+ if (!user_read_access_begin(new_ctx, ctx_size))
+ return -EFAULT;
+ if (__unsafe_restore_sigcontext(current, NULL, 0, &new_ctx->uc_mcontext)) {
+ user_read_access_end();
+ force_exit_sig(SIGSEGV);
+ return -EFAULT;
+ }
+ user_read_access_end();
/* This returns like rt_sigreturn */
set_thread_flag(TIF_RESTOREALL);
+
return 0;
+
+efault_out:
+ user_write_access_end();
+ return -EFAULT;
}
@@ -706,9 +746,7 @@ SYSCALL_DEFINE0(rt_sigreturn)
struct pt_regs *regs = current_pt_regs();
struct ucontext __user *uc = (struct ucontext __user *)regs->gpr[1];
sigset_t set;
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
unsigned long msr;
-#endif
/* Always make any pending restarted system calls return -EINTR */
current->restart_block.fn = do_no_restart_syscall;
@@ -716,52 +754,54 @@ SYSCALL_DEFINE0(rt_sigreturn)
if (!access_ok(uc, sizeof(*uc)))
goto badframe;
- if (__copy_from_user(&set, &uc->uc_sigmask, sizeof(set)))
+ if (__get_user_sigset(&set, &uc->uc_sigmask))
goto badframe;
set_current_blocked(&set);
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
- /*
- * If there is a transactional state then throw it away.
- * The purpose of a sigreturn is to destroy all traces of the
- * signal frame, this includes any transactional state created
- * within in. We only check for suspended as we can never be
- * active in the kernel, we are active, there is nothing better to
- * do than go ahead and Bad Thing later.
- * The cause is not important as there will never be a
- * recheckpoint so it's not user visible.
- */
- if (MSR_TM_SUSPENDED(mfmsr()))
- tm_reclaim_current(0);
+ if (IS_ENABLED(CONFIG_PPC_TRANSACTIONAL_MEM)) {
+ /*
+ * If there is a transactional state then throw it away.
+ * The purpose of a sigreturn is to destroy all traces of the
+ * signal frame, this includes any transactional state created
+ * within in. We only check for suspended as we can never be
+ * active in the kernel, we are active, there is nothing better to
+ * do than go ahead and Bad Thing later.
+ * The cause is not important as there will never be a
+ * recheckpoint so it's not user visible.
+ */
+ if (MSR_TM_SUSPENDED(mfmsr()))
+ tm_reclaim_current(0);
- /*
- * Disable MSR[TS] bit also, so, if there is an exception in the
- * code below (as a page fault in copy_ckvsx_to_user()), it does
- * not recheckpoint this task if there was a context switch inside
- * the exception.
- *
- * A major page fault can indirectly call schedule(). A reschedule
- * process in the middle of an exception can have a side effect
- * (Changing the CPU MSR[TS] state), since schedule() is called
- * with the CPU MSR[TS] disable and returns with MSR[TS]=Suspended
- * (switch_to() calls tm_recheckpoint() for the 'new' process). In
- * this case, the process continues to be the same in the CPU, but
- * the CPU state just changed.
- *
- * This can cause a TM Bad Thing, since the MSR in the stack will
- * have the MSR[TS]=0, and this is what will be used to RFID.
- *
- * Clearing MSR[TS] state here will avoid a recheckpoint if there
- * is any process reschedule in kernel space. The MSR[TS] state
- * does not need to be saved also, since it will be replaced with
- * the MSR[TS] that came from user context later, at
- * restore_tm_sigcontexts.
- */
- regs->msr &= ~MSR_TS_MASK;
+ /*
+ * Disable MSR[TS] bit also, so, if there is an exception in the
+ * code below (as a page fault in copy_ckvsx_to_user()), it does
+ * not recheckpoint this task if there was a context switch inside
+ * the exception.
+ *
+ * A major page fault can indirectly call schedule(). A reschedule
+ * process in the middle of an exception can have a side effect
+ * (Changing the CPU MSR[TS] state), since schedule() is called
+ * with the CPU MSR[TS] disable and returns with MSR[TS]=Suspended
+ * (switch_to() calls tm_recheckpoint() for the 'new' process). In
+ * this case, the process continues to be the same in the CPU, but
+ * the CPU state just changed.
+ *
+ * This can cause a TM Bad Thing, since the MSR in the stack will
+ * have the MSR[TS]=0, and this is what will be used to RFID.
+ *
+ * Clearing MSR[TS] state here will avoid a recheckpoint if there
+ * is any process reschedule in kernel space. The MSR[TS] state
+ * does not need to be saved also, since it will be replaced with
+ * the MSR[TS] that came from user context later, at
+ * restore_tm_sigcontexts.
+ */
+ regs_set_return_msr(regs, regs->msr & ~MSR_TS_MASK);
- if (__get_user(msr, &uc->uc_mcontext.gp_regs[PT_MSR]))
- goto badframe;
- if (MSR_TM_ACTIVE(msr)) {
+ if (__get_user(msr, &uc->uc_mcontext.gp_regs[PT_MSR]))
+ goto badframe;
+ }
+
+ if (IS_ENABLED(CONFIG_PPC_TRANSACTIONAL_MEM) && MSR_TM_ACTIVE(msr)) {
/* We recheckpoint on return. */
struct ucontext __user *uc_transact;
@@ -774,9 +814,7 @@ SYSCALL_DEFINE0(rt_sigreturn)
if (restore_tm_sigcontexts(current, &uc->uc_mcontext,
&uc_transact->uc_mcontext))
goto badframe;
- } else
-#endif
- {
+ } else {
/*
* Fall through, for non-TM restore
*
@@ -789,22 +827,28 @@ SYSCALL_DEFINE0(rt_sigreturn)
* MSR[TS] set, but without CPU in the proper state,
* causing a TM bad thing.
*/
- current->thread.regs->msr &= ~MSR_TS_MASK;
- if (restore_sigcontext(current, NULL, 1, &uc->uc_mcontext))
+ regs_set_return_msr(current->thread.regs,
+ current->thread.regs->msr & ~MSR_TS_MASK);
+ if (!user_read_access_begin(&uc->uc_mcontext, sizeof(uc->uc_mcontext)))
goto badframe;
+
+ unsafe_restore_sigcontext(current, NULL, 1, &uc->uc_mcontext,
+ badframe_block);
+
+ user_read_access_end();
}
if (restore_altstack(&uc->uc_stack))
goto badframe;
set_thread_flag(TIF_RESTOREALL);
+
return 0;
+badframe_block:
+ user_read_access_end();
badframe:
- if (show_unhandled_signals)
- printk_ratelimited(regs->msr & MSR_64BIT ? fmt64 : fmt32,
- current->comm, current->pid, "rt_sigreturn",
- (long)uc, regs->nip, regs->link);
+ signal_fault(current, regs, "rt_sigreturn", uc);
force_sig(SIGSEGV);
return 0;
@@ -817,60 +861,73 @@ int handle_rt_signal64(struct ksignal *ksig, sigset_t *set,
unsigned long newsp = 0;
long err = 0;
struct pt_regs *regs = tsk->thread.regs;
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/* Save the thread's msr before get_tm_stackpointer() changes it */
unsigned long msr = regs->msr;
-#endif
- BUG_ON(tsk != current);
+ frame = get_sigframe(ksig, tsk, sizeof(*frame), 0);
- frame = get_sigframe(ksig, get_tm_stackpointer(tsk), sizeof(*frame), 0);
- if (unlikely(frame == NULL))
- goto badframe;
+ /*
+ * This only applies when calling unsafe_setup_sigcontext() and must be
+ * called before opening the uaccess window.
+ */
+ if (!MSR_TM_ACTIVE(msr))
+ prepare_setup_sigcontext(tsk);
- err |= __put_user(&frame->info, &frame->pinfo);
- err |= __put_user(&frame->uc, &frame->puc);
- err |= copy_siginfo_to_user(&frame->info, &ksig->info);
- if (err)
+ if (!user_write_access_begin(frame, sizeof(*frame)))
goto badframe;
+ unsafe_put_user(&frame->info, &frame->pinfo, badframe_block);
+ unsafe_put_user(&frame->uc, &frame->puc, badframe_block);
+
/* Create the ucontext. */
- err |= __put_user(0, &frame->uc.uc_flags);
- err |= __save_altstack(&frame->uc.uc_stack, regs->gpr[1]);
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ unsafe_put_user(0, &frame->uc.uc_flags, badframe_block);
+ unsafe_save_altstack(&frame->uc.uc_stack, regs->gpr[1], badframe_block);
+
if (MSR_TM_ACTIVE(msr)) {
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/* The ucontext_t passed to userland points to the second
* ucontext_t (for transactional state) with its uc_link ptr.
*/
- err |= __put_user(&frame->uc_transact, &frame->uc.uc_link);
+ unsafe_put_user(&frame->uc_transact, &frame->uc.uc_link, badframe_block);
+
+ user_write_access_end();
+
err |= setup_tm_sigcontexts(&frame->uc.uc_mcontext,
&frame->uc_transact.uc_mcontext,
tsk, ksig->sig, NULL,
(unsigned long)ksig->ka.sa.sa_handler,
msr);
- } else
+
+ if (!user_write_access_begin(&frame->uc.uc_sigmask,
+ sizeof(frame->uc.uc_sigmask)))
+ goto badframe;
+
#endif
- {
- err |= __put_user(0, &frame->uc.uc_link);
- err |= setup_sigcontext(&frame->uc.uc_mcontext, tsk, ksig->sig,
+ } else {
+ unsafe_put_user(0, &frame->uc.uc_link, badframe_block);
+ unsafe_setup_sigcontext(&frame->uc.uc_mcontext, tsk, ksig->sig,
NULL, (unsigned long)ksig->ka.sa.sa_handler,
- 1);
+ 1, badframe_block);
}
- err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
- if (err)
+
+ unsafe_copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set), badframe_block);
+ user_write_access_end();
+
+ /* Save the siginfo outside of the unsafe block. */
+ if (copy_siginfo_to_user(&frame->info, &ksig->info))
goto badframe;
/* Make sure signal handler doesn't get spurious FP exceptions */
tsk->thread.fp_state.fpscr = 0;
/* Set up to return from userspace. */
- if (vdso64_rt_sigtramp && tsk->mm->context.vdso_base) {
- regs->link = tsk->mm->context.vdso_base + vdso64_rt_sigtramp;
+ if (tsk->mm->context.vdso) {
+ regs_set_return_ip(regs, VDSO64_SYMBOL(tsk->mm->context.vdso, sigtramp_rt64));
} else {
err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
if (err)
goto badframe;
- regs->link = (unsigned long) &frame->tramp[0];
+ regs_set_return_ip(regs, (unsigned long) &frame->tramp[0]);
}
/* Allocate a dummy caller frame for the signal handler. */
@@ -879,30 +936,29 @@ int handle_rt_signal64(struct ksignal *ksig, sigset_t *set,
/* Set up "regs" so we "return" to the signal handler. */
if (is_elf2_task()) {
- regs->nip = (unsigned long) ksig->ka.sa.sa_handler;
- regs->gpr[12] = regs->nip;
+ regs->ctr = (unsigned long) ksig->ka.sa.sa_handler;
+ regs->gpr[12] = regs->ctr;
} else {
/* Handler is *really* a pointer to the function descriptor for
* the signal routine. The first entry in the function
* descriptor is the entry address of signal and the second
* entry is the TOC value we need to use.
*/
- func_descr_t __user *funct_desc_ptr =
- (func_descr_t __user *) ksig->ka.sa.sa_handler;
+ struct func_desc __user *ptr =
+ (struct func_desc __user *)ksig->ka.sa.sa_handler;
- err |= get_user(regs->nip, &funct_desc_ptr->entry);
- err |= get_user(regs->gpr[2], &funct_desc_ptr->toc);
+ err |= get_user(regs->ctr, &ptr->addr);
+ err |= get_user(regs->gpr[2], &ptr->toc);
}
/* enter the signal handler in native-endian mode */
- regs->msr &= ~MSR_LE;
- regs->msr |= (MSR_KERNEL & MSR_LE);
+ regs_set_return_msr(regs, (regs->msr & ~MSR_LE) | (MSR_KERNEL & MSR_LE));
regs->gpr[1] = newsp;
regs->gpr[3] = ksig->sig;
regs->result = 0;
if (ksig->ka.sa.sa_flags & SA_SIGINFO) {
- err |= get_user(regs->gpr[4], (unsigned long __user *)&frame->pinfo);
- err |= get_user(regs->gpr[5], (unsigned long __user *)&frame->puc);
+ regs->gpr[4] = (unsigned long)&frame->info;
+ regs->gpr[5] = (unsigned long)&frame->uc;
regs->gpr[6] = (unsigned long) frame;
} else {
regs->gpr[4] = (unsigned long)&frame->uc.uc_mcontext;
@@ -912,11 +968,10 @@ int handle_rt_signal64(struct ksignal *ksig, sigset_t *set,
return 0;
+badframe_block:
+ user_write_access_end();
badframe:
- if (show_unhandled_signals)
- printk_ratelimited(regs->msr & MSR_64BIT ? fmt64 : fmt32,
- tsk->comm, tsk->pid, "setup_rt_frame",
- (long)frame, regs->nip, regs->link);
+ signal_fault(current, regs, "handle_rt_signal64", frame);
return 1;
}
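
The last hunk also changes how the handler address is installed: instead of being written into regs->nip, the handler goes into the count register (and into r12 for ELFv2, which expects the entry address there for TOC setup), while the return IP points at the trampoline whose bctrl performs the actual call. For ELFv1 the sa_handler value is not code at all but a function descriptor, so the entry point and TOC value are read from user memory. A sketch of that descriptor layout and of what the ELFv1 branch does (the struct is redeclared here purely for illustration, and the helper name is hypothetical):

struct func_desc {			/* ELFv1 function descriptor, lives in the data segment */
	unsigned long addr;		/* entry point of the function's text                   */
	unsigned long toc;		/* TOC base the callee expects in r2                     */
	unsigned long env;		/* environment pointer, unused for signal handlers      */
};

static int set_elfv1_handler(struct pt_regs *regs, void __user *handler)
{
	struct func_desc __user *fd = handler;

	if (get_user(regs->ctr, &fd->addr) ||	/* reached via bctrl in the trampoline */
	    get_user(regs->gpr[2], &fd->toc))	/* callee's TOC goes in r2             */
		return -EFAULT;
	return 0;
}
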
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index ea6adbf6a221..0da6e59161cd 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -33,6 +33,9 @@
#include <linux/processor.h>
#include <linux/random.h>
#include <linux/stackprotector.h>
+#include <linux/pgtable.h>
+#include <linux/clockchips.h>
+#include <linux/kexec.h>
#include <asm/ptrace.h>
#include <linux/atomic.h>
@@ -41,8 +44,6 @@
#include <asm/kvm_ppc.h>
#include <asm/dbell.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
-#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/time.h>
#include <asm/machdep.h>
@@ -55,10 +56,10 @@
#endif
#include <asm/vdso.h>
#include <asm/debug.h>
-#include <asm/kexec.h>
-#include <asm/asm-prototypes.h>
#include <asm/cpu_has_feature.h>
#include <asm/ftrace.h>
+#include <asm/kup.h>
+#include <asm/fadump.h>
#ifdef DEBUG
#include <asm/udbg.h>
@@ -74,19 +75,33 @@ static DEFINE_PER_CPU(int, cpu_state) = { 0 };
struct task_struct *secondary_current;
bool has_big_cores;
+bool coregroup_enabled;
+bool thread_group_shares_l2;
+bool thread_group_shares_l3;
DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map);
DEFINE_PER_CPU(cpumask_var_t, cpu_smallcore_map);
DEFINE_PER_CPU(cpumask_var_t, cpu_l2_cache_map);
DEFINE_PER_CPU(cpumask_var_t, cpu_core_map);
+static DEFINE_PER_CPU(cpumask_var_t, cpu_coregroup_map);
EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
EXPORT_PER_CPU_SYMBOL(cpu_l2_cache_map);
EXPORT_PER_CPU_SYMBOL(cpu_core_map);
EXPORT_SYMBOL_GPL(has_big_cores);
+enum {
+#ifdef CONFIG_SCHED_SMT
+ smt_idx,
+#endif
+ cache_idx,
+ mc_idx,
+ die_idx,
+};
+
#define MAX_THREAD_LIST_SIZE 8
#define THREAD_GROUP_SHARE_L1 1
+#define THREAD_GROUP_SHARE_L2_L3 2
struct thread_groups {
unsigned int property;
unsigned int nr_groups;
@@ -94,11 +109,33 @@ struct thread_groups {
unsigned int thread_list[MAX_THREAD_LIST_SIZE];
};
+/* Maximum number of properties that groups of threads within a core can share */
+#define MAX_THREAD_GROUP_PROPERTIES 2
+
+struct thread_groups_list {
+ unsigned int nr_properties;
+ struct thread_groups property_tgs[MAX_THREAD_GROUP_PROPERTIES];
+};
+
+static struct thread_groups_list tgl[NR_CPUS] __initdata;
/*
- * On big-cores system, cpu_l1_cache_map for each CPU corresponds to
+ * On big-cores system, thread_group_l1_cache_map for each CPU corresponds to
* the set its siblings that share the L1-cache.
*/
-DEFINE_PER_CPU(cpumask_var_t, cpu_l1_cache_map);
+DEFINE_PER_CPU(cpumask_var_t, thread_group_l1_cache_map);
+
+/*
+ * On some big-cores system, thread_group_l2_cache_map for each CPU
+ * corresponds to the set its siblings within the core that share the
+ * L2-cache.
+ */
+DEFINE_PER_CPU(cpumask_var_t, thread_group_l2_cache_map);
+
+/*
+ * On P10, thread_group_l3_cache_map for each CPU is equal to the
+ * thread_group_l2_cache_map
+ */
+DEFINE_PER_CPU(cpumask_var_t, thread_group_l3_cache_map);
/* SMP operations for this machine */
struct smp_ops_t *smp_ops;
@@ -374,32 +411,32 @@ static struct cpumask nmi_ipi_pending_mask;
static bool nmi_ipi_busy = false;
static void (*nmi_ipi_function)(struct pt_regs *) = NULL;
-static void nmi_ipi_lock_start(unsigned long *flags)
+noinstr static void nmi_ipi_lock_start(unsigned long *flags)
{
raw_local_irq_save(*flags);
hard_irq_disable();
- while (atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1) {
+ while (arch_atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1) {
raw_local_irq_restore(*flags);
- spin_until_cond(atomic_read(&__nmi_ipi_lock) == 0);
+ spin_until_cond(arch_atomic_read(&__nmi_ipi_lock) == 0);
raw_local_irq_save(*flags);
hard_irq_disable();
}
}
-static void nmi_ipi_lock(void)
+noinstr static void nmi_ipi_lock(void)
{
- while (atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1)
- spin_until_cond(atomic_read(&__nmi_ipi_lock) == 0);
+ while (arch_atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1)
+ spin_until_cond(arch_atomic_read(&__nmi_ipi_lock) == 0);
}
-static void nmi_ipi_unlock(void)
+noinstr static void nmi_ipi_unlock(void)
{
smp_mb();
- WARN_ON(atomic_read(&__nmi_ipi_lock) != 1);
- atomic_set(&__nmi_ipi_lock, 0);
+ WARN_ON(arch_atomic_read(&__nmi_ipi_lock) != 1);
+ arch_atomic_set(&__nmi_ipi_lock, 0);
}
-static void nmi_ipi_unlock_end(unsigned long *flags)
+noinstr static void nmi_ipi_unlock_end(unsigned long *flags)
{
nmi_ipi_unlock();
raw_local_irq_restore(*flags);
@@ -408,7 +445,7 @@ static void nmi_ipi_unlock_end(unsigned long *flags)
/*
* Platform NMI handler calls this to ack
*/
-int smp_handle_nmi_ipi(struct pt_regs *regs)
+noinstr int smp_handle_nmi_ipi(struct pt_regs *regs)
{
void (*fn)(struct pt_regs *) = NULL;
unsigned long flags;
@@ -546,7 +583,7 @@ void tick_broadcast(const struct cpumask *mask)
#endif
#ifdef CONFIG_DEBUGGER
-void debugger_ipi_callback(struct pt_regs *regs)
+static void debugger_ipi_callback(struct pt_regs *regs)
{
debugger_ipi(regs);
}
@@ -582,12 +619,42 @@ void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
}
#endif
+void crash_smp_send_stop(void)
+{
+ static bool stopped = false;
+
+ /*
+ * In case of fadump, register data for all CPUs is captured by f/w
+ * on ibm,os-term rtas call. Skip IPI callbacks to other CPUs before
+ * this rtas call to avoid tricky post processing of those CPUs'
+ * backtraces.
+ */
+ if (should_fadump_crash())
+ return;
+
+ if (stopped)
+ return;
+
+ stopped = true;
+
+#ifdef CONFIG_KEXEC_CORE
+ if (kexec_crash_image) {
+ crash_kexec_prepare();
+ return;
+ }
+#endif
+
+ smp_send_stop();
+}
+
#ifdef CONFIG_NMI_IPI
static void nmi_stop_this_cpu(struct pt_regs *regs)
{
/*
* IRQs are already hard disabled by the smp_handle_nmi_ipi.
*/
+ set_cpu_online(smp_processor_id(), false);
+
spin_begin();
while (1)
spin_cpu_relax();
@@ -603,6 +670,15 @@ void smp_send_stop(void)
static void stop_this_cpu(void *dummy)
{
hard_irq_disable();
+
+ /*
+ * Offlining CPUs in stop_this_cpu can result in scheduler warnings,
+ * (see commit de6e5d38417e), but printk_safe_flush_on_panic() wants
+ * to know other CPUs are offline before it breaks locks to flush
+ * printk buffers, in case we panic()ed while holding the lock.
+ */
+ set_cpu_online(smp_processor_id(), false);
+
spin_begin();
while (1)
spin_cpu_relax();
@@ -627,12 +703,12 @@ void smp_send_stop(void)
}
#endif /* CONFIG_NMI_IPI */
-struct task_struct *current_set[NR_CPUS];
+static struct task_struct *current_set[NR_CPUS];
static void smp_store_cpu_info(int id)
{
per_cpu(cpu_pvr, id) = mfspr(SPRN_PVR);
-#ifdef CONFIG_PPC_FSL_BOOK3E
+#ifdef CONFIG_PPC_E500
per_cpu(next_tlbcam_idx, id)
= (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
#endif
@@ -659,83 +735,124 @@ static void set_cpus_unrelated(int i, int j,
#endif
/*
+ * Extends set_cpus_related. Instead of setting one CPU at a time in
+ * dstmask, OR in the whole srcmask in one shot. dstmask should be a superset of srcmask.
+ */
+static void or_cpumasks_related(int i, int j, struct cpumask *(*srcmask)(int),
+ struct cpumask *(*dstmask)(int))
+{
+ struct cpumask *mask;
+ int k;
+
+ mask = srcmask(j);
+ for_each_cpu(k, srcmask(i))
+ cpumask_or(dstmask(k), dstmask(k), mask);
+
+ if (i == j)
+ return;
+
+ mask = srcmask(i);
+ for_each_cpu(k, srcmask(j))
+ cpumask_or(dstmask(k), dstmask(k), mask);
+}
+
+/*
* parse_thread_groups: Parses the "ibm,thread-groups" device tree
* property for the CPU device node @dn and stores
- * the parsed output in the thread_groups
- * structure @tg if the ibm,thread-groups[0]
- * matches @property.
+ * the parsed output in the thread_groups_list
+ * structure @tglp.
*
* @dn: The device node of the CPU device.
- * @tg: Pointer to a thread group structure into which the parsed
+ * @tglp: Pointer to a thread group list structure into which the parsed
* output of "ibm,thread-groups" is stored.
- * @property: The property of the thread-group that the caller is
- * interested in.
*
* ibm,thread-groups[0..N-1] array defines which group of threads in
* the CPU-device node can be grouped together based on the property.
*
- * ibm,thread-groups[0] tells us the property based on which the
+ * This array can represent thread groupings for multiple properties.
+ *
+ * ibm,thread-groups[i + 0] tells us the property based on which the
* threads are being grouped together. If this value is 1, it implies
- * that the threads in the same group share L1, translation cache.
+ * that the threads in the same group share L1, translation cache. If
+ * the value is 2, it implies that the threads in the same group share
+ * the same L2 cache.
*
- * ibm,thread-groups[1] tells us how many such thread groups exist.
+ * ibm,thread-groups[i+1] tells us how many such thread groups exist for the
+ * property ibm,thread-groups[i]
*
- * ibm,thread-groups[2] tells us the number of threads in each such
+ * ibm,thread-groups[i+2] tells us the number of threads in each such
* group.
+ * Suppose k = (ibm,thread-groups[i+1] * ibm,thread-groups[i+2]), then,
*
- * ibm,thread-groups[3..N-1] is the list of threads identified by
+ * ibm,thread-groups[i+3..i+k+2] is the list of threads identified by
* "ibm,ppc-interrupt-server#s" arranged as per their membership in
* the grouping.
*
- * Example: If ibm,thread-groups = [1,2,4,5,6,7,8,9,10,11,12] it
- * implies that there are 2 groups of 4 threads each, where each group
- * of threads share L1, translation cache.
+ * Example:
+ * If "ibm,thread-groups" = [1,2,4,8,10,12,14,9,11,13,15,2,2,4,8,10,12,14,9,11,13,15]
+ * This can be decomposed up into two consecutive arrays:
+ * a) [1,2,4,8,10,12,14,9,11,13,15]
+ * b) [2,2,4,8,10,12,14,9,11,13,15]
*
- * The "ibm,ppc-interrupt-server#s" of the first group is {5,6,7,8}
- * and the "ibm,ppc-interrupt-server#s" of the second group is {9, 10,
- * 11, 12} structure
+ * where in,
+ *
+ * a) provides information of Property "1" being shared by "2" groups,
+ * each with "4" threads each. The "ibm,ppc-interrupt-server#s" of
+ * the first group is {8,10,12,14} and the
+ * "ibm,ppc-interrupt-server#s" of the second group is
+ * {9,11,13,15}. Property "1" is indicative of the thread in the
+ * group sharing L1 cache, translation cache and Instruction Data
+ * flow.
+ *
+ * b) provides information of Property "2" being shared by "2" groups,
+ * each group with "4" threads. The "ibm,ppc-interrupt-server#s" of
+ * the first group is {8,10,12,14} and the
+ * "ibm,ppc-interrupt-server#s" of the second group is
+ * {9,11,13,15}. Property "2" indicates that the threads in each
+ * group share the L2-cache.
*
* Returns 0 on success, -EINVAL if the property does not exist,
* -ENODATA if property does not have a value, and -EOVERFLOW if the
* property data isn't large enough.
*/
static int parse_thread_groups(struct device_node *dn,
- struct thread_groups *tg,
- unsigned int property)
+ struct thread_groups_list *tglp)
{
- int i;
- u32 thread_group_array[3 + MAX_THREAD_LIST_SIZE];
- u32 *thread_list;
+ unsigned int property_idx = 0;
+ u32 *thread_group_array;
size_t total_threads;
- int ret;
+ int ret = 0, count;
+ u32 *thread_list;
+ int i = 0;
+ count = of_property_count_u32_elems(dn, "ibm,thread-groups");
+ thread_group_array = kcalloc(count, sizeof(u32), GFP_KERNEL);
ret = of_property_read_u32_array(dn, "ibm,thread-groups",
- thread_group_array, 3);
+ thread_group_array, count);
if (ret)
- return ret;
-
- tg->property = thread_group_array[0];
- tg->nr_groups = thread_group_array[1];
- tg->threads_per_group = thread_group_array[2];
- if (tg->property != property ||
- tg->nr_groups < 1 ||
- tg->threads_per_group < 1)
- return -ENODATA;
+ goto out_free;
- total_threads = tg->nr_groups * tg->threads_per_group;
+ while (i < count && property_idx < MAX_THREAD_GROUP_PROPERTIES) {
+ int j;
+ struct thread_groups *tg = &tglp->property_tgs[property_idx++];
- ret = of_property_read_u32_array(dn, "ibm,thread-groups",
- thread_group_array,
- 3 + total_threads);
- if (ret)
- return ret;
+ tg->property = thread_group_array[i];
+ tg->nr_groups = thread_group_array[i + 1];
+ tg->threads_per_group = thread_group_array[i + 2];
+ total_threads = tg->nr_groups * tg->threads_per_group;
- thread_list = &thread_group_array[3];
+ thread_list = &thread_group_array[i + 3];
- for (i = 0 ; i < total_threads; i++)
- tg->thread_list[i] = thread_list[i];
+ for (j = 0; j < total_threads; j++)
+ tg->thread_list[j] = thread_list[j];
+ i = i + 3 + total_threads;
+ }
- return 0;
+ tglp->nr_properties = property_idx;
+
+out_free:
+ kfree(thread_group_array);
+ return ret;
}
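For reference, the decomposition described in the comment above can be checked with a small standalone sketch (illustration only, plain userspace C, not part of the patch); it walks the example array exactly the way the parsing loop above does:

	#include <stdio.h>

	int main(void)
	{
		unsigned int tg[] = { 1, 2, 4, 8, 10, 12, 14, 9, 11, 13, 15,
				      2, 2, 4, 8, 10, 12, 14, 9, 11, 13, 15 };
		int count = sizeof(tg) / sizeof(tg[0]);
		int i = 0;

		while (i < count) {
			unsigned int property = tg[i];
			unsigned int nr_groups = tg[i + 1];
			unsigned int threads_per_group = tg[i + 2];

			printf("property %u: %u groups of %u threads\n",
			       property, nr_groups, threads_per_group);
			/* skip the nr_groups * threads_per_group thread ids */
			i += 3 + nr_groups * threads_per_group;
		}
		return 0;
	}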
/*
@@ -746,7 +863,7 @@ static int parse_thread_groups(struct device_node *dn,
* @tg : The thread-group structure of the CPU node which @cpu belongs
* to.
*
- * Returns the index to tg->thread_list that points to the the start
+ * Returns the index to tg->thread_list that points to the start
* of the thread_group that @cpu belongs to.
*
* Returns -1 if cpu doesn't belong to any of the groups pointed to by
@@ -771,59 +888,176 @@ static int get_cpu_thread_group_start(int cpu, struct thread_groups *tg)
return -1;
}
-static int init_cpu_l1_cache_map(int cpu)
-
+static struct thread_groups *__init get_thread_groups(int cpu,
+ int group_property,
+ int *err)
{
struct device_node *dn = of_get_cpu_node(cpu, NULL);
- struct thread_groups tg = {.property = 0,
- .nr_groups = 0,
- .threads_per_group = 0};
- int first_thread = cpu_first_thread_sibling(cpu);
- int i, cpu_group_start = -1, err = 0;
+ struct thread_groups_list *cpu_tgl = &tgl[cpu];
+ struct thread_groups *tg = NULL;
+ int i;
+ *err = 0;
- if (!dn)
- return -ENODATA;
+ if (!dn) {
+ *err = -ENODATA;
+ return NULL;
+ }
- err = parse_thread_groups(dn, &tg, THREAD_GROUP_SHARE_L1);
- if (err)
- goto out;
+ if (!cpu_tgl->nr_properties) {
+ *err = parse_thread_groups(dn, cpu_tgl);
+ if (*err)
+ goto out;
+ }
+
+ for (i = 0; i < cpu_tgl->nr_properties; i++) {
+ if (cpu_tgl->property_tgs[i].property == group_property) {
+ tg = &cpu_tgl->property_tgs[i];
+ break;
+ }
+ }
- zalloc_cpumask_var_node(&per_cpu(cpu_l1_cache_map, cpu),
- GFP_KERNEL,
- cpu_to_node(cpu));
+ if (!tg)
+ *err = -EINVAL;
+out:
+ of_node_put(dn);
+ return tg;
+}
- cpu_group_start = get_cpu_thread_group_start(cpu, &tg);
+static int __init update_mask_from_threadgroup(cpumask_var_t *mask, struct thread_groups *tg,
+ int cpu, int cpu_group_start)
+{
+ int first_thread = cpu_first_thread_sibling(cpu);
+ int i;
- if (unlikely(cpu_group_start == -1)) {
- WARN_ON_ONCE(1);
- err = -ENODATA;
- goto out;
- }
+ zalloc_cpumask_var_node(mask, GFP_KERNEL, cpu_to_node(cpu));
for (i = first_thread; i < first_thread + threads_per_core; i++) {
- int i_group_start = get_cpu_thread_group_start(i, &tg);
+ int i_group_start = get_cpu_thread_group_start(i, tg);
if (unlikely(i_group_start == -1)) {
WARN_ON_ONCE(1);
- err = -ENODATA;
- goto out;
+ return -ENODATA;
}
if (i_group_start == cpu_group_start)
- cpumask_set_cpu(i, per_cpu(cpu_l1_cache_map, cpu));
+ cpumask_set_cpu(i, *mask);
}
-out:
- of_node_put(dn);
- return err;
+ return 0;
+}
+
+static int __init init_thread_group_cache_map(int cpu, int cache_property)
+
+{
+ int cpu_group_start = -1, err = 0;
+ struct thread_groups *tg = NULL;
+ cpumask_var_t *mask = NULL;
+
+ if (cache_property != THREAD_GROUP_SHARE_L1 &&
+ cache_property != THREAD_GROUP_SHARE_L2_L3)
+ return -EINVAL;
+
+ tg = get_thread_groups(cpu, cache_property, &err);
+
+ if (!tg)
+ return err;
+
+ cpu_group_start = get_cpu_thread_group_start(cpu, tg);
+
+ if (unlikely(cpu_group_start == -1)) {
+ WARN_ON_ONCE(1);
+ return -ENODATA;
+ }
+
+ if (cache_property == THREAD_GROUP_SHARE_L1) {
+ mask = &per_cpu(thread_group_l1_cache_map, cpu);
+ update_mask_from_threadgroup(mask, tg, cpu, cpu_group_start);
+ } else if (cache_property == THREAD_GROUP_SHARE_L2_L3) {
+ mask = &per_cpu(thread_group_l2_cache_map, cpu);
+ update_mask_from_threadgroup(mask, tg, cpu, cpu_group_start);
+ mask = &per_cpu(thread_group_l3_cache_map, cpu);
+ update_mask_from_threadgroup(mask, tg, cpu, cpu_group_start);
+ }
+
+ return 0;
+}
+
+static bool shared_caches;
+
+#ifdef CONFIG_SCHED_SMT
+/* cpumask of CPUs with asymmetric SMT dependency */
+static int powerpc_smt_flags(void)
+{
+ int flags = SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;
+
+ if (cpu_has_feature(CPU_FTR_ASYM_SMT)) {
+ printk_once(KERN_INFO "Enabling Asymmetric SMT scheduling\n");
+ flags |= SD_ASYM_PACKING;
+ }
+ return flags;
+}
+#endif
+
+/*
+ * P9 has a slightly odd architecture where pairs of cores share an L2 cache.
+ * This topology makes it *much* cheaper to migrate tasks between adjacent cores
+ * since the migrated task remains cache hot. We want to take advantage of this
+ * at the scheduler level so an extra topology level is required.
+ */
+static int powerpc_shared_cache_flags(void)
+{
+ return SD_SHARE_PKG_RESOURCES;
}
-static int init_big_cores(void)
+/*
+ * We can't just pass cpu_l2_cache_mask() directly because it
+ * returns a non-const pointer and the compiler barfs on that.
+ */
+static const struct cpumask *shared_cache_mask(int cpu)
+{
+ return per_cpu(cpu_l2_cache_map, cpu);
+}
+
+#ifdef CONFIG_SCHED_SMT
+static const struct cpumask *smallcore_smt_mask(int cpu)
+{
+ return cpu_smallcore_mask(cpu);
+}
+#endif
+
+static struct cpumask *cpu_coregroup_mask(int cpu)
+{
+ return per_cpu(cpu_coregroup_map, cpu);
+}
+
+static bool has_coregroup_support(void)
+{
+ return coregroup_enabled;
+}
+
+static const struct cpumask *cpu_mc_mask(int cpu)
+{
+ return cpu_coregroup_mask(cpu);
+}
+
+static struct sched_domain_topology_level powerpc_topology[] = {
+#ifdef CONFIG_SCHED_SMT
+ { cpu_smt_mask, powerpc_smt_flags, SD_INIT_NAME(SMT) },
+#endif
+ { shared_cache_mask, powerpc_shared_cache_flags, SD_INIT_NAME(CACHE) },
+ { cpu_mc_mask, SD_INIT_NAME(MC) },
+ { cpu_cpu_mask, SD_INIT_NAME(DIE) },
+ { NULL, },
+};
+
+static int __init init_big_cores(void)
{
int cpu;
for_each_possible_cpu(cpu) {
- int err = init_cpu_l1_cache_map(cpu);
+ int err = init_thread_group_cache_map(cpu, THREAD_GROUP_SHARE_L1);
if (err)
return err;
@@ -834,6 +1068,18 @@ static int init_big_cores(void)
}
has_big_cores = true;
+
+ for_each_possible_cpu(cpu) {
+ int err = init_thread_group_cache_map(cpu, THREAD_GROUP_SHARE_L2_L3);
+
+ if (err)
+ return err;
+ }
+
+ thread_group_shares_l2 = true;
+ thread_group_shares_l3 = true;
+ pr_debug("L2/L3 cache only shared by the threads in the small core\n");
+
return 0;
}
@@ -844,7 +1090,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
DBG("smp_prepare_cpus\n");
/*
- * setup_cpu may need to be called on the boot cpu. We havent
+ * setup_cpu may need to be called on the boot cpu. We haven't
* spun any cpus up but let's be paranoid.
*/
BUG_ON(boot_cpuid != smp_processor_id());
@@ -860,6 +1106,11 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
GFP_KERNEL, cpu_to_node(cpu));
zalloc_cpumask_var_node(&per_cpu(cpu_core_map, cpu),
GFP_KERNEL, cpu_to_node(cpu));
+ if (has_coregroup_support())
+ zalloc_cpumask_var_node(&per_cpu(cpu_coregroup_map, cpu),
+ GFP_KERNEL, cpu_to_node(cpu));
+
+#ifdef CONFIG_NUMA
/*
* numa_node_id() works after this.
*/
@@ -868,6 +1119,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
set_cpu_numa_mem(cpu,
local_memory_node(numa_cpu_lookup_table[cpu]));
}
+#endif
}
/* Init the cpumasks so the boot CPU is related to itself */
@@ -875,12 +1127,29 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
cpumask_set_cpu(boot_cpuid, cpu_l2_cache_mask(boot_cpuid));
cpumask_set_cpu(boot_cpuid, cpu_core_mask(boot_cpuid));
+ if (has_coregroup_support())
+ cpumask_set_cpu(boot_cpuid, cpu_coregroup_mask(boot_cpuid));
+
init_big_cores();
if (has_big_cores) {
cpumask_set_cpu(boot_cpuid,
cpu_smallcore_mask(boot_cpuid));
}
+ if (cpu_to_chip_id(boot_cpuid) != -1) {
+ int idx = DIV_ROUND_UP(num_possible_cpus(), threads_per_core);
+
+ /*
+ * All threads of a core belong to the same core, so
+ * chip_id_lookup_table will have one entry per core.
+ * Assumption: if boot_cpuid doesn't have a chip-id, then no
+ * other CPU will have one either.
+ */
+ chip_id_lookup_table = kcalloc(idx, sizeof(int), GFP_KERNEL);
+ if (chip_id_lookup_table)
+ memset(chip_id_lookup_table, -1, sizeof(int) * idx);
+ }
+
if (smp_ops && smp_ops->probe)
smp_ops->probe();
}
@@ -982,13 +1251,18 @@ static void cpu_idle_thread_init(unsigned int cpu, struct task_struct *idle)
paca_ptrs[cpu]->kstack = (unsigned long)task_stack_page(idle) +
THREAD_SIZE - STACK_FRAME_OVERHEAD;
#endif
- idle->cpu = cpu;
+ task_thread_info(idle)->cpu = cpu;
secondary_current = current_set[cpu] = idle;
}
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
- int rc, c;
+ const unsigned long boot_spin_ms = 5 * MSEC_PER_SEC;
+ const bool booting = system_state < SYSTEM_RUNNING;
+ const unsigned long hp_spin_ms = 1;
+ unsigned long deadline;
+ int rc;
+ const unsigned long spin_wait_ms = booting ? boot_spin_ms : hp_spin_ms;
/*
* Don't allow secondary threads to come online if inhibited
@@ -1033,22 +1307,23 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle)
}
/*
- * wait to see if the cpu made a callin (is actually up).
- * use this value that I found through experimentation.
- * -- Cort
+ * At boot time, simply spin on the callin word until the
+ * deadline passes.
+ *
+ * At run time, spin for an optimistic amount of time to avoid
+ * sleeping in the common case.
*/
- if (system_state < SYSTEM_RUNNING)
- for (c = 50000; c && !cpu_callin_map[cpu]; c--)
- udelay(100);
-#ifdef CONFIG_HOTPLUG_CPU
- else
- /*
- * CPUs can take much longer to come up in the
- * hotplug case. Wait five seconds.
- */
- for (c = 5000; c && !cpu_callin_map[cpu]; c--)
- msleep(1);
-#endif
+ deadline = jiffies + msecs_to_jiffies(spin_wait_ms);
+ spin_until_cond(cpu_callin_map[cpu] || time_is_before_jiffies(deadline));
+
+ if (!cpu_callin_map[cpu] && system_state >= SYSTEM_RUNNING) {
+ const unsigned long sleep_interval_us = 10 * USEC_PER_MSEC;
+ const unsigned long sleep_wait_ms = 100 * MSEC_PER_SEC;
+
+ deadline = jiffies + msecs_to_jiffies(sleep_wait_ms);
+ while (!cpu_callin_map[cpu] && time_is_after_jiffies(deadline))
+ fsleep(sleep_interval_us);
+ }
if (!cpu_callin_map[cpu]) {
printk(KERN_ERR "Processor %u is stuck.\n", cpu);
@@ -1072,18 +1347,13 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle)
int cpu_to_core_id(int cpu)
{
struct device_node *np;
- const __be32 *reg;
int id = -1;
np = of_get_cpu_node(cpu, NULL);
if (!np)
goto out;
- reg = of_get_property(np, "reg", NULL);
- if (!reg)
- goto out;
-
- id = be32_to_cpup(reg);
+ id = of_get_cpu_hwid(np, 0);
out:
of_node_put(np);
return id;
@@ -1125,26 +1395,68 @@ static struct device_node *cpu_to_l2cache(int cpu)
return cache;
}
-static bool update_mask_by_l2(int cpu, struct cpumask *(*mask_fn)(int))
+static bool update_mask_by_l2(int cpu, cpumask_var_t *mask)
{
+ struct cpumask *(*submask_fn)(int) = cpu_sibling_mask;
struct device_node *l2_cache, *np;
int i;
+ if (has_big_cores)
+ submask_fn = cpu_smallcore_mask;
+
+ /*
+ * If the threads in a thread-group share L2 cache, then the
+ * L2-mask can be obtained from thread_group_l2_cache_map.
+ */
+ if (thread_group_shares_l2) {
+ cpumask_set_cpu(cpu, cpu_l2_cache_mask(cpu));
+
+ for_each_cpu(i, per_cpu(thread_group_l2_cache_map, cpu)) {
+ if (cpu_online(i))
+ set_cpus_related(i, cpu, cpu_l2_cache_mask);
+ }
+
+ /* Verify that L1-cache siblings are a subset of L2 cache-siblings */
+ if (!cpumask_equal(submask_fn(cpu), cpu_l2_cache_mask(cpu)) &&
+ !cpumask_subset(submask_fn(cpu), cpu_l2_cache_mask(cpu))) {
+ pr_warn_once("CPU %d : Inconsistent L1 and L2 cache siblings\n",
+ cpu);
+ }
+
+ return true;
+ }
+
l2_cache = cpu_to_l2cache(cpu);
- if (!l2_cache)
+ if (!l2_cache || !*mask) {
+ /* Assume only core siblings share cache with this CPU */
+ for_each_cpu(i, cpu_sibling_mask(cpu))
+ set_cpus_related(cpu, i, cpu_l2_cache_mask);
+
return false;
+ }
+
+ cpumask_and(*mask, cpu_online_mask, cpu_cpu_mask(cpu));
+
+ /* Update l2-cache mask with all the CPUs that are part of submask */
+ or_cpumasks_related(cpu, cpu, submask_fn, cpu_l2_cache_mask);
- for_each_cpu(i, cpu_online_mask) {
+ /* Skip all CPUs already part of current CPU l2-cache mask */
+ cpumask_andnot(*mask, *mask, cpu_l2_cache_mask(cpu));
+
+ for_each_cpu(i, *mask) {
/*
* when updating the masks the current CPU has not been marked
* online, but we still need to update the cache masks
*/
np = cpu_to_l2cache(i);
- if (!np)
- continue;
- if (np == l2_cache)
- set_cpus_related(cpu, i, mask_fn);
+ /* Skip all CPUs already part of current CPU l2-cache */
+ if (np == l2_cache) {
+ or_cpumasks_related(cpu, i, submask_fn, cpu_l2_cache_mask);
+ cpumask_andnot(*mask, *mask, submask_fn(i));
+ } else {
+ cpumask_andnot(*mask, *mask, cpu_l2_cache_mask(i));
+ }
of_node_put(np);
}
@@ -1156,89 +1468,155 @@ static bool update_mask_by_l2(int cpu, struct cpumask *(*mask_fn)(int))
#ifdef CONFIG_HOTPLUG_CPU
static void remove_cpu_from_masks(int cpu)
{
+ struct cpumask *(*mask_fn)(int) = cpu_sibling_mask;
int i;
- /* NB: cpu_core_mask is a superset of the others */
- for_each_cpu(i, cpu_core_mask(cpu)) {
- set_cpus_unrelated(cpu, i, cpu_core_mask);
+ unmap_cpu_from_node(cpu);
+
+ if (shared_caches)
+ mask_fn = cpu_l2_cache_mask;
+
+ for_each_cpu(i, mask_fn(cpu)) {
set_cpus_unrelated(cpu, i, cpu_l2_cache_mask);
set_cpus_unrelated(cpu, i, cpu_sibling_mask);
if (has_big_cores)
set_cpus_unrelated(cpu, i, cpu_smallcore_mask);
}
+
+ for_each_cpu(i, cpu_core_mask(cpu))
+ set_cpus_unrelated(cpu, i, cpu_core_mask);
+
+ if (has_coregroup_support()) {
+ for_each_cpu(i, cpu_coregroup_mask(cpu))
+ set_cpus_unrelated(cpu, i, cpu_coregroup_mask);
+ }
}
#endif
static inline void add_cpu_to_smallcore_masks(int cpu)
{
- struct cpumask *this_l1_cache_map = per_cpu(cpu_l1_cache_map, cpu);
- int i, first_thread = cpu_first_thread_sibling(cpu);
+ int i;
if (!has_big_cores)
return;
cpumask_set_cpu(cpu, cpu_smallcore_mask(cpu));
- for (i = first_thread; i < first_thread + threads_per_core; i++) {
- if (cpu_online(i) && cpumask_test_cpu(i, this_l1_cache_map))
+ for_each_cpu(i, per_cpu(thread_group_l1_cache_map, cpu)) {
+ if (cpu_online(i))
set_cpus_related(i, cpu, cpu_smallcore_mask);
}
}
+static void update_coregroup_mask(int cpu, cpumask_var_t *mask)
+{
+ struct cpumask *(*submask_fn)(int) = cpu_sibling_mask;
+ int coregroup_id = cpu_to_coregroup_id(cpu);
+ int i;
+
+ if (shared_caches)
+ submask_fn = cpu_l2_cache_mask;
+
+ if (!*mask) {
+ /* Assume only siblings are part of this CPU's coregroup */
+ for_each_cpu(i, submask_fn(cpu))
+ set_cpus_related(cpu, i, cpu_coregroup_mask);
+
+ return;
+ }
+
+ cpumask_and(*mask, cpu_online_mask, cpu_cpu_mask(cpu));
+
+ /* Update coregroup mask with all the CPUs that are part of submask */
+ or_cpumasks_related(cpu, cpu, submask_fn, cpu_coregroup_mask);
+
+ /* Skip all CPUs already part of coregroup mask */
+ cpumask_andnot(*mask, *mask, cpu_coregroup_mask(cpu));
+
+ for_each_cpu(i, *mask) {
+ /* Skip all CPUs not part of this coregroup */
+ if (coregroup_id == cpu_to_coregroup_id(i)) {
+ or_cpumasks_related(cpu, i, submask_fn, cpu_coregroup_mask);
+ cpumask_andnot(*mask, *mask, submask_fn(i));
+ } else {
+ cpumask_andnot(*mask, *mask, cpu_coregroup_mask(i));
+ }
+ }
+}
+
static void add_cpu_to_masks(int cpu)
{
+ struct cpumask *(*submask_fn)(int) = cpu_sibling_mask;
int first_thread = cpu_first_thread_sibling(cpu);
- int chipid = cpu_to_chip_id(cpu);
+ cpumask_var_t mask;
+ int chip_id = -1;
+ bool ret;
int i;
/*
* This CPU will not be in the online mask yet so we need to manually
* add it to its own thread sibling mask.
*/
+ map_cpu_to_node(cpu, cpu_to_node(cpu));
cpumask_set_cpu(cpu, cpu_sibling_mask(cpu));
+ cpumask_set_cpu(cpu, cpu_core_mask(cpu));
for (i = first_thread; i < first_thread + threads_per_core; i++)
if (cpu_online(i))
set_cpus_related(i, cpu, cpu_sibling_mask);
add_cpu_to_smallcore_masks(cpu);
- /*
- * Copy the thread sibling mask into the cache sibling mask
- * and mark any CPUs that share an L2 with this CPU.
- */
- for_each_cpu(i, cpu_sibling_mask(cpu))
- set_cpus_related(cpu, i, cpu_l2_cache_mask);
- update_mask_by_l2(cpu, cpu_l2_cache_mask);
- /*
- * Copy the cache sibling mask into core sibling mask and mark
- * any CPUs on the same chip as this CPU.
- */
- for_each_cpu(i, cpu_l2_cache_mask(cpu))
- set_cpus_related(cpu, i, cpu_core_mask);
+ /* In CPU-hotplug path, hence use GFP_ATOMIC */
+ ret = alloc_cpumask_var_node(&mask, GFP_ATOMIC, cpu_to_node(cpu));
+ update_mask_by_l2(cpu, &mask);
- if (chipid == -1)
- return;
+ if (has_coregroup_support())
+ update_coregroup_mask(cpu, &mask);
- for_each_cpu(i, cpu_online_mask)
- if (cpu_to_chip_id(i) == chipid)
- set_cpus_related(cpu, i, cpu_core_mask);
-}
+ if (chip_id_lookup_table && ret)
+ chip_id = cpu_to_chip_id(cpu);
-static bool shared_caches;
+ if (shared_caches)
+ submask_fn = cpu_l2_cache_mask;
+
+ /* Update core_mask with all the CPUs that are part of submask */
+ or_cpumasks_related(cpu, cpu, submask_fn, cpu_core_mask);
+
+ /* Skip all CPUs already part of current CPU core mask */
+ cpumask_andnot(mask, cpu_online_mask, cpu_core_mask(cpu));
+
+ /* If chip_id is -1, limit the cpu_core_mask to within the DIE */
+ if (chip_id == -1)
+ cpumask_and(mask, mask, cpu_cpu_mask(cpu));
+
+ for_each_cpu(i, mask) {
+ if (chip_id == cpu_to_chip_id(i)) {
+ or_cpumasks_related(cpu, i, submask_fn, cpu_core_mask);
+ cpumask_andnot(mask, mask, submask_fn(i));
+ } else {
+ cpumask_andnot(mask, mask, cpu_core_mask(i));
+ }
+ }
+
+ free_cpumask_var(mask);
+}
/* Activate a secondary processor. */
void start_secondary(void *unused)
{
- unsigned int cpu = smp_processor_id();
- struct cpumask *(*sibling_mask)(int) = cpu_sibling_mask;
+ unsigned int cpu = raw_smp_processor_id();
+
+ /* PPC64 calls setup_kup() in early_setup_secondary() */
+ if (IS_ENABLED(CONFIG_PPC32))
+ setup_kup();
mmgrab(&init_mm);
current->active_mm = &init_mm;
smp_store_cpu_info(cpu);
set_dec(tb_ticks_per_jiffy);
- preempt_disable();
+ rcu_cpu_starting(cpu);
cpu_callin_map[cpu] = 1;
if (smp_ops->setup_cpu)
@@ -1254,20 +1632,26 @@ void start_secondary(void *unused)
vdso_getcpu_init();
#endif
+ set_numa_node(numa_cpu_lookup_table[cpu]);
+ set_numa_mem(local_memory_node(numa_cpu_lookup_table[cpu]));
+
/* Update topology CPU masks */
add_cpu_to_masks(cpu);
- if (has_big_cores)
- sibling_mask = cpu_smallcore_mask;
/*
* Check for any shared caches. Note that this must be done on a
* per-core basis because one core in the pair might be disabled.
*/
- if (!cpumask_equal(cpu_l2_cache_mask(cpu), sibling_mask(cpu)))
- shared_caches = true;
+ if (!shared_caches) {
+ struct cpumask *(*sibling_mask)(int) = cpu_sibling_mask;
+ struct cpumask *mask = cpu_l2_cache_mask(cpu);
- set_numa_node(numa_cpu_lookup_table[cpu]);
- set_numa_mem(local_memory_node(numa_cpu_lookup_table[cpu]));
+ if (has_big_cores)
+ sibling_mask = cpu_smallcore_mask;
+
+ if (cpumask_weight(mask) > cpumask_weight(sibling_mask(cpu)))
+ shared_caches = true;
+ }
smp_wmb();
notify_cpu_starting(cpu);
@@ -1285,68 +1669,44 @@ void start_secondary(void *unused)
BUG();
}
-int setup_profiling_timer(unsigned int multiplier)
+static void __init fixup_topology(void)
{
- return 0;
-}
+ int i;
#ifdef CONFIG_SCHED_SMT
-/* cpumask of CPUs with asymetric SMT dependancy */
-static int powerpc_smt_flags(void)
-{
- int flags = SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;
-
- if (cpu_has_feature(CPU_FTR_ASYM_SMT)) {
- printk_once(KERN_INFO "Enabling Asymmetric SMT scheduling\n");
- flags |= SD_ASYM_PACKING;
+ if (has_big_cores) {
+ pr_info("Big cores detected but using small core scheduling\n");
+ powerpc_topology[smt_idx].mask = smallcore_smt_mask;
}
- return flags;
-}
#endif
-static struct sched_domain_topology_level powerpc_topology[] = {
-#ifdef CONFIG_SCHED_SMT
- { cpu_smt_mask, powerpc_smt_flags, SD_INIT_NAME(SMT) },
-#endif
- { cpu_cpu_mask, SD_INIT_NAME(DIE) },
- { NULL, },
-};
+ if (!has_coregroup_support())
+ powerpc_topology[mc_idx].mask = powerpc_topology[cache_idx].mask;
-/*
- * P9 has a slightly odd architecture where pairs of cores share an L2 cache.
- * This topology makes it *much* cheaper to migrate tasks between adjacent cores
- * since the migrated task remains cache hot. We want to take advantage of this
- * at the scheduler level so an extra topology level is required.
- */
-static int powerpc_shared_cache_flags(void)
-{
- return SD_SHARE_PKG_RESOURCES;
-}
+ /*
+ * Try to consolidate topology levels here instead of
+ * allowing scheduler to degenerate.
+ * - Don't consolidate if masks are different.
+ * - Don't consolidate if sd_flags exist and are different.
+ */
+ for (i = 1; i <= die_idx; i++) {
+ if (powerpc_topology[i].mask != powerpc_topology[i - 1].mask)
+ continue;
-/*
- * We can't just pass cpu_l2_cache_mask() directly because
- * returns a non-const pointer and the compiler barfs on that.
- */
-static const struct cpumask *shared_cache_mask(int cpu)
-{
- return cpu_l2_cache_mask(cpu);
-}
+ if (powerpc_topology[i].sd_flags && powerpc_topology[i - 1].sd_flags &&
+ powerpc_topology[i].sd_flags != powerpc_topology[i - 1].sd_flags)
+ continue;
-#ifdef CONFIG_SCHED_SMT
-static const struct cpumask *smallcore_smt_mask(int cpu)
-{
- return cpu_smallcore_mask(cpu);
-}
-#endif
+ if (!powerpc_topology[i - 1].sd_flags)
+ powerpc_topology[i - 1].sd_flags = powerpc_topology[i].sd_flags;
-static struct sched_domain_topology_level power9_topology[] = {
-#ifdef CONFIG_SCHED_SMT
- { cpu_smt_mask, powerpc_smt_flags, SD_INIT_NAME(SMT) },
+ powerpc_topology[i].mask = powerpc_topology[i + 1].mask;
+ powerpc_topology[i].sd_flags = powerpc_topology[i + 1].sd_flags;
+#ifdef CONFIG_SCHED_DEBUG
+ powerpc_topology[i].name = powerpc_topology[i + 1].name;
#endif
- { shared_cache_mask, powerpc_shared_cache_flags, SD_INIT_NAME(CACHE) },
- { cpu_cpu_mask, SD_INIT_NAME(DIE) },
- { NULL, },
-};
+ }
+}
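A worked example of the consolidation above (illustration only, assuming a hypothetical machine without coregroup support): has_coregroup_support() is false, so the MC level is given the CACHE level's mask; the loop then sees two adjacent levels with identical masks, keeps CACHE's sd_flags and copies the DIE entry over the MC slot, so the scheduler never has to degenerate an identical level itself:

	/* before fixup: SMT -> CACHE -> MC (mask == CACHE) -> DIE -> NULL */
	/* after  fixup: SMT -> CACHE -> DIE -> NULL                       */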
void __init smp_cpus_done(unsigned int max_cpus)
{
@@ -1359,31 +1719,10 @@ void __init smp_cpus_done(unsigned int max_cpus)
if (smp_ops && smp_ops->bringup_done)
smp_ops->bringup_done();
- /*
- * On a shared LPAR, associativity needs to be requested.
- * Hence, get numa topology before dumping cpu topology
- */
- shared_proc_topology_init();
dump_numa_cpu_topology();
-#ifdef CONFIG_SCHED_SMT
- if (has_big_cores) {
- pr_info("Using small cores at SMT level\n");
- power9_topology[0].mask = smallcore_smt_mask;
- powerpc_topology[0].mask = smallcore_smt_mask;
- }
-#endif
- /*
- * If any CPU detects that it's sharing a cache with another CPU then
- * use the deeper topology that is aware of this sharing.
- */
- if (shared_caches) {
- pr_info("Using shared cache scheduler topology\n");
- set_sched_topology(power9_topology);
- } else {
- pr_info("Using standard scheduler topology\n");
- set_sched_topology(powerpc_topology);
- }
+ fixup_topology();
+ set_sched_topology(powerpc_topology);
}
#ifdef CONFIG_HOTPLUG_CPU
@@ -1413,7 +1752,7 @@ void __cpu_die(unsigned int cpu)
smp_ops->cpu_die(cpu);
}
-void cpu_die(void)
+void arch_cpu_idle_dead(void)
{
/*
* Disable on the down path. This will be re-enabled by
@@ -1421,8 +1760,8 @@ void cpu_die(void)
*/
this_cpu_disable_ftrace();
- if (ppc_md.cpu_die)
- ppc_md.cpu_die();
+ if (smp_ops->cpu_offline_self)
+ smp_ops->cpu_offline_self();
/* If we return, we re-enter start_secondary */
start_secondary_resume();
diff --git a/arch/powerpc/kernel/stacktrace.c b/arch/powerpc/kernel/stacktrace.c
index e2a46cfed5fd..a2443d61728e 100644
--- a/arch/powerpc/kernel/stacktrace.c
+++ b/arch/powerpc/kernel/stacktrace.c
@@ -8,6 +8,7 @@
* Copyright 2018 Nick Piggin, Michael Ellerman, IBM Corp.
*/
+#include <linux/delay.h>
#include <linux/export.h>
#include <linux/kallsyms.h>
#include <linux/module.h>
@@ -23,90 +24,56 @@
#include <asm/paca.h>
-/*
- * Save stack-backtrace addresses into a stack_trace buffer.
- */
-static void save_context_stack(struct stack_trace *trace, unsigned long sp,
- struct task_struct *tsk, int savesched)
+void __no_sanitize_address arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
+ struct task_struct *task, struct pt_regs *regs)
{
+ unsigned long sp;
+
+ if (regs && !consume_entry(cookie, regs->nip))
+ return;
+
+ if (regs)
+ sp = regs->gpr[1];
+ else if (task == current)
+ sp = current_stack_frame();
+ else
+ sp = task->thread.ksp;
+
for (;;) {
unsigned long *stack = (unsigned long *) sp;
unsigned long newsp, ip;
- if (!validate_sp(sp, tsk, STACK_FRAME_OVERHEAD))
+ if (!validate_sp(sp, task, STACK_FRAME_OVERHEAD))
return;
newsp = stack[0];
ip = stack[STACK_FRAME_LR_SAVE];
- if (savesched || !in_sched_functions(ip)) {
- if (!trace->skip)
- trace->entries[trace->nr_entries++] = ip;
- else
- trace->skip--;
- }
-
- if (trace->nr_entries >= trace->max_entries)
+ if (!consume_entry(cookie, ip))
return;
sp = newsp;
}
}
-void save_stack_trace(struct stack_trace *trace)
-{
- unsigned long sp;
-
- sp = current_stack_pointer();
-
- save_context_stack(trace, sp, current, 1);
-}
-EXPORT_SYMBOL_GPL(save_stack_trace);
-
-void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
-{
- unsigned long sp;
-
- if (!try_get_task_stack(tsk))
- return;
-
- if (tsk == current)
- sp = current_stack_pointer();
- else
- sp = tsk->thread.ksp;
-
- save_context_stack(trace, sp, tsk, 0);
-
- put_task_stack(tsk);
-}
-EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
-
-void
-save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
-{
- save_context_stack(trace, regs->gpr[1], current, 0);
-}
-EXPORT_SYMBOL_GPL(save_stack_trace_regs);
-
-#ifdef CONFIG_HAVE_RELIABLE_STACKTRACE
/*
* This function returns an error if it detects any unreliable features of the
* stack. Otherwise it guarantees that the stack trace is reliable.
*
* If the task is not 'current', the caller *must* ensure the task is inactive.
*/
-static int __save_stack_trace_tsk_reliable(struct task_struct *tsk,
- struct stack_trace *trace)
+int __no_sanitize_address arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
+ void *cookie, struct task_struct *task)
{
unsigned long sp;
unsigned long newsp;
- unsigned long stack_page = (unsigned long)task_stack_page(tsk);
+ unsigned long stack_page = (unsigned long)task_stack_page(task);
unsigned long stack_end;
int graph_idx = 0;
bool firstframe;
stack_end = stack_page + THREAD_SIZE;
- if (!is_idle_task(tsk)) {
+ if (!is_idle_task(task)) {
/*
* For user tasks, this is the SP value loaded on
* kernel entry, see "PACAKSAVE(r13)" in _switch() and
@@ -130,10 +97,10 @@ static int __save_stack_trace_tsk_reliable(struct task_struct *tsk,
stack_end -= STACK_FRAME_OVERHEAD;
}
- if (tsk == current)
- sp = current_stack_pointer();
+ if (task == current)
+ sp = current_stack_frame();
else
- sp = tsk->thread.ksp;
+ sp = task->thread.ksp;
if (sp < stack_page + sizeof(struct thread_struct) ||
sp > stack_end - STACK_FRAME_MIN_SIZE) {
@@ -182,46 +149,22 @@ static int __save_stack_trace_tsk_reliable(struct task_struct *tsk,
* FIXME: IMHO these tests do not belong in
* arch-dependent code, they are generic.
*/
- ip = ftrace_graph_ret_addr(tsk, &graph_idx, ip, stack);
+ ip = ftrace_graph_ret_addr(task, &graph_idx, ip, stack);
#ifdef CONFIG_KPROBES
/*
* Mark stacktraces with kretprobed functions on them
* as unreliable.
*/
- if (ip == (unsigned long)kretprobe_trampoline)
+ if (ip == (unsigned long)__kretprobe_trampoline)
return -EINVAL;
#endif
- if (trace->nr_entries >= trace->max_entries)
- return -E2BIG;
- if (!trace->skip)
- trace->entries[trace->nr_entries++] = ip;
- else
- trace->skip--;
+ if (!consume_entry(cookie, ip))
+ return -EINVAL;
}
return 0;
}
-int save_stack_trace_tsk_reliable(struct task_struct *tsk,
- struct stack_trace *trace)
-{
- int ret;
-
- /*
- * If the task doesn't have a stack (e.g., a zombie), the stack is
- * "reliably" empty.
- */
- if (!try_get_task_stack(tsk))
- return 0;
-
- ret = __save_stack_trace_tsk_reliable(tsk, trace);
-
- put_task_stack(tsk);
-
- return ret;
-}
-#endif /* CONFIG_HAVE_RELIABLE_STACKTRACE */
-
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_NMI_IPI)
static void handle_backtrace_ipi(struct pt_regs *regs)
{
@@ -230,17 +173,31 @@ static void handle_backtrace_ipi(struct pt_regs *regs)
static void raise_backtrace_ipi(cpumask_t *mask)
{
+ struct paca_struct *p;
unsigned int cpu;
+ u64 delay_us;
for_each_cpu(cpu, mask) {
- if (cpu == smp_processor_id())
+ if (cpu == smp_processor_id()) {
handle_backtrace_ipi(NULL);
- else
- smp_send_safe_nmi_ipi(cpu, handle_backtrace_ipi, 5 * USEC_PER_SEC);
- }
+ continue;
+ }
- for_each_cpu(cpu, mask) {
- struct paca_struct *p = paca_ptrs[cpu];
+ delay_us = 5 * USEC_PER_SEC;
+
+ if (smp_send_safe_nmi_ipi(cpu, handle_backtrace_ipi, delay_us)) {
+ // Now wait up to 5s for the other CPU to do its backtrace
+ while (cpumask_test_cpu(cpu, mask) && delay_us) {
+ udelay(1);
+ delay_us--;
+ }
+
+ // Other CPU cleared itself from the mask
+ if (delay_us)
+ continue;
+ }
+
+ p = paca_ptrs[cpu];
cpumask_clear_cpu(cpu, mask);
@@ -260,7 +217,7 @@ static void raise_backtrace_ipi(cpumask_t *mask)
pr_cont(" current pointer corrupt? (%px)\n", p->__current);
pr_warn("Back trace of paca->saved_r1 (0x%016llx) (possibly stale):\n", p->saved_r1);
- show_stack(p->__current, (unsigned long *)p->saved_r1);
+ show_stack(p->__current, (unsigned long *)p->saved_r1, KERN_WARNING);
}
}
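For illustration (not part of the patch): arch_stack_walk() above hands every return address to a stack_trace_consume_fn callback; a minimal consumer, with a hypothetical name, could look like this, and returning false stops the walk early:

	static bool print_entry(void *cookie, unsigned long addr)
	{
		pr_info("  %pS\n", (void *)addr);
		return true;	/* return false to terminate the walk */
	}

	/* e.g.: arch_stack_walk(print_entry, NULL, current, NULL); */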
diff --git a/arch/powerpc/kernel/static_call.c b/arch/powerpc/kernel/static_call.c
new file mode 100644
index 000000000000..863a7aa24650
--- /dev/null
+++ b/arch/powerpc/kernel/static_call.c
@@ -0,0 +1,37 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/memory.h>
+#include <linux/static_call.h>
+
+#include <asm/code-patching.h>
+
+void arch_static_call_transform(void *site, void *tramp, void *func, bool tail)
+{
+ int err;
+ bool is_ret0 = (func == __static_call_return0);
+ unsigned long target = (unsigned long)(is_ret0 ? tramp + PPC_SCT_RET0 : func);
+ bool is_short = is_offset_in_branch_range((long)target - (long)tramp);
+
+ if (!tramp)
+ return;
+
+ mutex_lock(&text_mutex);
+
+ if (func && !is_short) {
+ err = patch_instruction(tramp + PPC_SCT_DATA, ppc_inst(target));
+ if (err)
+ goto out;
+ }
+
+ if (!func)
+ err = patch_instruction(tramp, ppc_inst(PPC_RAW_BLR()));
+ else if (is_short)
+ err = patch_branch(tramp, target, 0);
+ else
+ err = patch_instruction(tramp, ppc_inst(PPC_RAW_NOP()));
+out:
+ mutex_unlock(&text_mutex);
+
+ if (err)
+ panic("%s: patching failed %pS at %pS\n", __func__, func, tramp);
+}
+EXPORT_SYMBOL_GPL(arch_static_call_transform);
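For context (illustration only, names are hypothetical and not taken from this patch): the transform above backs the generic static call API from <linux/static_call.h>; a typical user looks roughly like this:

	static int default_handler(int x)
	{
		return x;
	}

	DEFINE_STATIC_CALL(my_handler, default_handler);

	/* the call site becomes a direct branch (or the RET0 stub) once
	 * arch_static_call_transform() has patched the trampoline */
	int use_it(int x)
	{
		return static_call(my_handler)(x);
	}

	/* later: static_call_update(my_handler, some_other_handler); */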
diff --git a/arch/powerpc/kernel/swsusp_32.S b/arch/powerpc/kernel/swsusp_32.S
index cbdf86228eaa..e0cbd63007f2 100644
--- a/arch/powerpc/kernel/swsusp_32.S
+++ b/arch/powerpc/kernel/swsusp_32.S
@@ -181,7 +181,7 @@ _GLOBAL(swsusp_arch_resume)
#ifdef CONFIG_ALTIVEC
/* Stop pending altivec streams and memory accesses */
BEGIN_FTR_SECTION
- DSSALL
+ PPC_DSSALL
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
sync
@@ -395,6 +395,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
li r3,0
blr
+_ASM_NOKPROBE_SYMBOL(swsusp_arch_resume)
/* FIXME: This construct is actually not useful since we don't shut
* down the instruction MMU, we could just flip back MSR-DR on.
@@ -406,4 +407,5 @@ turn_on_mmu:
sync
isync
rfi
+_ASM_NOKPROBE_SYMBOL(turn_on_mmu)
diff --git a/arch/powerpc/kernel/swsusp_64.c b/arch/powerpc/kernel/swsusp_64.c
index aeea97ad85cf..16ee3baaf09a 100644
--- a/arch/powerpc/kernel/swsusp_64.c
+++ b/arch/powerpc/kernel/swsusp_64.c
@@ -17,8 +17,3 @@ void do_after_copyback(void)
touch_softlockup_watchdog();
mb();
}
-
-void _iommu_save(void)
-{
- iommu_save();
-}
diff --git a/arch/powerpc/kernel/swsusp_booke.S b/arch/powerpc/kernel/swsusp_85xx.S
index 88cfdbd530f1..88cfdbd530f1 100644
--- a/arch/powerpc/kernel/swsusp_booke.S
+++ b/arch/powerpc/kernel/swsusp_85xx.S
diff --git a/arch/powerpc/kernel/swsusp_asm64.S b/arch/powerpc/kernel/swsusp_asm64.S
index 6d3189830dd3..f645652c2654 100644
--- a/arch/powerpc/kernel/swsusp_asm64.S
+++ b/arch/powerpc/kernel/swsusp_asm64.S
@@ -76,16 +76,10 @@
swsusp_save_area:
.space SL_SIZE
- .section ".toc","aw"
-swsusp_save_area_ptr:
- .tc swsusp_save_area[TC],swsusp_save_area
-restore_pblist_ptr:
- .tc restore_pblist[TC],restore_pblist
-
.section .text
.align 5
_GLOBAL(swsusp_arch_suspend)
- ld r11,swsusp_save_area_ptr@toc(r2)
+ LOAD_REG_ADDR(r11, swsusp_save_area)
SAVE_SPECIAL(LR)
SAVE_REGISTER(r1)
SAVE_SPECIAL(CR)
@@ -128,11 +122,10 @@ END_FW_FTR_SECTION_IFCLR(FW_FEATURE_LPAR)
* stack pointer on the stack like a real stackframe */
addi r1,r1,-128
- bl _iommu_save
bl swsusp_save
/* restore LR */
- ld r11,swsusp_save_area_ptr@toc(r2)
+ LOAD_REG_ADDR(r11, swsusp_save_area)
RESTORE_SPECIAL(LR)
addi r1,r1,128
@@ -142,11 +135,11 @@ END_FW_FTR_SECTION_IFCLR(FW_FEATURE_LPAR)
_GLOBAL(swsusp_arch_resume)
/* Stop pending altivec streams and memory accesses */
BEGIN_FTR_SECTION
- DSSALL
+ PPC_DSSALL
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
sync
- ld r12,restore_pblist_ptr@toc(r2)
+ LOAD_REG_ADDR(r12, restore_pblist)
ld r12,0(r12)
cmpdi r12,0
@@ -188,7 +181,7 @@ nothing_to_copy:
tlbia
#endif
- ld r11,swsusp_save_area_ptr@toc(r2)
+ LOAD_REG_ADDR(r11, swsusp_save_area)
RESTORE_SPECIAL(CR)
@@ -266,7 +259,7 @@ END_FW_FTR_SECTION_IFCLR(FW_FEATURE_LPAR)
bl do_after_copyback
addi r1,r1,128
- ld r11,swsusp_save_area_ptr@toc(r2)
+ LOAD_REG_ADDR(r11, swsusp_save_area)
RESTORE_SPECIAL(LR)
li r3, 0
diff --git a/arch/powerpc/kernel/sys_ppc32.c b/arch/powerpc/kernel/sys_ppc32.c
index d36c6391eaf5..d451a8229223 100644
--- a/arch/powerpc/kernel/sys_ppc32.c
+++ b/arch/powerpc/kernel/sys_ppc32.c
@@ -1,13 +1,23 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * sys_ppc32.c: Conversion between 32bit and 64bit native syscalls.
+ * sys_ppc32.c: 32-bit system calls with complex calling conventions.
*
* Copyright (C) 2001 IBM
* Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
* Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
*
- * These routines maintain argument size conversion between 32bit and 64bit
- * environment.
+ * 32-bit system calls with 64-bit arguments pass those in register pairs.
+ * This must be specially dealt with on 64-bit kernels. The compat_arg_u64_dual
+ * in generic compat syscalls is not always usable because the register
+ * pairing is constrained depending on preceding arguments.
+ *
+ * An analogous problem exists on 32-bit kernels with ARCH_HAS_SYSCALL_WRAPPER,
+ * the defined system call functions take the pt_regs as an argument, and there
+ * is a mapping macro which maps registers to arguments
+ * (SC_POWERPC_REGS_TO_ARGS) which also does not deal with these 64-bit
+ * arguments.
+ *
+ * This file contains these system calls.
*/
#include <linux/kernel.h>
@@ -25,7 +35,6 @@
#include <linux/poll.h>
#include <linux/personality.h>
#include <linux/stat.h>
-#include <linux/mman.h>
#include <linux/in.h>
#include <linux/syscalls.h>
#include <linux/unistd.h>
@@ -48,68 +57,79 @@
#include <asm/syscalls.h>
#include <asm/switch_to.h>
-unsigned long compat_sys_mmap2(unsigned long addr, size_t len,
- unsigned long prot, unsigned long flags,
- unsigned long fd, unsigned long pgoff)
-{
- /* This should remain 12 even if PAGE_SIZE changes */
- return sys_mmap(addr, len, prot, flags, fd, pgoff << 12);
-}
-
-/*
- * long long munging:
- * The 32 bit ABI passes long longs in an odd even register pair.
- */
-
-compat_ssize_t compat_sys_pread64(unsigned int fd, char __user *ubuf, compat_size_t count,
- u32 reg6, u32 poshi, u32 poslo)
-{
- return ksys_pread64(fd, ubuf, count, ((loff_t)poshi << 32) | poslo);
-}
+#ifdef CONFIG_PPC32
+#define PPC32_SYSCALL_DEFINE4 SYSCALL_DEFINE4
+#define PPC32_SYSCALL_DEFINE5 SYSCALL_DEFINE5
+#define PPC32_SYSCALL_DEFINE6 SYSCALL_DEFINE6
+#else
+#define PPC32_SYSCALL_DEFINE4 COMPAT_SYSCALL_DEFINE4
+#define PPC32_SYSCALL_DEFINE5 COMPAT_SYSCALL_DEFINE5
+#define PPC32_SYSCALL_DEFINE6 COMPAT_SYSCALL_DEFINE6
+#endif
-compat_ssize_t compat_sys_pwrite64(unsigned int fd, const char __user *ubuf, compat_size_t count,
- u32 reg6, u32 poshi, u32 poslo)
+PPC32_SYSCALL_DEFINE6(ppc_pread64,
+ unsigned int, fd,
+ char __user *, ubuf, compat_size_t, count,
+ u32, reg6, u32, pos1, u32, pos2)
{
- return ksys_pwrite64(fd, ubuf, count, ((loff_t)poshi << 32) | poslo);
+ return ksys_pread64(fd, ubuf, count, merge_64(pos1, pos2));
}
-compat_ssize_t compat_sys_readahead(int fd, u32 r4, u32 offhi, u32 offlo, u32 count)
+PPC32_SYSCALL_DEFINE6(ppc_pwrite64,
+ unsigned int, fd,
+ const char __user *, ubuf, compat_size_t, count,
+ u32, reg6, u32, pos1, u32, pos2)
{
- return ksys_readahead(fd, ((loff_t)offhi << 32) | offlo, count);
+ return ksys_pwrite64(fd, ubuf, count, merge_64(pos1, pos2));
}
-asmlinkage int compat_sys_truncate64(const char __user * path, u32 reg4,
- unsigned long high, unsigned long low)
+PPC32_SYSCALL_DEFINE5(ppc_readahead,
+ int, fd, u32, r4,
+ u32, offset1, u32, offset2, u32, count)
{
- return ksys_truncate(path, (high << 32) | low);
+ return ksys_readahead(fd, merge_64(offset1, offset2), count);
}
-asmlinkage long compat_sys_fallocate(int fd, int mode, u32 offhi, u32 offlo,
- u32 lenhi, u32 lenlo)
+PPC32_SYSCALL_DEFINE4(ppc_truncate64,
+ const char __user *, path, u32, reg4,
+ unsigned long, len1, unsigned long, len2)
{
- return ksys_fallocate(fd, mode, ((loff_t)offhi << 32) | offlo,
- ((loff_t)lenhi << 32) | lenlo);
+ return ksys_truncate(path, merge_64(len1, len2));
}
-asmlinkage int compat_sys_ftruncate64(unsigned int fd, u32 reg4, unsigned long high,
- unsigned long low)
+PPC32_SYSCALL_DEFINE4(ppc_ftruncate64,
+ unsigned int, fd, u32, reg4,
+ unsigned long, len1, unsigned long, len2)
{
- return ksys_ftruncate(fd, (high << 32) | low);
+ return ksys_ftruncate(fd, merge_64(len1, len2));
}
-long ppc32_fadvise64(int fd, u32 unused, u32 offset_high, u32 offset_low,
- size_t len, int advice)
+PPC32_SYSCALL_DEFINE6(ppc32_fadvise64,
+ int, fd, u32, unused, u32, offset1, u32, offset2,
+ size_t, len, int, advice)
{
- return ksys_fadvise64_64(fd, (u64)offset_high << 32 | offset_low, len,
+ return ksys_fadvise64_64(fd, merge_64(offset1, offset2), len,
advice);
}
-asmlinkage long compat_sys_sync_file_range2(int fd, unsigned int flags,
- unsigned offset_hi, unsigned offset_lo,
- unsigned nbytes_hi, unsigned nbytes_lo)
+PPC32_SYSCALL_DEFINE6(ppc_sync_file_range2,
+ int, fd, unsigned int, flags,
+ unsigned int, offset1, unsigned int, offset2,
+ unsigned int, nbytes1, unsigned int, nbytes2)
{
- loff_t offset = ((loff_t)offset_hi << 32) | offset_lo;
- loff_t nbytes = ((loff_t)nbytes_hi << 32) | nbytes_lo;
+ loff_t offset = merge_64(offset1, offset2);
+ loff_t nbytes = merge_64(nbytes1, nbytes2);
return ksys_sync_file_range(fd, offset, nbytes, flags);
}
+
+#ifdef CONFIG_PPC32
+SYSCALL_DEFINE6(ppc_fallocate,
+ int, fd, int, mode,
+ u32, offset1, u32, offset2, u32, len1, u32, len2)
+{
+ return ksys_fallocate(fd, mode,
+ merge_64(offset1, offset2),
+ merge_64(len1, len2));
+}
+#endif
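For illustration (not part of the patch): the wrappers above rely on merge_64() reassembling a 64-bit value from the high/low register pair, matching the open-coded shifts they replace; a sketch of the expected behaviour, with the real helper defined elsewhere in this series:

	static inline u64 merge_64_sketch(u32 high, u32 low)
	{
		return ((u64)high << 32) | low;
	}

	/* e.g. a 32-bit pread64 at byte offset 0x123456789 arrives as
	 * pos1 = 0x00000001, pos2 = 0x23456789 in consecutive registers */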
diff --git a/arch/powerpc/kernel/syscall.c b/arch/powerpc/kernel/syscall.c
new file mode 100644
index 000000000000..18b9d325395f
--- /dev/null
+++ b/arch/powerpc/kernel/syscall.c
@@ -0,0 +1,189 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/compat.h>
+#include <linux/context_tracking.h>
+#include <linux/randomize_kstack.h>
+
+#include <asm/interrupt.h>
+#include <asm/kup.h>
+#include <asm/syscall.h>
+#include <asm/time.h>
+#include <asm/tm.h>
+#include <asm/unistd.h>
+
+
+/* Has to run notrace because it is entered not completely "reconciled" */
+notrace long system_call_exception(struct pt_regs *regs, unsigned long r0)
+{
+ long ret;
+ syscall_fn f;
+
+ kuap_lock();
+
+ add_random_kstack_offset();
+
+ if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
+ BUG_ON(irq_soft_mask_return() != IRQS_ALL_DISABLED);
+
+ trace_hardirqs_off(); /* finish reconciling */
+
+ CT_WARN_ON(ct_state() == CONTEXT_KERNEL);
+ user_exit_irqoff();
+
+ BUG_ON(regs_is_unrecoverable(regs));
+ BUG_ON(!(regs->msr & MSR_PR));
+ BUG_ON(arch_irq_disabled_regs(regs));
+
+#ifdef CONFIG_PPC_PKEY
+ if (mmu_has_feature(MMU_FTR_PKEY)) {
+ unsigned long amr, iamr;
+ bool flush_needed = false;
+ /*
+ * When entering from userspace we mostly have the AMR/IAMR
+ * different from kernel default values. Hence don't compare.
+ */
+ amr = mfspr(SPRN_AMR);
+ iamr = mfspr(SPRN_IAMR);
+ regs->amr = amr;
+ regs->iamr = iamr;
+ if (mmu_has_feature(MMU_FTR_BOOK3S_KUAP)) {
+ mtspr(SPRN_AMR, AMR_KUAP_BLOCKED);
+ flush_needed = true;
+ }
+ if (mmu_has_feature(MMU_FTR_BOOK3S_KUEP)) {
+ mtspr(SPRN_IAMR, AMR_KUEP_BLOCKED);
+ flush_needed = true;
+ }
+ if (flush_needed)
+ isync();
+ } else
+#endif
+ kuap_assert_locked();
+
+ booke_restore_dbcr0();
+
+ account_cpu_user_entry();
+
+ account_stolen_time();
+
+ /*
+ * This is not required for the syscall exit path, but makes the
+ * stack frame look nicer. If this was initialised in the first stack
+ * frame, or if the unwinder was taught the first stack frame always
+ * returns to user with IRQS_ENABLED, this store could be avoided!
+ */
+ irq_soft_mask_regs_set_state(regs, IRQS_ENABLED);
+
+ /*
+ * If system call is called with TM active, set _TIF_RESTOREALL to
+ * prevent RFSCV being used to return to userspace, because POWER9
+ * TM implementation has problems with this instruction returning to
+ * transactional state. Final register values are not relevant because
+ * the transaction will be aborted upon return anyway. Or in the case
+ * of unsupported_scv SIGILL fault, the return state does not much
+ * matter because it's an edge case.
+ */
+ if (IS_ENABLED(CONFIG_PPC_TRANSACTIONAL_MEM) &&
+ unlikely(MSR_TM_TRANSACTIONAL(regs->msr)))
+ set_bits(_TIF_RESTOREALL, &current_thread_info()->flags);
+
+ /*
+ * If the system call was made with a transaction active, doom it and
+ * return without performing the system call. Unless it was an
+ * unsupported scv vector, in which case it's treated like an illegal
+ * instruction.
+ */
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ if (unlikely(MSR_TM_TRANSACTIONAL(regs->msr)) &&
+ !trap_is_unsupported_scv(regs)) {
+ /* Enable TM in the kernel, and disable EE (for scv) */
+ hard_irq_disable();
+ mtmsr(mfmsr() | MSR_TM);
+
+ /* tabort, this dooms the transaction, nothing else */
+ asm volatile(".long 0x7c00071d | ((%0) << 16)"
+ :: "r"(TM_CAUSE_SYSCALL|TM_CAUSE_PERSISTENT));
+
+ /*
+ * Userspace will never see the return value. Execution will
+ * resume after the tbegin. of the aborted transaction with the
+ * checkpointed register state. A context switch could occur
+ * or signal delivered to the process before resuming the
+ * doomed transaction context, but that should all be handled
+ * as expected.
+ */
+ return -ENOSYS;
+ }
+#endif // CONFIG_PPC_TRANSACTIONAL_MEM
+
+ local_irq_enable();
+
+ if (unlikely(read_thread_flags() & _TIF_SYSCALL_DOTRACE)) {
+ if (unlikely(trap_is_unsupported_scv(regs))) {
+ /* Unsupported scv vector */
+ _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
+ return regs->gpr[3];
+ }
+ /*
+ * We use the return value of do_syscall_trace_enter() as the
+ * syscall number. If the syscall was rejected for any reason
+ * do_syscall_trace_enter() returns an invalid syscall number
+ * and the test against NR_syscalls will fail and the return
+ * value to be used is in regs->gpr[3].
+ */
+ r0 = do_syscall_trace_enter(regs);
+ if (unlikely(r0 >= NR_syscalls))
+ return regs->gpr[3];
+
+ } else if (unlikely(r0 >= NR_syscalls)) {
+ if (unlikely(trap_is_unsupported_scv(regs))) {
+ /* Unsupported scv vector */
+ _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
+ return regs->gpr[3];
+ }
+ return -ENOSYS;
+ }
+
+ /* May be faster to do array_index_nospec? */
+ barrier_nospec();
+
+#ifdef CONFIG_ARCH_HAS_SYSCALL_WRAPPER
+ // No COMPAT if we have SYSCALL_WRAPPER, see Kconfig
+ f = (void *)sys_call_table[r0];
+ ret = f(regs);
+#else
+ if (unlikely(is_compat_task())) {
+ unsigned long r3, r4, r5, r6, r7, r8;
+
+ f = (void *)compat_sys_call_table[r0];
+
+ r3 = regs->gpr[3] & 0x00000000ffffffffULL;
+ r4 = regs->gpr[4] & 0x00000000ffffffffULL;
+ r5 = regs->gpr[5] & 0x00000000ffffffffULL;
+ r6 = regs->gpr[6] & 0x00000000ffffffffULL;
+ r7 = regs->gpr[7] & 0x00000000ffffffffULL;
+ r8 = regs->gpr[8] & 0x00000000ffffffffULL;
+
+ ret = f(r3, r4, r5, r6, r7, r8);
+ } else {
+ f = (void *)sys_call_table[r0];
+
+ ret = f(regs->gpr[3], regs->gpr[4], regs->gpr[5],
+ regs->gpr[6], regs->gpr[7], regs->gpr[8]);
+ }
+#endif
+
+ /*
+ * Ultimately, this value will get limited by KSTACK_OFFSET_MAX(),
+ * so the maximum stack offset is 1k bytes (10 bits).
+ *
+ * The actual entropy will be further reduced by the compiler when
+ * applying stack alignment constraints: the powerpc architecture
+ * may have two kinds of stack alignment (16-bytes and 8-bytes).
+ *
+ * So the resulting 6 or 7 bits of entropy is seen in SP[9:4] or SP[9:3].
+ */
+ choose_random_kstack_offset(mftb());
+
+ return ret;
+}
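A worked example of the entropy comment above (illustration only, assuming the generic 10-bit KSTACK_OFFSET_MAX() mask):

	unsigned int raw   = mftb() & 0x3ff;	/* at most 1023 bytes of offset          */
	unsigned int off16 = raw & ~0xfu;	/* 16-byte alignment: 64 values, SP[9:4]  */
	unsigned int off8  = raw & ~0x7u;	/* 8-byte alignment: 128 values, SP[9:3]  */
	/* 64 values give 6 bits of entropy, 128 values give 7 bits */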
diff --git a/arch/powerpc/kernel/syscalls.c b/arch/powerpc/kernel/syscalls.c
index 078608ec2e92..68ebb23a5af4 100644
--- a/arch/powerpc/kernel/syscalls.c
+++ b/arch/powerpc/kernel/syscalls.c
@@ -35,26 +35,18 @@
#include <asm/syscalls.h>
#include <asm/time.h>
#include <asm/unistd.h>
-#include <asm/asm-prototypes.h>
-static inline long do_mmap2(unsigned long addr, size_t len,
- unsigned long prot, unsigned long flags,
- unsigned long fd, unsigned long off, int shift)
+static long do_mmap2(unsigned long addr, size_t len,
+ unsigned long prot, unsigned long flags,
+ unsigned long fd, unsigned long off, int shift)
{
- long ret = -EINVAL;
-
if (!arch_validate_prot(prot, addr))
- goto out;
+ return -EINVAL;
- if (shift) {
- if (off & ((1 << shift) - 1))
- goto out;
- off >>= shift;
- }
+ if (!IS_ALIGNED(off, 1 << shift))
+ return -EINVAL;
- ret = ksys_mmap_pgoff(addr, len, prot, flags, fd, off);
-out:
- return ret;
+ return ksys_mmap_pgoff(addr, len, prot, flags, fd, off >> shift);
}
SYSCALL_DEFINE6(mmap2, unsigned long, addr, size_t, len,
@@ -64,6 +56,16 @@ SYSCALL_DEFINE6(mmap2, unsigned long, addr, size_t, len,
return do_mmap2(addr, len, prot, flags, fd, pgoff, PAGE_SHIFT-12);
}
+#ifdef CONFIG_COMPAT
+COMPAT_SYSCALL_DEFINE6(mmap2,
+ unsigned long, addr, size_t, len,
+ unsigned long, prot, unsigned long, flags,
+ unsigned long, fd, unsigned long, off_4k)
+{
+ return do_mmap2(addr, len, prot, flags, fd, off_4k, PAGE_SHIFT-12);
+}
+#endif
+
SYSCALL_DEFINE6(mmap, unsigned long, addr, size_t, len,
unsigned long, prot, unsigned long, flags,
unsigned long, fd, off_t, offset)
@@ -71,58 +73,47 @@ SYSCALL_DEFINE6(mmap, unsigned long, addr, size_t, len,
return do_mmap2(addr, len, prot, flags, fd, offset, PAGE_SHIFT);
}
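A quick worked example of the shift handling in do_mmap2() above (illustration only, assuming a hypothetical 64K-page configuration): mmap2 passes its offset in 4 KiB units, so with PAGE_SHIFT = 16 the shift is PAGE_SHIFT - 12 = 4; off_4k = 0x20 (128 KiB) passes the IS_ALIGNED(off, 1 << 4) check and becomes page offset 0x20 >> 4 = 0x2, while off_4k = 0x21 is rejected with -EINVAL. With 4 KiB pages the shift is 0 and offsets are used unchanged.

	/* 64K pages: shift = PAGE_SHIFT - 12 = 4                */
	/* off_4k = 0x20 -> pgoff 0x2;  off_4k = 0x21 -> -EINVAL */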
-#ifdef CONFIG_PPC32
-/*
- * Due to some executables calling the wrong select we sometimes
- * get wrong args. This determines how the args are being passed
- * (a single ptr to them all args passed) then calls
- * sys_select() with the appropriate args. -- Cort
- */
-int
-ppc_select(int n, fd_set __user *inp, fd_set __user *outp, fd_set __user *exp, struct __kernel_old_timeval __user *tvp)
-{
- if ( (unsigned long)n >= 4096 )
- {
- unsigned long __user *buffer = (unsigned long __user *)n;
- if (!access_ok(buffer, 5*sizeof(unsigned long))
- || __get_user(n, buffer)
- || __get_user(inp, ((fd_set __user * __user *)(buffer+1)))
- || __get_user(outp, ((fd_set __user * __user *)(buffer+2)))
- || __get_user(exp, ((fd_set __user * __user *)(buffer+3)))
- || __get_user(tvp, ((struct __kernel_old_timeval __user * __user *)(buffer+4))))
- return -EFAULT;
- }
- return sys_select(n, inp, outp, exp, tvp);
-}
-#endif
-
#ifdef CONFIG_PPC64
-long ppc64_personality(unsigned long personality)
+static long do_ppc64_personality(unsigned long personality)
{
long ret;
if (personality(current->personality) == PER_LINUX32
&& personality(personality) == PER_LINUX)
personality = (personality & ~PER_MASK) | PER_LINUX32;
- ret = sys_personality(personality);
+ ret = ksys_personality(personality);
if (personality(ret) == PER_LINUX32)
ret = (ret & ~PER_MASK) | PER_LINUX;
return ret;
}
-#endif
-long ppc_fadvise64_64(int fd, int advice, u32 offset_high, u32 offset_low,
- u32 len_high, u32 len_low)
+SYSCALL_DEFINE1(ppc64_personality, unsigned long, personality)
+{
+ return do_ppc64_personality(personality);
+}
+
+#ifdef CONFIG_COMPAT
+COMPAT_SYSCALL_DEFINE1(ppc64_personality, unsigned long, personality)
+{
+ return do_ppc64_personality(personality);
+}
+#endif /* CONFIG_COMPAT */
+#endif /* CONFIG_PPC64 */
+
+SYSCALL_DEFINE6(ppc_fadvise64_64,
+ int, fd, int, advice, u32, offset_high, u32, offset_low,
+ u32, len_high, u32, len_low)
{
- return ksys_fadvise64_64(fd, (u64)offset_high << 32 | offset_low,
- (u64)len_high << 32 | len_low, advice);
+ return ksys_fadvise64_64(fd, merge_64(offset_high, offset_low),
+ merge_64(len_high, len_low), advice);
}
SYSCALL_DEFINE0(switch_endian)
{
struct thread_info *ti;
- current->thread.regs->msr ^= MSR_LE;
+ regs_set_return_msr(current->thread.regs,
+ current->thread.regs->msr ^ MSR_LE);
/*
* Set TIF_RESTOREALL so that r3 isn't clobbered on return to
diff --git a/arch/powerpc/kernel/syscalls/Makefile b/arch/powerpc/kernel/syscalls/Makefile
index 27b48954808d..9d7bd81510b8 100644
--- a/arch/powerpc/kernel/syscalls/Makefile
+++ b/arch/powerpc/kernel/syscalls/Makefile
@@ -2,62 +2,47 @@
kapi := arch/$(SRCARCH)/include/generated/asm
uapi := arch/$(SRCARCH)/include/generated/uapi/asm
-_dummy := $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)') \
- $(shell [ -d '$(kapi)' ] || mkdir -p '$(kapi)')
+$(shell mkdir -p $(uapi) $(kapi))
-syscall := $(srctree)/$(src)/syscall.tbl
-syshdr := $(srctree)/$(src)/syscallhdr.sh
-systbl := $(srctree)/$(src)/syscalltbl.sh
+syscall := $(src)/syscall.tbl
+syshdr := $(srctree)/scripts/syscallhdr.sh
+systbl := $(srctree)/scripts/syscalltbl.sh
quiet_cmd_syshdr = SYSHDR $@
- cmd_syshdr = $(CONFIG_SHELL) '$(syshdr)' '$<' '$@' \
- '$(syshdr_abis_$(basetarget))' \
- '$(syshdr_pfx_$(basetarget))' \
- '$(syshdr_offset_$(basetarget))'
+ cmd_syshdr = $(CONFIG_SHELL) $(syshdr) --emit-nr --abis $(abis) $< $@
quiet_cmd_systbl = SYSTBL $@
- cmd_systbl = $(CONFIG_SHELL) '$(systbl)' '$<' '$@' \
- '$(systbl_abis_$(basetarget))' \
- '$(systbl_abi_$(basetarget))' \
- '$(systbl_offset_$(basetarget))'
+ cmd_systbl = $(CONFIG_SHELL) $(systbl) --abis $(abis) $< $@
-syshdr_abis_unistd_32 := common,nospu,32
-$(uapi)/unistd_32.h: $(syscall) $(syshdr)
+$(uapi)/unistd_32.h: abis := common,nospu,32
+$(uapi)/unistd_32.h: $(syscall) $(syshdr) FORCE
$(call if_changed,syshdr)
-syshdr_abis_unistd_64 := common,nospu,64
-$(uapi)/unistd_64.h: $(syscall) $(syshdr)
+$(uapi)/unistd_64.h: abis := common,nospu,64
+$(uapi)/unistd_64.h: $(syscall) $(syshdr) FORCE
$(call if_changed,syshdr)
-systbl_abis_syscall_table_32 := common,nospu,32
-systbl_abi_syscall_table_32 := 32
-$(kapi)/syscall_table_32.h: $(syscall) $(systbl)
+$(kapi)/syscall_table_32.h: abis := common,nospu,32
+$(kapi)/syscall_table_32.h: $(syscall) $(systbl) FORCE
$(call if_changed,systbl)
-systbl_abis_syscall_table_64 := common,nospu,64
-systbl_abi_syscall_table_64 := 64
-$(kapi)/syscall_table_64.h: $(syscall) $(systbl)
+$(kapi)/syscall_table_64.h: abis := common,nospu,64
+$(kapi)/syscall_table_64.h: $(syscall) $(systbl) FORCE
$(call if_changed,systbl)
-systbl_abis_syscall_table_c32 := common,nospu,32
-systbl_abi_syscall_table_c32 := c32
-$(kapi)/syscall_table_c32.h: $(syscall) $(systbl)
- $(call if_changed,systbl)
-
-systbl_abis_syscall_table_spu := common,spu
-systbl_abi_syscall_table_spu := spu
-$(kapi)/syscall_table_spu.h: $(syscall) $(systbl)
+$(kapi)/syscall_table_spu.h: abis := common,spu
+$(kapi)/syscall_table_spu.h: $(syscall) $(systbl) FORCE
$(call if_changed,systbl)
uapisyshdr-y += unistd_32.h unistd_64.h
kapisyshdr-y += syscall_table_32.h \
syscall_table_64.h \
- syscall_table_c32.h \
syscall_table_spu.h
-targets += $(uapisyshdr-y) $(kapisyshdr-y)
+uapisyshdr-y := $(addprefix $(uapi)/, $(uapisyshdr-y))
+kapisyshdr-y := $(addprefix $(kapi)/, $(kapisyshdr-y))
+targets += $(addprefix ../../../../, $(uapisyshdr-y) $(kapisyshdr-y))
PHONY += all
-all: $(addprefix $(uapi)/,$(uapisyshdr-y))
-all: $(addprefix $(kapi)/,$(kapisyshdr-y))
+all: $(uapisyshdr-y) $(kapisyshdr-y)
@:
diff --git a/arch/powerpc/kernel/syscalls/syscall.tbl b/arch/powerpc/kernel/syscalls/syscall.tbl
index 35b61bfc1b1a..a0be127475b1 100644
--- a/arch/powerpc/kernel/syscalls/syscall.tbl
+++ b/arch/powerpc/kernel/syscalls/syscall.tbl
@@ -9,7 +9,7 @@
#
0 nospu restart_syscall sys_restart_syscall
1 nospu exit sys_exit
-2 nospu fork ppc_fork
+2 nospu fork sys_fork
3 common read sys_read
4 common write sys_write
5 common open sys_open compat_sys_open
@@ -32,7 +32,7 @@
18 spu oldstat sys_ni_syscall
19 common lseek sys_lseek compat_sys_lseek
20 common getpid sys_getpid
-21 nospu mount sys_mount compat_sys_mount
+21 nospu mount sys_mount
22 32 umount sys_oldumount
22 64 umount sys_ni_syscall
22 spu umount sys_ni_syscall
@@ -110,7 +110,7 @@
79 common settimeofday sys_settimeofday compat_sys_settimeofday
80 common getgroups sys_getgroups
81 common setgroups sys_setgroups
-82 32 select ppc_select sys_ni_syscall
+82 32 select sys_old_select compat_sys_old_select
82 64 select sys_ni_syscall
82 spu select sys_ni_syscall
83 common symlink sys_symlink
@@ -158,7 +158,7 @@
119 32 sigreturn sys_sigreturn compat_sys_sigreturn
119 64 sigreturn sys_ni_syscall
119 spu sigreturn sys_ni_syscall
-120 nospu clone ppc_clone
+120 nospu clone sys_clone
121 common setdomainname sys_setdomainname
122 common uname sys_newuname
123 common modify_ldt sys_ni_syscall
@@ -176,11 +176,11 @@
131 nospu quotactl sys_quotactl
132 common getpgid sys_getpgid
133 common fchdir sys_fchdir
-134 common bdflush sys_bdflush
+134 common bdflush sys_ni_syscall
135 common sysfs sys_sysfs
-136 32 personality sys_personality ppc64_personality
-136 64 personality ppc64_personality
-136 spu personality ppc64_personality
+136 32 personality sys_personality compat_sys_ppc64_personality
+136 64 personality sys_ppc64_personality
+136 spu personality sys_ppc64_personality
137 common afs_syscall sys_ni_syscall
138 common setfsuid sys_setfsuid
139 common setfsgid sys_setfsgid
@@ -189,11 +189,11 @@
142 common _newselect sys_select compat_sys_select
143 common flock sys_flock
144 common msync sys_msync
-145 common readv sys_readv compat_sys_readv
-146 common writev sys_writev compat_sys_writev
+145 common readv sys_readv
+146 common writev sys_writev
147 common getsid sys_getsid
148 common fdatasync sys_fdatasync
-149 nospu _sysctl sys_sysctl compat_sys_sysctl
+149 nospu _sysctl sys_ni_syscall
150 common mlock sys_mlock
151 common munlock sys_munlock
152 common mlockall sys_mlockall
@@ -228,8 +228,10 @@
176 64 rt_sigtimedwait sys_rt_sigtimedwait
177 nospu rt_sigqueueinfo sys_rt_sigqueueinfo compat_sys_rt_sigqueueinfo
178 nospu rt_sigsuspend sys_rt_sigsuspend compat_sys_rt_sigsuspend
-179 common pread64 sys_pread64 compat_sys_pread64
-180 common pwrite64 sys_pwrite64 compat_sys_pwrite64
+179 32 pread64 sys_ppc_pread64 compat_sys_ppc_pread64
+179 64 pread64 sys_pread64
+180 32 pwrite64 sys_ppc_pwrite64 compat_sys_ppc_pwrite64
+180 64 pwrite64 sys_pwrite64
181 common chown sys_chown
182 common getcwd sys_getcwd
183 common capget sys_capget
@@ -240,12 +242,13 @@
186 spu sendfile sys_sendfile64
187 common getpmsg sys_ni_syscall
188 common putpmsg sys_ni_syscall
-189 nospu vfork ppc_vfork
+189 nospu vfork sys_vfork
190 common ugetrlimit sys_getrlimit compat_sys_getrlimit
-191 common readahead sys_readahead compat_sys_readahead
+191 32 readahead sys_ppc_readahead compat_sys_ppc_readahead
+191 64 readahead sys_readahead
192 32 mmap2 sys_mmap2 compat_sys_mmap2
-193 32 truncate64 sys_truncate64 compat_sys_truncate64
-194 32 ftruncate64 sys_ftruncate64 compat_sys_ftruncate64
+193 32 truncate64 sys_ppc_truncate64 compat_sys_ppc_truncate64
+194 32 ftruncate64 sys_ppc_ftruncate64 compat_sys_ppc_ftruncate64
195 32 stat64 sys_stat64
196 32 lstat64 sys_lstat64
197 32 fstat64 sys_fstat64
@@ -288,7 +291,8 @@
230 common io_submit sys_io_submit compat_sys_io_submit
231 common io_cancel sys_io_cancel
232 nospu set_tid_address sys_set_tid_address
-233 common fadvise64 sys_fadvise64 ppc32_fadvise64
+233 32 fadvise64 sys_ppc32_fadvise64 compat_sys_ppc32_fadvise64
+233 64 fadvise64 sys_fadvise64
234 nospu exit_group sys_exit_group
235 nospu lookup_dcookie sys_lookup_dcookie compat_sys_lookup_dcookie
236 common epoll_create sys_epoll_create
@@ -316,26 +320,24 @@
248 32 clock_nanosleep sys_clock_nanosleep_time32
248 64 clock_nanosleep sys_clock_nanosleep
248 spu clock_nanosleep sys_clock_nanosleep
-249 32 swapcontext ppc_swapcontext ppc32_swapcontext
-249 64 swapcontext ppc64_swapcontext
-249 spu swapcontext sys_ni_syscall
+249 nospu swapcontext sys_swapcontext compat_sys_swapcontext
250 common tgkill sys_tgkill
251 32 utimes sys_utimes_time32
251 64 utimes sys_utimes
251 spu utimes sys_utimes
252 common statfs64 sys_statfs64 compat_sys_statfs64
253 common fstatfs64 sys_fstatfs64 compat_sys_fstatfs64
-254 32 fadvise64_64 ppc_fadvise64_64
+254 32 fadvise64_64 sys_ppc_fadvise64_64
254 spu fadvise64_64 sys_ni_syscall
255 common rtas sys_rtas
256 32 sys_debug_setcontext sys_debug_setcontext sys_ni_syscall
256 64 sys_debug_setcontext sys_ni_syscall
256 spu sys_debug_setcontext sys_ni_syscall
# 257 reserved for vserver
-258 nospu migrate_pages sys_migrate_pages compat_sys_migrate_pages
-259 nospu mbind sys_mbind compat_sys_mbind
-260 nospu get_mempolicy sys_get_mempolicy compat_sys_get_mempolicy
-261 nospu set_mempolicy sys_set_mempolicy compat_sys_set_mempolicy
+258 nospu migrate_pages sys_migrate_pages
+259 nospu mbind sys_mbind
+260 nospu get_mempolicy sys_get_mempolicy
+261 nospu set_mempolicy sys_set_mempolicy
262 nospu mq_open sys_mq_open compat_sys_mq_open
263 nospu mq_unlink sys_mq_unlink
264 32 mq_timedsend sys_mq_timedsend_time32
@@ -363,7 +365,7 @@
282 common unshare sys_unshare
283 common splice sys_splice
284 common tee sys_tee
-285 common vmsplice sys_vmsplice compat_sys_vmsplice
+285 common vmsplice sys_vmsplice
286 common openat sys_openat compat_sys_openat
287 common mkdirat sys_mkdirat
288 common mknodat sys_mknodat
@@ -383,7 +385,7 @@
298 common faccessat sys_faccessat
299 common get_robust_list sys_get_robust_list compat_sys_get_robust_list
300 common set_robust_list sys_set_robust_list compat_sys_set_robust_list
-301 common move_pages sys_move_pages compat_sys_move_pages
+301 common move_pages sys_move_pages
302 common getcpu sys_getcpu
303 nospu epoll_pwait sys_epoll_pwait compat_sys_epoll_pwait
304 32 utimensat sys_utimensat_time32
@@ -392,8 +394,11 @@
305 common signalfd sys_signalfd compat_sys_signalfd
306 common timerfd_create sys_timerfd_create
307 common eventfd sys_eventfd
-308 common sync_file_range2 sys_sync_file_range2 compat_sys_sync_file_range2
-309 nospu fallocate sys_fallocate compat_sys_fallocate
+308 32 sync_file_range2 sys_ppc_sync_file_range2 compat_sys_ppc_sync_file_range2
+308 64 sync_file_range2 sys_sync_file_range2
+308 spu sync_file_range2 sys_sync_file_range2
+309 32 fallocate sys_ppc_fallocate compat_sys_fallocate
+309 64 fallocate sys_fallocate
310 nospu subpage_prot sys_subpage_prot
311 32 timerfd_settime sys_timerfd_settime32
311 64 timerfd_settime sys_timerfd_settime
@@ -427,8 +432,8 @@
336 common recv sys_recv compat_sys_recv
337 common recvfrom sys_recvfrom compat_sys_recvfrom
338 common shutdown sys_shutdown
-339 common setsockopt sys_setsockopt compat_sys_setsockopt
-340 common getsockopt sys_getsockopt compat_sys_getsockopt
+339 common setsockopt sys_setsockopt sys_setsockopt
+340 common getsockopt sys_getsockopt sys_getsockopt
341 common sendmsg sys_sendmsg compat_sys_sendmsg
342 common recvmsg sys_recvmsg compat_sys_recvmsg
343 32 recvmmsg sys_recvmmsg_time32 compat_sys_recvmmsg_time32
@@ -443,8 +448,8 @@
348 common syncfs sys_syncfs
349 common sendmmsg sys_sendmmsg compat_sys_sendmmsg
350 common setns sys_setns
-351 nospu process_vm_readv sys_process_vm_readv compat_sys_process_vm_readv
-352 nospu process_vm_writev sys_process_vm_writev compat_sys_process_vm_writev
+351 nospu process_vm_readv sys_process_vm_readv
+352 nospu process_vm_writev sys_process_vm_writev
353 nospu finit_module sys_finit_module
354 nospu kcmp sys_kcmp
355 common sched_setattr sys_sched_setattr
@@ -456,7 +461,7 @@
361 common bpf sys_bpf
362 nospu execveat sys_execveat compat_sys_execveat
363 32 switch_endian sys_ni_syscall
-363 64 switch_endian ppc_switch_endian
+363 64 switch_endian sys_switch_endian
363 spu switch_endian sys_ni_syscall
364 common userfaultfd sys_userfaultfd
365 common membarrier sys_membarrier
@@ -516,6 +521,19 @@
432 common fsmount sys_fsmount
433 common fspick sys_fspick
434 common pidfd_open sys_pidfd_open
-435 nospu clone3 ppc_clone3
+435 nospu clone3 sys_clone3
+436 common close_range sys_close_range
437 common openat2 sys_openat2
438 common pidfd_getfd sys_pidfd_getfd
+439 common faccessat2 sys_faccessat2
+440 common process_madvise sys_process_madvise
+441 common epoll_pwait2 sys_epoll_pwait2 compat_sys_epoll_pwait2
+442 common mount_setattr sys_mount_setattr
+443 common quotactl_fd sys_quotactl_fd
+444 common landlock_create_ruleset sys_landlock_create_ruleset
+445 common landlock_add_rule sys_landlock_add_rule
+446 common landlock_restrict_self sys_landlock_restrict_self
+# 447 reserved for memfd_secret
+448 common process_mrelease sys_process_mrelease
+449 common futex_waitv sys_futex_waitv
+450 nospu set_mempolicy_home_node sys_set_mempolicy_home_node
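
Each syscall.tbl row reads: number, ABI (common/nospu/32/64/spu), name, native entry point, optional compat entry point. The changes above drop the old ppc_* assembly entry stubs in favour of plain sys_* handlers, and the rows that stay 32-bit specific (pread64, readahead, truncate64, fadvise64, sync_file_range2, ...) now point at sys_ppc_* C wrappers that reassemble a 64-bit argument from a 32-bit register pair. A minimal sketch of such a wrapper is below; the in-tree versions live in sys_ppc32.c and the helper name here is illustrative.

/*
 * Sketch only: a wrapper for a 32-bit caller that passes a 64-bit file
 * offset in two registers (high word first). The real wrappers in
 * arch/powerpc/kernel/sys_ppc32.c may differ in detail.
 */
#include <linux/syscalls.h>

static inline u64 ppc_merge_64(u32 high, u32 low)	/* illustrative helper */
{
	return ((u64)high << 32) | low;
}

SYSCALL_DEFINE6(ppc_pread64, unsigned int, fd, char __user *, ubuf,
		size_t, count, u32, reg6, u32, pos_high, u32, pos_low)
{
	/* reg6 only pads the 64-bit offset onto an aligned register pair */
	return ksys_pread64(fd, ubuf, count, ppc_merge_64(pos_high, pos_low));
}
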
diff --git a/arch/powerpc/kernel/syscalls/syscallhdr.sh b/arch/powerpc/kernel/syscalls/syscallhdr.sh
deleted file mode 100644
index c0a9a32937f1..000000000000
--- a/arch/powerpc/kernel/syscalls/syscallhdr.sh
+++ /dev/null
@@ -1,37 +0,0 @@
-#!/bin/sh
-# SPDX-License-Identifier: GPL-2.0
-
-in="$1"
-out="$2"
-my_abis=`echo "($3)" | tr ',' '|'`
-prefix="$4"
-offset="$5"
-
-fileguard=_UAPI_ASM_POWERPC_`basename "$out" | sed \
- -e 'y/abcdefghijklmnopqrstuvwxyz/ABCDEFGHIJKLMNOPQRSTUVWXYZ/' \
- -e 's/[^A-Z0-9_]/_/g' -e 's/__/_/g'`
-grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | (
- printf "#ifndef %s\n" "${fileguard}"
- printf "#define %s\n" "${fileguard}"
- printf "\n"
-
- nxt=0
- while read nr abi name entry compat ; do
- if [ -z "$offset" ]; then
- printf "#define __NR_%s%s\t%s\n" \
- "${prefix}" "${name}" "${nr}"
- else
- printf "#define __NR_%s%s\t(%s + %s)\n" \
- "${prefix}" "${name}" "${offset}" "${nr}"
- fi
- nxt=$((nr+1))
- done
-
- printf "\n"
- printf "#ifdef __KERNEL__\n"
- printf "#define __NR_syscalls\t%s\n" "${nxt}"
- printf "#endif\n"
- printf "\n"
- printf "#endif /* %s */" "${fileguard}"
- printf "\n"
-) > "$out"
diff --git a/arch/powerpc/kernel/syscalls/syscalltbl.sh b/arch/powerpc/kernel/syscalls/syscalltbl.sh
deleted file mode 100644
index f7393a7b18aa..000000000000
--- a/arch/powerpc/kernel/syscalls/syscalltbl.sh
+++ /dev/null
@@ -1,36 +0,0 @@
-#!/bin/sh
-# SPDX-License-Identifier: GPL-2.0
-
-in="$1"
-out="$2"
-my_abis=`echo "($3)" | tr ',' '|'`
-my_abi="$4"
-offset="$5"
-
-emit() {
- t_nxt="$1"
- t_nr="$2"
- t_entry="$3"
-
- while [ $t_nxt -lt $t_nr ]; do
- printf "__SYSCALL(%s,sys_ni_syscall)\n" "${t_nxt}"
- t_nxt=$((t_nxt+1))
- done
- printf "__SYSCALL(%s,%s)\n" "${t_nxt}" "${t_entry}"
-}
-
-grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | (
- nxt=0
- if [ -z "$offset" ]; then
- offset=0
- fi
-
- while read nr abi name entry compat ; do
- if [ "$my_abi" = "c32" ] && [ ! -z "$compat" ]; then
- emit $((nxt+offset)) $((nr+offset)) $compat
- else
- emit $((nxt+offset)) $((nr+offset)) $entry
- fi
- nxt=$((nr+1))
- done
-) > "$out"
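
The per-arch generator removed above walked the sorted table and printed one __SYSCALL(nr, entry) line per slot, padding holes with sys_ni_syscall and taking the compat column when building the c32 table; that job is now handled by the generic table-generation scripts under scripts/. For a table fragment with rows 0, 1 and 3 its output would have been, roughly:

/* illustrative output for input rows 0, 1 and 3 (nr 2 is a hole) */
__SYSCALL(0,sys_restart_syscall)
__SYSCALL(1,sys_exit)
__SYSCALL(2,sys_ni_syscall)
__SYSCALL(3,sys_read)
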
diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
index 80a676da11cb..ef9a61718940 100644
--- a/arch/powerpc/kernel/sysfs.c
+++ b/arch/powerpc/kernel/sysfs.c
@@ -9,16 +9,17 @@
#include <linux/nodemask.h>
#include <linux/cpumask.h>
#include <linux/notifier.h>
+#include <linux/of.h>
#include <asm/current.h>
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/hvcall.h>
-#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/smp.h>
#include <asm/pmc.h>
#include <asm/firmware.h>
+#include <asm/idle.h>
#include <asm/svm.h>
#include "cacheinfo.h"
@@ -31,29 +32,27 @@
static DEFINE_PER_CPU(struct cpu, cpu_devices);
-/*
- * SMT snooze delay stuff, 64-bit only for now
- */
-
#ifdef CONFIG_PPC64
-/* Time in microseconds we delay before sleeping in the idle loop */
-static DEFINE_PER_CPU(long, smt_snooze_delay) = { 100 };
+/*
+ * Snooze delay has not been hooked up since 3fa8cad82b94 ("powerpc/pseries/cpuidle:
+ * smt-snooze-delay cleanup.") and has been broken even longer. As was foretold in
+ * 2014:
+ *
+ * "ppc64_util currently utilises it. Once we fix ppc64_util, propose to clean
+ * up the kernel code."
+ *
+ * powerpc-utils stopped using it as of 1.3.8. At some point in the future this
+ * code should be removed.
+ */
static ssize_t store_smt_snooze_delay(struct device *dev,
struct device_attribute *attr,
const char *buf,
size_t count)
{
- struct cpu *cpu = container_of(dev, struct cpu, dev);
- ssize_t ret;
- long snooze;
-
- ret = sscanf(buf, "%ld", &snooze);
- if (ret != 1)
- return -EINVAL;
-
- per_cpu(smt_snooze_delay, cpu->dev.id) = snooze;
+ pr_warn_once("%s (%d) stored to unsupported smt_snooze_delay, which has no effect.\n",
+ current->comm, current->pid);
return count;
}
@@ -61,9 +60,9 @@ static ssize_t show_smt_snooze_delay(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct cpu *cpu = container_of(dev, struct cpu, dev);
-
- return sprintf(buf, "%ld\n", per_cpu(smt_snooze_delay, cpu->dev.id));
+ pr_warn_once("%s (%d) read from unsupported smt_snooze_delay\n",
+ current->comm, current->pid);
+ return sprintf(buf, "100\n");
}
static DEVICE_ATTR(smt_snooze_delay, 0644, show_smt_snooze_delay,
@@ -71,23 +70,165 @@ static DEVICE_ATTR(smt_snooze_delay, 0644, show_smt_snooze_delay,
static int __init setup_smt_snooze_delay(char *str)
{
- unsigned int cpu;
- long snooze;
-
if (!cpu_has_feature(CPU_FTR_SMT))
return 1;
- snooze = simple_strtol(str, NULL, 10);
- for_each_possible_cpu(cpu)
- per_cpu(smt_snooze_delay, cpu) = snooze;
-
+ pr_warn("smt-snooze-delay command line option has no effect\n");
return 1;
}
__setup("smt-snooze-delay=", setup_smt_snooze_delay);
#endif /* CONFIG_PPC64 */
-#ifdef CONFIG_PPC_FSL_BOOK3E
+#define __SYSFS_SPRSETUP_READ_WRITE(NAME, ADDRESS, EXTRA) \
+static void read_##NAME(void *val) \
+{ \
+ *(unsigned long *)val = mfspr(ADDRESS); \
+} \
+static void write_##NAME(void *val) \
+{ \
+ EXTRA; \
+ mtspr(ADDRESS, *(unsigned long *)val); \
+}
+
+#define __SYSFS_SPRSETUP_SHOW_STORE(NAME) \
+static ssize_t show_##NAME(struct device *dev, \
+ struct device_attribute *attr, \
+ char *buf) \
+{ \
+ struct cpu *cpu = container_of(dev, struct cpu, dev); \
+ unsigned long val; \
+ smp_call_function_single(cpu->dev.id, read_##NAME, &val, 1); \
+ return sprintf(buf, "%lx\n", val); \
+} \
+static ssize_t __used \
+ store_##NAME(struct device *dev, struct device_attribute *attr, \
+ const char *buf, size_t count) \
+{ \
+ struct cpu *cpu = container_of(dev, struct cpu, dev); \
+ unsigned long val; \
+ int ret = sscanf(buf, "%lx", &val); \
+ if (ret != 1) \
+ return -EINVAL; \
+ smp_call_function_single(cpu->dev.id, write_##NAME, &val, 1); \
+ return count; \
+}
+
+#define SYSFS_PMCSETUP(NAME, ADDRESS) \
+ __SYSFS_SPRSETUP_READ_WRITE(NAME, ADDRESS, ppc_enable_pmcs()) \
+ __SYSFS_SPRSETUP_SHOW_STORE(NAME)
+#define SYSFS_SPRSETUP(NAME, ADDRESS) \
+ __SYSFS_SPRSETUP_READ_WRITE(NAME, ADDRESS, ) \
+ __SYSFS_SPRSETUP_SHOW_STORE(NAME)
+
+#define SYSFS_SPRSETUP_SHOW_STORE(NAME) \
+ __SYSFS_SPRSETUP_SHOW_STORE(NAME)
+
+#ifdef CONFIG_PPC64
+
+/*
+ * This is the system wide DSCR register default value. Any
+ * change to this default value through the sysfs interface
+ * will update all per cpu DSCR default values across the
+ * system stored in their respective PACA structures.
+ */
+static unsigned long dscr_default;
+
+/**
+ * read_dscr() - Fetch the cpu specific DSCR default
+ * @val: Returned cpu specific DSCR default value
+ *
+ * This function returns the per cpu DSCR default value
+ * for any cpu which is contained in it's PACA structure.
+ */
+static void read_dscr(void *val)
+{
+ *(unsigned long *)val = get_paca()->dscr_default;
+}
+
+
+/**
+ * write_dscr() - Update the cpu specific DSCR default
+ * @val: New cpu specific DSCR default value to update
+ *
+ * This function updates the per cpu DSCR default value
+ * for any cpu which is contained in it's PACA structure.
+ */
+static void write_dscr(void *val)
+{
+ get_paca()->dscr_default = *(unsigned long *)val;
+ if (!current->thread.dscr_inherit) {
+ current->thread.dscr = *(unsigned long *)val;
+ mtspr(SPRN_DSCR, *(unsigned long *)val);
+ }
+}
+
+SYSFS_SPRSETUP_SHOW_STORE(dscr);
+static DEVICE_ATTR(dscr, 0600, show_dscr, store_dscr);
+
+static void add_write_permission_dev_attr(struct device_attribute *attr)
+{
+ attr->attr.mode |= 0200;
+}
+
+/**
+ * show_dscr_default() - Fetch the system wide DSCR default
+ * @dev: Device structure
+ * @attr: Device attribute structure
+ * @buf: Interface buffer
+ *
+ * This function returns the system wide DSCR default value.
+ */
+static ssize_t show_dscr_default(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%lx\n", dscr_default);
+}
+
+/**
+ * store_dscr_default() - Update the system wide DSCR default
+ * @dev: Device structure
+ * @attr: Device attribute structure
+ * @buf: Interface buffer
+ * @count: Size of the update
+ *
+ * This function updates the system wide DSCR default value.
+ */
+static ssize_t __used store_dscr_default(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
+{
+ unsigned long val;
+ int ret = 0;
+
+ ret = sscanf(buf, "%lx", &val);
+ if (ret != 1)
+ return -EINVAL;
+ dscr_default = val;
+
+ on_each_cpu(write_dscr, &val, 1);
+
+ return count;
+}
+
+static DEVICE_ATTR(dscr_default, 0600,
+ show_dscr_default, store_dscr_default);
+
+static void __init sysfs_create_dscr_default(void)
+{
+ if (cpu_has_feature(CPU_FTR_DSCR)) {
+ int cpu;
+
+ dscr_default = spr_default_dscr;
+ for_each_possible_cpu(cpu)
+ paca_ptrs[cpu]->dscr_default = dscr_default;
+
+ device_create_file(cpu_subsys.dev_root, &dev_attr_dscr_default);
+ }
+}
+#endif /* CONFIG_PPC64 */
+
+#ifdef CONFIG_PPC_E500
#define MAX_BIT 63
static u64 pw20_wt;
@@ -407,84 +548,35 @@ void ppc_enable_pmcs(void)
}
EXPORT_SYMBOL(ppc_enable_pmcs);
-#define __SYSFS_SPRSETUP_READ_WRITE(NAME, ADDRESS, EXTRA) \
-static void read_##NAME(void *val) \
-{ \
- *(unsigned long *)val = mfspr(ADDRESS); \
-} \
-static void write_##NAME(void *val) \
-{ \
- EXTRA; \
- mtspr(ADDRESS, *(unsigned long *)val); \
-}
-
-#define __SYSFS_SPRSETUP_SHOW_STORE(NAME) \
-static ssize_t show_##NAME(struct device *dev, \
- struct device_attribute *attr, \
- char *buf) \
-{ \
- struct cpu *cpu = container_of(dev, struct cpu, dev); \
- unsigned long val; \
- smp_call_function_single(cpu->dev.id, read_##NAME, &val, 1); \
- return sprintf(buf, "%lx\n", val); \
-} \
-static ssize_t __used \
- store_##NAME(struct device *dev, struct device_attribute *attr, \
- const char *buf, size_t count) \
-{ \
- struct cpu *cpu = container_of(dev, struct cpu, dev); \
- unsigned long val; \
- int ret = sscanf(buf, "%lx", &val); \
- if (ret != 1) \
- return -EINVAL; \
- smp_call_function_single(cpu->dev.id, write_##NAME, &val, 1); \
- return count; \
-}
-
-#define SYSFS_PMCSETUP(NAME, ADDRESS) \
- __SYSFS_SPRSETUP_READ_WRITE(NAME, ADDRESS, ppc_enable_pmcs()) \
- __SYSFS_SPRSETUP_SHOW_STORE(NAME)
-#define SYSFS_SPRSETUP(NAME, ADDRESS) \
- __SYSFS_SPRSETUP_READ_WRITE(NAME, ADDRESS, ) \
- __SYSFS_SPRSETUP_SHOW_STORE(NAME)
-#define SYSFS_SPRSETUP_SHOW_STORE(NAME) \
- __SYSFS_SPRSETUP_SHOW_STORE(NAME)
/* Let's define all possible registers, we'll only hook up the ones
* that are implemented on the current processor
*/
-#if defined(CONFIG_PPC64)
-#define HAS_PPC_PMC_CLASSIC 1
-#define HAS_PPC_PMC_IBM 1
-#define HAS_PPC_PMC_PA6T 1
-#elif defined(CONFIG_PPC_BOOK3S_32)
+#ifdef CONFIG_PMU_SYSFS
+#if defined(CONFIG_PPC64) || defined(CONFIG_PPC_BOOK3S_32)
#define HAS_PPC_PMC_CLASSIC 1
#define HAS_PPC_PMC_IBM 1
-#define HAS_PPC_PMC_G4 1
#endif
+#ifdef CONFIG_PPC64
+#define HAS_PPC_PMC_PA6T 1
+#define HAS_PPC_PMC56 1
+#endif
-#ifdef HAS_PPC_PMC_CLASSIC
-SYSFS_PMCSETUP(mmcr0, SPRN_MMCR0);
-SYSFS_PMCSETUP(mmcr1, SPRN_MMCR1);
-SYSFS_PMCSETUP(pmc1, SPRN_PMC1);
-SYSFS_PMCSETUP(pmc2, SPRN_PMC2);
-SYSFS_PMCSETUP(pmc3, SPRN_PMC3);
-SYSFS_PMCSETUP(pmc4, SPRN_PMC4);
-SYSFS_PMCSETUP(pmc5, SPRN_PMC5);
-SYSFS_PMCSETUP(pmc6, SPRN_PMC6);
-
-#ifdef HAS_PPC_PMC_G4
-SYSFS_PMCSETUP(mmcr2, SPRN_MMCR2);
+#ifdef CONFIG_PPC_BOOK3S_32
+#define HAS_PPC_PMC_G4 1
#endif
+#endif /* CONFIG_PMU_SYSFS */
+#if defined(CONFIG_PPC64) && defined(CONFIG_DEBUG_MISC)
+#define HAS_PPC_PA6T
+#endif
+/*
+ * SPRs which are not related to PMU.
+ */
#ifdef CONFIG_PPC64
-SYSFS_PMCSETUP(pmc7, SPRN_PMC7);
-SYSFS_PMCSETUP(pmc8, SPRN_PMC8);
-
-SYSFS_PMCSETUP(mmcra, SPRN_MMCRA);
SYSFS_SPRSETUP(purr, SPRN_PURR);
SYSFS_SPRSETUP(spurr, SPRN_SPURR);
SYSFS_SPRSETUP(pir, SPRN_PIR);
@@ -495,115 +587,40 @@ SYSFS_SPRSETUP(tscr, SPRN_TSCR);
enable write when needed with a separate function.
Lets be conservative and default to pseries.
*/
-static DEVICE_ATTR(mmcra, 0600, show_mmcra, store_mmcra);
static DEVICE_ATTR(spurr, 0400, show_spurr, NULL);
static DEVICE_ATTR(purr, 0400, show_purr, store_purr);
static DEVICE_ATTR(pir, 0400, show_pir, NULL);
static DEVICE_ATTR(tscr, 0600, show_tscr, store_tscr);
+#endif /* CONFIG_PPC64 */
-/*
- * This is the system wide DSCR register default value. Any
- * change to this default value through the sysfs interface
- * will update all per cpu DSCR default values across the
- * system stored in their respective PACA structures.
- */
-static unsigned long dscr_default;
-
-/**
- * read_dscr() - Fetch the cpu specific DSCR default
- * @val: Returned cpu specific DSCR default value
- *
- * This function returns the per cpu DSCR default value
- * for any cpu which is contained in it's PACA structure.
- */
-static void read_dscr(void *val)
-{
- *(unsigned long *)val = get_paca()->dscr_default;
-}
-
-
-/**
- * write_dscr() - Update the cpu specific DSCR default
- * @val: New cpu specific DSCR default value to update
- *
- * This function updates the per cpu DSCR default value
- * for any cpu which is contained in it's PACA structure.
- */
-static void write_dscr(void *val)
-{
- get_paca()->dscr_default = *(unsigned long *)val;
- if (!current->thread.dscr_inherit) {
- current->thread.dscr = *(unsigned long *)val;
- mtspr(SPRN_DSCR, *(unsigned long *)val);
- }
-}
-
-SYSFS_SPRSETUP_SHOW_STORE(dscr);
-static DEVICE_ATTR(dscr, 0600, show_dscr, store_dscr);
-
-static void add_write_permission_dev_attr(struct device_attribute *attr)
-{
- attr->attr.mode |= 0200;
-}
-
-/**
- * show_dscr_default() - Fetch the system wide DSCR default
- * @dev: Device structure
- * @attr: Device attribute structure
- * @buf: Interface buffer
- *
- * This function returns the system wide DSCR default value.
- */
-static ssize_t show_dscr_default(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%lx\n", dscr_default);
-}
-
-/**
- * store_dscr_default() - Update the system wide DSCR default
- * @dev: Device structure
- * @attr: Device attribute structure
- * @buf: Interface buffer
- * @count: Size of the update
- *
- * This function updates the system wide DSCR default value.
- */
-static ssize_t __used store_dscr_default(struct device *dev,
- struct device_attribute *attr, const char *buf,
- size_t count)
-{
- unsigned long val;
- int ret = 0;
-
- ret = sscanf(buf, "%lx", &val);
- if (ret != 1)
- return -EINVAL;
- dscr_default = val;
+#ifdef HAS_PPC_PMC_CLASSIC
+SYSFS_PMCSETUP(mmcr0, SPRN_MMCR0);
+SYSFS_PMCSETUP(mmcr1, SPRN_MMCR1);
+SYSFS_PMCSETUP(pmc1, SPRN_PMC1);
+SYSFS_PMCSETUP(pmc2, SPRN_PMC2);
+SYSFS_PMCSETUP(pmc3, SPRN_PMC3);
+SYSFS_PMCSETUP(pmc4, SPRN_PMC4);
+SYSFS_PMCSETUP(pmc5, SPRN_PMC5);
+SYSFS_PMCSETUP(pmc6, SPRN_PMC6);
+#endif
- on_each_cpu(write_dscr, &val, 1);
+#ifdef HAS_PPC_PMC_G4
+SYSFS_PMCSETUP(mmcr2, SPRN_MMCR2);
+#endif
- return count;
-}
+#ifdef HAS_PPC_PMC56
+SYSFS_PMCSETUP(pmc7, SPRN_PMC7);
+SYSFS_PMCSETUP(pmc8, SPRN_PMC8);
-static DEVICE_ATTR(dscr_default, 0600,
- show_dscr_default, store_dscr_default);
+SYSFS_PMCSETUP(mmcra, SPRN_MMCRA);
+SYSFS_PMCSETUP(mmcr3, SPRN_MMCR3);
-static void sysfs_create_dscr_default(void)
-{
- if (cpu_has_feature(CPU_FTR_DSCR)) {
- int err = 0;
- int cpu;
+static DEVICE_ATTR(mmcra, 0600, show_mmcra, store_mmcra);
+static DEVICE_ATTR(mmcr3, 0600, show_mmcr3, store_mmcr3);
+#endif /* HAS_PPC_PMC56 */
- dscr_default = spr_default_dscr;
- for_each_possible_cpu(cpu)
- paca_ptrs[cpu]->dscr_default = dscr_default;
- err = device_create_file(cpu_subsys.dev_root, &dev_attr_dscr_default);
- }
-}
-#endif /* CONFIG_PPC64 */
#ifdef HAS_PPC_PMC_PA6T
SYSFS_PMCSETUP(pa6t_pmc0, SPRN_PA6T_PMC0);
@@ -612,7 +629,9 @@ SYSFS_PMCSETUP(pa6t_pmc2, SPRN_PA6T_PMC2);
SYSFS_PMCSETUP(pa6t_pmc3, SPRN_PA6T_PMC3);
SYSFS_PMCSETUP(pa6t_pmc4, SPRN_PA6T_PMC4);
SYSFS_PMCSETUP(pa6t_pmc5, SPRN_PA6T_PMC5);
-#ifdef CONFIG_DEBUG_MISC
+#endif
+
+#ifdef HAS_PPC_PA6T
SYSFS_SPRSETUP(hid0, SPRN_HID0);
SYSFS_SPRSETUP(hid1, SPRN_HID1);
SYSFS_SPRSETUP(hid4, SPRN_HID4);
@@ -641,15 +660,14 @@ SYSFS_SPRSETUP(tsr0, SPRN_PA6T_TSR0);
SYSFS_SPRSETUP(tsr1, SPRN_PA6T_TSR1);
SYSFS_SPRSETUP(tsr2, SPRN_PA6T_TSR2);
SYSFS_SPRSETUP(tsr3, SPRN_PA6T_TSR3);
-#endif /* CONFIG_DEBUG_MISC */
-#endif /* HAS_PPC_PMC_PA6T */
+#endif /* HAS_PPC_PA6T */
#ifdef HAS_PPC_PMC_IBM
static struct device_attribute ibm_common_attrs[] = {
__ATTR(mmcr0, 0600, show_mmcr0, store_mmcr0),
__ATTR(mmcr1, 0600, show_mmcr1, store_mmcr1),
};
-#endif /* HAS_PPC_PMC_G4 */
+#endif /* HAS_PPC_PMC_IBM */
#ifdef HAS_PPC_PMC_G4
static struct device_attribute g4_common_attrs[] = {
@@ -659,6 +677,7 @@ static struct device_attribute g4_common_attrs[] = {
};
#endif /* HAS_PPC_PMC_G4 */
+#ifdef HAS_PPC_PMC_CLASSIC
static struct device_attribute classic_pmc_attrs[] = {
__ATTR(pmc1, 0600, show_pmc1, store_pmc1),
__ATTR(pmc2, 0600, show_pmc2, store_pmc2),
@@ -666,14 +685,16 @@ static struct device_attribute classic_pmc_attrs[] = {
__ATTR(pmc4, 0600, show_pmc4, store_pmc4),
__ATTR(pmc5, 0600, show_pmc5, store_pmc5),
__ATTR(pmc6, 0600, show_pmc6, store_pmc6),
-#ifdef CONFIG_PPC64
+#ifdef HAS_PPC_PMC56
__ATTR(pmc7, 0600, show_pmc7, store_pmc7),
__ATTR(pmc8, 0600, show_pmc8, store_pmc8),
#endif
};
+#endif
-#ifdef HAS_PPC_PMC_PA6T
+#if defined(HAS_PPC_PMC_PA6T) || defined(HAS_PPC_PA6T)
static struct device_attribute pa6t_attrs[] = {
+#ifdef HAS_PPC_PMC_PA6T
__ATTR(mmcr0, 0600, show_mmcr0, store_mmcr0),
__ATTR(mmcr1, 0600, show_mmcr1, store_mmcr1),
__ATTR(pmc0, 0600, show_pa6t_pmc0, store_pa6t_pmc0),
@@ -682,7 +703,8 @@ static struct device_attribute pa6t_attrs[] = {
__ATTR(pmc3, 0600, show_pa6t_pmc3, store_pa6t_pmc3),
__ATTR(pmc4, 0600, show_pa6t_pmc4, store_pa6t_pmc4),
__ATTR(pmc5, 0600, show_pa6t_pmc5, store_pa6t_pmc5),
-#ifdef CONFIG_DEBUG_MISC
+#endif
+#ifdef HAS_PPC_PA6T
__ATTR(hid0, 0600, show_hid0, store_hid0),
__ATTR(hid1, 0600, show_hid1, store_hid1),
__ATTR(hid4, 0600, show_hid4, store_hid4),
@@ -711,10 +733,9 @@ static struct device_attribute pa6t_attrs[] = {
__ATTR(tsr1, 0600, show_tsr1, store_tsr1),
__ATTR(tsr2, 0600, show_tsr2, store_tsr2),
__ATTR(tsr3, 0600, show_tsr3, store_tsr3),
-#endif /* CONFIG_DEBUG_MISC */
+#endif /* HAS_PPC_PA6T */
};
-#endif /* HAS_PPC_PMC_PA6T */
-#endif /* HAS_PPC_PMC_CLASSIC */
+#endif
#ifdef CONFIG_PPC_SVM
static ssize_t show_svm(struct device *dev, struct device_attribute *attr, char *buf)
@@ -723,16 +744,84 @@ static ssize_t show_svm(struct device *dev, struct device_attribute *attr, char
}
static DEVICE_ATTR(svm, 0444, show_svm, NULL);
-static void create_svm_file(void)
+static void __init create_svm_file(void)
{
device_create_file(cpu_subsys.dev_root, &dev_attr_svm);
}
#else
-static void create_svm_file(void)
+static void __init create_svm_file(void)
{
}
#endif /* CONFIG_PPC_SVM */
+#ifdef CONFIG_PPC_PSERIES
+static void read_idle_purr(void *val)
+{
+ u64 *ret = val;
+
+ *ret = read_this_idle_purr();
+}
+
+static ssize_t idle_purr_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct cpu *cpu = container_of(dev, struct cpu, dev);
+ u64 val;
+
+ smp_call_function_single(cpu->dev.id, read_idle_purr, &val, 1);
+ return sprintf(buf, "%llx\n", val);
+}
+static DEVICE_ATTR(idle_purr, 0400, idle_purr_show, NULL);
+
+static void create_idle_purr_file(struct device *s)
+{
+ if (firmware_has_feature(FW_FEATURE_LPAR))
+ device_create_file(s, &dev_attr_idle_purr);
+}
+
+static void remove_idle_purr_file(struct device *s)
+{
+ if (firmware_has_feature(FW_FEATURE_LPAR))
+ device_remove_file(s, &dev_attr_idle_purr);
+}
+
+static void read_idle_spurr(void *val)
+{
+ u64 *ret = val;
+
+ *ret = read_this_idle_spurr();
+}
+
+static ssize_t idle_spurr_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct cpu *cpu = container_of(dev, struct cpu, dev);
+ u64 val;
+
+ smp_call_function_single(cpu->dev.id, read_idle_spurr, &val, 1);
+ return sprintf(buf, "%llx\n", val);
+}
+static DEVICE_ATTR(idle_spurr, 0400, idle_spurr_show, NULL);
+
+static void create_idle_spurr_file(struct device *s)
+{
+ if (firmware_has_feature(FW_FEATURE_LPAR))
+ device_create_file(s, &dev_attr_idle_spurr);
+}
+
+static void remove_idle_spurr_file(struct device *s)
+{
+ if (firmware_has_feature(FW_FEATURE_LPAR))
+ device_remove_file(s, &dev_attr_idle_spurr);
+}
+
+#else /* CONFIG_PPC_PSERIES */
+#define create_idle_purr_file(s)
+#define remove_idle_purr_file(s)
+#define create_idle_spurr_file(s)
+#define remove_idle_spurr_file(s)
+#endif /* CONFIG_PPC_PSERIES */
+
static int register_cpu_online(unsigned int cpu)
{
struct cpu *c = &per_cpu(cpu_devices, cpu);
@@ -754,25 +843,25 @@ static int register_cpu_online(unsigned int cpu)
#ifdef HAS_PPC_PMC_IBM
case PPC_PMC_IBM:
attrs = ibm_common_attrs;
- nattrs = sizeof(ibm_common_attrs) / sizeof(struct device_attribute);
+ nattrs = ARRAY_SIZE(ibm_common_attrs);
pmc_attrs = classic_pmc_attrs;
break;
#endif /* HAS_PPC_PMC_IBM */
#ifdef HAS_PPC_PMC_G4
case PPC_PMC_G4:
attrs = g4_common_attrs;
- nattrs = sizeof(g4_common_attrs) / sizeof(struct device_attribute);
+ nattrs = ARRAY_SIZE(g4_common_attrs);
pmc_attrs = classic_pmc_attrs;
break;
#endif /* HAS_PPC_PMC_G4 */
-#ifdef HAS_PPC_PMC_PA6T
+#if defined(HAS_PPC_PMC_PA6T) || defined(HAS_PPC_PA6T)
case PPC_PMC_PA6T:
/* PA Semi starts counting at PMC0 */
attrs = pa6t_attrs;
- nattrs = sizeof(pa6t_attrs) / sizeof(struct device_attribute);
+ nattrs = ARRAY_SIZE(pa6t_attrs);
pmc_attrs = NULL;
break;
-#endif /* HAS_PPC_PMC_PA6T */
+#endif
default:
attrs = NULL;
nattrs = 0;
@@ -787,17 +876,25 @@ static int register_cpu_online(unsigned int cpu)
device_create_file(s, &pmc_attrs[i]);
#ifdef CONFIG_PPC64
+#ifdef CONFIG_PMU_SYSFS
if (cpu_has_feature(CPU_FTR_MMCRA))
device_create_file(s, &dev_attr_mmcra);
+ if (cpu_has_feature(CPU_FTR_ARCH_31))
+ device_create_file(s, &dev_attr_mmcr3);
+#endif /* CONFIG_PMU_SYSFS */
+
if (cpu_has_feature(CPU_FTR_PURR)) {
if (!firmware_has_feature(FW_FEATURE_LPAR))
add_write_permission_dev_attr(&dev_attr_purr);
device_create_file(s, &dev_attr_purr);
+ create_idle_purr_file(s);
}
- if (cpu_has_feature(CPU_FTR_SPURR))
+ if (cpu_has_feature(CPU_FTR_SPURR)) {
device_create_file(s, &dev_attr_spurr);
+ create_idle_spurr_file(s);
+ }
if (cpu_has_feature(CPU_FTR_DSCR))
device_create_file(s, &dev_attr_dscr);
@@ -810,7 +907,7 @@ static int register_cpu_online(unsigned int cpu)
device_create_file(s, &dev_attr_tscr);
#endif /* CONFIG_PPC64 */
-#ifdef CONFIG_PPC_FSL_BOOK3E
+#ifdef CONFIG_PPC_E500
if (PVR_VER(cur_cpu_spec->pvr_value) == PVR_VER_E6500) {
device_create_file(s, &dev_attr_pw20_state);
device_create_file(s, &dev_attr_pw20_wait_time);
@@ -831,7 +928,8 @@ static int unregister_cpu_online(unsigned int cpu)
struct device_attribute *attrs, *pmc_attrs;
int i, nattrs;
- BUG_ON(!c->hotpluggable);
+ if (WARN_RATELIMIT(!c->hotpluggable, "cpu %d can't be offlined\n", cpu))
+ return -EBUSY;
#ifdef CONFIG_PPC64
if (cpu_has_feature(CPU_FTR_SMT))
@@ -843,25 +941,25 @@ static int unregister_cpu_online(unsigned int cpu)
#ifdef HAS_PPC_PMC_IBM
case PPC_PMC_IBM:
attrs = ibm_common_attrs;
- nattrs = sizeof(ibm_common_attrs) / sizeof(struct device_attribute);
+ nattrs = ARRAY_SIZE(ibm_common_attrs);
pmc_attrs = classic_pmc_attrs;
break;
#endif /* HAS_PPC_PMC_IBM */
#ifdef HAS_PPC_PMC_G4
case PPC_PMC_G4:
attrs = g4_common_attrs;
- nattrs = sizeof(g4_common_attrs) / sizeof(struct device_attribute);
+ nattrs = ARRAY_SIZE(g4_common_attrs);
pmc_attrs = classic_pmc_attrs;
break;
#endif /* HAS_PPC_PMC_G4 */
-#ifdef HAS_PPC_PMC_PA6T
+#if defined(HAS_PPC_PMC_PA6T) || defined(HAS_PPC_PA6T)
case PPC_PMC_PA6T:
/* PA Semi starts counting at PMC0 */
attrs = pa6t_attrs;
- nattrs = sizeof(pa6t_attrs) / sizeof(struct device_attribute);
+ nattrs = ARRAY_SIZE(pa6t_attrs);
pmc_attrs = NULL;
break;
-#endif /* HAS_PPC_PMC_PA6T */
+#endif
default:
attrs = NULL;
nattrs = 0;
@@ -876,14 +974,23 @@ static int unregister_cpu_online(unsigned int cpu)
device_remove_file(s, &pmc_attrs[i]);
#ifdef CONFIG_PPC64
+#ifdef CONFIG_PMU_SYSFS
if (cpu_has_feature(CPU_FTR_MMCRA))
device_remove_file(s, &dev_attr_mmcra);
- if (cpu_has_feature(CPU_FTR_PURR))
+ if (cpu_has_feature(CPU_FTR_ARCH_31))
+ device_remove_file(s, &dev_attr_mmcr3);
+#endif /* CONFIG_PMU_SYSFS */
+
+ if (cpu_has_feature(CPU_FTR_PURR)) {
device_remove_file(s, &dev_attr_purr);
+ remove_idle_purr_file(s);
+ }
- if (cpu_has_feature(CPU_FTR_SPURR))
+ if (cpu_has_feature(CPU_FTR_SPURR)) {
device_remove_file(s, &dev_attr_spurr);
+ remove_idle_spurr_file(s);
+ }
if (cpu_has_feature(CPU_FTR_DSCR))
device_remove_file(s, &dev_attr_dscr);
@@ -896,7 +1003,7 @@ static int unregister_cpu_online(unsigned int cpu)
device_remove_file(s, &dev_attr_tscr);
#endif /* CONFIG_PPC64 */
-#ifdef CONFIG_PPC_FSL_BOOK3E
+#ifdef CONFIG_PPC_E500
if (PVR_VER(cur_cpu_spec->pvr_value) == PVR_VER_E6500) {
device_remove_file(s, &dev_attr_pw20_state);
device_remove_file(s, &dev_attr_pw20_wait_time);
@@ -1003,14 +1110,6 @@ EXPORT_SYMBOL_GPL(cpu_remove_dev_attr_group);
/* NUMA stuff */
#ifdef CONFIG_NUMA
-static void register_nodes(void)
-{
- int i;
-
- for (i = 0; i < MAX_NUMNODES; i++)
- register_one_node(i);
-}
-
int sysfs_add_device_to_node(struct device *dev, int nid)
{
struct node *node = node_devices[nid];
@@ -1025,13 +1124,6 @@ void sysfs_remove_device_from_node(struct device *dev, int nid)
sysfs_remove_link(&node->dev.kobj, kobject_name(&dev->kobj));
}
EXPORT_SYMBOL_GPL(sysfs_remove_device_from_node);
-
-#else
-static void register_nodes(void)
-{
- return;
-}
-
#endif
/* Only valid if CPU is present. */
@@ -1048,11 +1140,10 @@ static int __init topology_init(void)
{
int cpu, r;
- register_nodes();
-
for_each_possible_cpu(cpu) {
struct cpu *c = &per_cpu(cpu_devices, cpu);
+#ifdef CONFIG_HOTPLUG_CPU
/*
* For now, we just see if the system supports making
* the RTAS calls for CPU hotplug. But, there may be a
@@ -1060,8 +1151,9 @@ static int __init topology_init(void)
* CPU. For instance, the boot cpu might never be valid
* for hotplugging.
*/
- if (ppc_md.cpu_die)
+ if (smp_ops && smp_ops->cpu_offline_self)
c->hotpluggable = 1;
+#endif
if (cpu_online(cpu) || c->hotpluggable) {
register_cpu(c, cpu);
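
For reference, the SYSFS_SPRSETUP()/SYSFS_PMCSETUP() macros moved earlier in this file each generate a read/write helper pair plus a show/store pair per SPR, with the actual mfspr/mtspr run on the owning CPU via smp_call_function_single(). SYSFS_SPRSETUP(purr, SPRN_PURR), for example, expands to approximately the following (a sketch of the expansion, not a verbatim preprocessor dump):

static void read_purr(void *val)
{
	*(unsigned long *)val = mfspr(SPRN_PURR);
}

static void write_purr(void *val)
{
	mtspr(SPRN_PURR, *(unsigned long *)val);
}

static ssize_t show_purr(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct cpu *cpu = container_of(dev, struct cpu, dev);
	unsigned long val;

	/* read the SPR on the CPU this sysfs file belongs to */
	smp_call_function_single(cpu->dev.id, read_purr, &val, 1);
	return sprintf(buf, "%lx\n", val);
}

static ssize_t __used store_purr(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct cpu *cpu = container_of(dev, struct cpu, dev);
	unsigned long val;
	int ret = sscanf(buf, "%lx", &val);

	if (ret != 1)
		return -EINVAL;
	/* write the SPR on the target CPU */
	smp_call_function_single(cpu->dev.id, write_purr, &val, 1);
	return count;
}
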
diff --git a/arch/powerpc/kernel/systbl.S b/arch/powerpc/kernel/systbl.S
deleted file mode 100644
index 5b905a2f4e4d..000000000000
--- a/arch/powerpc/kernel/systbl.S
+++ /dev/null
@@ -1,40 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * This file contains the table of syscall-handling functions.
- * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
- *
- * Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
- * and Paul Mackerras.
- *
- * Adapted for iSeries by Mike Corrigan (mikejc@us.ibm.com)
- * PPC64 updates by Dave Engebretsen (engebret@us.ibm.com)
- */
-
-#include <asm/ppc_asm.h>
-
-.section .rodata,"a"
-
-#ifdef CONFIG_PPC64
- .p2align 3
-#endif
-
-.globl sys_call_table
-sys_call_table:
-#ifdef CONFIG_PPC64
-#define __SYSCALL(nr, entry) .8byte DOTSYM(entry)
-#include <asm/syscall_table_64.h>
-#undef __SYSCALL
-#else
-#define __SYSCALL(nr, entry) .long entry
-#include <asm/syscall_table_32.h>
-#undef __SYSCALL
-#endif
-
-#ifdef CONFIG_COMPAT
-.globl compat_sys_call_table
-compat_sys_call_table:
-#define compat_sys_sigsuspend sys_sigsuspend
-#define __SYSCALL(nr, entry) .8byte DOTSYM(entry)
-#include <asm/syscall_table_c32.h>
-#undef __SYSCALL
-#endif
diff --git a/arch/powerpc/kernel/systbl.c b/arch/powerpc/kernel/systbl.c
new file mode 100644
index 000000000000..4305f2a2162f
--- /dev/null
+++ b/arch/powerpc/kernel/systbl.c
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * This file contains the table of syscall-handling functions.
+ * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
+ *
+ * Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
+ * and Paul Mackerras.
+ *
+ * Adapted for iSeries by Mike Corrigan (mikejc@us.ibm.com)
+ * PPC64 updates by Dave Engebretsen (engebret@us.ibm.com)
+ */
+
+#include <linux/syscalls.h>
+#include <linux/compat.h>
+#include <asm/unistd.h>
+#include <asm/syscalls.h>
+
+#undef __SYSCALL_WITH_COMPAT
+#define __SYSCALL_WITH_COMPAT(nr, entry, compat) __SYSCALL(nr, entry)
+
+#undef __SYSCALL
+#ifdef CONFIG_ARCH_HAS_SYSCALL_WRAPPER
+#define __SYSCALL(nr, entry) [nr] = entry,
+#else
+/*
+ * Coerce syscall handlers with arbitrary parameters to common type
+ * requires cast to void* to avoid -Wcast-function-type.
+ */
+#define __SYSCALL(nr, entry) [nr] = (void *) entry,
+#endif
+
+const syscall_fn sys_call_table[] = {
+#ifdef CONFIG_PPC64
+#include <asm/syscall_table_64.h>
+#else
+#include <asm/syscall_table_32.h>
+#endif
+};
+
+#ifdef CONFIG_COMPAT
+#undef __SYSCALL_WITH_COMPAT
+#define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, compat)
+const syscall_fn compat_sys_call_table[] = {
+#include <asm/syscall_table_32.h>
+};
+#endif /* CONFIG_COMPAT */
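
The asm/syscall_table_*.h headers included above are generated from syscall.tbl and contain one __SYSCALL()/__SYSCALL_WITH_COMPAT() invocation per slot, so with the #defines at the top of systbl.c the table collapses into an ordinary designated-initializer array. A rough sketch of a generated fragment and its expansion (without CONFIG_ARCH_HAS_SYSCALL_WRAPPER; entry names follow the table rows shown earlier):

/* generated header fragment (illustrative): */
__SYSCALL(0, sys_restart_syscall)
__SYSCALL(1, sys_exit)
__SYSCALL_WITH_COMPAT(5, sys_open, compat_sys_open)

/* which the native table definition above turns into: */
const syscall_fn sys_call_table[] = {
	[0] = (void *) sys_restart_syscall,
	[1] = (void *) sys_exit,
	[5] = (void *) sys_open,	/* the compat table picks compat_sys_open */
};
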
diff --git a/arch/powerpc/kernel/systbl_chk.sh b/arch/powerpc/kernel/systbl_chk.sh
deleted file mode 100644
index c7ac3ed657c4..000000000000
--- a/arch/powerpc/kernel/systbl_chk.sh
+++ /dev/null
@@ -1,30 +0,0 @@
-#!/bin/sh
-# SPDX-License-Identifier: GPL-2.0-or-later
-#
-# Just process the CPP output from systbl_chk.c and complain
-# if anything is out of order.
-#
-# Copyright © 2008 IBM Corporation
-#
-
-awk 'BEGIN { num = -1; } # Ignore the beginning of the file
- /^#/ { next; }
- /^[ \t]*$/ { next; }
- /^START_TABLE/ { num = 0; next; }
- /^END_TABLE/ {
- if (num != $2) {
- printf "Error: NR_syscalls (%s) is not one more than the last syscall (%s)\n",
- $2, num - 1;
- exit(1);
- }
- num = -1; # Ignore the rest of the file
- }
- {
- if (num == -1) next;
- if (($1 != -1) && ($1 != num)) {
- printf "Error: Syscall %s out of order (expected %s)\n",
- $1, num;
- exit(1);
- };
- num++;
- }' "$1"
diff --git a/arch/powerpc/kernel/tau_6xx.c b/arch/powerpc/kernel/tau_6xx.c
index e2ab8a111b69..828d0f4106d2 100644
--- a/arch/powerpc/kernel/tau_6xx.c
+++ b/arch/powerpc/kernel/tau_6xx.c
@@ -13,21 +13,22 @@
*/
#include <linux/errno.h>
-#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/workqueue.h>
+#include <asm/interrupt.h>
#include <asm/io.h>
#include <asm/reg.h>
#include <asm/nvram.h>
#include <asm/cache.h>
#include <asm/8xx_immap.h>
#include <asm/machdep.h>
-#include <asm/asm-prototypes.h>
#include "setup.h"
@@ -39,9 +40,7 @@ static struct tau_temp
unsigned char grew;
} tau[NR_CPUS];
-struct timer_list tau_timer;
-
-#undef DEBUG
+static bool tau_int_enable;
/* TODO: put these in a /proc interface, with some sanity checks, and maybe
* dynamic adjustment to minimize # of interrupts */
@@ -50,72 +49,49 @@ struct timer_list tau_timer;
#define step_size 2 /* step size when temp goes out of range */
#define window_expand 1 /* expand the window by this much */
/* configurable values for shrinking the window */
-#define shrink_timer 2*HZ /* period between shrinking the window */
+#define shrink_timer 2000 /* period between shrinking the window */
#define min_window 2 /* minimum window size, degrees C */
static void set_thresholds(unsigned long cpu)
{
-#ifdef CONFIG_TAU_INT
- /*
- * setup THRM1,
- * threshold, valid bit, enable interrupts, interrupt when below threshold
- */
- mtspr(SPRN_THRM1, THRM1_THRES(tau[cpu].low) | THRM1_V | THRM1_TIE | THRM1_TID);
+ u32 maybe_tie = tau_int_enable ? THRM1_TIE : 0;
- /* setup THRM2,
- * threshold, valid bit, enable interrupts, interrupt when above threshold
- */
- mtspr (SPRN_THRM2, THRM1_THRES(tau[cpu].high) | THRM1_V | THRM1_TIE);
-#else
- /* same thing but don't enable interrupts */
- mtspr(SPRN_THRM1, THRM1_THRES(tau[cpu].low) | THRM1_V | THRM1_TID);
- mtspr(SPRN_THRM2, THRM1_THRES(tau[cpu].high) | THRM1_V);
-#endif
+ /* setup THRM1, threshold, valid bit, interrupt when below threshold */
+ mtspr(SPRN_THRM1, THRM1_THRES(tau[cpu].low) | THRM1_V | maybe_tie | THRM1_TID);
+
+ /* setup THRM2, threshold, valid bit, interrupt when above threshold */
+ mtspr(SPRN_THRM2, THRM1_THRES(tau[cpu].high) | THRM1_V | maybe_tie);
}
static void TAUupdate(int cpu)
{
- unsigned thrm;
-
-#ifdef DEBUG
- printk("TAUupdate ");
-#endif
+ u32 thrm;
+ u32 bits = THRM1_TIV | THRM1_TIN | THRM1_V;
/* if both thresholds are crossed, the step_sizes cancel out
* and the window winds up getting expanded twice. */
- if((thrm = mfspr(SPRN_THRM1)) & THRM1_TIV){ /* is valid? */
- if(thrm & THRM1_TIN){ /* crossed low threshold */
- if (tau[cpu].low >= step_size){
- tau[cpu].low -= step_size;
- tau[cpu].high -= (step_size - window_expand);
- }
- tau[cpu].grew = 1;
-#ifdef DEBUG
- printk("low threshold crossed ");
-#endif
+ thrm = mfspr(SPRN_THRM1);
+ if ((thrm & bits) == bits) {
+ mtspr(SPRN_THRM1, 0);
+
+ if (tau[cpu].low >= step_size) {
+ tau[cpu].low -= step_size;
+ tau[cpu].high -= (step_size - window_expand);
}
+ tau[cpu].grew = 1;
+ pr_debug("%s: low threshold crossed\n", __func__);
}
- if((thrm = mfspr(SPRN_THRM2)) & THRM1_TIV){ /* is valid? */
- if(thrm & THRM1_TIN){ /* crossed high threshold */
- if (tau[cpu].high <= 127-step_size){
- tau[cpu].low += (step_size - window_expand);
- tau[cpu].high += step_size;
- }
- tau[cpu].grew = 1;
-#ifdef DEBUG
- printk("high threshold crossed ");
-#endif
+ thrm = mfspr(SPRN_THRM2);
+ if ((thrm & bits) == bits) {
+ mtspr(SPRN_THRM2, 0);
+
+ if (tau[cpu].high <= 127 - step_size) {
+ tau[cpu].low += (step_size - window_expand);
+ tau[cpu].high += step_size;
}
+ tau[cpu].grew = 1;
+ pr_debug("%s: high threshold crossed\n", __func__);
}
-
-#ifdef DEBUG
- printk("grew = %d\n", tau[cpu].grew);
-#endif
-
-#ifndef CONFIG_TAU_INT /* tau_timeout will do this if not using interrupts */
- set_thresholds(cpu);
-#endif
-
}
#ifdef CONFIG_TAU_INT
@@ -124,33 +100,29 @@ static void TAUupdate(int cpu)
* with interrupts disabled
*/
-void TAUException(struct pt_regs * regs)
+DEFINE_INTERRUPT_HANDLER_ASYNC(TAUException)
{
int cpu = smp_processor_id();
- irq_enter();
tau[cpu].interrupts++;
TAUupdate(cpu);
-
- irq_exit();
}
#endif /* CONFIG_TAU_INT */
static void tau_timeout(void * info)
{
int cpu;
- unsigned long flags;
int size;
int shrink;
- /* disabling interrupts *should* be okay */
- local_irq_save(flags);
cpu = smp_processor_id();
-#ifndef CONFIG_TAU_INT
- TAUupdate(cpu);
-#endif
+ if (!tau_int_enable)
+ TAUupdate(cpu);
+
+ /* Stop thermal sensor comparisons and interrupts */
+ mtspr(SPRN_THRM3, 0);
size = tau[cpu].high - tau[cpu].low;
if (size > min_window && ! tau[cpu].grew) {
@@ -173,32 +145,26 @@ static void tau_timeout(void * info)
set_thresholds(cpu);
- /*
- * Do the enable every time, since otherwise a bunch of (relatively)
- * complex sleep code needs to be added. One mtspr every time
- * tau_timeout is called is probably not a big deal.
- *
- * Enable thermal sensor and set up sample interval timer
- * need 20 us to do the compare.. until a nice 'cpu_speed' function
- * call is implemented, just assume a 500 mhz clock. It doesn't really
- * matter if we take too long for a compare since it's all interrupt
- * driven anyway.
- *
- * use a extra long time.. (60 us @ 500 mhz)
+ /* Restart thermal sensor comparisons and interrupts.
+ * The "PowerPC 740 and PowerPC 750 Microprocessor Datasheet"
+ * recommends that "the maximum value be set in THRM3 under all
+ * conditions."
*/
- mtspr(SPRN_THRM3, THRM3_SITV(500*60) | THRM3_E);
-
- local_irq_restore(flags);
+ mtspr(SPRN_THRM3, THRM3_SITV(0x1fff) | THRM3_E);
}
-static void tau_timeout_smp(struct timer_list *unused)
-{
+static struct workqueue_struct *tau_workq;
- /* schedule ourselves to be run again */
- mod_timer(&tau_timer, jiffies + shrink_timer) ;
+static void tau_work_func(struct work_struct *work)
+{
+ msleep(shrink_timer);
on_each_cpu(tau_timeout, NULL, 0);
+ /* schedule ourselves to be run again */
+ queue_work(tau_workq, work);
}
+static DECLARE_WORK(tau_work, tau_work_func);
+
/*
* setup the TAU
*
@@ -231,21 +197,19 @@ static int __init TAU_init(void)
return 1;
}
+ tau_int_enable = IS_ENABLED(CONFIG_TAU_INT) &&
+ !strcmp(cur_cpu_spec->platform, "ppc750");
- /* first, set up the window shrinking timer */
- timer_setup(&tau_timer, tau_timeout_smp, 0);
- tau_timer.expires = jiffies + shrink_timer;
- add_timer(&tau_timer);
+ tau_workq = alloc_workqueue("tau", WQ_UNBOUND, 1);
+ if (!tau_workq)
+ return -ENOMEM;
on_each_cpu(TAU_init_smp, NULL, 0);
- printk("Thermal assist unit ");
-#ifdef CONFIG_TAU_INT
- printk("using interrupts, ");
-#else
- printk("using timers, ");
-#endif
- printk("shrink_timer: %d jiffies\n", shrink_timer);
+ queue_work(tau_workq, &tau_work);
+
+ pr_info("Thermal assist unit using %s, shrink_timer: %d ms\n",
+ tau_int_enable ? "interrupts" : "workqueue", shrink_timer);
tau_initialized = 1;
return 0;
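
The TAU rework above replaces the self-rearming kernel timer with a single-threaded unbound workqueue whose work item sleeps for the poll period and then requeues itself, so the THRM register handling always runs in sleepable process context. The pattern in isolation, with illustrative names:

#include <linux/workqueue.h>
#include <linux/delay.h>

#define POLL_MS	2000			/* poll period in milliseconds */

static struct workqueue_struct *poll_wq;

static void poll_work_func(struct work_struct *work)
{
	msleep(POLL_MS);		/* sleep for one poll period */
	/* ... sample sensors / adjust per-CPU thresholds here ... */
	queue_work(poll_wq, work);	/* schedule ourselves to run again */
}
static DECLARE_WORK(poll_work, poll_work_func);

static int __init poll_setup(void)
{
	/* one unbound worker: runs in process context and may sleep */
	poll_wq = alloc_workqueue("poll", WQ_UNBOUND, 1);
	if (!poll_wq)
		return -ENOMEM;
	queue_work(poll_wq, &poll_work);
	return 0;
}

Compared with the old timer, this keeps the shrink_timer cadence without re-arming a timer from its own handler and avoids touching the SPRs from (soft)irq context.
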
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 1168e8b37e30..a2ab397065c6 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -31,6 +31,7 @@
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
+#include <linux/sched/cputime.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
@@ -50,40 +51,32 @@
#include <linux/irq.h>
#include <linux/delay.h>
#include <linux/irq_work.h>
-#include <linux/clk-provider.h>
+#include <linux/of_clk.h>
#include <linux/suspend.h>
-#include <linux/sched/cputime.h>
#include <linux/processor.h>
-#include <asm/trace.h>
+#include <linux/mc146818rtc.h>
+#include <linux/platform_device.h>
+#include <asm/trace.h>
+#include <asm/interrupt.h>
#include <asm/io.h>
#include <asm/nvram.h>
#include <asm/cache.h>
#include <asm/machdep.h>
#include <linux/uaccess.h>
#include <asm/time.h>
-#include <asm/prom.h>
#include <asm/irq.h>
#include <asm/div64.h>
#include <asm/smp.h>
#include <asm/vdso_datapage.h>
#include <asm/firmware.h>
-#include <asm/asm-prototypes.h>
+#include <asm/mce.h>
/* powerpc clocksource/clockevent code */
#include <linux/clockchips.h>
#include <linux/timekeeper_internal.h>
-static u64 rtc_read(struct clocksource *);
-static struct clocksource clocksource_rtc = {
- .name = "rtc",
- .rating = 400,
- .flags = CLOCK_SOURCE_IS_CONTINUOUS,
- .mask = CLOCKSOURCE_MASK(64),
- .read = rtc_read,
-};
-
static u64 timebase_read(struct clocksource *);
static struct clocksource clocksource_timebase = {
.name = "timebase",
@@ -91,10 +84,12 @@ static struct clocksource clocksource_timebase = {
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
.mask = CLOCKSOURCE_MASK(64),
.read = timebase_read,
+ .vdso_clock_mode = VDSO_CLOCKMODE_ARCHTIMER,
};
#define DECREMENTER_DEFAULT_MAX 0x7FFFFFFF
u64 decrementer_max = DECREMENTER_DEFAULT_MAX;
+EXPORT_SYMBOL_GPL(decrementer_max); /* for KVM HDEC */
static int decrementer_set_next_event(unsigned long evt,
struct clock_event_device *dev);
@@ -113,7 +108,13 @@ struct clock_event_device decrementer_clockevent = {
};
EXPORT_SYMBOL(decrementer_clockevent);
-DEFINE_PER_CPU(u64, decrementers_next_tb);
+/*
+ * This always puts next_tb beyond now, so the clock event will never fire
+ * with the usual comparison, no need for a separate test for stopped.
+ */
+#define DEC_CLOCKEVENT_STOPPED ~0ULL
+DEFINE_PER_CPU(u64, decrementers_next_tb) = DEC_CLOCKEVENT_STOPPED;
+EXPORT_SYMBOL_GPL(decrementers_next_tb);
static DEFINE_PER_CPU(struct clock_event_device, decrementers);
#define XSEC_PER_SEC (1024*1024)
@@ -156,10 +157,6 @@ bool tb_invalid;
u64 __cputime_usec_factor;
EXPORT_SYMBOL(__cputime_usec_factor);
-#ifdef CONFIG_PPC_SPLPAR
-void (*dtl_consumer)(struct dtl_entry *, u64);
-#endif
-
static void calc_cputime_factors(void)
{
struct div_result res;
@@ -181,99 +178,6 @@ static inline unsigned long read_spurr(unsigned long tb)
return tb;
}
-#ifdef CONFIG_PPC_SPLPAR
-
-/*
- * Scan the dispatch trace log and count up the stolen time.
- * Should be called with interrupts disabled.
- */
-static u64 scan_dispatch_log(u64 stop_tb)
-{
- u64 i = local_paca->dtl_ridx;
- struct dtl_entry *dtl = local_paca->dtl_curr;
- struct dtl_entry *dtl_end = local_paca->dispatch_log_end;
- struct lppaca *vpa = local_paca->lppaca_ptr;
- u64 tb_delta;
- u64 stolen = 0;
- u64 dtb;
-
- if (!dtl)
- return 0;
-
- if (i == be64_to_cpu(vpa->dtl_idx))
- return 0;
- while (i < be64_to_cpu(vpa->dtl_idx)) {
- dtb = be64_to_cpu(dtl->timebase);
- tb_delta = be32_to_cpu(dtl->enqueue_to_dispatch_time) +
- be32_to_cpu(dtl->ready_to_enqueue_time);
- barrier();
- if (i + N_DISPATCH_LOG < be64_to_cpu(vpa->dtl_idx)) {
- /* buffer has overflowed */
- i = be64_to_cpu(vpa->dtl_idx) - N_DISPATCH_LOG;
- dtl = local_paca->dispatch_log + (i % N_DISPATCH_LOG);
- continue;
- }
- if (dtb > stop_tb)
- break;
- if (dtl_consumer)
- dtl_consumer(dtl, i);
- stolen += tb_delta;
- ++i;
- ++dtl;
- if (dtl == dtl_end)
- dtl = local_paca->dispatch_log;
- }
- local_paca->dtl_ridx = i;
- local_paca->dtl_curr = dtl;
- return stolen;
-}
-
-/*
- * Accumulate stolen time by scanning the dispatch trace log.
- * Called on entry from user mode.
- */
-void notrace accumulate_stolen_time(void)
-{
- u64 sst, ust;
- unsigned long save_irq_soft_mask = irq_soft_mask_return();
- struct cpu_accounting_data *acct = &local_paca->accounting;
-
- /* We are called early in the exception entry, before
- * soft/hard_enabled are sync'ed to the expected state
- * for the exception. We are hard disabled but the PACA
- * needs to reflect that so various debug stuff doesn't
- * complain
- */
- irq_soft_mask_set(IRQS_DISABLED);
-
- sst = scan_dispatch_log(acct->starttime_user);
- ust = scan_dispatch_log(acct->starttime);
- acct->stime -= sst;
- acct->utime -= ust;
- acct->steal_time += ust + sst;
-
- irq_soft_mask_set(save_irq_soft_mask);
-}
-
-static inline u64 calculate_stolen_time(u64 stop_tb)
-{
- if (!firmware_has_feature(FW_FEATURE_SPLPAR))
- return 0;
-
- if (get_paca()->dtl_ridx != be64_to_cpu(get_lppaca()->dtl_idx))
- return scan_dispatch_log(stop_tb);
-
- return 0;
-}
-
-#else /* CONFIG_PPC_SPLPAR */
-static inline u64 calculate_stolen_time(u64 stop_tb)
-{
- return 0;
-}
-
-#endif /* CONFIG_PPC_SPLPAR */
-
/*
* Account time for a transition between system, hard irq
* or soft irq state.
@@ -318,12 +222,11 @@ static unsigned long vtime_delta_scaled(struct cpu_accounting_data *acct,
return stime_scaled;
}
-static unsigned long vtime_delta(struct task_struct *tsk,
+static unsigned long vtime_delta(struct cpu_accounting_data *acct,
unsigned long *stime_scaled,
unsigned long *steal_time)
{
unsigned long now, stime;
- struct cpu_accounting_data *acct = get_accounting(tsk);
WARN_ON_ONCE(!irqs_disabled());
@@ -333,34 +236,39 @@ static unsigned long vtime_delta(struct task_struct *tsk,
*stime_scaled = vtime_delta_scaled(acct, now, stime);
- *steal_time = calculate_stolen_time(now);
+ if (IS_ENABLED(CONFIG_PPC_SPLPAR) &&
+ firmware_has_feature(FW_FEATURE_SPLPAR))
+ *steal_time = pseries_calculate_stolen_time(now);
+ else
+ *steal_time = 0;
return stime;
}
+static void vtime_delta_kernel(struct cpu_accounting_data *acct,
+ unsigned long *stime, unsigned long *stime_scaled)
+{
+ unsigned long steal_time;
+
+ *stime = vtime_delta(acct, stime_scaled, &steal_time);
+ *stime -= min(*stime, steal_time);
+ acct->steal_time += steal_time;
+}
+
void vtime_account_kernel(struct task_struct *tsk)
{
- unsigned long stime, stime_scaled, steal_time;
struct cpu_accounting_data *acct = get_accounting(tsk);
+ unsigned long stime, stime_scaled;
- stime = vtime_delta(tsk, &stime_scaled, &steal_time);
+ vtime_delta_kernel(acct, &stime, &stime_scaled);
- stime -= min(stime, steal_time);
- acct->steal_time += steal_time;
-
- if ((tsk->flags & PF_VCPU) && !irq_count()) {
+ if (tsk->flags & PF_VCPU) {
acct->gtime += stime;
#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
acct->utime_scaled += stime_scaled;
#endif
} else {
- if (hardirq_count())
- acct->hardirq_time += stime;
- else if (in_serving_softirq())
- acct->softirq_time += stime;
- else
- acct->stime += stime;
-
+ acct->stime += stime;
#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
acct->stime_scaled += stime_scaled;
#endif
@@ -373,10 +281,34 @@ void vtime_account_idle(struct task_struct *tsk)
unsigned long stime, stime_scaled, steal_time;
struct cpu_accounting_data *acct = get_accounting(tsk);
- stime = vtime_delta(tsk, &stime_scaled, &steal_time);
+ stime = vtime_delta(acct, &stime_scaled, &steal_time);
acct->idle_time += stime + steal_time;
}
+static void vtime_account_irq_field(struct cpu_accounting_data *acct,
+ unsigned long *field)
+{
+ unsigned long stime, stime_scaled;
+
+ vtime_delta_kernel(acct, &stime, &stime_scaled);
+ *field += stime;
+#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
+ acct->stime_scaled += stime_scaled;
+#endif
+}
+
+void vtime_account_softirq(struct task_struct *tsk)
+{
+ struct cpu_accounting_data *acct = get_accounting(tsk);
+ vtime_account_irq_field(acct, &acct->softirq_time);
+}
+
+void vtime_account_hardirq(struct task_struct *tsk)
+{
+ struct cpu_accounting_data *acct = get_accounting(tsk);
+ vtime_account_irq_field(acct, &acct->hardirq_time);
+}
+
static void vtime_flush_scaled(struct task_struct *tsk,
struct cpu_accounting_data *acct)
{
@@ -445,19 +377,9 @@ void vtime_flush(struct task_struct *tsk)
void __delay(unsigned long loops)
{
unsigned long start;
- int diff;
spin_begin();
- if (__USE_RTC()) {
- start = get_rtcl();
- do {
- /* the RTCL register wraps at 1000000000 */
- diff = get_rtcl() - start;
- if (diff < 0)
- diff += 1000000000;
- spin_cpu_relax();
- } while (diff < loops);
- } else if (tb_invalid) {
+ if (tb_invalid) {
/*
* TB is in error state and isn't ticking anymore.
* HMI handler was unable to recover from TB error.
@@ -465,8 +387,8 @@ void __delay(unsigned long loops)
*/
spin_cpu_relax();
} else {
- start = get_tbl();
- while (get_tbl() - start < loops)
+ start = mftb();
+ while (mftb() - start < loops)
spin_cpu_relax();
}
spin_end();
@@ -522,35 +444,6 @@ static inline void clear_irq_work_pending(void)
"i" (offsetof(struct paca_struct, irq_work_pending)));
}
-void arch_irq_work_raise(void)
-{
- preempt_disable();
- set_irq_work_pending_flag();
- /*
- * Non-nmi code running with interrupts disabled will replay
- * irq_happened before it re-enables interrupts, so setthe
- * decrementer there instead of causing a hardware exception
- * which would immediately hit the masked interrupt handler
- * and have the net effect of setting the decrementer in
- * irq_happened.
- *
- * NMI interrupts can not check this when they return, so the
- * decrementer hardware exception is raised, which will fire
- * when interrupts are next enabled.
- *
- * BookE does not support this yet, it must audit all NMI
- * interrupt handlers to ensure they call nmi_enter() so this
- * check would be correct.
- */
- if (IS_ENABLED(CONFIG_BOOKE) || !irqs_disabled() || in_nmi()) {
- set_dec(1);
- } else {
- hard_irq_disable();
- local_paca->irq_happened |= PACA_IRQ_DEC;
- }
- preempt_enable();
-}
-
#else /* 32-bit */
DEFINE_PER_CPU(u8, irq_work_pending);
@@ -559,95 +452,133 @@ DEFINE_PER_CPU(u8, irq_work_pending);
#define test_irq_work_pending() __this_cpu_read(irq_work_pending)
#define clear_irq_work_pending() __this_cpu_write(irq_work_pending, 0)
+#endif /* 32 vs 64 bit */
+
void arch_irq_work_raise(void)
{
+ /*
+ * 64-bit code that uses irq soft-mask can just cause an immediate
+ * interrupt here that gets soft masked, if this is called under
+ * local_irq_disable(). It might be possible to prevent that happening
+ * by noticing interrupts are disabled and setting decrementer pending
+ * to be replayed when irqs are enabled. The problem there is that
+ * tracing can call irq_work_raise, including in code that does low
+ * level manipulations of irq soft-mask state (e.g., trace_hardirqs_on)
+ * which could get tangled up if we're messing with the same state
+ * here.
+ */
preempt_disable();
set_irq_work_pending_flag();
set_dec(1);
preempt_enable();
}
-#endif /* 32 vs 64 bit */
+static void set_dec_or_work(u64 val)
+{
+ set_dec(val);
+ /* We may have raced with new irq work */
+ if (unlikely(test_irq_work_pending()))
+ set_dec(1);
+}
#else /* CONFIG_IRQ_WORK */
#define test_irq_work_pending() 0
#define clear_irq_work_pending()
+static void set_dec_or_work(u64 val)
+{
+ set_dec(val);
+}
#endif /* CONFIG_IRQ_WORK */
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+void timer_rearm_host_dec(u64 now)
+{
+ u64 *next_tb = this_cpu_ptr(&decrementers_next_tb);
+
+ WARN_ON_ONCE(!arch_irqs_disabled());
+ WARN_ON_ONCE(mfmsr() & MSR_EE);
+
+ if (now >= *next_tb) {
+ local_paca->irq_happened |= PACA_IRQ_DEC;
+ } else {
+ now = *next_tb - now;
+ if (now > decrementer_max)
+ now = decrementer_max;
+ set_dec_or_work(now);
+ }
+}
+EXPORT_SYMBOL_GPL(timer_rearm_host_dec);
+#endif
+
/*
* timer_interrupt - gets called when the decrementer overflows,
* with interrupts disabled.
*/
-void timer_interrupt(struct pt_regs *regs)
+DEFINE_INTERRUPT_HANDLER_ASYNC(timer_interrupt)
{
struct clock_event_device *evt = this_cpu_ptr(&decrementers);
u64 *next_tb = this_cpu_ptr(&decrementers_next_tb);
struct pt_regs *old_regs;
u64 now;
- /* Some implementations of hotplug will get timer interrupts while
- * offline, just ignore these and we also need to set
- * decrementers_next_tb as MAX to make sure __check_irq_replay
- * don't replay timer interrupt when return, otherwise we'll trap
- * here infinitely :(
+ /*
+ * Some implementations of hotplug will get timer interrupts while
+ * offline, just ignore these.
*/
if (unlikely(!cpu_online(smp_processor_id()))) {
- *next_tb = ~(u64)0;
set_dec(decrementer_max);
return;
}
- /* Ensure a positive value is written to the decrementer, or else
- * some CPUs will continue to take decrementer exceptions. When the
- * PPC_WATCHDOG (decrementer based) is configured, keep this at most
- * 31 bits, which is about 4 seconds on most systems, which gives
- * the watchdog a chance of catching timer interrupt hard lockups.
- */
- if (IS_ENABLED(CONFIG_PPC_WATCHDOG))
- set_dec(0x7fffffff);
- else
- set_dec(decrementer_max);
-
- /* Conditionally hard-enable interrupts now that the DEC has been
- * bumped to its maximum value
- */
- may_hard_irq_enable();
+ /* Conditionally hard-enable interrupts. */
+ if (should_hard_irq_enable()) {
+ /*
+ * Ensure a positive value is written to the decrementer, or
+ * else some CPUs will continue to take decrementer exceptions.
+ * When the PPC_WATCHDOG (decrementer based) is configured,
+ * keep this at most 31 bits, which is about 4 seconds on most
+ * systems, which gives the watchdog a chance of catching timer
+ * interrupt hard lockups.
+ */
+ if (IS_ENABLED(CONFIG_PPC_WATCHDOG))
+ set_dec(0x7fffffff);
+ else
+ set_dec(decrementer_max);
+ do_hard_irq_enable();
+ }
#if defined(CONFIG_PPC32) && defined(CONFIG_PPC_PMAC)
if (atomic_read(&ppc_n_lost_interrupts) != 0)
- do_IRQ(regs);
+ __do_IRQ(regs);
#endif
old_regs = set_irq_regs(regs);
- irq_enter();
+
trace_timer_interrupt_entry(regs);
if (test_irq_work_pending()) {
clear_irq_work_pending();
+ mce_run_irq_context_handlers();
irq_work_run();
}
- now = get_tb_or_rtc();
+ now = get_tb();
if (now >= *next_tb) {
- *next_tb = ~(u64)0;
- if (evt->event_handler)
- evt->event_handler(evt);
+ evt->event_handler(evt);
__this_cpu_inc(irq_stat.timer_irqs_event);
} else {
now = *next_tb - now;
- if (now <= decrementer_max)
- set_dec(now);
- /* We may have raced with new irq work */
- if (test_irq_work_pending())
- set_dec(1);
+ if (now > decrementer_max)
+ now = decrementer_max;
+ set_dec_or_work(now);
__this_cpu_inc(irq_stat.timer_irqs_others);
}
trace_timer_interrupt_exit(regs);
- irq_exit();
+
set_irq_regs(old_regs);
}
EXPORT_SYMBOL(timer_interrupt);
@@ -655,26 +586,18 @@ EXPORT_SYMBOL(timer_interrupt);
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void timer_broadcast_interrupt(void)
{
- u64 *next_tb = this_cpu_ptr(&decrementers_next_tb);
-
- *next_tb = ~(u64)0;
tick_receive_broadcast();
__this_cpu_inc(irq_stat.broadcast_irqs_event);
}
#endif
-/*
- * Hypervisor decrementer interrupts shouldn't occur but are sometimes
- * left pending on exit from a KVM guest. We don't need to do anything
- * to clear them, as they are edge-triggered.
- */
-void hdec_interrupt(struct pt_regs *regs)
-{
-}
-
#ifdef CONFIG_SUSPEND
-static void generic_suspend_disable_irqs(void)
+/* Overrides the weak version in kernel/power/main.c */
+void arch_suspend_disable_irqs(void)
{
+ if (ppc_md.suspend_disable_irqs)
+ ppc_md.suspend_disable_irqs();
+
/* Disable the decrementer, so that it doesn't interfere
* with suspending.
*/
@@ -684,23 +607,11 @@ static void generic_suspend_disable_irqs(void)
set_dec(decrementer_max);
}
-static void generic_suspend_enable_irqs(void)
-{
- local_irq_enable();
-}
-
-/* Overrides the weak version in kernel/power/main.c */
-void arch_suspend_disable_irqs(void)
-{
- if (ppc_md.suspend_disable_irqs)
- ppc_md.suspend_disable_irqs();
- generic_suspend_disable_irqs();
-}
-
/* Overrides the weak version in kernel/power/main.c */
void arch_suspend_enable_irqs(void)
{
- generic_suspend_enable_irqs();
+ local_irq_enable();
+
if (ppc_md.suspend_enable_irqs)
ppc_md.suspend_enable_irqs();
}
@@ -721,8 +632,6 @@ EXPORT_SYMBOL_GPL(tb_to_ns);
*/
notrace unsigned long long sched_clock(void)
{
- if (__USE_RTC())
- return get_rtc();
return mulhdu(get_tb() - boot_tb, tb_to_ns_scale) << tb_to_ns_shift;
}
@@ -782,7 +691,7 @@ static int __init get_freq(char *name, int cells, unsigned long *val)
static void start_cpu_decrementer(void)
{
-#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
+#ifdef CONFIG_BOOKE_OR_40x
unsigned int tcr;
/* Clear any pending timer interrupts */
@@ -838,7 +747,7 @@ static void __read_persistent_clock(struct timespec64 *ts)
static int first = 1;
ts->tv_nsec = 0;
- /* XXX this is a litle fragile but will work okay in the short term */
+ /* XXX this is a little fragile but will work okay in the short term */
if (first) {
first = 0;
if (ppc_md.time_init)
@@ -872,113 +781,14 @@ void read_persistent_clock64(struct timespec64 *ts)
}
/* clocksource code */
-static notrace u64 rtc_read(struct clocksource *cs)
-{
- return (u64)get_rtc();
-}
-
static notrace u64 timebase_read(struct clocksource *cs)
{
return (u64)get_tb();
}
-
-void update_vsyscall(struct timekeeper *tk)
-{
- struct timespec64 xt;
- struct clocksource *clock = tk->tkr_mono.clock;
- u32 mult = tk->tkr_mono.mult;
- u32 shift = tk->tkr_mono.shift;
- u64 cycle_last = tk->tkr_mono.cycle_last;
- u64 new_tb_to_xs, new_stamp_xsec;
- u64 frac_sec;
-
- if (clock != &clocksource_timebase)
- return;
-
- xt.tv_sec = tk->xtime_sec;
- xt.tv_nsec = (long)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift);
-
- /* Make userspace gettimeofday spin until we're done. */
- ++vdso_data->tb_update_count;
- smp_mb();
-
- /*
- * This computes ((2^20 / 1e9) * mult) >> shift as a
- * 0.64 fixed-point fraction.
- * The computation in the else clause below won't overflow
- * (as long as the timebase frequency is >= 1.049 MHz)
- * but loses precision because we lose the low bits of the constant
- * in the shift. Note that 19342813113834067 ~= 2^(20+64) / 1e9.
- * For a shift of 24 the error is about 0.5e-9, or about 0.5ns
- * over a second. (Shift values are usually 22, 23 or 24.)
- * For high frequency clocks such as the 512MHz timebase clock
- * on POWER[6789], the mult value is small (e.g. 32768000)
- * and so we can shift the constant by 16 initially
- * (295147905179 ~= 2^(20+64-16) / 1e9) and then do the
- * remaining shifts after the multiplication, which gives a
- * more accurate result (e.g. with mult = 32768000, shift = 24,
- * the error is only about 1.2e-12, or 0.7ns over 10 minutes).
- */
- if (mult <= 62500000 && clock->shift >= 16)
- new_tb_to_xs = ((u64) mult * 295147905179ULL) >> (clock->shift - 16);
- else
- new_tb_to_xs = (u64) mult * (19342813113834067ULL >> clock->shift);
-
- /*
- * Compute the fractional second in units of 2^-32 seconds.
- * The fractional second is tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift
- * in nanoseconds, so multiplying that by 2^32 / 1e9 gives
- * it in units of 2^-32 seconds.
- * We assume shift <= 32 because clocks_calc_mult_shift()
- * generates shift values in the range 0 - 32.
- */
- frac_sec = tk->tkr_mono.xtime_nsec << (32 - shift);
- do_div(frac_sec, NSEC_PER_SEC);
-
- /*
- * Work out new stamp_xsec value for any legacy users of systemcfg.
- * stamp_xsec is in units of 2^-20 seconds.
- */
- new_stamp_xsec = frac_sec >> 12;
- new_stamp_xsec += tk->xtime_sec * XSEC_PER_SEC;
-
- /*
- * tb_update_count is used to allow the userspace gettimeofday code
- * to assure itself that it sees a consistent view of the tb_to_xs and
- * stamp_xsec variables. It reads the tb_update_count, then reads
- * tb_to_xs and stamp_xsec and then reads tb_update_count again. If
- * the two values of tb_update_count match and are even then the
- * tb_to_xs and stamp_xsec values are consistent. If not, then it
- * loops back and reads them again until this criteria is met.
- */
- vdso_data->tb_orig_stamp = cycle_last;
- vdso_data->stamp_xsec = new_stamp_xsec;
- vdso_data->tb_to_xs = new_tb_to_xs;
- vdso_data->wtom_clock_sec = tk->wall_to_monotonic.tv_sec;
- vdso_data->wtom_clock_nsec = tk->wall_to_monotonic.tv_nsec;
- vdso_data->stamp_xtime_sec = xt.tv_sec;
- vdso_data->stamp_xtime_nsec = xt.tv_nsec;
- vdso_data->stamp_sec_fraction = frac_sec;
- vdso_data->hrtimer_res = hrtimer_resolution;
- smp_wmb();
- ++(vdso_data->tb_update_count);
-}
-
-void update_vsyscall_tz(void)
-{
- vdso_data->tz_minuteswest = sys_tz.tz_minuteswest;
- vdso_data->tz_dsttime = sys_tz.tz_dsttime;
-}
-
static void __init clocksource_init(void)
{
- struct clocksource *clock;
-
- if (__USE_RTC())
- clock = &clocksource_rtc;
- else
- clock = &clocksource_timebase;
+ struct clocksource *clock = &clocksource_timebase;
if (clocksource_register_hz(clock, tb_ticks_per_sec)) {
printk(KERN_ERR "clocksource: %s is already registered\n",
@@ -993,19 +803,17 @@ static void __init clocksource_init(void)
static int decrementer_set_next_event(unsigned long evt,
struct clock_event_device *dev)
{
- __this_cpu_write(decrementers_next_tb, get_tb_or_rtc() + evt);
- set_dec(evt);
-
- /* We may have raced with new irq work */
- if (test_irq_work_pending())
- set_dec(1);
+ __this_cpu_write(decrementers_next_tb, get_tb() + evt);
+ set_dec_or_work(evt);
return 0;
}
static int decrementer_shutdown(struct clock_event_device *dev)
{
- decrementer_set_next_event(decrementer_max, dev);
+ __this_cpu_write(decrementers_next_tb, DEC_CLOCKEVENT_STOPPED);
+ set_dec_or_work(decrementer_max);
+
return 0;
}
@@ -1084,7 +892,7 @@ void secondary_cpu_time_init(void)
*/
start_cpu_decrementer();
- /* FIME: Should make unrelatred change to move snapshot_timebase
+ /* FIXME: Should make unrelated change to move snapshot_timebase
* call here ! */
register_decrementer_clockevent(smp_processor_id());
}
@@ -1096,17 +904,12 @@ void __init time_init(void)
u64 scale;
unsigned shift;
- if (__USE_RTC()) {
- /* 601 processor: dec counts down by 128 every 128ns */
- ppc_tb_freq = 1000000000;
- } else {
- /* Normal PowerPC with timebase register */
- ppc_md.calibrate_decr();
- printk(KERN_DEBUG "time_init: decrementer frequency = %lu.%.6lu MHz\n",
- ppc_tb_freq / 1000000, ppc_tb_freq % 1000000);
- printk(KERN_DEBUG "time_init: processor frequency = %lu.%.6lu MHz\n",
- ppc_proc_freq / 1000000, ppc_proc_freq % 1000000);
- }
+ /* Normal PowerPC with timebase register */
+ ppc_md.calibrate_decr();
+ printk(KERN_DEBUG "time_init: decrementer frequency = %lu.%.6lu MHz\n",
+ ppc_tb_freq / 1000000, ppc_tb_freq % 1000000);
+ printk(KERN_DEBUG "time_init: processor frequency = %lu.%.6lu MHz\n",
+ ppc_proc_freq / 1000000, ppc_proc_freq % 1000000);
tb_ticks_per_jiffy = ppc_tb_freq / HZ;
tb_ticks_per_sec = ppc_tb_freq;
@@ -1132,7 +935,7 @@ void __init time_init(void)
tb_to_ns_scale = scale;
tb_to_ns_shift = shift;
/* Save the current timebase to pretty up CONFIG_PRINTK_TIME */
- boot_tb = get_tb_or_rtc();
+ boot_tb = get_tb();
/* If platform provided a timezone (pmac), we correct the time */
if (timezone_offset) {
@@ -1140,7 +943,6 @@ void __init time_init(void)
sys_tz.tz_dsttime = 0;
}
- vdso_data->tb_update_count = 0;
vdso_data->tb_ticks_per_sec = tb_ticks_per_sec;
/* initialise and enable the large decrementer (if we have one) */
@@ -1158,9 +960,8 @@ void __init time_init(void)
init_decrementer_clockevent();
tick_setup_hrtimer_broadcast();
-#ifdef CONFIG_COMMON_CLK
of_clk_init(NULL);
-#endif
+ enable_sched_clock_irqtime();
}
/*
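
Aside: the watchdog comment added to timer_interrupt() above caps the rewritten decrementer at 0x7fffffff (31 bits). As a rough worked example, taking the 512 MHz timebase that the removed update_vsyscall() comment cites for POWER[6789]: 0x7fffffff ticks / 512,000,000 ticks per second ≈ 4.2 seconds, which is where the "about 4 seconds" figure comes from and why a hard-locked timer interrupt still retriggers a decrementer exception inside the watchdog window.
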
diff --git a/arch/powerpc/kernel/tm.S b/arch/powerpc/kernel/tm.S
index 6ba0fdd1e7f8..5a0f023a26e9 100644
--- a/arch/powerpc/kernel/tm.S
+++ b/arch/powerpc/kernel/tm.S
@@ -122,6 +122,13 @@ _GLOBAL(tm_reclaim)
std r3, STK_PARAM(R3)(r1)
SAVE_NVGPRS(r1)
+ /*
+ * Save kernel live AMR since it will be clobbered by treclaim
+ * but can be used elsewhere later in kernel space.
+ */
+ mfspr r3, SPRN_AMR
+ std r3, TM_FRAME_L1(r1)
+
/* We need to setup MSR for VSX register save instructions. */
mfmsr r14
mr r15, r14
@@ -219,11 +226,8 @@ _GLOBAL(tm_reclaim)
/* Sync the userland GPRs 2-12, 14-31 to thread->regs: */
SAVE_GPR(0, r7) /* user r0 */
- SAVE_GPR(2, r7) /* user r2 */
- SAVE_4GPRS(3, r7) /* user r3-r6 */
- SAVE_GPR(8, r7) /* user r8 */
- SAVE_GPR(9, r7) /* user r9 */
- SAVE_GPR(10, r7) /* user r10 */
+ SAVE_GPRS(2, 6, r7) /* user r2-r6 */
+ SAVE_GPRS(8, 10, r7) /* user r8-r10 */
ld r3, GPR1(r1) /* user r1 */
ld r4, GPR7(r1) /* user r7 */
ld r5, GPR11(r1) /* user r11 */
@@ -245,7 +249,7 @@ _GLOBAL(tm_reclaim)
* but is used in signal return to 'wind back' to the abort handler.
*/
- /* ******************** CR,LR,CCR,MSR ********** */
+ /* ***************** CTR, LR, CR, XER ********** */
mfctr r3
mflr r4
mfcr r5
@@ -256,7 +260,6 @@ _GLOBAL(tm_reclaim)
std r5, _CCR(r7)
std r6, _XER(r7)
-
/* ******************** TAR, DSCR ********** */
mfspr r3, SPRN_TAR
mfspr r4, SPRN_DSCR
@@ -264,6 +267,10 @@ _GLOBAL(tm_reclaim)
std r3, THREAD_TM_TAR(r12)
std r4, THREAD_TM_DSCR(r12)
+ /* ******************** AMR **************** */
+ mfspr r3, SPRN_AMR
+ std r3, THREAD_TM_AMR(r12)
+
/*
* MSR and flags: We don't change CRs, and we don't need to alter MSR.
*/
@@ -308,7 +315,9 @@ _GLOBAL(tm_reclaim)
std r3, THREAD_TM_TFHAR(r12)
std r4, THREAD_TM_TFIAR(r12)
- /* AMR is checkpointed too, but is unsupported by Linux. */
+ /* Restore kernel live AMR */
+ ld r8, TM_FRAME_L1(r1)
+ mtspr SPRN_AMR, r8
/* Restore original MSR/IRQ state & clear TM mode */
ld r14, TM_FRAME_L0(r1) /* Orig MSR */
@@ -355,6 +364,13 @@ _GLOBAL(__tm_recheckpoint)
*/
SAVE_NVGPRS(r1)
+ /*
+ * Save kernel live AMR since it will be clobbered by trechkpt
+ * but can be used elsewhere later in kernel space.
+ */
+ mfspr r8, SPRN_AMR
+ std r8, TM_FRAME_L0(r1)
+
/* Load complete register state from ts_ckpt* registers */
addi r7, r3, PT_CKPT_REGS /* Thread's ckpt_regs */
@@ -404,7 +420,7 @@ _GLOBAL(__tm_recheckpoint)
restore_gprs:
- /* ******************** CR,LR,CCR,MSR ********** */
+ /* ****************** CTR, LR, XER ************* */
ld r4, _CTR(r7)
ld r5, _LINK(r7)
ld r8, _XER(r7)
@@ -417,17 +433,18 @@ restore_gprs:
ld r4, THREAD_TM_TAR(r3)
mtspr SPRN_TAR, r4
+ /* ******************** AMR ******************** */
+ ld r4, THREAD_TM_AMR(r3)
+ mtspr SPRN_AMR, r4
+
/* Load up the PPR and DSCR in GPRs only at this stage */
ld r5, THREAD_TM_DSCR(r3)
ld r6, THREAD_TM_PPR(r3)
REST_GPR(0, r7) /* GPR0 */
- REST_2GPRS(2, r7) /* GPR2-3 */
- REST_GPR(4, r7) /* GPR4 */
- REST_4GPRS(8, r7) /* GPR8-11 */
- REST_2GPRS(12, r7) /* GPR12-13 */
-
- REST_NVGPRS(r7) /* GPR14-31 */
+ REST_GPRS(2, 4, r7) /* GPR2-4 */
+ REST_GPRS(8, 12, r7) /* GPR8-12 */
+ REST_GPRS(14, 31, r7) /* GPR14-31 */
/* Load up PPR and DSCR here so we don't run with user values for long */
mtspr SPRN_DSCR, r5
@@ -463,18 +480,24 @@ restore_gprs:
REST_GPR(6, r7)
/*
- * Store r1 and r5 on the stack so that we can access them after we
- * clear MSR RI.
+ * Store user r1, r5 and r13 on the stack (in the unused save
+ * areas / compiler reserved areas), so that we can access them after
+ * we clear MSR RI.
*/
REST_GPR(5, r7)
std r5, -8(r1)
- ld r5, GPR1(r7)
+ ld r5, GPR13(r7)
std r5, -16(r1)
+ ld r5, GPR1(r7)
+ std r5, -24(r1)
REST_GPR(7, r7)
- /* Clear MSR RI since we are about to use SCRATCH0. EE is already off */
+ /* Stash the stack pointer away for use after recheckpoint */
+ std r1, PACAR1(r13)
+
+ /* Clear MSR RI since we are about to clobber r13. EE is already off */
li r5, 0
mtmsrd r5, 1
@@ -485,9 +508,9 @@ restore_gprs:
* until we turn MSR RI back on.
*/
- SET_SCRATCH0(r1)
ld r5, -8(r1)
- ld r1, -16(r1)
+ ld r13, -16(r1)
+ ld r1, -24(r1)
/* Commit register state as checkpointed state: */
TRECHKPT
@@ -503,12 +526,16 @@ restore_gprs:
*/
GET_PACA(r13)
- GET_SCRATCH0(r1)
+ ld r1, PACAR1(r13)
- /* R1 is restored, so we are recoverable again. EE is still off */
+ /* R13, R1 is restored, so we are recoverable again. EE is still off */
li r4, MSR_RI
mtmsrd r4, 1
+ /* Restore kernel live AMR */
+ ld r8, TM_FRAME_L0(r1)
+ mtspr SPRN_AMR, r8
+
REST_NVGPRS(r1)
addi r1, r1, TM_FRAME_SIZE
diff --git a/arch/powerpc/kernel/trace/Makefile b/arch/powerpc/kernel/trace/Makefile
index 858503775c58..af8527538fe4 100644
--- a/arch/powerpc/kernel/trace/Makefile
+++ b/arch/powerpc/kernel/trace/Makefile
@@ -8,16 +8,13 @@ ifdef CONFIG_FUNCTION_TRACER
CFLAGS_REMOVE_ftrace.o = $(CC_FLAGS_FTRACE)
endif
-obj32-$(CONFIG_FUNCTION_TRACER) += ftrace_32.o
-obj64-$(CONFIG_FUNCTION_TRACER) += ftrace_64.o
+obj32-$(CONFIG_FUNCTION_TRACER) += ftrace_mprofile.o
ifdef CONFIG_MPROFILE_KERNEL
-obj64-$(CONFIG_FUNCTION_TRACER) += ftrace_64_mprofile.o
+obj64-$(CONFIG_FUNCTION_TRACER) += ftrace_mprofile.o
else
obj64-$(CONFIG_FUNCTION_TRACER) += ftrace_64_pg.o
endif
-obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
-obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
-obj-$(CONFIG_FTRACE_SYSCALLS) += ftrace.o
+obj-$(CONFIG_FUNCTION_TRACER) += ftrace_low.o ftrace.o
obj-$(CONFIG_TRACING) += trace_clock.o
obj-$(CONFIG_PPC64) += $(obj64-y)
diff --git a/arch/powerpc/kernel/trace/ftrace.c b/arch/powerpc/kernel/trace/ftrace.c
index 7ea0ca044b65..7b85c3b460a3 100644
--- a/arch/powerpc/kernel/trace/ftrace.c
+++ b/arch/powerpc/kernel/trace/ftrace.c
@@ -22,14 +22,11 @@
#include <linux/init.h>
#include <linux/list.h>
-#include <asm/asm-prototypes.h>
#include <asm/cacheflush.h>
#include <asm/code-patching.h>
#include <asm/ftrace.h>
#include <asm/syscall.h>
-
-
-#ifdef CONFIG_DYNAMIC_FTRACE
+#include <asm/inst.h>
/*
* We generally only have a single long_branch tramp and at most 2 or 3 plt
@@ -40,23 +37,23 @@
#define NUM_FTRACE_TRAMPS 8
static unsigned long ftrace_tramps[NUM_FTRACE_TRAMPS];
-static unsigned int
+static ppc_inst_t
ftrace_call_replace(unsigned long ip, unsigned long addr, int link)
{
- unsigned int op;
+ ppc_inst_t op;
addr = ppc_function_entry((void *)addr);
/* if (link) set op to 'bl' else 'b' */
- op = create_branch((unsigned int *)ip, addr, link ? 1 : 0);
+ create_branch(&op, (u32 *)ip, addr, link ? BRANCH_SET_LINK : 0);
return op;
}
-static int
-ftrace_modify_code(unsigned long ip, unsigned int old, unsigned int new)
+static inline int
+ftrace_modify_code(unsigned long ip, ppc_inst_t old, ppc_inst_t new)
{
- unsigned int replaced;
+ ppc_inst_t replaced;
/*
* Note:
@@ -67,21 +64,18 @@ ftrace_modify_code(unsigned long ip, unsigned int old, unsigned int new)
*/
/* read the text we want to modify */
- if (probe_kernel_read(&replaced, (void *)ip, MCOUNT_INSN_SIZE))
+ if (copy_inst_from_kernel_nofault(&replaced, (void *)ip))
return -EFAULT;
/* Make sure it is what we expect it to be */
- if (replaced != old) {
- pr_err("%p: replaced (%#x) != old (%#x)",
- (void *)ip, replaced, old);
+ if (!ppc_inst_equal(replaced, old)) {
+ pr_err("%p: replaced (%08lx) != old (%08lx)", (void *)ip,
+ ppc_inst_as_ulong(replaced), ppc_inst_as_ulong(old));
return -EINVAL;
}
/* replace the text with the new text */
- if (patch_instruction((unsigned int *)ip, new))
- return -EPERM;
-
- return 0;
+ return patch_instruction((u32 *)ip, new);
}
/*
@@ -91,25 +85,24 @@ static int test_24bit_addr(unsigned long ip, unsigned long addr)
{
addr = ppc_function_entry((void *)addr);
- /* use the create_branch to verify that this offset can be branched */
- return create_branch((unsigned int *)ip, addr, 0);
+ return is_offset_in_branch_range(addr - ip);
}
-static int is_bl_op(unsigned int op)
+static int is_bl_op(ppc_inst_t op)
{
- return (op & 0xfc000003) == 0x48000001;
+ return (ppc_inst_val(op) & ~PPC_LI_MASK) == PPC_RAW_BL(0);
}
-static int is_b_op(unsigned int op)
+static int is_b_op(ppc_inst_t op)
{
- return (op & 0xfc000003) == 0x48000000;
+ return (ppc_inst_val(op) & ~PPC_LI_MASK) == PPC_RAW_BRANCH(0);
}
-static unsigned long find_bl_target(unsigned long ip, unsigned int op)
+static unsigned long find_bl_target(unsigned long ip, ppc_inst_t op)
{
int offset;
- offset = (op & 0x03fffffc);
+ offset = PPC_LI(ppc_inst_val(op));
/* make it signed */
if (offset & 0x02000000)
offset |= 0xfe000000;
@@ -118,24 +111,23 @@ static unsigned long find_bl_target(unsigned long ip, unsigned int op)
}
#ifdef CONFIG_MODULES
-#ifdef CONFIG_PPC64
static int
__ftrace_make_nop(struct module *mod,
struct dyn_ftrace *rec, unsigned long addr)
{
unsigned long entry, ptr, tramp;
unsigned long ip = rec->ip;
- unsigned int op, pop;
+ ppc_inst_t op, pop;
/* read where this goes */
- if (probe_kernel_read(&op, (void *)ip, sizeof(int))) {
+ if (copy_inst_from_kernel_nofault(&op, (void *)ip)) {
pr_err("Fetching opcode failed.\n");
return -EFAULT;
}
- /* Make sure that that this is still a 24bit jump */
+ /* Make sure that this is still a 24bit jump */
if (!is_bl_op(op)) {
- pr_err("Not expected bl: opcode is %x\n", op);
+ pr_err("Not expected bl: opcode is %08lx\n", ppc_inst_as_ulong(op));
return -EINVAL;
}
@@ -158,23 +150,40 @@ __ftrace_make_nop(struct module *mod,
return -EINVAL;
}
-#ifdef CONFIG_MPROFILE_KERNEL
- /* When using -mkernel_profile there is no load to jump over */
- pop = PPC_INST_NOP;
+ if (IS_ENABLED(CONFIG_MPROFILE_KERNEL)) {
+ if (copy_inst_from_kernel_nofault(&op, (void *)(ip - 4))) {
+ pr_err("Fetching instruction at %lx failed.\n", ip - 4);
+ return -EFAULT;
+ }
- if (probe_kernel_read(&op, (void *)(ip - 4), 4)) {
- pr_err("Fetching instruction at %lx failed.\n", ip - 4);
- return -EFAULT;
- }
+ /* We expect either a mflr r0, or a std r0, LRSAVE(r1) */
+ if (!ppc_inst_equal(op, ppc_inst(PPC_RAW_MFLR(_R0))) &&
+ !ppc_inst_equal(op, ppc_inst(PPC_INST_STD_LR))) {
+ pr_err("Unexpected instruction %08lx around bl _mcount\n",
+ ppc_inst_as_ulong(op));
+ return -EINVAL;
+ }
+ } else if (IS_ENABLED(CONFIG_PPC64)) {
+ /*
+ * Check what is in the next instruction. We can see ld r2,40(r1), but
+ * on first pass after boot we will see mflr r0.
+ */
+ if (copy_inst_from_kernel_nofault(&op, (void *)(ip + 4))) {
+ pr_err("Fetching op failed.\n");
+ return -EFAULT;
+ }
- /* We expect either a mflr r0, or a std r0, LRSAVE(r1) */
- if (op != PPC_INST_MFLR && op != PPC_INST_STD_LR) {
- pr_err("Unexpected instruction %08x around bl _mcount\n", op);
- return -EINVAL;
+ if (!ppc_inst_equal(op, ppc_inst(PPC_INST_LD_TOC))) {
+ pr_err("Expected %08lx found %08lx\n", PPC_INST_LD_TOC,
+ ppc_inst_as_ulong(op));
+ return -EINVAL;
+ }
}
-#else
+
/*
- * Our original call site looks like:
+ * When using -mprofile-kernel or PPC32 there is no load to jump over.
+ *
+ * Otherwise our original call site looks like:
*
* bl <tramp>
* ld r2,XX(r1)
@@ -186,102 +195,23 @@ __ftrace_make_nop(struct module *mod,
*
* Use a b +8 to jump over the load.
*/
+ if (IS_ENABLED(CONFIG_MPROFILE_KERNEL) || IS_ENABLED(CONFIG_PPC32))
+ pop = ppc_inst(PPC_RAW_NOP());
+ else
+ pop = ppc_inst(PPC_RAW_BRANCH(8)); /* b +8 */
- pop = PPC_INST_BRANCH | 8; /* b +8 */
-
- /*
- * Check what is in the next instruction. We can see ld r2,40(r1), but
- * on first pass after boot we will see mflr r0.
- */
- if (probe_kernel_read(&op, (void *)(ip+4), MCOUNT_INSN_SIZE)) {
- pr_err("Fetching op failed.\n");
- return -EFAULT;
- }
-
- if (op != PPC_INST_LD_TOC) {
- pr_err("Expected %08x found %08x\n", PPC_INST_LD_TOC, op);
- return -EINVAL;
- }
-#endif /* CONFIG_MPROFILE_KERNEL */
-
- if (patch_instruction((unsigned int *)ip, pop)) {
+ if (patch_instruction((u32 *)ip, pop)) {
pr_err("Patching NOP failed.\n");
return -EPERM;
}
return 0;
}
-
-#else /* !PPC64 */
-static int
-__ftrace_make_nop(struct module *mod,
- struct dyn_ftrace *rec, unsigned long addr)
+#else
+static int __ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long addr)
{
- unsigned int op;
- unsigned int jmp[4];
- unsigned long ip = rec->ip;
- unsigned long tramp;
-
- if (probe_kernel_read(&op, (void *)ip, MCOUNT_INSN_SIZE))
- return -EFAULT;
-
- /* Make sure that that this is still a 24bit jump */
- if (!is_bl_op(op)) {
- pr_err("Not expected bl: opcode is %x\n", op);
- return -EINVAL;
- }
-
- /* lets find where the pointer goes */
- tramp = find_bl_target(ip, op);
-
- /*
- * On PPC32 the trampoline looks like:
- * 0x3d, 0x80, 0x00, 0x00 lis r12,sym@ha
- * 0x39, 0x8c, 0x00, 0x00 addi r12,r12,sym@l
- * 0x7d, 0x89, 0x03, 0xa6 mtctr r12
- * 0x4e, 0x80, 0x04, 0x20 bctr
- */
-
- pr_devel("ip:%lx jumps to %lx", ip, tramp);
-
- /* Find where the trampoline jumps to */
- if (probe_kernel_read(jmp, (void *)tramp, sizeof(jmp))) {
- pr_err("Failed to read %lx\n", tramp);
- return -EFAULT;
- }
-
- pr_devel(" %08x %08x ", jmp[0], jmp[1]);
-
- /* verify that this is what we expect it to be */
- if (((jmp[0] & 0xffff0000) != 0x3d800000) ||
- ((jmp[1] & 0xffff0000) != 0x398c0000) ||
- (jmp[2] != 0x7d8903a6) ||
- (jmp[3] != 0x4e800420)) {
- pr_err("Not a trampoline\n");
- return -EINVAL;
- }
-
- tramp = (jmp[1] & 0xffff) |
- ((jmp[0] & 0xffff) << 16);
- if (tramp & 0x8000)
- tramp -= 0x10000;
-
- pr_devel(" %lx ", tramp);
-
- if (tramp != addr) {
- pr_err("Trampoline location %08lx does not match addr\n",
- tramp);
- return -EINVAL;
- }
-
- op = PPC_INST_NOP;
-
- if (patch_instruction((unsigned int *)ip, op))
- return -EPERM;
-
return 0;
}
-#endif /* PPC64 */
#endif /* CONFIG_MODULES */
static unsigned long find_ftrace_tramp(unsigned long ip)
@@ -295,7 +225,7 @@ static unsigned long find_ftrace_tramp(unsigned long ip)
for (i = NUM_FTRACE_TRAMPS - 1; i >= 0; i--)
if (!ftrace_tramps[i])
continue;
- else if (create_branch((void *)ip, ftrace_tramps[i], 0))
+ else if (is_offset_in_branch_range(ftrace_tramps[i] - ip))
return ftrace_tramps[i];
return 0;
@@ -322,26 +252,17 @@ static int add_ftrace_tramp(unsigned long tramp)
*/
static int setup_mcount_compiler_tramp(unsigned long tramp)
{
- int i, op;
+ int i;
+ ppc_inst_t op;
unsigned long ptr;
- static unsigned long ftrace_plt_tramps[NUM_FTRACE_TRAMPS];
/* Is this a known long jump tramp? */
for (i = 0; i < NUM_FTRACE_TRAMPS; i++)
- if (!ftrace_tramps[i])
- break;
- else if (ftrace_tramps[i] == tramp)
+ if (ftrace_tramps[i] == tramp)
return 0;
- /* Is this a known plt tramp? */
- for (i = 0; i < NUM_FTRACE_TRAMPS; i++)
- if (!ftrace_plt_tramps[i])
- break;
- else if (ftrace_plt_tramps[i] == tramp)
- return -1;
-
/* New trampoline -- read where this goes */
- if (probe_kernel_read(&op, (void *)tramp, sizeof(int))) {
+ if (copy_inst_from_kernel_nofault(&op, (void *)tramp)) {
pr_debug("Fetching opcode failed.\n");
return -1;
}
@@ -361,18 +282,12 @@ static int setup_mcount_compiler_tramp(unsigned long tramp)
}
/* Let's re-write the tramp to go to ftrace_[regs_]caller */
-#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
- ptr = ppc_global_function_entry((void *)ftrace_regs_caller);
-#else
- ptr = ppc_global_function_entry((void *)ftrace_caller);
-#endif
- if (!create_branch((void *)tramp, ptr, 0)) {
- pr_debug("%ps is not reachable from existing mcount tramp\n",
- (void *)ptr);
- return -1;
- }
+ if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS))
+ ptr = ppc_global_function_entry((void *)ftrace_regs_caller);
+ else
+ ptr = ppc_global_function_entry((void *)ftrace_caller);
- if (patch_branch((unsigned int *)tramp, ptr, 0)) {
+ if (patch_branch((u32 *)tramp, ptr, 0)) {
pr_debug("REL24 out of range!\n");
return -1;
}
@@ -388,17 +303,17 @@ static int setup_mcount_compiler_tramp(unsigned long tramp)
static int __ftrace_make_nop_kernel(struct dyn_ftrace *rec, unsigned long addr)
{
unsigned long tramp, ip = rec->ip;
- unsigned int op;
+ ppc_inst_t op;
/* Read where this goes */
- if (probe_kernel_read(&op, (void *)ip, sizeof(int))) {
+ if (copy_inst_from_kernel_nofault(&op, (void *)ip)) {
pr_err("Fetching opcode failed.\n");
return -EFAULT;
}
- /* Make sure that that this is still a 24bit jump */
+ /* Make sure that this is still a 24bit jump */
if (!is_bl_op(op)) {
- pr_err("Not expected bl: opcode is %x\n", op);
+ pr_err("Not expected bl: opcode is %08lx\n", ppc_inst_as_ulong(op));
return -EINVAL;
}
@@ -416,7 +331,7 @@ static int __ftrace_make_nop_kernel(struct dyn_ftrace *rec, unsigned long addr)
}
}
- if (patch_instruction((unsigned int *)ip, PPC_INST_NOP)) {
+ if (patch_instruction((u32 *)ip, ppc_inst(PPC_RAW_NOP()))) {
pr_err("Patching NOP failed.\n");
return -EPERM;
}
@@ -428,7 +343,7 @@ int ftrace_make_nop(struct module *mod,
struct dyn_ftrace *rec, unsigned long addr)
{
unsigned long ip = rec->ip;
- unsigned int old, new;
+ ppc_inst_t old, new;
/*
* If the calling address is more that 24 bits away,
@@ -438,12 +353,14 @@ int ftrace_make_nop(struct module *mod,
if (test_24bit_addr(ip, addr)) {
/* within range */
old = ftrace_call_replace(ip, addr, 1);
- new = PPC_INST_NOP;
+ new = ppc_inst(PPC_RAW_NOP());
return ftrace_modify_code(ip, old, new);
- } else if (core_kernel_text(ip))
+ } else if (core_kernel_text(ip)) {
return __ftrace_make_nop_kernel(rec, addr);
+ } else if (!IS_ENABLED(CONFIG_MODULES)) {
+ return -EINVAL;
+ }
-#ifdef CONFIG_MODULES
/*
* Out of range jumps are called from modules.
* We should either already have a pointer to the module
@@ -466,80 +383,55 @@ int ftrace_make_nop(struct module *mod,
mod = rec->arch.mod;
return __ftrace_make_nop(mod, rec, addr);
-#else
- /* We should not get here without modules */
- return -EINVAL;
-#endif /* CONFIG_MODULES */
}
#ifdef CONFIG_MODULES
-#ifdef CONFIG_PPC64
/*
* Examine the existing instructions for __ftrace_make_call.
* They should effectively be a NOP, and follow formal constraints,
* depending on the ABI. Return false if they don't.
*/
-#ifndef CONFIG_MPROFILE_KERNEL
-static int
-expected_nop_sequence(void *ip, unsigned int op0, unsigned int op1)
-{
- /*
- * We expect to see:
- *
- * b +8
- * ld r2,XX(r1)
- *
- * The load offset is different depending on the ABI. For simplicity
- * just mask it out when doing the compare.
- */
- if ((op0 != 0x48000008) || ((op1 & 0xffff0000) != 0xe8410000))
- return 0;
- return 1;
-}
-#else
-static int
-expected_nop_sequence(void *ip, unsigned int op0, unsigned int op1)
+static bool expected_nop_sequence(void *ip, ppc_inst_t op0, ppc_inst_t op1)
{
- /* look for patched "NOP" on ppc64 with -mprofile-kernel */
- if (op0 != PPC_INST_NOP)
- return 0;
- return 1;
+ if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS))
+ return ppc_inst_equal(op0, ppc_inst(PPC_RAW_NOP()));
+ else
+ return ppc_inst_equal(op0, ppc_inst(PPC_RAW_BRANCH(8))) &&
+ ppc_inst_equal(op1, ppc_inst(PPC_INST_LD_TOC));
}
-#endif
static int
__ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
- unsigned int op[2];
+ ppc_inst_t op[2];
void *ip = (void *)rec->ip;
unsigned long entry, ptr, tramp;
struct module *mod = rec->arch.mod;
/* read where this goes */
- if (probe_kernel_read(op, ip, sizeof(op)))
+ if (copy_inst_from_kernel_nofault(op, ip))
+ return -EFAULT;
+
+ if (!IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS) &&
+ copy_inst_from_kernel_nofault(op + 1, ip + 4))
return -EFAULT;
if (!expected_nop_sequence(ip, op[0], op[1])) {
- pr_err("Unexpected call sequence at %p: %x %x\n",
- ip, op[0], op[1]);
+ pr_err("Unexpected call sequence at %p: %08lx %08lx\n", ip,
+ ppc_inst_as_ulong(op[0]), ppc_inst_as_ulong(op[1]));
return -EINVAL;
}
/* If we never set up ftrace trampoline(s), then bail */
-#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
- if (!mod->arch.tramp || !mod->arch.tramp_regs) {
-#else
- if (!mod->arch.tramp) {
-#endif
+ if (!mod->arch.tramp ||
+ (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS) && !mod->arch.tramp_regs)) {
pr_err("No ftrace trampoline\n");
return -EINVAL;
}
-#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
- if (rec->flags & FTRACE_FL_REGS)
+ if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS) && rec->flags & FTRACE_FL_REGS)
tramp = mod->arch.tramp_regs;
else
-#endif
tramp = mod->arch.tramp;
if (module_trampoline_target(mod, tramp, &ptr)) {
@@ -556,12 +448,6 @@ __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
return -EINVAL;
}
- /* Ensure branch is within 24 bits */
- if (!create_branch(ip, tramp, BRANCH_SET_LINK)) {
- pr_err("Branch out of range\n");
- return -EINVAL;
- }
-
if (patch_branch(ip, tramp, BRANCH_SET_LINK)) {
pr_err("REL24 out of range!\n");
return -EINVAL;
@@ -569,51 +455,16 @@ __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
return 0;
}
-
-#else /* !CONFIG_PPC64: */
-static int
-__ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
+#else
+static int __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
- unsigned int op;
- unsigned long ip = rec->ip;
-
- /* read where this goes */
- if (probe_kernel_read(&op, (void *)ip, MCOUNT_INSN_SIZE))
- return -EFAULT;
-
- /* It should be pointing to a nop */
- if (op != PPC_INST_NOP) {
- pr_err("Expected NOP but have %x\n", op);
- return -EINVAL;
- }
-
- /* If we never set up a trampoline to ftrace_caller, then bail */
- if (!rec->arch.mod->arch.tramp) {
- pr_err("No ftrace trampoline\n");
- return -EINVAL;
- }
-
- /* create the branch to the trampoline */
- op = create_branch((unsigned int *)ip,
- rec->arch.mod->arch.tramp, BRANCH_SET_LINK);
- if (!op) {
- pr_err("REL24 out of range!\n");
- return -EINVAL;
- }
-
- pr_devel("write to %lx\n", rec->ip);
-
- if (patch_instruction((unsigned int *)ip, op))
- return -EPERM;
-
return 0;
}
-#endif /* CONFIG_PPC64 */
#endif /* CONFIG_MODULES */
static int __ftrace_make_call_kernel(struct dyn_ftrace *rec, unsigned long addr)
{
- unsigned int op;
+ ppc_inst_t op;
void *ip = (void *)rec->ip;
unsigned long tramp, entry, ptr;
@@ -621,26 +472,23 @@ static int __ftrace_make_call_kernel(struct dyn_ftrace *rec, unsigned long addr)
entry = ppc_global_function_entry((void *)ftrace_caller);
ptr = ppc_global_function_entry((void *)addr);
- if (ptr != entry) {
-#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+ if (ptr != entry && IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS))
entry = ppc_global_function_entry((void *)ftrace_regs_caller);
- if (ptr != entry) {
-#endif
- pr_err("Unknown ftrace addr to patch: %ps\n", (void *)ptr);
- return -EINVAL;
-#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
- }
-#endif
+
+ if (ptr != entry) {
+ pr_err("Unknown ftrace addr to patch: %ps\n", (void *)ptr);
+ return -EINVAL;
}
/* Make sure we have a nop */
- if (probe_kernel_read(&op, ip, sizeof(op))) {
+ if (copy_inst_from_kernel_nofault(&op, ip)) {
pr_err("Unable to read ftrace location %p\n", ip);
return -EFAULT;
}
- if (op != PPC_INST_NOP) {
- pr_err("Unexpected call sequence at %p: %x\n", ip, op);
+ if (!ppc_inst_equal(op, ppc_inst(PPC_RAW_NOP()))) {
+ pr_err("Unexpected call sequence at %p: %08lx\n",
+ ip, ppc_inst_as_ulong(op));
return -EINVAL;
}
@@ -661,7 +509,7 @@ static int __ftrace_make_call_kernel(struct dyn_ftrace *rec, unsigned long addr)
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
unsigned long ip = rec->ip;
- unsigned int old, new;
+ ppc_inst_t old, new;
/*
* If the calling address is more that 24 bits away,
@@ -670,13 +518,16 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
*/
if (test_24bit_addr(ip, addr)) {
/* within range */
- old = PPC_INST_NOP;
+ old = ppc_inst(PPC_RAW_NOP());
new = ftrace_call_replace(ip, addr, 1);
return ftrace_modify_code(ip, old, new);
- } else if (core_kernel_text(ip))
+ } else if (core_kernel_text(ip)) {
return __ftrace_make_call_kernel(rec, addr);
+ } else if (!IS_ENABLED(CONFIG_MODULES)) {
+ /* We should not get here without modules */
+ return -EINVAL;
+ }
-#ifdef CONFIG_MODULES
/*
* Out of range jumps are called from modules.
* Being that we are converting from nop, it had better
@@ -688,10 +539,6 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
}
return __ftrace_make_call(rec, addr);
-#else
- /* We should not get here without modules */
- return -EINVAL;
-#endif /* CONFIG_MODULES */
}
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
@@ -700,7 +547,7 @@ static int
__ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
unsigned long addr)
{
- unsigned int op;
+ ppc_inst_t op;
unsigned long ip = rec->ip;
unsigned long entry, ptr, tramp;
struct module *mod = rec->arch.mod;
@@ -712,14 +559,14 @@ __ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
}
/* read where this goes */
- if (probe_kernel_read(&op, (void *)ip, sizeof(int))) {
+ if (copy_inst_from_kernel_nofault(&op, (void *)ip)) {
pr_err("Fetching opcode failed.\n");
return -EFAULT;
}
- /* Make sure that that this is still a 24bit jump */
+ /* Make sure that this is still a 24bit jump */
if (!is_bl_op(op)) {
- pr_err("Not expected bl: opcode is %x\n", op);
+ pr_err("Not expected bl: opcode is %08lx\n", ppc_inst_as_ulong(op));
return -EINVAL;
}
@@ -748,7 +595,7 @@ __ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
/* The new target may be within range */
if (test_24bit_addr(ip, addr)) {
/* within range */
- if (patch_branch((unsigned int *)ip, addr, BRANCH_SET_LINK)) {
+ if (patch_branch((u32 *)ip, addr, BRANCH_SET_LINK)) {
pr_err("REL24 out of range!\n");
return -EINVAL;
}
@@ -775,26 +622,25 @@ __ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
return -EINVAL;
}
- /* Ensure branch is within 24 bits */
- if (!create_branch((unsigned int *)ip, tramp, BRANCH_SET_LINK)) {
- pr_err("Branch out of range\n");
- return -EINVAL;
- }
-
- if (patch_branch((unsigned int *)ip, tramp, BRANCH_SET_LINK)) {
+ if (patch_branch((u32 *)ip, tramp, BRANCH_SET_LINK)) {
pr_err("REL24 out of range!\n");
return -EINVAL;
}
return 0;
}
+#else
+static int __ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, unsigned long addr)
+{
+ return 0;
+}
#endif
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
unsigned long addr)
{
unsigned long ip = rec->ip;
- unsigned int old, new;
+ ppc_inst_t old, new;
/*
* If the calling address is more that 24 bits away,
@@ -812,9 +658,11 @@ int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
* variant, so there is nothing to do here
*/
return 0;
+ } else if (!IS_ENABLED(CONFIG_MODULES)) {
+ /* We should not get here without modules */
+ return -EINVAL;
}
-#ifdef CONFIG_MODULES
/*
* Out of range jumps are called from modules.
*/
@@ -824,32 +672,26 @@ int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
}
return __ftrace_modify_call(rec, old_addr, addr);
-#else
- /* We should not get here without modules */
- return -EINVAL;
-#endif /* CONFIG_MODULES */
}
#endif
int ftrace_update_ftrace_func(ftrace_func_t func)
{
unsigned long ip = (unsigned long)(&ftrace_call);
- unsigned int old, new;
+ ppc_inst_t old, new;
int ret;
- old = *(unsigned int *)&ftrace_call;
+ old = ppc_inst_read((u32 *)&ftrace_call);
new = ftrace_call_replace(ip, (unsigned long)func, 1);
ret = ftrace_modify_code(ip, old, new);
-#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
/* Also update the regs callback function */
- if (!ret) {
+ if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS) && !ret) {
ip = (unsigned long)(&ftrace_regs_call);
- old = *(unsigned int *)&ftrace_regs_call;
+ old = ppc_inst_read((u32 *)&ftrace_regs_call);
new = ftrace_call_replace(ip, (unsigned long)func, 1);
ret = ftrace_modify_code(ip, old, new);
}
-#endif
return ret;
}
@@ -868,25 +710,39 @@ void arch_ftrace_update_code(int command)
extern unsigned int ftrace_tramp_text[], ftrace_tramp_init[];
+void ftrace_free_init_tramp(void)
+{
+ int i;
+
+ for (i = 0; i < NUM_FTRACE_TRAMPS && ftrace_tramps[i]; i++)
+ if (ftrace_tramps[i] == (unsigned long)ftrace_tramp_init) {
+ ftrace_tramps[i] = 0;
+ return;
+ }
+}
+
int __init ftrace_dyn_arch_init(void)
{
int i;
unsigned int *tramp[] = { ftrace_tramp_text, ftrace_tramp_init };
u32 stub_insns[] = {
- 0xe98d0000 | PACATOC, /* ld r12,PACATOC(r13) */
- 0x3d8c0000, /* addis r12,r12,<high> */
- 0x398c0000, /* addi r12,r12,<low> */
- 0x7d8903a6, /* mtctr r12 */
- 0x4e800420, /* bctr */
+ PPC_RAW_LD(_R12, _R13, PACATOC),
+ PPC_RAW_ADDIS(_R12, _R12, 0),
+ PPC_RAW_ADDI(_R12, _R12, 0),
+ PPC_RAW_MTCTR(_R12),
+ PPC_RAW_BCTR()
};
-#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
- unsigned long addr = ppc_global_function_entry((void *)ftrace_regs_caller);
-#else
- unsigned long addr = ppc_global_function_entry((void *)ftrace_caller);
-#endif
- long reladdr = addr - kernel_toc_addr();
+ unsigned long addr;
+ long reladdr;
+
+ if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS))
+ addr = ppc_global_function_entry((void *)ftrace_regs_caller);
+ else
+ addr = ppc_global_function_entry((void *)ftrace_caller);
- if (reladdr > 0x7FFFFFFF || reladdr < -(0x80000000L)) {
+ reladdr = addr - kernel_toc_addr();
+
+ if (reladdr >= SZ_2G || reladdr < -(long)SZ_2G) {
pr_err("Address of %ps out of range of kernel_toc.\n",
(void *)addr);
return -1;
@@ -901,53 +757,48 @@ int __init ftrace_dyn_arch_init(void)
return 0;
}
-#else
-int __init ftrace_dyn_arch_init(void)
-{
- return 0;
-}
#endif
-#endif /* CONFIG_DYNAMIC_FTRACE */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
extern void ftrace_graph_call(void);
extern void ftrace_graph_stub(void);
-int ftrace_enable_ftrace_graph_caller(void)
+static int ftrace_modify_ftrace_graph_caller(bool enable)
{
unsigned long ip = (unsigned long)(&ftrace_graph_call);
unsigned long addr = (unsigned long)(&ftrace_graph_caller);
unsigned long stub = (unsigned long)(&ftrace_graph_stub);
- unsigned int old, new;
+ ppc_inst_t old, new;
+
+ if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_ARGS))
+ return 0;
- old = ftrace_call_replace(ip, stub, 0);
- new = ftrace_call_replace(ip, addr, 0);
+ old = ftrace_call_replace(ip, enable ? stub : addr, 0);
+ new = ftrace_call_replace(ip, enable ? addr : stub, 0);
return ftrace_modify_code(ip, old, new);
}
-int ftrace_disable_ftrace_graph_caller(void)
+int ftrace_enable_ftrace_graph_caller(void)
{
- unsigned long ip = (unsigned long)(&ftrace_graph_call);
- unsigned long addr = (unsigned long)(&ftrace_graph_caller);
- unsigned long stub = (unsigned long)(&ftrace_graph_stub);
- unsigned int old, new;
-
- old = ftrace_call_replace(ip, addr, 0);
- new = ftrace_call_replace(ip, stub, 0);
+ return ftrace_modify_ftrace_graph_caller(true);
+}
- return ftrace_modify_code(ip, old, new);
+int ftrace_disable_ftrace_graph_caller(void)
+{
+ return ftrace_modify_ftrace_graph_caller(false);
}
/*
* Hook the return address and push it in the stack of return addrs
* in current thread info. Return the address we want to divert to.
*/
-unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip,
- unsigned long sp)
+static unsigned long
+__prepare_ftrace_return(unsigned long parent, unsigned long ip, unsigned long sp)
{
unsigned long return_hooker;
+ int bit;
if (unlikely(ftrace_graph_is_dead()))
goto out;
@@ -955,16 +806,36 @@ unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip,
if (unlikely(atomic_read(&current->tracing_graph_pause)))
goto out;
+ bit = ftrace_test_recursion_trylock(ip, parent);
+ if (bit < 0)
+ goto out;
+
return_hooker = ppc_function_entry(return_to_handler);
if (!function_graph_enter(parent, ip, 0, (unsigned long *)sp))
parent = return_hooker;
+
+ ftrace_test_recursion_unlock(bit);
out:
return parent;
}
+
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_ARGS
+void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
+ struct ftrace_ops *op, struct ftrace_regs *fregs)
+{
+ fregs->regs.link = __prepare_ftrace_return(parent_ip, ip, fregs->regs.gpr[1]);
+}
+#else
+unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip,
+ unsigned long sp)
+{
+ return __prepare_ftrace_return(parent, ip, sp);
+}
+#endif
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
-#ifdef PPC64_ELF_ABI_v1
+#ifdef CONFIG_PPC64_ELF_ABI_V1
char *arch_ftrace_match_adjust(char *str, const char *search)
{
if (str[0] == '.' && search[0] != '.')
@@ -972,4 +843,4 @@ char *arch_ftrace_match_adjust(char *str, const char *search)
else
return str;
}
-#endif /* PPC64_ELF_ABI_v1 */
+#endif /* CONFIG_PPC64_ELF_ABI_V1 */
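
Aside: the ftrace.c hunks above repeatedly replace #ifdef blocks with IS_ENABLED() checks so that both configurations are compiled and type-checked while the optimizer still discards the dead branch. A minimal sketch of the idiom, assuming made-up names CONFIG_FOO, do_foo() and do_bar() that are not part of this patch:

#ifdef CONFIG_FOO
	do_foo();		/* only this branch is ever seen by the compiler */
#else
	do_bar();
#endif

which becomes:

	if (IS_ENABLED(CONFIG_FOO))	/* constant 1 when CONFIG_FOO=y or =m, else 0 */
		do_foo();		/* both branches compile; the dead one is optimized out */
	else
		do_bar();
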
diff --git a/arch/powerpc/kernel/trace/ftrace_32.S b/arch/powerpc/kernel/trace/ftrace_32.S
deleted file mode 100644
index e023ae59c429..000000000000
--- a/arch/powerpc/kernel/trace/ftrace_32.S
+++ /dev/null
@@ -1,95 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Split from entry_32.S
- */
-
-#include <linux/magic.h>
-#include <asm/reg.h>
-#include <asm/ppc_asm.h>
-#include <asm/asm-offsets.h>
-#include <asm/ftrace.h>
-#include <asm/export.h>
-
-_GLOBAL(mcount)
-_GLOBAL(_mcount)
- /*
- * It is required that _mcount on PPC32 must preserve the
- * link register. But we have r0 to play with. We use r0
- * to push the return address back to the caller of mcount
- * into the ctr register, restore the link register and
- * then jump back using the ctr register.
- */
- mflr r0
- mtctr r0
- lwz r0, 4(r1)
- mtlr r0
- bctr
-
-_GLOBAL(ftrace_caller)
- MCOUNT_SAVE_FRAME
- /* r3 ends up with link register */
- subi r3, r3, MCOUNT_INSN_SIZE
-.globl ftrace_call
-ftrace_call:
- bl ftrace_stub
- nop
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-.globl ftrace_graph_call
-ftrace_graph_call:
- b ftrace_graph_stub
-_GLOBAL(ftrace_graph_stub)
-#endif
- MCOUNT_RESTORE_FRAME
- /* old link register ends up in ctr reg */
- bctr
-
-EXPORT_SYMBOL(_mcount)
-
-_GLOBAL(ftrace_stub)
- blr
-
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-_GLOBAL(ftrace_graph_caller)
- addi r5, r1, 48
- /* load r4 with local address */
- lwz r4, 44(r1)
- subi r4, r4, MCOUNT_INSN_SIZE
-
- /* Grab the LR out of the caller stack frame */
- lwz r3,52(r1)
-
- bl prepare_ftrace_return
- nop
-
- /*
- * prepare_ftrace_return gives us the address we divert to.
- * Change the LR in the callers stack frame to this.
- */
- stw r3,52(r1)
-
- MCOUNT_RESTORE_FRAME
- /* old link register ends up in ctr reg */
- bctr
-
-_GLOBAL(return_to_handler)
- /* need to save return values */
- stwu r1, -32(r1)
- stw r3, 20(r1)
- stw r4, 16(r1)
- stw r31, 12(r1)
- mr r31, r1
-
- bl ftrace_return_to_handler
- nop
-
- /* return value has real return address */
- mtlr r3
-
- lwz r3, 20(r1)
- lwz r4, 16(r1)
- lwz r31,12(r1)
- lwz r1, 0(r1)
-
- /* Jump back to real return address */
- blr
-#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
diff --git a/arch/powerpc/kernel/trace/ftrace_64.S b/arch/powerpc/kernel/trace/ftrace_low.S
index 25e5b9e47c06..294d1e05958a 100644
--- a/arch/powerpc/kernel/trace/ftrace_64.S
+++ b/arch/powerpc/kernel/trace/ftrace_low.S
@@ -10,6 +10,7 @@
#include <asm/ppc-opcode.h>
#include <asm/export.h>
+#ifdef CONFIG_PPC64
.pushsection ".tramp.ftrace.text","aw",@progbits;
.globl ftrace_tramp_text
ftrace_tramp_text:
@@ -21,6 +22,7 @@ ftrace_tramp_text:
ftrace_tramp_init:
.space 64
.popsection
+#endif
_GLOBAL(mcount)
_GLOBAL(_mcount)
@@ -33,6 +35,7 @@ EXPORT_SYMBOL(_mcount)
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
_GLOBAL(return_to_handler)
/* need to save return values */
+#ifdef CONFIG_PPC64
std r4, -32(r1)
std r3, -24(r1)
/* save TOC */
@@ -45,7 +48,12 @@ _GLOBAL(return_to_handler)
* We might be called from a module.
* Switch to our TOC to run inside the core kernel.
*/
- ld r2, PACATOC(r13)
+ LOAD_PACA_TOC()
+#else
+ stwu r1, -16(r1)
+ stw r3, 8(r1)
+ stw r4, 12(r1)
+#endif
bl ftrace_return_to_handler
nop
@@ -53,11 +61,17 @@ _GLOBAL(return_to_handler)
/* return value has real return address */
mtlr r3
+#ifdef CONFIG_PPC64
ld r1, 0(r1)
ld r4, -32(r1)
ld r3, -24(r1)
ld r2, -16(r1)
ld r31, -8(r1)
+#else
+ lwz r3, 8(r1)
+ lwz r4, 12(r1)
+ addi r1, r1, 16
+#endif
/* Jump back to real return address */
blr
diff --git a/arch/powerpc/kernel/trace/ftrace_64_mprofile.S b/arch/powerpc/kernel/trace/ftrace_mprofile.S
index f9fd5f743eba..d031093bc436 100644
--- a/arch/powerpc/kernel/trace/ftrace_64_mprofile.S
+++ b/arch/powerpc/kernel/trace/ftrace_mprofile.S
@@ -32,53 +32,69 @@
* Our job is to save the register state into a struct pt_regs (on the stack)
* and then arrange for the ftrace function to be called.
*/
-_GLOBAL(ftrace_regs_caller)
- /* Save the original return address in A's stack frame */
- std r0,LRSAVE(r1)
-
+.macro ftrace_regs_entry allregs
/* Create our stack frame + pt_regs */
- stdu r1,-SWITCH_FRAME_SIZE(r1)
+ PPC_STLU r1,-SWITCH_FRAME_SIZE(r1)
/* Save all gprs to pt_regs */
SAVE_GPR(0, r1)
- SAVE_10GPRS(2, r1)
+ SAVE_GPRS(3, 10, r1)
+#ifdef CONFIG_PPC64
+ /* Save the original return address in A's stack frame */
+ std r0, LRSAVE+SWITCH_FRAME_SIZE(r1)
/* Ok to continue? */
lbz r3, PACA_FTRACE_ENABLED(r13)
cmpdi r3, 0
beq ftrace_no_trace
+#endif
- SAVE_10GPRS(12, r1)
- SAVE_10GPRS(22, r1)
+ .if \allregs == 1
+ SAVE_GPR(2, r1)
+ SAVE_GPRS(11, 31, r1)
+ .else
+#ifdef CONFIG_LIVEPATCH_64
+ SAVE_GPR(14, r1)
+#endif
+ .endif
/* Save previous stack pointer (r1) */
addi r8, r1, SWITCH_FRAME_SIZE
- std r8, GPR1(r1)
+ PPC_STL r8, GPR1(r1)
+ .if \allregs == 1
/* Load special regs for save below */
mfmsr r8
mfctr r9
mfxer r10
mfcr r11
+ .else
+ /* Clear MSR to flag as ftrace_caller versus ftrace_regs_caller */
+ li r8, 0
+ .endif
/* Get the _mcount() call site out of LR */
mflr r7
/* Save it as pt_regs->nip */
- std r7, _NIP(r1)
+ PPC_STL r7, _NIP(r1)
/* Save the read LR in pt_regs->link */
- std r0, _LINK(r1)
+ PPC_STL r0, _LINK(r1)
+#ifdef CONFIG_PPC64
/* Save callee's TOC in the ABI compliant location */
- std r2, 24(r1)
- ld r2,PACATOC(r13) /* get kernel TOC in r2 */
-
- addis r3,r2,function_trace_op@toc@ha
- addi r3,r3,function_trace_op@toc@l
+ std r2, STK_GOT(r1)
+ LOAD_PACA_TOC() /* get kernel TOC in r2 */
+ LOAD_REG_ADDR(r3, function_trace_op)
ld r5,0(r3)
+#else
+ lis r3,function_trace_op@ha
+ lwz r5,function_trace_op@l(r3)
+#endif
-#ifdef CONFIG_LIVEPATCH
- mr r14,r7 /* remember old NIP */
+#ifdef CONFIG_LIVEPATCH_64
+ mr r14, r7 /* remember old NIP */
#endif
+
/* Calculate ip from nip-4 into r3 for call below */
subi r3, r7, MCOUNT_INSN_SIZE
@@ -86,130 +102,87 @@ _GLOBAL(ftrace_regs_caller)
mr r4, r0
/* Save special regs */
- std r8, _MSR(r1)
- std r9, _CTR(r1)
- std r10, _XER(r1)
- std r11, _CCR(r1)
+ PPC_STL r8, _MSR(r1)
+ .if \allregs == 1
+ PPC_STL r9, _CTR(r1)
+ PPC_STL r10, _XER(r1)
+ PPC_STL r11, _CCR(r1)
+ .endif
/* Load &pt_regs in r6 for call below */
- addi r6, r1 ,STACK_FRAME_OVERHEAD
-
- /* ftrace_call(r3, r4, r5, r6) */
-.globl ftrace_regs_call
-ftrace_regs_call:
- bl ftrace_stub
- nop
+ addi r6, r1, STACK_FRAME_OVERHEAD
+.endm
+.macro ftrace_regs_exit allregs
/* Load ctr with the possibly modified NIP */
- ld r3, _NIP(r1)
+ PPC_LL r3, _NIP(r1)
mtctr r3
-#ifdef CONFIG_LIVEPATCH
+
+#ifdef CONFIG_LIVEPATCH_64
cmpd r14, r3 /* has NIP been altered? */
#endif
/* Restore gprs */
- REST_GPR(0,r1)
- REST_10GPRS(2,r1)
- REST_10GPRS(12,r1)
- REST_10GPRS(22,r1)
+ .if \allregs == 1
+ REST_GPRS(2, 31, r1)
+ .else
+ REST_GPRS(3, 10, r1)
+#ifdef CONFIG_LIVEPATCH_64
+ REST_GPR(14, r1)
+#endif
+ .endif
/* Restore possibly modified LR */
- ld r0, _LINK(r1)
+ PPC_LL r0, _LINK(r1)
mtlr r0
+#ifdef CONFIG_PPC64
/* Restore callee's TOC */
- ld r2, 24(r1)
+ ld r2, STK_GOT(r1)
+#endif
/* Pop our stack frame */
addi r1, r1, SWITCH_FRAME_SIZE
-#ifdef CONFIG_LIVEPATCH
+#ifdef CONFIG_LIVEPATCH_64
/* Based on the cmpd above, if the NIP was altered handle livepatch */
bne- livepatch_handler
#endif
-
-ftrace_caller_common:
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-.globl ftrace_graph_call
-ftrace_graph_call:
- b ftrace_graph_stub
-_GLOBAL(ftrace_graph_stub)
-#endif
-
bctr /* jump after _mcount site */
+.endm
-_GLOBAL(ftrace_stub)
- blr
-
-ftrace_no_trace:
- mflr r3
- mtctr r3
- REST_GPR(3, r1)
- addi r1, r1, SWITCH_FRAME_SIZE
- mtlr r0
- bctr
+_GLOBAL(ftrace_regs_caller)
+ ftrace_regs_entry 1
+ /* ftrace_call(r3, r4, r5, r6) */
+.globl ftrace_regs_call
+ftrace_regs_call:
+ bl ftrace_stub
+ nop
+ ftrace_regs_exit 1
_GLOBAL(ftrace_caller)
- /* Save the original return address in A's stack frame */
- std r0, LRSAVE(r1)
-
- /* Create our stack frame + pt_regs */
- stdu r1, -SWITCH_FRAME_SIZE(r1)
-
- /* Save all gprs to pt_regs */
- SAVE_8GPRS(3, r1)
-
- lbz r3, PACA_FTRACE_ENABLED(r13)
- cmpdi r3, 0
- beq ftrace_no_trace
-
- /* Get the _mcount() call site out of LR */
- mflr r7
- std r7, _NIP(r1)
-
- /* Save callee's TOC in the ABI compliant location */
- std r2, 24(r1)
- ld r2, PACATOC(r13) /* get kernel TOC in r2 */
-
- addis r3, r2, function_trace_op@toc@ha
- addi r3, r3, function_trace_op@toc@l
- ld r5, 0(r3)
-
- /* Calculate ip from nip-4 into r3 for call below */
- subi r3, r7, MCOUNT_INSN_SIZE
-
- /* Put the original return address in r4 as parent_ip */
- mr r4, r0
-
- /* Set pt_regs to NULL */
- li r6, 0
-
+ ftrace_regs_entry 0
/* ftrace_call(r3, r4, r5, r6) */
.globl ftrace_call
ftrace_call:
bl ftrace_stub
nop
+ ftrace_regs_exit 0
- ld r3, _NIP(r1)
- mtctr r3
-
- /* Restore gprs */
- REST_8GPRS(3,r1)
-
- /* Restore callee's TOC */
- ld r2, 24(r1)
+_GLOBAL(ftrace_stub)
+ blr
- /* Pop our stack frame */
+#ifdef CONFIG_PPC64
+ftrace_no_trace:
+ mflr r3
+ mtctr r3
+ REST_GPR(3, r1)
addi r1, r1, SWITCH_FRAME_SIZE
-
- /* Reload original LR */
- ld r0, LRSAVE(r1)
mtlr r0
+ bctr
+#endif
- /* Handle function_graph or go back */
- b ftrace_caller_common
-
-#ifdef CONFIG_LIVEPATCH
+#ifdef CONFIG_LIVEPATCH_64
/*
* This function runs in the mcount context, between two functions. As
* such it can only clobber registers which are volatile and used in
@@ -276,55 +249,3 @@ livepatch_handler:
/* Return to original caller of live patched function */
blr
#endif /* CONFIG_LIVEPATCH */
-
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-_GLOBAL(ftrace_graph_caller)
- stdu r1, -112(r1)
- /* with -mprofile-kernel, parameter regs are still alive at _mcount */
- std r10, 104(r1)
- std r9, 96(r1)
- std r8, 88(r1)
- std r7, 80(r1)
- std r6, 72(r1)
- std r5, 64(r1)
- std r4, 56(r1)
- std r3, 48(r1)
-
- /* Save callee's TOC in the ABI compliant location */
- std r2, 24(r1)
- ld r2, PACATOC(r13) /* get kernel TOC in r2 */
-
- addi r5, r1, 112
- mfctr r4 /* ftrace_caller has moved local addr here */
- std r4, 40(r1)
- mflr r3 /* ftrace_caller has restored LR from stack */
- subi r4, r4, MCOUNT_INSN_SIZE
-
- bl prepare_ftrace_return
- nop
-
- /*
- * prepare_ftrace_return gives us the address we divert to.
- * Change the LR to this.
- */
- mtlr r3
-
- ld r0, 40(r1)
- mtctr r0
- ld r10, 104(r1)
- ld r9, 96(r1)
- ld r8, 88(r1)
- ld r7, 80(r1)
- ld r6, 72(r1)
- ld r5, 64(r1)
- ld r4, 56(r1)
- ld r3, 48(r1)
-
- /* Restore callee's TOC */
- ld r2, 24(r1)
-
- addi r1, r1, 112
- mflr r0
- std r0, LRSAVE(r1)
- bctr
-#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 82a3438300fd..9bdd79aa51cf 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -37,11 +37,11 @@
#include <linux/smp.h>
#include <linux/console.h>
#include <linux/kmsg_dump.h>
+#include <linux/debugfs.h>
#include <asm/emulated_ops.h>
-#include <asm/pgtable.h>
#include <linux/uaccess.h>
-#include <asm/debugfs.h>
+#include <asm/interrupt.h>
#include <asm/io.h>
#include <asm/machdep.h>
#include <asm/rtas.h>
@@ -53,7 +53,6 @@
#ifdef CONFIG_PPC64
#include <asm/firmware.h>
#include <asm/processor.h>
-#include <asm/tm.h>
#endif
#include <asm/kexec.h>
#include <asm/ppc-opcode.h>
@@ -68,6 +67,8 @@
#include <asm/kprobes.h>
#include <asm/stacktrace.h>
#include <asm/nmi.h>
+#include <asm/disassemble.h>
+#include <asm/udbg.h>
#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC_CORE)
int (*__debugger)(struct pt_regs *regs) __read_mostly;
@@ -171,7 +172,6 @@ extern void panic_flush_kmsg_start(void)
extern void panic_flush_kmsg_end(void)
{
- printk_safe_flush_on_panic();
kmsg_dump(KMSG_DUMP_PANIC);
bust_spinlocks(0);
debug_locks_off();
@@ -222,7 +222,7 @@ static void oops_end(unsigned long flags, struct pt_regs *regs,
/*
 * system_reset_exception handles debugger, crash dump, panic, for 0x100
*/
- if (TRAP(regs) == 0x100)
+ if (TRAP(regs) == INTERRUPT_SYSTEM_RESET)
return;
crash_fadump(regs, "die oops");
@@ -246,7 +246,7 @@ static void oops_end(unsigned long flags, struct pt_regs *regs,
if (panic_on_oops)
panic("Fatal exception");
- do_exit(signr);
+ make_task_dead(signr);
}
NOKPROBE_SYMBOL(oops_end);
@@ -290,7 +290,7 @@ void die(const char *str, struct pt_regs *regs, long err)
/*
 * system_reset_exception handles debugger, crash dump, panic, for 0x100
*/
- if (TRAP(regs) != 0x100) {
+ if (TRAP(regs) != INTERRUPT_SYSTEM_RESET) {
if (debugger(regs))
return;
}
@@ -341,19 +341,19 @@ static bool exception_common(int signr, struct pt_regs *regs, int code,
return false;
}
- show_signal_msg(signr, regs, code, addr);
+ /*
+ * Must not enable interrupts even for user-mode exception, because
+ * this can be called from machine check, which may be a NMI or IRQ
+ * which don't like interrupts being enabled. Could check for
+ * in_hardirq || in_nmi perhaps, but there doesn't seem to be a good
+ * reason why _exception() should enable irqs for an exception handler,
+ * the handlers themselves do that directly.
+ */
- if (arch_irqs_disabled() && !arch_irq_disabled_regs(regs))
- local_irq_enable();
+ show_signal_msg(signr, regs, code, addr);
current->thread.trap_nr = code;
- /*
- * Save all the pkey registers AMR/IAMR/UAMOR. Eg: Core dumps need
- * to capture the content, if the task gets killed.
- */
- thread_pkey_regs_save(&current->thread);
-
return true;
}
@@ -394,7 +394,7 @@ void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
* Builds that do not support KVM could take this second option to increase
* the recoverability of NMIs.
*/
-void hv_nmi_check_nonrecoverable(struct pt_regs *regs)
+noinstr void hv_nmi_check_nonrecoverable(struct pt_regs *regs)
{
#ifdef CONFIG_PPC_POWERNV
unsigned long kbase = (unsigned long)_stext;
@@ -411,7 +411,7 @@ void hv_nmi_check_nonrecoverable(struct pt_regs *regs)
* Now test if the interrupt has hit a range that may be using
* HSPRG1 without having RI=0 (i.e., an HSRR interrupt). The
* problem ranges all run un-relocated. Test real and virt modes
- * at the same time by droping the high bit of the nip (virt mode
+ * at the same time by dropping the high bit of the nip (virt mode
* entry points still have the +0x4000 offset).
*/
nip &= ~0xc000000000000000ULL;
@@ -435,23 +435,16 @@ void hv_nmi_check_nonrecoverable(struct pt_regs *regs)
nonrecoverable:
regs->msr &= ~MSR_RI;
+ local_paca->hsrr_valid = 0;
+ local_paca->srr_valid = 0;
#endif
}
-
-void system_reset_exception(struct pt_regs *regs)
+DEFINE_INTERRUPT_HANDLER_NMI(system_reset_exception)
{
unsigned long hsrr0, hsrr1;
- bool nested = in_nmi();
bool saved_hsrrs = false;
/*
- * Avoid crashes in case of nested NMI exceptions. Recoverability
- * is determined by RI and in_nmi
- */
- if (!nested)
- nmi_enter();
-
- /*
* System reset can interrupt code where HSRRs are live and MSR[RI]=1.
* The system reset interrupt itself may clobber HSRRs (e.g., to call
* OPAL), so save them here and restore them before returning.
@@ -510,21 +503,23 @@ out:
#ifdef CONFIG_PPC_BOOK3S_64
BUG_ON(get_paca()->in_nmi == 0);
if (get_paca()->in_nmi > 1)
- nmi_panic(regs, "Unrecoverable nested System Reset");
+ die("Unrecoverable nested System Reset", regs, SIGABRT);
#endif
/* Must die if the interrupt is not recoverable */
- if (!(regs->msr & MSR_RI))
- nmi_panic(regs, "Unrecoverable System Reset");
+ if (regs_is_unrecoverable(regs)) {
+ /* For the reason explained in die_mce, nmi_exit before die */
+ nmi_exit();
+ die("Unrecoverable System Reset", regs, SIGABRT);
+ }
if (saved_hsrrs) {
mtspr(SPRN_HSRR0, hsrr0);
mtspr(SPRN_HSRR1, hsrr1);
}
- if (!nested)
- nmi_exit();
-
/* What should we do here? We could issue a shutdown or hard reset. */
+
+ return 0;
}
/*
@@ -532,9 +527,6 @@ out:
* Check if the NIP corresponds to the address of a sync
* instruction for which there is an entry in the exception
* table.
- * Note that the 601 only takes a machine check on TEA
- * (transfer error ack) signal assertion, and does not
- * set any of the top 16 bits of SRR1.
* -- paulus.
*/
static inline int check_io_access(struct pt_regs *regs)
@@ -554,11 +546,11 @@ static inline int check_io_access(struct pt_regs *regs)
* For the debug message, we look at the preceding
* load or store.
*/
- if (*nip == PPC_INST_NOP)
+ if (*nip == PPC_RAW_NOP())
nip -= 2;
- else if (*nip == PPC_INST_ISYNC)
+ else if (*nip == PPC_RAW_ISYNC())
--nip;
- if (*nip == PPC_INST_SYNC || (*nip >> 26) == OP_TRAP) {
+ if (*nip == PPC_RAW_SYNC() || get_op(*nip) == OP_TRAP) {
unsigned int rb;
--nip;
@@ -566,8 +558,8 @@ static inline int check_io_access(struct pt_regs *regs)
printk(KERN_DEBUG "%s bad port %lx at %p\n",
(*nip & 0x100)? "OUT to": "IN from",
regs->gpr[rb] - _IO_BASE, nip);
- regs->msr |= MSR_RI;
- regs->nip = extable_fixup(entry);
+ regs_set_recoverable(regs);
+ regs_set_return_ip(regs, extable_fixup(entry));
return 1;
}
}
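
Editor's note: throughout this patch, direct writes to regs->nip and regs->msr are replaced by the regs_set_return_ip()/regs_set_return_msr()/regs_add_return_ip()/regs_set_recoverable() helpers. A minimal sketch of the idea, assuming the accessors live in arch/powerpc/include/asm/ptrace.h (the real ones share a common "regs changed" helper):

```c
/*
 * Minimal sketch only. Besides updating pt_regs, the accessors invalidate
 * the SRR/HSRR values cached in the PACA on 64-bit Book3S, so the
 * interrupt-return path reloads SRR0/SRR1 from the (now modified) regs.
 */
static inline void regs_set_return_ip(struct pt_regs *regs, unsigned long ip)
{
	regs->nip = ip;
#ifdef CONFIG_PPC_BOOK3S_64
	local_paca->hsrr_valid = 0;
	local_paca->srr_valid = 0;
#endif
}

static inline void regs_set_return_msr(struct pt_regs *regs, unsigned long msr)
{
	regs->msr = msr;
#ifdef CONFIG_PPC_BOOK3S_64
	local_paca->hsrr_valid = 0;
	local_paca->srr_valid = 0;
#endif
}
```

regs_add_return_ip() and regs_set_recoverable() are thin wrappers over the same pattern (the latter also sets MSR_RI).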
@@ -578,11 +570,13 @@ static inline int check_io_access(struct pt_regs *regs)
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
/* On 4xx, the reason for the machine check or program exception
is in the ESR. */
-#define get_reason(regs) ((regs)->dsisr)
+#define get_reason(regs) ((regs)->esr)
#define REASON_FP ESR_FP
#define REASON_ILLEGAL (ESR_PIL | ESR_PUO)
#define REASON_PRIVILEGED ESR_PPR
#define REASON_TRAP ESR_PTR
+#define REASON_PREFIXED 0
+#define REASON_BOUNDARY 0
/* single-step stuff */
#define single_stepping(regs) (current->thread.debug.dbcr0 & DBCR0_IC)
@@ -597,13 +591,17 @@ static inline int check_io_access(struct pt_regs *regs)
#define REASON_ILLEGAL SRR1_PROGILL
#define REASON_PRIVILEGED SRR1_PROGPRIV
#define REASON_TRAP SRR1_PROGTRAP
+#define REASON_PREFIXED SRR1_PREFIXED
+#define REASON_BOUNDARY SRR1_BOUNDARY
#define single_stepping(regs) ((regs)->msr & MSR_SE)
-#define clear_single_step(regs) ((regs)->msr &= ~MSR_SE)
-#define clear_br_trace(regs) ((regs)->msr &= ~MSR_BE)
+#define clear_single_step(regs) (regs_set_return_msr((regs), (regs)->msr & ~MSR_SE))
+#define clear_br_trace(regs) (regs_set_return_msr((regs), (regs)->msr & ~MSR_BE))
#endif
-#if defined(CONFIG_E500)
+#define inst_length(reason) (((reason) & REASON_PREFIXED) ? 8 : 4)
+
+#if defined(CONFIG_PPC_E500)
int machine_check_e500mc(struct pt_regs *regs)
{
unsigned long mcsr = mfspr(SPRN_MCSR);
@@ -757,31 +755,6 @@ int machine_check_generic(struct pt_regs *regs)
{
return 0;
}
-#elif defined(CONFIG_E200)
-int machine_check_e200(struct pt_regs *regs)
-{
- unsigned long reason = mfspr(SPRN_MCSR);
-
- printk("Machine check in kernel mode.\n");
- printk("Caused by (from MCSR=%lx): ", reason);
-
- if (reason & MCSR_MCP)
- pr_cont("Machine Check Signal\n");
- if (reason & MCSR_CP_PERR)
- pr_cont("Cache Push Parity Error\n");
- if (reason & MCSR_CPERR)
- pr_cont("Cache Parity Error\n");
- if (reason & MCSR_EXCP_ERR)
- pr_cont("ISI, ITLB, or Bus Error on first instruction fetch for an exception handler\n");
- if (reason & MCSR_BUS_IRERR)
- pr_cont("Bus - Read Bus Error on instruction fetch\n");
- if (reason & MCSR_BUS_DRERR)
- pr_cont("Bus - Read Bus Error on data load\n");
- if (reason & MCSR_BUS_WRERR)
- pr_cont("Bus - Write Bus Error on buffered store or cache line push\n");
-
- return 0;
-}
#elif defined(CONFIG_PPC32)
int machine_check_generic(struct pt_regs *regs)
{
@@ -793,7 +766,6 @@ int machine_check_generic(struct pt_regs *regs)
case 0x80000:
pr_cont("Machine check signal\n");
break;
- case 0: /* for 601 */
case 0x40000:
case 0x140000: /* 7450 MSS error and TEA */
pr_cont("Transfer error ack signal\n");
@@ -820,12 +792,31 @@ int machine_check_generic(struct pt_regs *regs)
}
#endif /* everything else */
-void machine_check_exception(struct pt_regs *regs)
+void die_mce(const char *str, struct pt_regs *regs, long err)
+{
+ /*
+ * The machine check wants to kill the interrupted context,
+ * but make_task_dead() checks for in_interrupt() and panics
+ * in that case, so exit the irq/nmi before calling die.
+ */
+ if (in_nmi())
+ nmi_exit();
+ else
+ irq_exit();
+ die(str, regs, err);
+}
+
+/*
+ * BOOK3S_64 does not usually call this handler as a non-maskable interrupt
+ * (it uses its own early real-mode handler to handle the MCE proper
+ * and then raises irq_work to call this handler when interrupts are
+ * enabled). The only time when this is not true is if the early handler
+ * is unrecoverable, then it does call this directly to try to get a
+ * message out.
+ */
+static void __machine_check_exception(struct pt_regs *regs)
{
int recover = 0;
- bool nested = in_nmi();
- if (!nested)
- nmi_enter();
__this_cpu_inc(irq_stat.mce_exceptions);
@@ -851,23 +842,41 @@ void machine_check_exception(struct pt_regs *regs)
if (check_io_access(regs))
goto bail;
- if (!nested)
- nmi_exit();
-
- die("Machine check", regs, SIGBUS);
+ die_mce("Machine check", regs, SIGBUS);
+bail:
/* Must die if the interrupt is not recoverable */
- if (!(regs->msr & MSR_RI))
- nmi_panic(regs, "Unrecoverable Machine check");
+ if (regs_is_unrecoverable(regs))
+ die_mce("Unrecoverable Machine check", regs, SIGBUS);
+}
- return;
+#ifdef CONFIG_PPC_BOOK3S_64
+DEFINE_INTERRUPT_HANDLER_RAW(machine_check_early_boot)
+{
+ udbg_printf("Machine check (early boot)\n");
+ udbg_printf("SRR0=0x%016lx SRR1=0x%016lx\n", regs->nip, regs->msr);
+ udbg_printf(" DAR=0x%016lx DSISR=0x%08lx\n", regs->dar, regs->dsisr);
+ udbg_printf(" LR=0x%016lx R1=0x%08lx\n", regs->link, regs->gpr[1]);
+ udbg_printf("------\n");
+ die("Machine check (early boot)", regs, SIGBUS);
+ for (;;)
+ ;
+ return 0;
+}
-bail:
- if (!nested)
- nmi_exit();
+DEFINE_INTERRUPT_HANDLER_ASYNC(machine_check_exception_async)
+{
+ __machine_check_exception(regs);
+}
+#endif
+DEFINE_INTERRUPT_HANDLER_NMI(machine_check_exception)
+{
+ __machine_check_exception(regs);
+
+ return 0;
}
-void SMIException(struct pt_regs *regs)
+DEFINE_INTERRUPT_HANDLER(SMIException) /* async? */
{
die("System Management Interrupt", regs, SIGABRT);
}
@@ -877,11 +886,11 @@ static void p9_hmi_special_emu(struct pt_regs *regs)
{
unsigned int ra, rb, t, i, sel, instr, rc;
const void __user *addr;
- u8 vbuf[16], *vdst;
+ u8 vbuf[16] __aligned(16), *vdst;
unsigned long ea, msr, msr_mask;
bool swap;
- if (__get_user_inatomic(instr, (unsigned int __user *)regs->nip))
+ if (__get_user(instr, (unsigned int __user *)regs->nip))
return;
/*
@@ -1049,16 +1058,15 @@ static void p9_hmi_special_emu(struct pt_regs *regs)
#endif /* !__LITTLE_ENDIAN__ */
/* Go to next instruction */
- regs->nip += 4;
+ regs_add_return_ip(regs, 4);
}
#endif /* CONFIG_VSX */
-void handle_hmi_exception(struct pt_regs *regs)
+DEFINE_INTERRUPT_HANDLER_ASYNC(handle_hmi_exception)
{
struct pt_regs *old_regs;
old_regs = set_irq_regs(regs);
- irq_enter();
#ifdef CONFIG_VSX
/* Real mode flagged P9 special emu is needed */
@@ -1078,46 +1086,52 @@ void handle_hmi_exception(struct pt_regs *regs)
if (ppc_md.handle_hmi_exception)
ppc_md.handle_hmi_exception(regs);
- irq_exit();
set_irq_regs(old_regs);
}
-void unknown_exception(struct pt_regs *regs)
+DEFINE_INTERRUPT_HANDLER(unknown_exception)
{
- enum ctx_state prev_state = exception_enter();
-
printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n",
regs->nip, regs->msr, regs->trap);
_exception(SIGTRAP, regs, TRAP_UNK, 0);
+}
+
+DEFINE_INTERRUPT_HANDLER_ASYNC(unknown_async_exception)
+{
+ printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n",
+ regs->nip, regs->msr, regs->trap);
- exception_exit(prev_state);
+ _exception(SIGTRAP, regs, TRAP_UNK, 0);
}
-void instruction_breakpoint_exception(struct pt_regs *regs)
+DEFINE_INTERRUPT_HANDLER_NMI(unknown_nmi_exception)
{
- enum ctx_state prev_state = exception_enter();
+ printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n",
+ regs->nip, regs->msr, regs->trap);
+
+ _exception(SIGTRAP, regs, TRAP_UNK, 0);
+
+ return 0;
+}
+DEFINE_INTERRUPT_HANDLER(instruction_breakpoint_exception)
+{
if (notify_die(DIE_IABR_MATCH, "iabr_match", regs, 5,
5, SIGTRAP) == NOTIFY_STOP)
- goto bail;
+ return;
if (debugger_iabr_match(regs))
- goto bail;
+ return;
_exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
-
-bail:
- exception_exit(prev_state);
}
-void RunModeException(struct pt_regs *regs)
+DEFINE_INTERRUPT_HANDLER(RunModeException)
{
_exception(SIGTRAP, regs, TRAP_UNK, 0);
}
-void single_step_exception(struct pt_regs *regs)
+static void __single_step_exception(struct pt_regs *regs)
{
- enum ctx_state prev_state = exception_enter();
-
clear_single_step(regs);
clear_br_trace(regs);
@@ -1126,16 +1140,17 @@ void single_step_exception(struct pt_regs *regs)
if (notify_die(DIE_SSTEP, "single_step", regs, 5,
5, SIGTRAP) == NOTIFY_STOP)
- goto bail;
+ return;
if (debugger_sstep(regs))
- goto bail;
+ return;
_exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
+}
-bail:
- exception_exit(prev_state);
+DEFINE_INTERRUPT_HANDLER(single_step_exception)
+{
+ __single_step_exception(regs);
}
-NOKPROBE_SYMBOL(single_step_exception);
/*
* After we have successfully emulated an instruction, we have to
@@ -1146,7 +1161,7 @@ NOKPROBE_SYMBOL(single_step_exception);
static void emulate_single_step(struct pt_regs *regs)
{
if (single_stepping(regs))
- single_step_exception(regs);
+ __single_step_exception(regs);
}
static inline int __parse_fpscr(unsigned long fpscr)
@@ -1182,7 +1197,9 @@ static void parse_fpe(struct pt_regs *regs)
flush_fp_to_thread(current);
+#ifdef CONFIG_PPC_FPU_REGS
code = __parse_fpscr(current->thread.fp_state.fpscr);
+#endif
_exception(SIGFPE, regs, code, regs->nip);
}
@@ -1333,7 +1350,6 @@ static int emulate_instruction(struct pt_regs *regs)
if (!user_mode(regs))
return -EINVAL;
- CHECK_FULL_REGS(regs);
if (get_user(instword, (u32 __user *)(regs->nip)))
return -EFAULT;
@@ -1430,7 +1446,6 @@ int is_valid_bugaddr(unsigned long addr)
static int emulate_math(struct pt_regs *regs)
{
int ret;
- extern int do_mathemu(struct pt_regs *regs);
ret = do_mathemu(regs);
if (ret >= 0)
@@ -1457,9 +1472,8 @@ static int emulate_math(struct pt_regs *regs)
static inline int emulate_math(struct pt_regs *regs) { return -1; }
#endif
-void program_check_exception(struct pt_regs *regs)
+static void do_program_check(struct pt_regs *regs)
{
- enum ctx_state prev_state = exception_enter();
unsigned int reason = get_reason(regs);
/* We can now get here via a FP Unavailable exception if the core
@@ -1468,22 +1482,22 @@ void program_check_exception(struct pt_regs *regs)
if (reason & REASON_FP) {
/* IEEE FP exception */
parse_fpe(regs);
- goto bail;
+ return;
}
if (reason & REASON_TRAP) {
unsigned long bugaddr;
/* Debugger is first in line to stop recursive faults in
* rcu_lock, notify_die, or atomic_notifier_call_chain */
if (debugger_bpt(regs))
- goto bail;
+ return;
if (kprobe_handler(regs))
- goto bail;
+ return;
/* trap exception */
if (notify_die(DIE_BPT, "breakpoint", regs, 5, 5, SIGTRAP)
== NOTIFY_STOP)
- goto bail;
+ return;
bugaddr = regs->nip;
/*
@@ -1494,11 +1508,16 @@ void program_check_exception(struct pt_regs *regs)
if (!(regs->msr & MSR_PR) && /* not user-mode */
report_bug(bugaddr, regs) == BUG_TRAP_TYPE_WARN) {
- regs->nip += 4;
- goto bail;
+ const struct exception_table_entry *entry;
+
+ entry = search_exception_tables(bugaddr);
+ if (entry) {
+ regs_set_return_ip(regs, extable_fixup(entry) + regs->nip - bugaddr);
+ return;
+ }
}
_exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
- goto bail;
+ return;
}
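
Editor's note: the WARN fixup no longer blindly does `regs->nip += 4`; it looks up the exception table entry for the trap and jumps to its fixup, preserving any offset between regs->nip and bugaddr (e.g. the PAGE_OFFSET bias applied just above for traps taken with translation off). An illustrative calculation, with made-up addresses:

```c
/*
 * Illustration only: bugaddr is the (possibly virtually-biased) address the
 * exception table is keyed on, regs->nip is the raw trap address.
 */
unsigned long bugaddr = 0xc000000000123450UL;	/* WARN trap, virtual alias      */
unsigned long nip     = 0x0000000000123450UL;	/* raw NIP if taken in real mode */
unsigned long fixup   = 0xc000000000123454UL;	/* extable_fixup(entry)          */

unsigned long new_nip = fixup + nip - bugaddr;	/* 0x123454: fixup in the same
						 * address mode as the trap      */
```

In the common case bugaddr == regs->nip, so the new NIP is simply the fixup address.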
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
if (reason & REASON_TM) {
@@ -1519,7 +1538,7 @@ void program_check_exception(struct pt_regs *regs)
*/
if (user_mode(regs)) {
_exception(SIGILL, regs, ILL_ILLOPN, regs->nip);
- goto bail;
+ return;
} else {
printk(KERN_EMERG "Unexpected TM Bad Thing exception "
"at %lx (msr 0x%lx) tm_scratch=%llx\n",
@@ -1539,9 +1558,7 @@ void program_check_exception(struct pt_regs *regs)
if (!user_mode(regs))
goto sigill;
- /* We restore the interrupt state now */
- if (!arch_irq_disabled_regs(regs))
- local_irq_enable();
+ interrupt_cond_local_irq_enable(regs);
/* (reason & REASON_ILLEGAL) would be the obvious thing here,
* but there seems to be a hardware bug on the 405GP (RevD)
@@ -1552,18 +1569,18 @@ void program_check_exception(struct pt_regs *regs)
* pattern to occurrences etc. -dgibson 31/Mar/2003
*/
if (!emulate_math(regs))
- goto bail;
+ return;
/* Try to emulate it if we should. */
if (reason & (REASON_ILLEGAL | REASON_PRIVILEGED)) {
switch (emulate_instruction(regs)) {
case 0:
- regs->nip += 4;
+ regs_add_return_ip(regs, 4);
emulate_single_step(regs);
- goto bail;
+ return;
case -EFAULT:
_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
- goto bail;
+ return;
}
}
@@ -1573,42 +1590,49 @@ sigill:
else
_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
-bail:
- exception_exit(prev_state);
}
-NOKPROBE_SYMBOL(program_check_exception);
+
+DEFINE_INTERRUPT_HANDLER(program_check_exception)
+{
+ do_program_check(regs);
+}
/*
* This occurs when running in hypervisor mode on POWER6 or later
* and an illegal instruction is encountered.
*/
-void emulation_assist_interrupt(struct pt_regs *regs)
+DEFINE_INTERRUPT_HANDLER(emulation_assist_interrupt)
{
- regs->msr |= REASON_ILLEGAL;
- program_check_exception(regs);
+ regs_set_return_msr(regs, regs->msr | REASON_ILLEGAL);
+ do_program_check(regs);
}
-NOKPROBE_SYMBOL(emulation_assist_interrupt);
-void alignment_exception(struct pt_regs *regs)
+DEFINE_INTERRUPT_HANDLER(alignment_exception)
{
- enum ctx_state prev_state = exception_enter();
int sig, code, fixed = 0;
+ unsigned long reason;
+
+ interrupt_cond_local_irq_enable(regs);
- /* We restore the interrupt state now */
- if (!arch_irq_disabled_regs(regs))
- local_irq_enable();
+ reason = get_reason(regs);
+ if (reason & REASON_BOUNDARY) {
+ sig = SIGBUS;
+ code = BUS_ADRALN;
+ goto bad;
+ }
if (tm_abort_check(regs, TM_CAUSE_ALIGNMENT | TM_CAUSE_PERSISTENT))
- goto bail;
+ return;
/* we don't implement logging of alignment exceptions */
if (!(current->thread.align_ctl & PR_UNALIGN_SIGBUS))
fixed = fix_alignment(regs);
if (fixed == 1) {
- regs->nip += 4; /* skip over emulated instruction */
+ /* skip over emulated instruction */
+ regs_add_return_ip(regs, inst_length(reason));
emulate_single_step(regs);
- goto bail;
+ return;
}
/* Operand address was bad */
@@ -1619,64 +1643,40 @@ void alignment_exception(struct pt_regs *regs)
sig = SIGBUS;
code = BUS_ADRALN;
}
+bad:
if (user_mode(regs))
_exception(sig, regs, code, regs->dar);
else
- bad_page_fault(regs, regs->dar, sig);
-
-bail:
- exception_exit(prev_state);
-}
-
-void StackOverflow(struct pt_regs *regs)
-{
- pr_crit("Kernel stack overflow in process %s[%d], r1=%lx\n",
- current->comm, task_pid_nr(current), regs->gpr[1]);
- debugger(regs);
- show_regs(regs);
- panic("kernel stack overflow");
+ bad_page_fault(regs, sig);
}
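
Editor's note: the alignment handler now consults the interrupt reason for the two prefixed-instruction cases added here. A sketch of the effect (Book3S; on 4xx/BookE the new REASON_* values are defined as 0, so behaviour is unchanged):

```c
/* Sketch of the new reason handling in alignment_exception(). */
unsigned long reason = get_reason(regs);	/* regs->msr (SRR1) on Book3S */

if (reason & REASON_BOUNDARY) {
	/* Prefixed instruction crossed a 64-byte boundary: no emulation,
	 * signal SIGBUS / BUS_ADRALN. */
}

if (fixed == 1) {
	/* fix_alignment() emulated the access: advance the NIP by the
	 * instruction length, 8 bytes for a prefixed instruction. */
	regs_add_return_ip(regs, (reason & REASON_PREFIXED) ? 8 : 4);
}
```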
-void stack_overflow_exception(struct pt_regs *regs)
+DEFINE_INTERRUPT_HANDLER(stack_overflow_exception)
{
- enum ctx_state prev_state = exception_enter();
-
die("Kernel stack overflow", regs, SIGSEGV);
-
- exception_exit(prev_state);
}
-void kernel_fp_unavailable_exception(struct pt_regs *regs)
+DEFINE_INTERRUPT_HANDLER(kernel_fp_unavailable_exception)
{
- enum ctx_state prev_state = exception_enter();
-
printk(KERN_EMERG "Unrecoverable FP Unavailable Exception "
"%lx at %lx\n", regs->trap, regs->nip);
die("Unrecoverable FP Unavailable Exception", regs, SIGABRT);
-
- exception_exit(prev_state);
}
-void altivec_unavailable_exception(struct pt_regs *regs)
+DEFINE_INTERRUPT_HANDLER(altivec_unavailable_exception)
{
- enum ctx_state prev_state = exception_enter();
-
if (user_mode(regs)) {
/* A user program has executed an altivec instruction,
but this kernel doesn't support altivec. */
_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
- goto bail;
+ return;
}
printk(KERN_EMERG "Unrecoverable VMX/Altivec Unavailable Exception "
"%lx at %lx\n", regs->trap, regs->nip);
die("Unrecoverable VMX/Altivec Unavailable Exception", regs, SIGABRT);
-
-bail:
- exception_exit(prev_state);
}
-void vsx_unavailable_exception(struct pt_regs *regs)
+DEFINE_INTERRUPT_HANDLER(vsx_unavailable_exception)
{
if (user_mode(regs)) {
/* A user program has executed an vsx instruction,
@@ -1690,13 +1690,13 @@ void vsx_unavailable_exception(struct pt_regs *regs)
die("Unrecoverable VSX Unavailable Exception", regs, SIGABRT);
}
-#ifdef CONFIG_PPC64
+#ifdef CONFIG_PPC_BOOK3S_64
static void tm_unavailable(struct pt_regs *regs)
{
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
if (user_mode(regs)) {
current->thread.load_tm++;
- regs->msr |= MSR_TM;
+ regs_set_return_msr(regs, regs->msr | MSR_TM);
tm_enable();
tm_restore_sprs(&current->thread);
return;
@@ -1707,7 +1707,7 @@ static void tm_unavailable(struct pt_regs *regs)
die("Unrecoverable TM Unavailable Exception", regs, SIGABRT);
}
-void facility_unavailable_exception(struct pt_regs *regs)
+DEFINE_INTERRUPT_HANDLER(facility_unavailable_exception)
{
static char *facility_strings[] = {
[FSCR_FP_LG] = "FPU",
@@ -1720,6 +1720,7 @@ void facility_unavailable_exception(struct pt_regs *regs)
[FSCR_TAR_LG] = "TAR",
[FSCR_MSGP_LG] = "MSGP",
[FSCR_SCV_LG] = "SCV",
+ [FSCR_PREFIX_LG] = "PREFIX",
};
char *facility = "unknown";
u64 value;
@@ -1727,7 +1728,7 @@ void facility_unavailable_exception(struct pt_regs *regs)
u8 status;
bool hv;
- hv = (TRAP(regs) == 0xf80);
+ hv = (TRAP(regs) == INTERRUPT_H_FAC_UNAVAIL);
if (hv)
value = mfspr(SPRN_HFSCR);
else
@@ -1746,9 +1747,7 @@ void facility_unavailable_exception(struct pt_regs *regs)
die("Unexpected facility unavailable exception", regs, SIGABRT);
}
- /* We restore the interrupt state now */
- if (!arch_irq_disabled_regs(regs))
- local_irq_enable();
+ interrupt_cond_local_irq_enable(regs);
if (status == FSCR_DSCR_LG) {
/*
@@ -1789,7 +1788,7 @@ void facility_unavailable_exception(struct pt_regs *regs)
pr_err("DSCR based mfspr emulation failed\n");
return;
}
- regs->nip += 4;
+ regs_add_return_ip(regs, 4);
emulate_single_step(regs);
}
return;
@@ -1826,7 +1825,7 @@ out:
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-void fp_unavailable_tm(struct pt_regs *regs)
+DEFINE_INTERRUPT_HANDLER(fp_unavailable_tm)
{
/* Note: This does not handle any kind of FP laziness. */
@@ -1859,7 +1858,7 @@ void fp_unavailable_tm(struct pt_regs *regs)
tm_recheckpoint(&current->thread);
}
-void altivec_unavailable_tm(struct pt_regs *regs)
+DEFINE_INTERRUPT_HANDLER(altivec_unavailable_tm)
{
/* See the comments in fp_unavailable_tm(). This function operates
* the same way.
@@ -1874,7 +1873,7 @@ void altivec_unavailable_tm(struct pt_regs *regs)
current->thread.used_vr = 1;
}
-void vsx_unavailable_tm(struct pt_regs *regs)
+DEFINE_INTERRUPT_HANDLER(vsx_unavailable_tm)
{
/* See the comments in fp_unavailable_tm(). This works similarly,
* though we're loading both FP and VEC registers in here.
@@ -1899,13 +1898,42 @@ void vsx_unavailable_tm(struct pt_regs *regs)
}
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
-void performance_monitor_exception(struct pt_regs *regs)
+#ifdef CONFIG_PPC64
+DECLARE_INTERRUPT_HANDLER_NMI(performance_monitor_exception_nmi);
+DEFINE_INTERRUPT_HANDLER_NMI(performance_monitor_exception_nmi)
+{
+ __this_cpu_inc(irq_stat.pmu_irqs);
+
+ perf_irq(regs);
+
+ return 0;
+}
+#endif
+
+DECLARE_INTERRUPT_HANDLER_ASYNC(performance_monitor_exception_async);
+DEFINE_INTERRUPT_HANDLER_ASYNC(performance_monitor_exception_async)
{
__this_cpu_inc(irq_stat.pmu_irqs);
perf_irq(regs);
}
+DEFINE_INTERRUPT_HANDLER_RAW(performance_monitor_exception)
+{
+ /*
+ * On 64-bit, if perf interrupts hit in a local_irq_disable
+ * (soft-masked) region, we consider them as NMIs. This is required to
+ * prevent hash faults on user addresses when reading callchains (and
+ * looks better from an irq tracing perspective).
+ */
+ if (IS_ENABLED(CONFIG_PPC64) && unlikely(arch_irq_disabled_regs(regs)))
+ performance_monitor_exception_nmi(regs);
+ else
+ performance_monitor_exception_async(regs);
+
+ return 0;
+}
+
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
static void handle_debug(struct pt_regs *regs, unsigned long debug_status)
{
@@ -1957,7 +1985,7 @@ static void handle_debug(struct pt_regs *regs, unsigned long debug_status)
*/
if (DBCR_ACTIVE_EVENTS(current->thread.debug.dbcr0,
current->thread.debug.dbcr1))
- regs->msr |= MSR_DE;
+ regs_set_return_msr(regs, regs->msr | MSR_DE);
else
/* Make sure the IDM flag is off */
current->thread.debug.dbcr0 &= ~DBCR0_IDM;
@@ -1966,8 +1994,10 @@ static void handle_debug(struct pt_regs *regs, unsigned long debug_status)
mtspr(SPRN_DBCR0, current->thread.debug.dbcr0);
}
-void DebugException(struct pt_regs *regs, unsigned long debug_status)
+DEFINE_INTERRUPT_HANDLER(DebugException)
{
+ unsigned long debug_status = regs->dsisr;
+
current->thread.debug.dbsr = debug_status;
/* Hack alert: On BookE, Branch Taken stops on the branch itself, while
@@ -1976,7 +2006,7 @@ void DebugException(struct pt_regs *regs, unsigned long debug_status)
* instead of stopping here when hitting a BT
*/
if (debug_status & DBSR_BT) {
- regs->msr &= ~MSR_DE;
+ regs_set_return_msr(regs, regs->msr & ~MSR_DE);
/* Disable BT */
mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_BT);
@@ -1987,7 +2017,7 @@ void DebugException(struct pt_regs *regs, unsigned long debug_status)
if (user_mode(regs)) {
current->thread.debug.dbcr0 &= ~DBCR0_BT;
current->thread.debug.dbcr0 |= DBCR0_IDM | DBCR0_IC;
- regs->msr |= MSR_DE;
+ regs_set_return_msr(regs, regs->msr | MSR_DE);
return;
}
@@ -2001,7 +2031,7 @@ void DebugException(struct pt_regs *regs, unsigned long debug_status)
if (debugger_sstep(regs))
return;
} else if (debug_status & DBSR_IC) { /* Instruction complete */
- regs->msr &= ~MSR_DE;
+ regs_set_return_msr(regs, regs->msr & ~MSR_DE);
/* Disable instruction completion */
mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_IC);
@@ -2023,7 +2053,7 @@ void DebugException(struct pt_regs *regs, unsigned long debug_status)
current->thread.debug.dbcr0 &= ~DBCR0_IC;
if (DBCR_ACTIVE_EVENTS(current->thread.debug.dbcr0,
current->thread.debug.dbcr1))
- regs->msr |= MSR_DE;
+ regs_set_return_msr(regs, regs->msr | MSR_DE);
else
/* Make sure the IDM bit is off */
current->thread.debug.dbcr0 &= ~DBCR0_IDM;
@@ -2033,19 +2063,10 @@ void DebugException(struct pt_regs *regs, unsigned long debug_status)
} else
handle_debug(regs, debug_status);
}
-NOKPROBE_SYMBOL(DebugException);
#endif /* CONFIG_PPC_ADV_DEBUG_REGS */
-#if !defined(CONFIG_TAU_INT)
-void TAUException(struct pt_regs *regs)
-{
- printk("TAU trap at PC: %lx, MSR: %lx, vector=%lx %s\n",
- regs->nip, regs->msr, regs->trap, print_tainted());
-}
-#endif /* CONFIG_INT_TAU */
-
#ifdef CONFIG_ALTIVEC
-void altivec_assist_exception(struct pt_regs *regs)
+DEFINE_INTERRUPT_HANDLER(altivec_assist_exception)
{
int err;
@@ -2060,7 +2081,7 @@ void altivec_assist_exception(struct pt_regs *regs)
PPC_WARN_EMULATED(altivec, regs);
err = emulate_altivec(regs);
if (err == 0) {
- regs->nip += 4; /* skip emulated instruction */
+ regs_add_return_ip(regs, 4); /* skip emulated instruction */
emulate_single_step(regs);
return;
}
@@ -2078,10 +2099,11 @@ void altivec_assist_exception(struct pt_regs *regs)
}
#endif /* CONFIG_ALTIVEC */
-#ifdef CONFIG_FSL_BOOKE
-void CacheLockingException(struct pt_regs *regs, unsigned long address,
- unsigned long error_code)
+#ifdef CONFIG_PPC_85xx
+DEFINE_INTERRUPT_HANDLER(CacheLockingException)
{
+ unsigned long error_code = regs->dsisr;
+
/* We treat cache locking instructions from the user
* as priv ops, in the future we could try to do
* something smarter
@@ -2090,20 +2112,17 @@ void CacheLockingException(struct pt_regs *regs, unsigned long address,
_exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
return;
}
-#endif /* CONFIG_FSL_BOOKE */
+#endif /* CONFIG_PPC_85xx */
#ifdef CONFIG_SPE
-void SPEFloatingPointException(struct pt_regs *regs)
+DEFINE_INTERRUPT_HANDLER(SPEFloatingPointException)
{
- extern int do_spe_mathemu(struct pt_regs *regs);
unsigned long spefscr;
int fpexc_mode;
int code = FPE_FLTUNK;
int err;
- /* We restore the interrupt state now */
- if (!arch_irq_disabled_regs(regs))
- local_irq_enable();
+ interrupt_cond_local_irq_enable(regs);
flush_spe_to_thread(current);
@@ -2126,7 +2145,7 @@ void SPEFloatingPointException(struct pt_regs *regs)
err = do_spe_mathemu(regs);
if (err == 0) {
- regs->nip += 4; /* skip emulated instruction */
+ regs_add_return_ip(regs, 4); /* skip emulated instruction */
emulate_single_step(regs);
return;
}
@@ -2145,24 +2164,21 @@ void SPEFloatingPointException(struct pt_regs *regs)
return;
}
-void SPEFloatingPointRoundException(struct pt_regs *regs)
+DEFINE_INTERRUPT_HANDLER(SPEFloatingPointRoundException)
{
- extern int speround_handler(struct pt_regs *regs);
int err;
- /* We restore the interrupt state now */
- if (!arch_irq_disabled_regs(regs))
- local_irq_enable();
+ interrupt_cond_local_irq_enable(regs);
preempt_disable();
if (regs->msr & MSR_SPE)
giveup_spe(current);
preempt_enable();
- regs->nip -= 4;
+ regs_add_return_ip(regs, -4);
err = speround_handler(regs);
if (err == 0) {
- regs->nip += 4; /* skip emulated instruction */
+ regs_add_return_ip(regs, 4); /* skip emulated instruction */
emulate_single_step(regs);
return;
}
@@ -2187,13 +2203,15 @@ void SPEFloatingPointRoundException(struct pt_regs *regs)
* in the MSR is 0. This indicates that SRR0/1 are live, and that
* we therefore lost state by taking this exception.
*/
-void unrecoverable_exception(struct pt_regs *regs)
+void __noreturn unrecoverable_exception(struct pt_regs *regs)
{
pr_emerg("Unrecoverable exception %lx at %lx (msr=%lx)\n",
regs->trap, regs->nip, regs->msr);
die("Unrecoverable exception", regs, SIGABRT);
+ /* die() should not return */
+ for (;;)
+ ;
}
-NOKPROBE_SYMBOL(unrecoverable_exception);
#if defined(CONFIG_BOOKE_WDT) || defined(CONFIG_40x)
/*
@@ -2207,10 +2225,11 @@ void __attribute__ ((weak)) WatchdogHandler(struct pt_regs *regs)
return;
}
-void WatchdogException(struct pt_regs *regs)
+DEFINE_INTERRUPT_HANDLER_NMI(WatchdogException)
{
printk (KERN_EMERG "PowerPC Book-E Watchdog Exception\n");
WatchdogHandler(regs);
+ return 0;
}
#endif
@@ -2218,18 +2237,12 @@ void WatchdogException(struct pt_regs *regs)
* We enter here if we discover during exception entry that we are
* running in supervisor mode with a userspace value in the stack pointer.
*/
-void kernel_bad_stack(struct pt_regs *regs)
+DEFINE_INTERRUPT_HANDLER(kernel_bad_stack)
{
printk(KERN_EMERG "Bad kernel stack pointer %lx at %lx\n",
regs->gpr[1], regs->nip);
die("Bad kernel stack pointer", regs, SIGABRT);
}
-NOKPROBE_SYMBOL(kernel_bad_stack);
-
-void __init trap_init(void)
-{
-}
-
#ifdef CONFIG_PPC_EMULATED_STATS
@@ -2278,35 +2291,20 @@ void ppc_warn_emulated_print(const char *type)
static int __init ppc_warn_emulated_init(void)
{
- struct dentry *dir, *d;
+ struct dentry *dir;
unsigned int i;
struct ppc_emulated_entry *entries = (void *)&ppc_emulated;
- if (!powerpc_debugfs_root)
- return -ENODEV;
-
dir = debugfs_create_dir("emulated_instructions",
- powerpc_debugfs_root);
- if (!dir)
- return -ENOMEM;
+ arch_debugfs_dir);
- d = debugfs_create_u32("do_warn", 0644, dir,
- &ppc_warn_emulated);
- if (!d)
- goto fail;
+ debugfs_create_u32("do_warn", 0644, dir, &ppc_warn_emulated);
- for (i = 0; i < sizeof(ppc_emulated)/sizeof(*entries); i++) {
- d = debugfs_create_u32(entries[i].name, 0644, dir,
- (u32 *)&entries[i].val.counter);
- if (!d)
- goto fail;
- }
+ for (i = 0; i < sizeof(ppc_emulated)/sizeof(*entries); i++)
+ debugfs_create_u32(entries[i].name, 0644, dir,
+ (u32 *)&entries[i].val.counter);
return 0;
-
-fail:
- debugfs_remove_recursive(dir);
- return -ENOMEM;
}
device_initcall(ppc_warn_emulated_init);
diff --git a/arch/powerpc/kernel/udbg.c b/arch/powerpc/kernel/udbg.c
index 01595e8cafe7..92b3fc258d11 100644
--- a/arch/powerpc/kernel/udbg.c
+++ b/arch/powerpc/kernel/udbg.c
@@ -5,7 +5,7 @@
* c 2001 PPC 64 Team, IBM Corp
*/
-#include <stdarg.h>
+#include <linux/stdarg.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/console.h>
@@ -67,6 +67,8 @@ void __init udbg_early_init(void)
udbg_init_debug_opal_raw();
#elif defined(CONFIG_PPC_EARLY_DEBUG_OPAL_HVSI)
udbg_init_debug_opal_hvsi();
+#elif defined(CONFIG_PPC_EARLY_DEBUG_16550)
+ udbg_init_debug_16550();
#endif
#ifdef CONFIG_PPC_EARLY_DEBUG
diff --git a/arch/powerpc/kernel/udbg_16550.c b/arch/powerpc/kernel/udbg_16550.c
index 9356b60d6030..74ddf836f7a2 100644
--- a/arch/powerpc/kernel/udbg_16550.c
+++ b/arch/powerpc/kernel/udbg_16550.c
@@ -8,6 +8,7 @@
#include <asm/udbg.h>
#include <asm/io.h>
#include <asm/reg_a2.h>
+#include <asm/early_ioremap.h>
extern u8 real_readb(volatile u8 __iomem *addr);
extern void real_writeb(u8 data, volatile u8 __iomem *addr);
@@ -84,7 +85,7 @@ static int udbg_uart_getc(void)
return udbg_uart_in(UART_RBR);
}
-static void udbg_use_uart(void)
+static void __init udbg_use_uart(void)
{
udbg_putc = udbg_uart_putc;
udbg_flush = udbg_uart_flush;
@@ -92,7 +93,7 @@ static void udbg_use_uart(void)
udbg_getc_poll = udbg_uart_getc_poll;
}
-void udbg_uart_setup(unsigned int speed, unsigned int clock)
+void __init udbg_uart_setup(unsigned int speed, unsigned int clock)
{
unsigned int dll, base_bauds;
@@ -121,7 +122,7 @@ void udbg_uart_setup(unsigned int speed, unsigned int clock)
udbg_uart_out(UART_FCR, 0x7);
}
-unsigned int udbg_probe_uart_speed(unsigned int clock)
+unsigned int __init udbg_probe_uart_speed(unsigned int clock)
{
unsigned int dll, dlm, divisor, prescaler, speed;
u8 old_lcr;
@@ -172,7 +173,7 @@ static void udbg_uart_out_pio(unsigned int reg, u8 data)
outb(data, udbg_uart.pio_base + (reg * udbg_uart_stride));
}
-void udbg_uart_init_pio(unsigned long port, unsigned int stride)
+void __init udbg_uart_init_pio(unsigned long port, unsigned int stride)
{
if (!port)
return;
@@ -194,7 +195,7 @@ static void udbg_uart_out_mmio(unsigned int reg, u8 data)
}
-void udbg_uart_init_mmio(void __iomem *addr, unsigned int stride)
+void __init udbg_uart_init_mmio(void __iomem *addr, unsigned int stride)
{
if (!addr)
return;
@@ -296,3 +297,35 @@ void __init udbg_init_40x_realmode(void)
}
#endif /* CONFIG_PPC_EARLY_DEBUG_40x */
+
+#ifdef CONFIG_PPC_EARLY_DEBUG_16550
+
+static void __iomem *udbg_uart_early_addr;
+
+void __init udbg_init_debug_16550(void)
+{
+ udbg_uart_early_addr = early_ioremap(CONFIG_PPC_EARLY_DEBUG_16550_PHYSADDR, 0x1000);
+ udbg_uart_init_mmio(udbg_uart_early_addr, CONFIG_PPC_EARLY_DEBUG_16550_STRIDE);
+}
+
+static int __init udbg_init_debug_16550_ioremap(void)
+{
+ void __iomem *addr;
+
+ if (!udbg_uart_early_addr)
+ return 0;
+
+ addr = ioremap(CONFIG_PPC_EARLY_DEBUG_16550_PHYSADDR, 0x1000);
+ if (WARN_ON(!addr))
+ return -ENOMEM;
+
+ udbg_uart_init_mmio(addr, CONFIG_PPC_EARLY_DEBUG_16550_STRIDE);
+ early_iounmap(udbg_uart_early_addr, 0x1000);
+ udbg_uart_early_addr = NULL;
+
+ return 0;
+}
+
+early_initcall(udbg_init_debug_16550_ioremap);
+
+#endif /* CONFIG_PPC_EARLY_DEBUG_16550 */
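
Editor's note: the new early-ioremap path only builds when early debug over a memory-mapped 16550 is selected. A sketch of the Kconfig fragment it expects; the physical address below is a placeholder for the board's UART, and the stride is the register spacing in bytes (commonly 1):

```
CONFIG_PPC_EARLY_DEBUG=y
CONFIG_PPC_EARLY_DEBUG_16550=y
CONFIG_PPC_EARLY_DEBUG_16550_PHYSADDR=0x10000000
CONFIG_PPC_EARLY_DEBUG_16550_STRIDE=1
```

The early_initcall then swaps the early_ioremap() mapping for a regular ioremap() once the normal ioremap machinery is available, so udbg output keeps working across that transition.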
diff --git a/arch/powerpc/kernel/uprobes.c b/arch/powerpc/kernel/uprobes.c
index 1cfef0e5fec5..95a41ae9dfa7 100644
--- a/arch/powerpc/kernel/uprobes.c
+++ b/arch/powerpc/kernel/uprobes.c
@@ -14,6 +14,7 @@
#include <linux/kdebug.h>
#include <asm/sstep.h>
+#include <asm/inst.h>
#define UPROBE_TRAP_NR UINT_MAX
@@ -40,6 +41,18 @@ int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe,
if (addr & 0x03)
return -EINVAL;
+ if (cpu_has_feature(CPU_FTR_ARCH_31) &&
+ ppc_inst_prefixed(ppc_inst_read(auprobe->insn)) &&
+ (addr & 0x3f) == 60) {
+ pr_info_ratelimited("Cannot register a uprobe on 64 byte unaligned prefixed instruction\n");
+ return -EINVAL;
+ }
+
+ if (!can_single_step(ppc_inst_val(ppc_inst_read(auprobe->insn)))) {
+ pr_info_ratelimited("Cannot register a uprobe on instructions that can't be single stepped\n");
+ return -ENOTSUPP;
+ }
+
return 0;
}
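
Editor's note: a worked example of the `(addr & 0x3f) == 60` test above. A prefixed instruction is 8 bytes, and ISA v3.1 forbids it from crossing a 64-byte boundary, so the only 4-byte-aligned start offset within a 64-byte block that would straddle the boundary is 60:

```c
#include <stdbool.h>
#include <stdio.h>

/* Illustration only: bytes 60..67 of a 64-byte block span the boundary,
 * so a uprobe request on a prefixed instruction there is rejected. */
static bool prefixed_crosses_64b(unsigned long addr)
{
	return (addr & 0x3f) == 60;
}

int main(void)
{
	printf("%d\n", prefixed_crosses_64b(0x10000000 + 60));	/* 1: rejected */
	printf("%d\n", prefixed_crosses_64b(0x10000000 + 56));	/* 0: allowed  */
	return 0;
}
```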
@@ -54,7 +67,7 @@ int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
autask->saved_trap_nr = current->thread.trap_nr;
current->thread.trap_nr = UPROBE_TRAP_NR;
- regs->nip = current->utask->xol_vaddr;
+ regs_set_return_ip(regs, current->utask->xol_vaddr);
user_enable_single_step(current);
return 0;
@@ -111,7 +124,7 @@ int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
* support doesn't exist and have to fix-up the next instruction
* to be executed.
*/
- regs->nip = utask->vaddr + MAX_UINSN_BYTES;
+ regs_set_return_ip(regs, (unsigned long)ppc_inst_next((void *)utask->vaddr, auprobe->insn));
user_disable_single_step(current);
return 0;
@@ -140,6 +153,7 @@ int arch_uprobe_exception_notify(struct notifier_block *self,
case DIE_SSTEP:
if (uprobe_post_sstep_notifier(regs))
return NOTIFY_STOP;
+ break;
default:
break;
}
@@ -173,7 +187,7 @@ bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
* emulate_step() returns 1 if the insn was successfully emulated.
* For all other cases, we need to single-step in hardware.
*/
- ret = emulate_step(regs, auprobe->insn);
+ ret = emulate_step(regs, ppc_inst_read(auprobe->insn));
if (ret > 0)
return true;
diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
index b9a108411c0d..4abc01949702 100644
--- a/arch/powerpc/kernel/vdso.c
+++ b/arch/powerpc/kernel/vdso.c
@@ -17,12 +17,14 @@
#include <linux/elf.h>
#include <linux/security.h>
#include <linux/memblock.h>
+#include <linux/syscalls.h>
+#include <linux/time_namespace.h>
+#include <vdso/datapage.h>
-#include <asm/pgtable.h>
+#include <asm/syscall.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
-#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/cputable.h>
#include <asm/sections.h>
@@ -31,39 +33,13 @@
#include <asm/vdso_datapage.h>
#include <asm/setup.h>
-#undef DEBUG
-
-#ifdef DEBUG
-#define DBG(fmt...) printk(fmt)
-#else
-#define DBG(fmt...)
-#endif
-
-/* Max supported size for symbol names */
-#define MAX_SYMNAME 64
-
/* The alignment of the vDSO */
#define VDSO_ALIGNMENT (1 << 16)
-static unsigned int vdso32_pages;
-static void *vdso32_kbase;
-static struct page **vdso32_pagelist;
-unsigned long vdso32_sigtramp;
-unsigned long vdso32_rt_sigtramp;
-
-#ifdef CONFIG_VDSO32
extern char vdso32_start, vdso32_end;
-#endif
-
-#ifdef CONFIG_PPC64
extern char vdso64_start, vdso64_end;
-static void *vdso64_kbase = &vdso64_start;
-static unsigned int vdso64_pages;
-static struct page **vdso64_pagelist;
-unsigned long vdso64_rt_sigtramp;
-#endif /* CONFIG_PPC64 */
-static int vdso_ready;
+long sys_ni_syscall(void);
/*
* The vdso data page (aka. systemcfg for old ppc64 fans) is here.
@@ -71,570 +47,254 @@ static int vdso_ready;
* with it, it will become dynamically allocated
*/
static union {
- struct vdso_data data;
+ struct vdso_arch_data data;
u8 page[PAGE_SIZE];
} vdso_data_store __page_aligned_data;
-struct vdso_data *vdso_data = &vdso_data_store.data;
-
-/* Format of the patch table */
-struct vdso_patch_def
-{
- unsigned long ftr_mask, ftr_value;
- const char *gen_name;
- const char *fix_name;
-};
+struct vdso_arch_data *vdso_data = &vdso_data_store.data;
-/* Table of functions to patch based on the CPU type/revision
- *
- * Currently, we only change sync_dicache to do nothing on processors
- * with a coherent icache
- */
-static struct vdso_patch_def vdso_patches[] = {
- {
- CPU_FTR_COHERENT_ICACHE, CPU_FTR_COHERENT_ICACHE,
- "__kernel_sync_dicache", "__kernel_sync_dicache_p5"
- },
+enum vvar_pages {
+ VVAR_DATA_PAGE_OFFSET,
+ VVAR_TIMENS_PAGE_OFFSET,
+ VVAR_NR_PAGES,
};
-/*
- * Some infos carried around for each of them during parsing at
- * boot time.
- */
-struct lib32_elfinfo
-{
- Elf32_Ehdr *hdr; /* ptr to ELF */
- Elf32_Sym *dynsym; /* ptr to .dynsym section */
- unsigned long dynsymsize; /* size of .dynsym section */
- char *dynstr; /* ptr to .dynstr section */
- unsigned long text; /* offset of .text section in .so */
-};
-
-struct lib64_elfinfo
+static int vdso_mremap(const struct vm_special_mapping *sm, struct vm_area_struct *new_vma,
+ unsigned long text_size)
{
- Elf64_Ehdr *hdr;
- Elf64_Sym *dynsym;
- unsigned long dynsymsize;
- char *dynstr;
- unsigned long text;
-};
-
-
-/*
- * This is called from binfmt_elf, we create the special vma for the
- * vDSO and insert it into the mm struct tree
- */
-int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
-{
- struct mm_struct *mm = current->mm;
- struct page **vdso_pagelist;
- unsigned long vdso_pages;
- unsigned long vdso_base;
- int rc;
-
- if (!vdso_ready)
- return 0;
-
-#ifdef CONFIG_PPC64
- if (is_32bit_task()) {
- vdso_pagelist = vdso32_pagelist;
- vdso_pages = vdso32_pages;
- vdso_base = VDSO32_MBASE;
- } else {
- vdso_pagelist = vdso64_pagelist;
- vdso_pages = vdso64_pages;
- /*
- * On 64bit we don't have a preferred map address. This
- * allows get_unmapped_area to find an area near other mmaps
- * and most likely share a SLB entry.
- */
- vdso_base = 0;
- }
-#else
- vdso_pagelist = vdso32_pagelist;
- vdso_pages = vdso32_pages;
- vdso_base = VDSO32_MBASE;
-#endif
-
- current->mm->context.vdso_base = 0;
+ unsigned long new_size = new_vma->vm_end - new_vma->vm_start;
- /* vDSO has a problem and was disabled, just don't "enable" it for the
- * process
- */
- if (vdso_pages == 0)
- return 0;
- /* Add a page to the vdso size for the data page */
- vdso_pages ++;
+ if (new_size != text_size)
+ return -EINVAL;
- /*
- * pick a base address for the vDSO in process space. We try to put it
- * at vdso_base which is the "natural" base for it, but we might fail
- * and end up putting it elsewhere.
- * Add enough to the size so that the result can be aligned.
- */
- if (down_write_killable(&mm->mmap_sem))
- return -EINTR;
- vdso_base = get_unmapped_area(NULL, vdso_base,
- (vdso_pages << PAGE_SHIFT) +
- ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
- 0, 0);
- if (IS_ERR_VALUE(vdso_base)) {
- rc = vdso_base;
- goto fail_mmapsem;
- }
-
- /* Add required alignment. */
- vdso_base = ALIGN(vdso_base, VDSO_ALIGNMENT);
-
- /*
- * Put vDSO base into mm struct. We need to do this before calling
- * install_special_mapping or the perf counter mmap tracking code
- * will fail to recognise it as a vDSO (since arch_vma_name fails).
- */
- current->mm->context.vdso_base = vdso_base;
-
- /*
- * our vma flags don't have VM_WRITE so by default, the process isn't
- * allowed to write those pages.
- * gdb can break that with ptrace interface, and thus trigger COW on
- * those pages but it's then your responsibility to never do that on
- * the "data" page of the vDSO or you'll stop getting kernel updates
- * and your nice userland gettimeofday will be totally dead.
- * It's fine to use that for setting breakpoints in the vDSO code
- * pages though.
- */
- rc = install_special_mapping(mm, vdso_base, vdso_pages << PAGE_SHIFT,
- VM_READ|VM_EXEC|
- VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
- vdso_pagelist);
- if (rc) {
- current->mm->context.vdso_base = 0;
- goto fail_mmapsem;
- }
+ current->mm->context.vdso = (void __user *)new_vma->vm_start;
- up_write(&mm->mmap_sem);
return 0;
+}
- fail_mmapsem:
- up_write(&mm->mmap_sem);
- return rc;
+static int vdso32_mremap(const struct vm_special_mapping *sm, struct vm_area_struct *new_vma)
+{
+ return vdso_mremap(sm, new_vma, &vdso32_end - &vdso32_start);
}
-const char *arch_vma_name(struct vm_area_struct *vma)
+static int vdso64_mremap(const struct vm_special_mapping *sm, struct vm_area_struct *new_vma)
{
- if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso_base)
- return "[vdso]";
- return NULL;
+ return vdso_mremap(sm, new_vma, &vdso64_end - &vdso64_start);
}
+static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
+ struct vm_area_struct *vma, struct vm_fault *vmf);
+static struct vm_special_mapping vvar_spec __ro_after_init = {
+ .name = "[vvar]",
+ .fault = vvar_fault,
+};
-#ifdef CONFIG_VDSO32
-static void * __init find_section32(Elf32_Ehdr *ehdr, const char *secname,
- unsigned long *size)
-{
- Elf32_Shdr *sechdrs;
- unsigned int i;
- char *secnames;
-
- /* Grab section headers and strings so we can tell who is who */
- sechdrs = (void *)ehdr + ehdr->e_shoff;
- secnames = (void *)ehdr + sechdrs[ehdr->e_shstrndx].sh_offset;
-
- /* Find the section they want */
- for (i = 1; i < ehdr->e_shnum; i++) {
- if (strcmp(secnames+sechdrs[i].sh_name, secname) == 0) {
- if (size)
- *size = sechdrs[i].sh_size;
- return (void *)ehdr + sechdrs[i].sh_offset;
- }
- }
- *size = 0;
- return NULL;
-}
+static struct vm_special_mapping vdso32_spec __ro_after_init = {
+ .name = "[vdso]",
+ .mremap = vdso32_mremap,
+};
+
+static struct vm_special_mapping vdso64_spec __ro_after_init = {
+ .name = "[vdso]",
+ .mremap = vdso64_mremap,
+};
-static Elf32_Sym * __init find_symbol32(struct lib32_elfinfo *lib,
- const char *symname)
+#ifdef CONFIG_TIME_NS
+struct vdso_data *arch_get_vdso_data(void *vvar_page)
{
- unsigned int i;
- char name[MAX_SYMNAME], *c;
-
- for (i = 0; i < (lib->dynsymsize / sizeof(Elf32_Sym)); i++) {
- if (lib->dynsym[i].st_name == 0)
- continue;
- strlcpy(name, lib->dynstr + lib->dynsym[i].st_name,
- MAX_SYMNAME);
- c = strchr(name, '@');
- if (c)
- *c = 0;
- if (strcmp(symname, name) == 0)
- return &lib->dynsym[i];
- }
- return NULL;
+ return ((struct vdso_arch_data *)vvar_page)->data;
}
-/* Note that we assume the section is .text and the symbol is relative to
- * the library base
+/*
+ * The vvar mapping contains data for a specific time namespace, so when a task
+ * changes namespace we must unmap its vvar data for the old namespace.
+ * Subsequent faults will map in data for the new namespace.
+ *
+ * For more details see timens_setup_vdso_data().
*/
-static unsigned long __init find_function32(struct lib32_elfinfo *lib,
- const char *symname)
+int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
{
- Elf32_Sym *sym = find_symbol32(lib, symname);
+ struct mm_struct *mm = task->mm;
+ VMA_ITERATOR(vmi, mm, 0);
+ struct vm_area_struct *vma;
- if (sym == NULL) {
- printk(KERN_WARNING "vDSO32: function %s not found !\n",
- symname);
- return 0;
- }
- return sym->st_value - VDSO32_LBASE;
-}
+ mmap_read_lock(mm);
+ for_each_vma(vmi, vma) {
+ unsigned long size = vma->vm_end - vma->vm_start;
-static int __init vdso_do_func_patch32(struct lib32_elfinfo *v32,
- struct lib64_elfinfo *v64,
- const char *orig, const char *fix)
-{
- Elf32_Sym *sym32_gen, *sym32_fix;
-
- sym32_gen = find_symbol32(v32, orig);
- if (sym32_gen == NULL) {
- printk(KERN_ERR "vDSO32: Can't find symbol %s !\n", orig);
- return -1;
- }
- if (fix == NULL) {
- sym32_gen->st_name = 0;
- return 0;
+ if (vma_is_special_mapping(vma, &vvar_spec))
+ zap_page_range(vma, vma->vm_start, size);
}
- sym32_fix = find_symbol32(v32, fix);
- if (sym32_fix == NULL) {
- printk(KERN_ERR "vDSO32: Can't find symbol %s !\n", fix);
- return -1;
- }
- sym32_gen->st_value = sym32_fix->st_value;
- sym32_gen->st_size = sym32_fix->st_size;
- sym32_gen->st_info = sym32_fix->st_info;
- sym32_gen->st_other = sym32_fix->st_other;
- sym32_gen->st_shndx = sym32_fix->st_shndx;
+ mmap_read_unlock(mm);
return 0;
}
-#else /* !CONFIG_VDSO32 */
-static unsigned long __init find_function32(struct lib32_elfinfo *lib,
- const char *symname)
-{
- return 0;
-}
-static int __init vdso_do_func_patch32(struct lib32_elfinfo *v32,
- struct lib64_elfinfo *v64,
- const char *orig, const char *fix)
+static struct page *find_timens_vvar_page(struct vm_area_struct *vma)
{
- return 0;
-}
-#endif /* CONFIG_VDSO32 */
+ if (likely(vma->vm_mm == current->mm))
+ return current->nsproxy->time_ns->vvar_page;
+ /*
+ * VM_PFNMAP | VM_IO protect .fault() handler from being called
+ * through interfaces like /proc/$pid/mem or
+ * process_vm_{readv,writev}() as long as there's no .access()
+ * in special_mapping_vmops.
+ * For more details check_vma_flags() and __access_remote_vm()
+ */
+ WARN(1, "vvar_page accessed remotely");
-#ifdef CONFIG_PPC64
-
-static void * __init find_section64(Elf64_Ehdr *ehdr, const char *secname,
- unsigned long *size)
-{
- Elf64_Shdr *sechdrs;
- unsigned int i;
- char *secnames;
-
- /* Grab section headers and strings so we can tell who is who */
- sechdrs = (void *)ehdr + ehdr->e_shoff;
- secnames = (void *)ehdr + sechdrs[ehdr->e_shstrndx].sh_offset;
-
- /* Find the section they want */
- for (i = 1; i < ehdr->e_shnum; i++) {
- if (strcmp(secnames+sechdrs[i].sh_name, secname) == 0) {
- if (size)
- *size = sechdrs[i].sh_size;
- return (void *)ehdr + sechdrs[i].sh_offset;
- }
- }
- if (size)
- *size = 0;
return NULL;
}
-
-static Elf64_Sym * __init find_symbol64(struct lib64_elfinfo *lib,
- const char *symname)
+#else
+static struct page *find_timens_vvar_page(struct vm_area_struct *vma)
{
- unsigned int i;
- char name[MAX_SYMNAME], *c;
-
- for (i = 0; i < (lib->dynsymsize / sizeof(Elf64_Sym)); i++) {
- if (lib->dynsym[i].st_name == 0)
- continue;
- strlcpy(name, lib->dynstr + lib->dynsym[i].st_name,
- MAX_SYMNAME);
- c = strchr(name, '@');
- if (c)
- *c = 0;
- if (strcmp(symname, name) == 0)
- return &lib->dynsym[i];
- }
return NULL;
}
-
-/* Note that we assume the section is .text and the symbol is relative to
- * the library base
- */
-static unsigned long __init find_function64(struct lib64_elfinfo *lib,
- const char *symname)
-{
- Elf64_Sym *sym = find_symbol64(lib, symname);
-
- if (sym == NULL) {
- printk(KERN_WARNING "vDSO64: function %s not found !\n",
- symname);
- return 0;
- }
-#ifdef VDS64_HAS_DESCRIPTORS
- return *((u64 *)(vdso64_kbase + sym->st_value - VDSO64_LBASE)) -
- VDSO64_LBASE;
-#else
- return sym->st_value - VDSO64_LBASE;
#endif
-}
-static int __init vdso_do_func_patch64(struct lib32_elfinfo *v32,
- struct lib64_elfinfo *v64,
- const char *orig, const char *fix)
+static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
+ struct vm_area_struct *vma, struct vm_fault *vmf)
{
- Elf64_Sym *sym64_gen, *sym64_fix;
-
- sym64_gen = find_symbol64(v64, orig);
- if (sym64_gen == NULL) {
- printk(KERN_ERR "vDSO64: Can't find symbol %s !\n", orig);
- return -1;
- }
- if (fix == NULL) {
- sym64_gen->st_name = 0;
- return 0;
- }
- sym64_fix = find_symbol64(v64, fix);
- if (sym64_fix == NULL) {
- printk(KERN_ERR "vDSO64: Can't find symbol %s !\n", fix);
- return -1;
+ struct page *timens_page = find_timens_vvar_page(vma);
+ unsigned long pfn;
+
+ switch (vmf->pgoff) {
+ case VVAR_DATA_PAGE_OFFSET:
+ if (timens_page)
+ pfn = page_to_pfn(timens_page);
+ else
+ pfn = virt_to_pfn(vdso_data);
+ break;
+#ifdef CONFIG_TIME_NS
+ case VVAR_TIMENS_PAGE_OFFSET:
+ /*
+ * If a task belongs to a time namespace then a namespace
+ * specific VVAR is mapped with the VVAR_DATA_PAGE_OFFSET and
+ * the real VVAR page is mapped with the VVAR_TIMENS_PAGE_OFFSET
+ * offset.
+ * See also the comment near timens_setup_vdso_data().
+ */
+ if (!timens_page)
+ return VM_FAULT_SIGBUS;
+ pfn = virt_to_pfn(vdso_data);
+ break;
+#endif /* CONFIG_TIME_NS */
+ default:
+ return VM_FAULT_SIGBUS;
}
- sym64_gen->st_value = sym64_fix->st_value;
- sym64_gen->st_size = sym64_fix->st_size;
- sym64_gen->st_info = sym64_fix->st_info;
- sym64_gen->st_other = sym64_fix->st_other;
- sym64_gen->st_shndx = sym64_fix->st_shndx;
- return 0;
+ return vmf_insert_pfn(vma, vmf->address, pfn);
}
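
Editor's note: the net effect of vvar_fault() is a per-namespace page swap. The vDSO code always reads page offset 0 of [vvar]; for a task in a time namespace that offset is backed by the namespace's page, and the real vdso_data moves to offset 1. A sketch of the decision, using the names from the handler above (NULL meaning the fault raises SIGBUS):

```c
/* Sketch only: which physical page backs a given [vvar] page offset. */
static struct page *vvar_backing_page(unsigned long pgoff, struct page *timens_page)
{
	switch (pgoff) {
	case VVAR_DATA_PAGE_OFFSET:
		/* What the vDSO reads: swap in the namespace page if any. */
		return timens_page ? timens_page : virt_to_page(vdso_data);
	case VVAR_TIMENS_PAGE_OFFSET:
		/* Real data is reachable here only inside a time namespace. */
		return timens_page ? virt_to_page(vdso_data) : NULL;
	default:
		return NULL;
	}
}
```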
-#endif /* CONFIG_PPC64 */
+/*
+ * This is called from binfmt_elf, we create the special vma for the
+ * vDSO and insert it into the mm struct tree
+ */
+static int __arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
+{
+ unsigned long vdso_size, vdso_base, mappings_size;
+ struct vm_special_mapping *vdso_spec;
+ unsigned long vvar_size = VVAR_NR_PAGES * PAGE_SIZE;
+ struct mm_struct *mm = current->mm;
+ struct vm_area_struct *vma;
+ if (is_32bit_task()) {
+ vdso_spec = &vdso32_spec;
+ vdso_size = &vdso32_end - &vdso32_start;
+ } else {
+ vdso_spec = &vdso64_spec;
+ vdso_size = &vdso64_end - &vdso64_start;
+ }
-static __init int vdso_do_find_sections(struct lib32_elfinfo *v32,
- struct lib64_elfinfo *v64)
-{
- void *sect;
+ mappings_size = vdso_size + vvar_size;
+ mappings_size += (VDSO_ALIGNMENT - 1) & PAGE_MASK;
/*
- * Locate symbol tables & text section
+ * Pick a base address for the vDSO in process space.
+ * Add enough to the size so that the result can be aligned.
*/
+ vdso_base = get_unmapped_area(NULL, 0, mappings_size, 0, 0);
+ if (IS_ERR_VALUE(vdso_base))
+ return vdso_base;
-#ifdef CONFIG_VDSO32
- v32->dynsym = find_section32(v32->hdr, ".dynsym", &v32->dynsymsize);
- v32->dynstr = find_section32(v32->hdr, ".dynstr", NULL);
- if (v32->dynsym == NULL || v32->dynstr == NULL) {
- printk(KERN_ERR "vDSO32: required symbol section not found\n");
- return -1;
- }
- sect = find_section32(v32->hdr, ".text", NULL);
- if (sect == NULL) {
- printk(KERN_ERR "vDSO32: the .text section was not found\n");
- return -1;
- }
- v32->text = sect - vdso32_kbase;
-#endif
+ /* Add required alignment. */
+ vdso_base = ALIGN(vdso_base, VDSO_ALIGNMENT);
-#ifdef CONFIG_PPC64
- v64->dynsym = find_section64(v64->hdr, ".dynsym", &v64->dynsymsize);
- v64->dynstr = find_section64(v64->hdr, ".dynstr", NULL);
- if (v64->dynsym == NULL || v64->dynstr == NULL) {
- printk(KERN_ERR "vDSO64: required symbol section not found\n");
- return -1;
- }
- sect = find_section64(v64->hdr, ".text", NULL);
- if (sect == NULL) {
- printk(KERN_ERR "vDSO64: the .text section was not found\n");
- return -1;
- }
- v64->text = sect - vdso64_kbase;
-#endif /* CONFIG_PPC64 */
+ /*
+ * Put vDSO base into mm struct. We need to do this before calling
+ * install_special_mapping or the perf counter mmap tracking code
+ * will fail to recognise it as a vDSO.
+ */
+ mm->context.vdso = (void __user *)vdso_base + vvar_size;
- return 0;
-}
+ vma = _install_special_mapping(mm, vdso_base, vvar_size,
+ VM_READ | VM_MAYREAD | VM_IO |
+ VM_DONTDUMP | VM_PFNMAP, &vvar_spec);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
-static __init void vdso_setup_trampolines(struct lib32_elfinfo *v32,
- struct lib64_elfinfo *v64)
-{
/*
- * Find signal trampolines
+ * our vma flags don't have VM_WRITE so by default, the process isn't
+ * allowed to write those pages.
+ * gdb can break that with ptrace interface, and thus trigger COW on
+ * those pages but it's then your responsibility to never do that on
+ * the "data" page of the vDSO or you'll stop getting kernel updates
+ * and your nice userland gettimeofday will be totally dead.
+ * It's fine to use that for setting breakpoints in the vDSO code
+ * pages though.
*/
+ vma = _install_special_mapping(mm, vdso_base + vvar_size, vdso_size,
+ VM_READ | VM_EXEC | VM_MAYREAD |
+ VM_MAYWRITE | VM_MAYEXEC, vdso_spec);
+ if (IS_ERR(vma))
+ do_munmap(mm, vdso_base, vvar_size, NULL);
-#ifdef CONFIG_PPC64
- vdso64_rt_sigtramp = find_function64(v64, "__kernel_sigtramp_rt64");
-#endif
- vdso32_sigtramp = find_function32(v32, "__kernel_sigtramp32");
- vdso32_rt_sigtramp = find_function32(v32, "__kernel_sigtramp_rt32");
+ return PTR_ERR_OR_ZERO(vma);
}
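
Editor's note: the resulting address-space layout, as a sketch (vdso_base is whatever get_unmapped_area() returned, rounded up to VDSO_ALIGNMENT):

```c
/*
 * [vvar]  at vdso_base              VVAR_NR_PAGES pages, VM_PFNMAP, not dumped
 * [vdso]  at vdso_base + vvar_size  vDSO text, VM_READ | VM_EXEC
 */
void __user *vvar_start = (void __user *)vdso_base;
void __user *vdso_text  = (void __user *)vdso_base + vvar_size;	/* == mm->context.vdso */
```

mm->context.vdso points at the text mapping (not the vvar pages), which is what perf's mmap tracking and the signal-trampoline code expect.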
-static __init int vdso_fixup_datapage(struct lib32_elfinfo *v32,
- struct lib64_elfinfo *v64)
+int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
-#ifdef CONFIG_VDSO32
- Elf32_Sym *sym32;
-#endif
-#ifdef CONFIG_PPC64
- Elf64_Sym *sym64;
+ struct mm_struct *mm = current->mm;
+ int rc;
- sym64 = find_symbol64(v64, "__kernel_datapage_offset");
- if (sym64 == NULL) {
- printk(KERN_ERR "vDSO64: Can't find symbol "
- "__kernel_datapage_offset !\n");
- return -1;
- }
- *((int *)(vdso64_kbase + sym64->st_value - VDSO64_LBASE)) =
- (vdso64_pages << PAGE_SHIFT) -
- (sym64->st_value - VDSO64_LBASE);
-#endif /* CONFIG_PPC64 */
+ mm->context.vdso = NULL;
-#ifdef CONFIG_VDSO32
- sym32 = find_symbol32(v32, "__kernel_datapage_offset");
- if (sym32 == NULL) {
- printk(KERN_ERR "vDSO32: Can't find symbol "
- "__kernel_datapage_offset !\n");
- return -1;
- }
- *((int *)(vdso32_kbase + (sym32->st_value - VDSO32_LBASE))) =
- (vdso32_pages << PAGE_SHIFT) -
- (sym32->st_value - VDSO32_LBASE);
-#endif
+ if (mmap_write_lock_killable(mm))
+ return -EINTR;
- return 0;
+ rc = __arch_setup_additional_pages(bprm, uses_interp);
+ if (rc)
+ mm->context.vdso = NULL;
+
+ mmap_write_unlock(mm);
+ return rc;
}
+#define VDSO_DO_FIXUPS(type, value, bits, sec) do { \
+ void *__start = (void *)VDSO##bits##_SYMBOL(&vdso##bits##_start, sec##_start); \
+ void *__end = (void *)VDSO##bits##_SYMBOL(&vdso##bits##_start, sec##_end); \
+ \
+ do_##type##_fixups((value), __start, __end); \
+} while (0)
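
Editor's note: the macro replaces the old runtime ELF section search with offsets resolved at vDSO link time; VDSO64_SYMBOL()/VDSO32_SYMBOL() presumably add a generated per-symbol offset to the image base. Mechanically expanding one invocation from vdso_fixup_features() below:

```c
/*
 * Illustrative expansion of
 *   VDSO_DO_FIXUPS(feature, cur_cpu_spec->cpu_features, 64, ftr_fixup);
 */
do {
	void *__start = (void *)VDSO64_SYMBOL(&vdso64_start, ftr_fixup_start);
	void *__end   = (void *)VDSO64_SYMBOL(&vdso64_start, ftr_fixup_end);

	do_feature_fixups(cur_cpu_spec->cpu_features, __start, __end);
} while (0);
```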
-static __init int vdso_fixup_features(struct lib32_elfinfo *v32,
- struct lib64_elfinfo *v64)
+static void __init vdso_fixup_features(void)
{
- unsigned long size;
- void *start;
-
#ifdef CONFIG_PPC64
- start = find_section64(v64->hdr, "__ftr_fixup", &size);
- if (start)
- do_feature_fixups(cur_cpu_spec->cpu_features,
- start, start + size);
-
- start = find_section64(v64->hdr, "__mmu_ftr_fixup", &size);
- if (start)
- do_feature_fixups(cur_cpu_spec->mmu_features,
- start, start + size);
-
- start = find_section64(v64->hdr, "__fw_ftr_fixup", &size);
- if (start)
- do_feature_fixups(powerpc_firmware_features,
- start, start + size);
-
- start = find_section64(v64->hdr, "__lwsync_fixup", &size);
- if (start)
- do_lwsync_fixups(cur_cpu_spec->cpu_features,
- start, start + size);
+ VDSO_DO_FIXUPS(feature, cur_cpu_spec->cpu_features, 64, ftr_fixup);
+ VDSO_DO_FIXUPS(feature, cur_cpu_spec->mmu_features, 64, mmu_ftr_fixup);
+ VDSO_DO_FIXUPS(feature, powerpc_firmware_features, 64, fw_ftr_fixup);
+ VDSO_DO_FIXUPS(lwsync, cur_cpu_spec->cpu_features, 64, lwsync_fixup);
#endif /* CONFIG_PPC64 */
#ifdef CONFIG_VDSO32
- start = find_section32(v32->hdr, "__ftr_fixup", &size);
- if (start)
- do_feature_fixups(cur_cpu_spec->cpu_features,
- start, start + size);
-
- start = find_section32(v32->hdr, "__mmu_ftr_fixup", &size);
- if (start)
- do_feature_fixups(cur_cpu_spec->mmu_features,
- start, start + size);
-
+ VDSO_DO_FIXUPS(feature, cur_cpu_spec->cpu_features, 32, ftr_fixup);
+ VDSO_DO_FIXUPS(feature, cur_cpu_spec->mmu_features, 32, mmu_ftr_fixup);
#ifdef CONFIG_PPC64
- start = find_section32(v32->hdr, "__fw_ftr_fixup", &size);
- if (start)
- do_feature_fixups(powerpc_firmware_features,
- start, start + size);
+ VDSO_DO_FIXUPS(feature, powerpc_firmware_features, 32, fw_ftr_fixup);
#endif /* CONFIG_PPC64 */
-
- start = find_section32(v32->hdr, "__lwsync_fixup", &size);
- if (start)
- do_lwsync_fixups(cur_cpu_spec->cpu_features,
- start, start + size);
+ VDSO_DO_FIXUPS(lwsync, cur_cpu_spec->cpu_features, 32, lwsync_fixup);
#endif
-
- return 0;
-}
-
-static __init int vdso_fixup_alt_funcs(struct lib32_elfinfo *v32,
- struct lib64_elfinfo *v64)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(vdso_patches); i++) {
- struct vdso_patch_def *patch = &vdso_patches[i];
- int match = (cur_cpu_spec->cpu_features & patch->ftr_mask)
- == patch->ftr_value;
- if (!match)
- continue;
-
- DBG("replacing %s with %s...\n", patch->gen_name,
- patch->fix_name ? "NONE" : patch->fix_name);
-
- /*
- * Patch the 32 bits and 64 bits symbols. Note that we do not
- * patch the "." symbol on 64 bits.
- * It would be easy to do, but doesn't seem to be necessary,
- * patching the OPD symbol is enough.
- */
- vdso_do_func_patch32(v32, v64, patch->gen_name,
- patch->fix_name);
-#ifdef CONFIG_PPC64
- vdso_do_func_patch64(v32, v64, patch->gen_name,
- patch->fix_name);
-#endif /* CONFIG_PPC64 */
- }
-
- return 0;
-}
-
-
-static __init int vdso_setup(void)
-{
- struct lib32_elfinfo v32;
- struct lib64_elfinfo v64;
-
- v32.hdr = vdso32_kbase;
-#ifdef CONFIG_PPC64
- v64.hdr = vdso64_kbase;
-#endif
- if (vdso_do_find_sections(&v32, &v64))
- return -1;
-
- if (vdso_fixup_datapage(&v32, &v64))
- return -1;
-
- if (vdso_fixup_features(&v32, &v64))
- return -1;
-
- if (vdso_fixup_alt_funcs(&v32, &v64))
- return -1;
-
- vdso_setup_trampolines(&v32, &v64);
-
- return 0;
}
/*
@@ -644,26 +304,13 @@ static __init int vdso_setup(void)
static void __init vdso_setup_syscall_map(void)
{
unsigned int i;
- extern unsigned long *sys_call_table;
-#ifdef CONFIG_PPC64
- extern unsigned long *compat_sys_call_table;
-#endif
- extern unsigned long sys_ni_syscall;
-
for (i = 0; i < NR_syscalls; i++) {
-#ifdef CONFIG_PPC64
- if (sys_call_table[i] != sys_ni_syscall)
- vdso_data->syscall_map_64[i >> 5] |=
- 0x80000000UL >> (i & 0x1f);
- if (compat_sys_call_table[i] != sys_ni_syscall)
- vdso_data->syscall_map_32[i >> 5] |=
- 0x80000000UL >> (i & 0x1f);
-#else /* CONFIG_PPC64 */
- if (sys_call_table[i] != sys_ni_syscall)
- vdso_data->syscall_map_32[i >> 5] |=
- 0x80000000UL >> (i & 0x1f);
-#endif /* CONFIG_PPC64 */
+ if (sys_call_table[i] != (void *)&sys_ni_syscall)
+ vdso_data->syscall_map[i >> 5] |= 0x80000000UL >> (i & 0x1f);
+ if (IS_ENABLED(CONFIG_COMPAT) &&
+ compat_sys_call_table[i] != (void *)&sys_ni_syscall)
+ vdso_data->compat_syscall_map[i >> 5] |= 0x80000000UL >> (i & 0x1f);
}
}
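
Editor's note: the syscall map is a bitmap with one bit per syscall number, 32 syscalls per 32-bit word, most-significant bit first. A standalone worked example of the indexing used above:

```c
#include <stdio.h>

int main(void)
{
	unsigned int map[32] = { 0 };	/* enough words for this demo */
	unsigned int i = 35;		/* example syscall number     */

	/* Same indexing as vdso_setup_syscall_map(). */
	map[i >> 5] |= 0x80000000UL >> (i & 0x1f);

	/* word 1 = 0x10000000, i.e. syscall 35 sets bit 3-from-the-top. */
	printf("word %u = 0x%08x\n", i >> 5, map[1]);
	return 0;
}
```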
@@ -682,7 +329,7 @@ int vdso_getcpu_init(void)
node = cpu_to_node(cpu);
WARN_ON_ONCE(node > 0xffff);
- val = (cpu & 0xfff) | ((node & 0xffff) << 16);
+ val = (cpu & 0xffff) | ((node & 0xffff) << 16);
mtspr(SPRN_SPRG_VDSO_WRITE, val);
get_paca()->sprg_vdso = val;
@@ -694,10 +341,24 @@ int vdso_getcpu_init(void)
early_initcall(vdso_getcpu_init);
#endif
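
Editor's note: the one-character mask change above widens the CPU field from 12 to 16 bits, so systems with more than 4095 CPUs report the right value through the vDSO getcpu(); the node number already occupied the upper 16 bits of the SPRG. An illustrative pack/unpack, standalone:

```c
#include <stdio.h>

int main(void)
{
	unsigned long cpu = 5000, node = 2;	/* example values, cpu > 4095 */

	/* Packing as done in vdso_getcpu_init() after the fix. */
	unsigned long val = (cpu & 0xffff) | ((node & 0xffff) << 16);

	/* Unpacking roughly as the vDSO getcpu fast path does. */
	printf("cpu=%lu node=%lu\n", val & 0xffff, (val >> 16) & 0xffff);

	/* With the old 0xfff mask, cpu 5000 (0x1388) would have been
	 * truncated to 0x388 = 904. */
	return 0;
}
```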
-static int __init vdso_init(void)
+static struct page ** __init vdso_setup_pages(void *start, void *end)
{
int i;
+ struct page **pagelist;
+ int pages = (end - start) >> PAGE_SHIFT;
+
+ pagelist = kcalloc(pages + 1, sizeof(struct page *), GFP_KERNEL);
+ if (!pagelist)
+ panic("%s: Cannot allocate page list for VDSO", __func__);
+ for (i = 0; i < pages; i++)
+ pagelist[i] = virt_to_page(start + i * PAGE_SIZE);
+
+ return pagelist;
+}
+
+static int __init vdso_init(void)
+{
#ifdef CONFIG_PPC64
/*
* Fill up the "systemcfg" stuff for backward compatibility
@@ -722,75 +383,19 @@ static int __init vdso_init(void)
vdso_data->icache_block_size = ppc64_caches.l1i.block_size;
vdso_data->dcache_log_block_size = ppc64_caches.l1d.log_block_size;
vdso_data->icache_log_block_size = ppc64_caches.l1i.log_block_size;
-
- /*
- * Calculate the size of the 64 bits vDSO
- */
- vdso64_pages = (&vdso64_end - &vdso64_start) >> PAGE_SHIFT;
- DBG("vdso64_kbase: %p, 0x%x pages\n", vdso64_kbase, vdso64_pages);
#endif /* CONFIG_PPC64 */
-
-#ifdef CONFIG_VDSO32
- vdso32_kbase = &vdso32_start;
-
- /*
- * Calculate the size of the 32 bits vDSO
- */
- vdso32_pages = (&vdso32_end - &vdso32_start) >> PAGE_SHIFT;
- DBG("vdso32_kbase: %p, 0x%x pages\n", vdso32_kbase, vdso32_pages);
-#endif
-
-
- /*
- * Setup the syscall map in the vDOS
- */
vdso_setup_syscall_map();
- /*
- * Initialize the vDSO images in memory, that is do necessary
- * fixups of vDSO symbols, locate trampolines, etc...
- */
- if (vdso_setup()) {
- printk(KERN_ERR "vDSO setup failure, not enabled !\n");
- vdso32_pages = 0;
-#ifdef CONFIG_PPC64
- vdso64_pages = 0;
-#endif
- return 0;
- }
+ vdso_fixup_features();
-#ifdef CONFIG_VDSO32
- /* Make sure pages are in the correct state */
- vdso32_pagelist = kcalloc(vdso32_pages + 2, sizeof(struct page *),
- GFP_KERNEL);
- BUG_ON(vdso32_pagelist == NULL);
- for (i = 0; i < vdso32_pages; i++) {
- struct page *pg = virt_to_page(vdso32_kbase + i*PAGE_SIZE);
- get_page(pg);
- vdso32_pagelist[i] = pg;
- }
- vdso32_pagelist[i++] = virt_to_page(vdso_data);
- vdso32_pagelist[i] = NULL;
-#endif
-
-#ifdef CONFIG_PPC64
- vdso64_pagelist = kcalloc(vdso64_pages + 2, sizeof(struct page *),
- GFP_KERNEL);
- BUG_ON(vdso64_pagelist == NULL);
- for (i = 0; i < vdso64_pages; i++) {
- struct page *pg = virt_to_page(vdso64_kbase + i*PAGE_SIZE);
- get_page(pg);
- vdso64_pagelist[i] = pg;
- }
- vdso64_pagelist[i++] = virt_to_page(vdso_data);
- vdso64_pagelist[i] = NULL;
-#endif /* CONFIG_PPC64 */
+ if (IS_ENABLED(CONFIG_VDSO32))
+ vdso32_spec.pages = vdso_setup_pages(&vdso32_start, &vdso32_end);
- get_page(virt_to_page(vdso_data));
+ if (IS_ENABLED(CONFIG_PPC64))
+ vdso64_spec.pages = vdso_setup_pages(&vdso64_start, &vdso64_end);
smp_wmb();
- vdso_ready = 1;
return 0;
}
diff --git a/arch/powerpc/kernel/vdso/.gitignore b/arch/powerpc/kernel/vdso/.gitignore
new file mode 100644
index 000000000000..dd9bdd67758b
--- /dev/null
+++ b/arch/powerpc/kernel/vdso/.gitignore
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0-only
+vdso32.lds
+vdso32.so.dbg
+vdso64.lds
+vdso64.so.dbg
diff --git a/arch/powerpc/kernel/vdso/Makefile b/arch/powerpc/kernel/vdso/Makefile
new file mode 100644
index 000000000000..a2e7b0ce5b19
--- /dev/null
+++ b/arch/powerpc/kernel/vdso/Makefile
@@ -0,0 +1,104 @@
+# SPDX-License-Identifier: GPL-2.0
+
+# List of files in the vdso, has to be asm only for now
+
+ARCH_REL_TYPE_ABS := R_PPC_JUMP_SLOT|R_PPC_GLOB_DAT|R_PPC_ADDR32|R_PPC_ADDR24|R_PPC_ADDR16|R_PPC_ADDR16_LO|R_PPC_ADDR16_HI|R_PPC_ADDR16_HA|R_PPC_ADDR14|R_PPC_ADDR14_BRTAKEN|R_PPC_ADDR14_BRNTAKEN|R_PPC_REL24
+include $(srctree)/lib/vdso/Makefile
+
+obj-vdso32 = sigtramp32-32.o gettimeofday-32.o datapage-32.o cacheflush-32.o note-32.o getcpu-32.o
+obj-vdso64 = sigtramp64-64.o gettimeofday-64.o datapage-64.o cacheflush-64.o note-64.o getcpu-64.o
+
+ifneq ($(c-gettimeofday-y),)
+ CFLAGS_vgettimeofday-32.o += -include $(c-gettimeofday-y)
+ CFLAGS_vgettimeofday-32.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
+ CFLAGS_vgettimeofday-32.o += $(call cc-option, -fno-stack-protector)
+ CFLAGS_vgettimeofday-32.o += -DDISABLE_BRANCH_PROFILING
+ CFLAGS_vgettimeofday-32.o += -ffreestanding -fasynchronous-unwind-tables
+ CFLAGS_REMOVE_vgettimeofday-32.o = $(CC_FLAGS_FTRACE)
+ CFLAGS_REMOVE_vgettimeofday-32.o += -mcmodel=medium -mabi=elfv1 -mabi=elfv2 -mcall-aixdesc
+ CFLAGS_vgettimeofday-64.o += -include $(c-gettimeofday-y)
+ CFLAGS_vgettimeofday-64.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
+ CFLAGS_vgettimeofday-64.o += $(call cc-option, -fno-stack-protector)
+ CFLAGS_vgettimeofday-64.o += -DDISABLE_BRANCH_PROFILING
+ CFLAGS_vgettimeofday-64.o += -ffreestanding -fasynchronous-unwind-tables
+ CFLAGS_REMOVE_vgettimeofday-64.o = $(CC_FLAGS_FTRACE)
+# Go prior to 1.16.x assumes r30 is not clobbered by any VDSO code. That used to be true
+# by accident when the VDSO was hand-written asm code, but may not be now that the VDSO is
+# compiler generated. To avoid breaking Go, tell GCC not to use r30. The impact on code
+# generation is minimal; it will just use r29 instead.
+ CFLAGS_vgettimeofday-64.o += $(call cc-option, -ffixed-r30)
+endif
+
+# Build rules
+
+ifdef CROSS32_COMPILE
+ VDSOCC := $(CROSS32_COMPILE)gcc
+else
+ VDSOCC := $(CC)
+endif
+
+targets := $(obj-vdso32) vdso32.so.dbg vgettimeofday-32.o
+obj-vdso32 := $(addprefix $(obj)/, $(obj-vdso32))
+targets += $(obj-vdso64) vdso64.so.dbg vgettimeofday-64.o
+obj-vdso64 := $(addprefix $(obj)/, $(obj-vdso64))
+
+GCOV_PROFILE := n
+KCOV_INSTRUMENT := n
+UBSAN_SANITIZE := n
+KASAN_SANITIZE := n
+
+ccflags-y := -shared -fno-common -fno-builtin -nostdlib -Wl,--hash-style=both
+ccflags-$(CONFIG_LD_IS_LLD) += $(call cc-option,--ld-path=$(LD),-fuse-ld=lld)
+
+CC32FLAGS := -Wl,-soname=linux-vdso32.so.1 -m32
+AS32FLAGS := -D__VDSO32__ -s
+
+CC64FLAGS := -Wl,-soname=linux-vdso64.so.1
+AS64FLAGS := -D__VDSO64__ -s
+
+targets += vdso32.lds
+CPPFLAGS_vdso32.lds += -P -C -Upowerpc
+targets += vdso64.lds
+CPPFLAGS_vdso64.lds += -P -C -U$(ARCH)
+
+# link rule for the .so file, .lds has to be first
+$(obj)/vdso32.so.dbg: $(src)/vdso32.lds $(obj-vdso32) $(obj)/vgettimeofday-32.o FORCE
+ $(call if_changed,vdso32ld_and_check)
+$(obj)/vdso64.so.dbg: $(src)/vdso64.lds $(obj-vdso64) $(obj)/vgettimeofday-64.o FORCE
+ $(call if_changed,vdso64ld_and_check)
+
+# assembly rules for the .S files
+$(obj-vdso32): %-32.o: %.S FORCE
+ $(call if_changed_dep,vdso32as)
+$(obj)/vgettimeofday-32.o: %-32.o: %.c FORCE
+ $(call if_changed_dep,vdso32cc)
+$(obj-vdso64): %-64.o: %.S FORCE
+ $(call if_changed_dep,vdso64as)
+$(obj)/vgettimeofday-64.o: %-64.o: %.c FORCE
+ $(call if_changed_dep,cc_o_c)
+
+# Generate VDSO offsets using helper script
+gen-vdso32sym := $(srctree)/$(src)/gen_vdso32_offsets.sh
+quiet_cmd_vdso32sym = VDSO32SYM $@
+ cmd_vdso32sym = $(NM) $< | $(gen-vdso32sym) | LC_ALL=C sort > $@
+gen-vdso64sym := $(srctree)/$(src)/gen_vdso64_offsets.sh
+quiet_cmd_vdso64sym = VDSO64SYM $@
+ cmd_vdso64sym = $(NM) $< | $(gen-vdso64sym) | LC_ALL=C sort > $@
+
+include/generated/vdso32-offsets.h: $(obj)/vdso32.so.dbg FORCE
+ $(call if_changed,vdso32sym)
+include/generated/vdso64-offsets.h: $(obj)/vdso64.so.dbg FORCE
+ $(call if_changed,vdso64sym)
+
+# actual build commands
+quiet_cmd_vdso32ld_and_check = VDSO32L $@
+ cmd_vdso32ld_and_check = $(VDSOCC) $(c_flags) $(CC32FLAGS) -o $@ -Wl,-T$(filter %.lds,$^) $(filter %.o,$^) -z noexecstack ; $(cmd_vdso_check)
+quiet_cmd_vdso32as = VDSO32A $@
+ cmd_vdso32as = $(VDSOCC) $(a_flags) $(CC32FLAGS) $(AS32FLAGS) -c -o $@ $<
+quiet_cmd_vdso32cc = VDSO32C $@
+ cmd_vdso32cc = $(VDSOCC) $(c_flags) $(CC32FLAGS) -c -o $@ $<
+
+quiet_cmd_vdso64ld_and_check = VDSO64L $@
+ cmd_vdso64ld_and_check = $(VDSOCC) $(c_flags) $(CC64FLAGS) -o $@ -Wl,-T$(filter %.lds,$^) $(filter %.o,$^) -z noexecstack ; $(cmd_vdso_check)
+quiet_cmd_vdso64as = VDSO64A $@
+ cmd_vdso64as = $(VDSOCC) $(a_flags) $(CC64FLAGS) $(AS64FLAGS) -c -o $@ $<
diff --git a/arch/powerpc/kernel/vdso32/cacheflush.S b/arch/powerpc/kernel/vdso/cacheflush.S
index 3440ddf21c8b..0085ae464dac 100644
--- a/arch/powerpc/kernel/vdso32/cacheflush.S
+++ b/arch/powerpc/kernel/vdso/cacheflush.S
@@ -24,11 +24,15 @@
*/
V_FUNCTION_BEGIN(__kernel_sync_dicache)
.cfi_startproc
+BEGIN_FTR_SECTION
+ b 3f
+END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
#ifdef CONFIG_PPC64
mflr r12
.cfi_register lr,r12
- get_datapage r10, r0
+ get_datapage r10
mtlr r12
+ .cfi_restore lr
#endif
#ifdef CONFIG_PPC64
@@ -42,7 +46,7 @@ V_FUNCTION_BEGIN(__kernel_sync_dicache)
add r8,r8,r5 /* ensure we get enough */
#ifdef CONFIG_PPC64
lwz r9,CFG_DCACHE_LOGBLOCKSZ(r10)
- srw. r8,r8,r9 /* compute line count */
+ PPC_SRL. r8,r8,r9 /* compute line count */
#else
srwi. r8, r8, L1_CACHE_SHIFT
mr r7, r6
@@ -68,7 +72,7 @@ V_FUNCTION_BEGIN(__kernel_sync_dicache)
subf r8,r6,r4 /* compute length */
add r8,r8,r5
lwz r9,CFG_ICACHE_LOGBLOCKSZ(r10)
- srw. r8,r8,r9 /* compute line count */
+ PPC_SRL. r8,r8,r9 /* compute line count */
crclr cr0*4+so
beqlr /* nothing to do? */
#endif
@@ -84,20 +88,12 @@ V_FUNCTION_BEGIN(__kernel_sync_dicache)
isync
li r3,0
blr
- .cfi_endproc
-V_FUNCTION_END(__kernel_sync_dicache)
-
-
-/*
- * POWER5 version of __kernel_sync_dicache
- */
-V_FUNCTION_BEGIN(__kernel_sync_dicache_p5)
- .cfi_startproc
+3:
crclr cr0*4+so
sync
+ icbi 0,r1
isync
li r3,0
blr
.cfi_endproc
-V_FUNCTION_END(__kernel_sync_dicache_p5)
-
+V_FUNCTION_END(__kernel_sync_dicache)
diff --git a/arch/powerpc/kernel/vdso32/datapage.S b/arch/powerpc/kernel/vdso/datapage.S
index 217bb630f8f9..db8e167f0166 100644
--- a/arch/powerpc/kernel/vdso32/datapage.S
+++ b/arch/powerpc/kernel/vdso/datapage.S
@@ -13,9 +13,6 @@
#include <asm/vdso_datapage.h>
.text
- .global __kernel_datapage_offset;
-__kernel_datapage_offset:
- .long 0
/*
* void *__kernel_get_syscall_map(unsigned int *syscall_count) ;
@@ -31,13 +28,17 @@ V_FUNCTION_BEGIN(__kernel_get_syscall_map)
mflr r12
.cfi_register lr,r12
mr. r4,r3
- get_datapage r3, r0
+ get_datapage r3
mtlr r12
+#ifdef __powerpc64__
+ addi r3,r3,CFG_SYSCALL_MAP64
+#else
addi r3,r3,CFG_SYSCALL_MAP32
+#endif
+ crclr cr0*4+so
beqlr
li r0,NR_syscalls
stw r0,0(r4)
- crclr cr0*4+so
blr
.cfi_endproc
V_FUNCTION_END(__kernel_get_syscall_map)
@@ -47,17 +48,17 @@ V_FUNCTION_END(__kernel_get_syscall_map)
*
* returns the timebase frequency in HZ
*/
-#ifndef CONFIG_PPC_BOOK3S_601
V_FUNCTION_BEGIN(__kernel_get_tbfreq)
.cfi_startproc
mflr r12
.cfi_register lr,r12
- get_datapage r3, r0
+ get_datapage r3
+#ifndef __powerpc64__
lwz r4,(CFG_TB_TICKS_PER_SEC + 4)(r3)
- lwz r3,CFG_TB_TICKS_PER_SEC(r3)
+#endif
+ PPC_LL r3,CFG_TB_TICKS_PER_SEC(r3)
mtlr r12
crclr cr0*4+so
blr
.cfi_endproc
V_FUNCTION_END(__kernel_get_tbfreq)
-#endif
diff --git a/arch/powerpc/kernel/vdso/gen_vdso32_offsets.sh b/arch/powerpc/kernel/vdso/gen_vdso32_offsets.sh
new file mode 100755
index 000000000000..c7b54a5dcd3e
--- /dev/null
+++ b/arch/powerpc/kernel/vdso/gen_vdso32_offsets.sh
@@ -0,0 +1,16 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+
+#
+# Match symbols in the DSO that look like VDSO_*; produce a header file
+# of constant offsets into the shared object.
+#
+# Doing this inside the Makefile will break the $(filter-out) function,
+# causing Kbuild to rebuild the vdso-offsets header file every time.
+#
+# Author: Will Deacon <will.deacon@arm.com>
+#
+
+LC_ALL=C
+sed -n -e 's/^00*/0/' -e \
+'s/^\([0-9a-fA-F]*\) . VDSO_\([a-zA-Z0-9_]*\)$/\#define vdso32_offset_\2\t0x\1/p'
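For illustration, given a hypothetical nm line "000005a4 T VDSO_sigtramp32" (the offset is invented; the symbol matches the VDSO_sigtramp32 alias added in vdso32.lds.S below), the script strips the extra leading zeros and emits a C macro of the form:

#define vdso32_offset_sigtramp32	0x05a4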
diff --git a/arch/powerpc/kernel/vdso/gen_vdso64_offsets.sh b/arch/powerpc/kernel/vdso/gen_vdso64_offsets.sh
new file mode 100755
index 000000000000..4bf15ffd5933
--- /dev/null
+++ b/arch/powerpc/kernel/vdso/gen_vdso64_offsets.sh
@@ -0,0 +1,16 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+
+#
+# Match symbols in the DSO that look like VDSO_*; produce a header file
+# of constant offsets into the shared object.
+#
+# Doing this inside the Makefile will break the $(filter-out) function,
+# causing Kbuild to rebuild the vdso-offsets header file every time.
+#
+# Author: Will Deacon <will.deacon@arm.com>
+#
+
+LC_ALL=C
+sed -n -e 's/^00*/0/' -e \
+'s/^\([0-9a-fA-F]*\) . VDSO_\([a-zA-Z0-9_]*\)$/\#define vdso64_offset_\2\t0x\1/p'
diff --git a/arch/powerpc/kernel/vdso32/getcpu.S b/arch/powerpc/kernel/vdso/getcpu.S
index ff5e214fec41..8e08ccf19062 100644
--- a/arch/powerpc/kernel/vdso32/getcpu.S
+++ b/arch/powerpc/kernel/vdso/getcpu.S
@@ -19,8 +19,8 @@
V_FUNCTION_BEGIN(__kernel_getcpu)
.cfi_startproc
mfspr r5,SPRN_SPRG_VDSO_READ
- cmpwi cr0,r3,0
- cmpwi cr1,r4,0
+ PPC_LCMPI cr0,r3,0
+ PPC_LCMPI cr1,r4,0
clrlwi r6,r5,16
rlwinm r7,r5,16,31-15,31-0
beq cr0,1f
diff --git a/arch/powerpc/kernel/vdso/gettimeofday.S b/arch/powerpc/kernel/vdso/gettimeofday.S
new file mode 100644
index 000000000000..0c4ecc8fec5a
--- /dev/null
+++ b/arch/powerpc/kernel/vdso/gettimeofday.S
@@ -0,0 +1,129 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Userland implementation of gettimeofday() for processes
+ * for use in the vDSO
+ *
+ * Copyright (C) 2004 Benjamin Herrenschmuidt (benh@kernel.crashing.org),
+ * IBM Corp.
+ */
+#include <asm/processor.h>
+#include <asm/ppc_asm.h>
+#include <asm/vdso.h>
+#include <asm/vdso_datapage.h>
+#include <asm/asm-offsets.h>
+#include <asm/unistd.h>
+
+/*
+ * The macro sets up two stack frames, one for the caller and one for the callee,
+ * because there is no requirement for the caller to set up a stack frame when
+ * calling the VDSO, so it may have omitted to do so, especially on PPC64.
+ */
+
+.macro cvdso_call funct call_time=0
+ .cfi_startproc
+ PPC_STLU r1, -PPC_MIN_STKFRM(r1)
+ .cfi_adjust_cfa_offset PPC_MIN_STKFRM
+ mflr r0
+ PPC_STLU r1, -PPC_MIN_STKFRM(r1)
+ .cfi_adjust_cfa_offset PPC_MIN_STKFRM
+ PPC_STL r0, PPC_MIN_STKFRM + PPC_LR_STKOFF(r1)
+ .cfi_rel_offset lr, PPC_MIN_STKFRM + PPC_LR_STKOFF
+#ifdef __powerpc64__
+ PPC_STL r2, PPC_MIN_STKFRM + STK_GOT(r1)
+ .cfi_rel_offset r2, PPC_MIN_STKFRM + STK_GOT
+#endif
+ get_datapage r5
+ .ifeq \call_time
+ addi r5, r5, VDSO_DATA_OFFSET
+ .else
+ addi r4, r5, VDSO_DATA_OFFSET
+ .endif
+ bl DOTSYM(\funct)
+ PPC_LL r0, PPC_MIN_STKFRM + PPC_LR_STKOFF(r1)
+#ifdef __powerpc64__
+ PPC_LL r2, PPC_MIN_STKFRM + STK_GOT(r1)
+ .cfi_restore r2
+#endif
+ .ifeq \call_time
+ cmpwi r3, 0
+ .endif
+ mtlr r0
+ addi r1, r1, 2 * PPC_MIN_STKFRM
+ .cfi_restore lr
+ .cfi_def_cfa_offset 0
+ crclr so
+ .ifeq \call_time
+ beqlr+
+ crset so
+ neg r3, r3
+ .endif
+ blr
+ .cfi_endproc
+.endm
+
+ .text
+/*
+ * Exact prototype of gettimeofday
+ *
+ * int __kernel_gettimeofday(struct timeval *tv, struct timezone *tz);
+ *
+ */
+V_FUNCTION_BEGIN(__kernel_gettimeofday)
+ cvdso_call __c_kernel_gettimeofday
+V_FUNCTION_END(__kernel_gettimeofday)
+
+/*
+ * Exact prototype of clock_gettime()
+ *
+ * int __kernel_clock_gettime(clockid_t clock_id, struct timespec *tp);
+ *
+ */
+V_FUNCTION_BEGIN(__kernel_clock_gettime)
+ cvdso_call __c_kernel_clock_gettime
+V_FUNCTION_END(__kernel_clock_gettime)
+
+/*
+ * Exact prototype of clock_gettime64()
+ *
+ * int __kernel_clock_gettime64(clockid_t clock_id, struct __timespec64 *ts);
+ *
+ */
+#ifndef __powerpc64__
+V_FUNCTION_BEGIN(__kernel_clock_gettime64)
+ cvdso_call __c_kernel_clock_gettime64
+V_FUNCTION_END(__kernel_clock_gettime64)
+#endif
+
+/*
+ * Exact prototype of clock_getres()
+ *
+ * int __kernel_clock_getres(clockid_t clock_id, struct timespec *res);
+ *
+ */
+V_FUNCTION_BEGIN(__kernel_clock_getres)
+ cvdso_call __c_kernel_clock_getres
+V_FUNCTION_END(__kernel_clock_getres)
+
+
+/*
+ * Exact prototype of time()
+ *
+ * time_t time(time_t *t);
+ *
+ */
+V_FUNCTION_BEGIN(__kernel_time)
+ cvdso_call __c_kernel_time call_time=1
+V_FUNCTION_END(__kernel_time)
+
+/* Routines for restoring integer registers, called by the compiler. */
+/* Called with r11 pointing to the stack header word of the caller of the */
+/* function, just beyond the end of the integer restore area. */
+#ifndef __powerpc64__
+_GLOBAL(_restgpr_31_x)
+_GLOBAL(_rest32gpr_31_x)
+ lwz r0,4(r11)
+ lwz r31,-4(r11)
+ mtlr r0
+ mr r1,r11
+ blr
+#endif
diff --git a/arch/powerpc/kernel/vdso32/note.S b/arch/powerpc/kernel/vdso/note.S
index 227a7327399e..227a7327399e 100644
--- a/arch/powerpc/kernel/vdso32/note.S
+++ b/arch/powerpc/kernel/vdso/note.S
diff --git a/arch/powerpc/kernel/vdso32/sigtramp.S b/arch/powerpc/kernel/vdso/sigtramp32.S
index 0bcc5e5fe789..0bcc5e5fe789 100644
--- a/arch/powerpc/kernel/vdso32/sigtramp.S
+++ b/arch/powerpc/kernel/vdso/sigtramp32.S
diff --git a/arch/powerpc/kernel/vdso64/sigtramp.S b/arch/powerpc/kernel/vdso/sigtramp64.S
index a8cc0409d7d2..2d4067561293 100644
--- a/arch/powerpc/kernel/vdso64/sigtramp.S
+++ b/arch/powerpc/kernel/vdso/sigtramp64.S
@@ -6,6 +6,7 @@
* Copyright (C) 2004 Benjamin Herrenschmuidt (benh@kernel.crashing.org), IBM Corp.
* Copyright (C) 2004 Alan Modra (amodra@au.ibm.com)), IBM Corp.
*/
+#include <asm/cache.h> /* IFETCH_ALIGN_BYTES */
#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/unistd.h>
@@ -14,21 +15,26 @@
.text
-/* The nop here is a hack. The dwarf2 unwind routines subtract 1 from
- the return address to get an address in the middle of the presumed
- call instruction. Since we don't have a call here, we artificially
- extend the range covered by the unwind info by padding before the
- real start. */
- nop
+/*
+ * __kernel_start_sigtramp_rt64 and __kernel_sigtramp_rt64 together
+ * are one function split in two parts. The kernel jumps to the former
+ * and the signal handler indirectly (by blr) returns to the latter.
+ * __kernel_sigtramp_rt64 needs to point to the return address so
+ * glibc can correctly identify the trampoline stack frame.
+ */
.balign 8
+ .balign IFETCH_ALIGN_BYTES
+V_FUNCTION_BEGIN(__kernel_start_sigtramp_rt64)
+.Lsigrt_start:
+ bctrl /* call the handler */
+V_FUNCTION_END(__kernel_start_sigtramp_rt64)
V_FUNCTION_BEGIN(__kernel_sigtramp_rt64)
-.Lsigrt_start = . - 4
addi r1, r1, __SIGNAL_FRAMESIZE
li r0,__NR_rt_sigreturn
sc
.Lsigrt_end:
V_FUNCTION_END(__kernel_sigtramp_rt64)
-/* The ".balign 8" above and the following zeros mimic the old stack
+/* The .balign 8 above and the following zeros mimic the old stack
trampoline layout. The last magic value is the ucontext pointer,
chosen in such a way that older libgcc unwind code returns a zero
for a sigcontext pointer. */
diff --git a/arch/powerpc/kernel/vdso32/vdso32.lds.S b/arch/powerpc/kernel/vdso/vdso32.lds.S
index 5206c2eb2a1d..bc0be274a9ac 100644
--- a/arch/powerpc/kernel/vdso32/vdso32.lds.S
+++ b/arch/powerpc/kernel/vdso/vdso32.lds.S
@@ -4,6 +4,8 @@
* library
*/
#include <asm/vdso.h>
+#include <asm/page.h>
+#include <asm-generic/vmlinux.lds.h>
#ifdef __LITTLE_ENDIAN__
OUTPUT_FORMAT("elf32-powerpcle", "elf32-powerpcle", "elf32-powerpcle")
@@ -11,11 +13,11 @@ OUTPUT_FORMAT("elf32-powerpcle", "elf32-powerpcle", "elf32-powerpcle")
OUTPUT_FORMAT("elf32-powerpc", "elf32-powerpc", "elf32-powerpc")
#endif
OUTPUT_ARCH(powerpc:common)
-ENTRY(_start)
SECTIONS
{
- . = VDSO32_LBASE + SIZEOF_HEADERS;
+ PROVIDE(_vdso_datapage = . - 2 * PAGE_SIZE);
+ . = SIZEOF_HEADERS;
.hash : { *(.hash) } :text
.gnu.hash : { *(.gnu.hash) }
@@ -36,17 +38,25 @@ SECTIONS
PROVIDE(etext = .);
. = ALIGN(8);
+ VDSO_ftr_fixup_start = .;
__ftr_fixup : { *(__ftr_fixup) }
+ VDSO_ftr_fixup_end = .;
. = ALIGN(8);
+ VDSO_mmu_ftr_fixup_start = .;
__mmu_ftr_fixup : { *(__mmu_ftr_fixup) }
+ VDSO_mmu_ftr_fixup_end = .;
. = ALIGN(8);
+ VDSO_lwsync_fixup_start = .;
__lwsync_fixup : { *(__lwsync_fixup) }
+ VDSO_lwsync_fixup_end = .;
#ifdef CONFIG_PPC64
. = ALIGN(8);
+ VDSO_fw_ftr_fixup_start = .;
__fw_ftr_fixup : { *(__fw_ftr_fixup) }
+ VDSO_fw_ftr_fixup_end = .;
#endif
/*
@@ -68,49 +78,14 @@ SECTIONS
__end = .;
PROVIDE(end = .);
- /*
- * Stabs debugging sections are here too.
- */
- .stab 0 : { *(.stab) }
- .stabstr 0 : { *(.stabstr) }
- .stab.excl 0 : { *(.stab.excl) }
- .stab.exclstr 0 : { *(.stab.exclstr) }
- .stab.index 0 : { *(.stab.index) }
- .stab.indexstr 0 : { *(.stab.indexstr) }
- .comment 0 : { *(.comment) }
-
- /*
- * DWARF debug sections.
- * Symbols in the DWARF debugging sections are relative to the beginning
- * of the section so we begin them at 0.
- */
- /* DWARF 1 */
- .debug 0 : { *(.debug) }
- .line 0 : { *(.line) }
- /* GNU DWARF 1 extensions */
- .debug_srcinfo 0 : { *(.debug_srcinfo) }
- .debug_sfnames 0 : { *(.debug_sfnames) }
- /* DWARF 1.1 and DWARF 2 */
- .debug_aranges 0 : { *(.debug_aranges) }
- .debug_pubnames 0 : { *(.debug_pubnames) }
- /* DWARF 2 */
- .debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) }
- .debug_abbrev 0 : { *(.debug_abbrev) }
- .debug_line 0 : { *(.debug_line) }
- .debug_frame 0 : { *(.debug_frame) }
- .debug_str 0 : { *(.debug_str) }
- .debug_loc 0 : { *(.debug_loc) }
- .debug_macinfo 0 : { *(.debug_macinfo) }
- /* SGI/MIPS DWARF 2 extensions */
- .debug_weaknames 0 : { *(.debug_weaknames) }
- .debug_funcnames 0 : { *(.debug_funcnames) }
- .debug_typenames 0 : { *(.debug_typenames) }
- .debug_varnames 0 : { *(.debug_varnames) }
+ DWARF_DEBUG
+ ELF_DETAILS
/DISCARD/ : {
*(.note.GNU-stack)
*(.data .data.* .gnu.linkonce.d.* .sdata*)
*(.bss .sbss .dynbss .dynsbss)
+ *(.got1)
}
}
@@ -138,21 +113,14 @@ VERSION
{
VDSO_VERSION_STRING {
global:
- /*
- * Has to be there for the kernel to find
- */
- __kernel_datapage_offset;
-
__kernel_get_syscall_map;
-#ifndef CONFIG_PPC_BOOK3S_601
__kernel_gettimeofday;
__kernel_clock_gettime;
+ __kernel_clock_gettime64;
__kernel_clock_getres;
__kernel_time;
__kernel_get_tbfreq;
-#endif
__kernel_sync_dicache;
- __kernel_sync_dicache_p5;
__kernel_sigtramp32;
__kernel_sigtramp_rt32;
#if defined(CONFIG_PPC64) || !defined(CONFIG_SMP)
@@ -162,3 +130,9 @@ VERSION
local: *;
};
}
+
+/*
+ * Make the sigreturn code visible to the kernel.
+ */
+VDSO_sigtramp32 = __kernel_sigtramp32;
+VDSO_sigtramp_rt32 = __kernel_sigtramp_rt32;
diff --git a/arch/powerpc/kernel/vdso64/vdso64.lds.S b/arch/powerpc/kernel/vdso/vdso64.lds.S
index 256fb9720298..744ae5363e6c 100644
--- a/arch/powerpc/kernel/vdso64/vdso64.lds.S
+++ b/arch/powerpc/kernel/vdso/vdso64.lds.S
@@ -4,6 +4,8 @@
* library
*/
#include <asm/vdso.h>
+#include <asm/page.h>
+#include <asm-generic/vmlinux.lds.h>
#ifdef __LITTLE_ENDIAN__
OUTPUT_FORMAT("elf64-powerpcle", "elf64-powerpcle", "elf64-powerpcle")
@@ -11,11 +13,11 @@ OUTPUT_FORMAT("elf64-powerpcle", "elf64-powerpcle", "elf64-powerpcle")
OUTPUT_FORMAT("elf64-powerpc", "elf64-powerpc", "elf64-powerpc")
#endif
OUTPUT_ARCH(powerpc:common64)
-ENTRY(_start)
SECTIONS
{
- . = VDSO64_LBASE + SIZEOF_HEADERS;
+ PROVIDE(_vdso_datapage = . - 2 * PAGE_SIZE);
+ . = SIZEOF_HEADERS;
.hash : { *(.hash) } :text
.gnu.hash : { *(.gnu.hash) }
@@ -37,16 +39,24 @@ SECTIONS
PROVIDE(etext = .);
. = ALIGN(8);
+ VDSO_ftr_fixup_start = .;
__ftr_fixup : { *(__ftr_fixup) }
+ VDSO_ftr_fixup_end = .;
. = ALIGN(8);
+ VDSO_mmu_ftr_fixup_start = .;
__mmu_ftr_fixup : { *(__mmu_ftr_fixup) }
+ VDSO_mmu_ftr_fixup_end = .;
. = ALIGN(8);
+ VDSO_lwsync_fixup_start = .;
__lwsync_fixup : { *(__lwsync_fixup) }
+ VDSO_lwsync_fixup_end = .;
. = ALIGN(8);
+ VDSO_fw_ftr_fixup_start = .;
__fw_ftr_fixup : { *(__fw_ftr_fixup) }
+ VDSO_fw_ftr_fixup_end = .;
/*
* Other stuff is appended to the text segment:
@@ -61,56 +71,20 @@ SECTIONS
.gcc_except_table : { *(.gcc_except_table) }
.rela.dyn ALIGN(8) : { *(.rela.dyn) }
- .opd ALIGN(8) : { KEEP (*(.opd)) }
.got ALIGN(8) : { *(.got .toc) }
_end = .;
PROVIDE(end = .);
- /*
- * Stabs debugging sections are here too.
- */
- .stab 0 : { *(.stab) }
- .stabstr 0 : { *(.stabstr) }
- .stab.excl 0 : { *(.stab.excl) }
- .stab.exclstr 0 : { *(.stab.exclstr) }
- .stab.index 0 : { *(.stab.index) }
- .stab.indexstr 0 : { *(.stab.indexstr) }
- .comment 0 : { *(.comment) }
-
- /*
- * DWARF debug sections.
- * Symbols in the DWARF debugging sections are relative to the beginning
- * of the section so we begin them at 0.
- */
- /* DWARF 1 */
- .debug 0 : { *(.debug) }
- .line 0 : { *(.line) }
- /* GNU DWARF 1 extensions */
- .debug_srcinfo 0 : { *(.debug_srcinfo) }
- .debug_sfnames 0 : { *(.debug_sfnames) }
- /* DWARF 1.1 and DWARF 2 */
- .debug_aranges 0 : { *(.debug_aranges) }
- .debug_pubnames 0 : { *(.debug_pubnames) }
- /* DWARF 2 */
- .debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) }
- .debug_abbrev 0 : { *(.debug_abbrev) }
- .debug_line 0 : { *(.debug_line) }
- .debug_frame 0 : { *(.debug_frame) }
- .debug_str 0 : { *(.debug_str) }
- .debug_loc 0 : { *(.debug_loc) }
- .debug_macinfo 0 : { *(.debug_macinfo) }
- /* SGI/MIPS DWARF 2 extensions */
- .debug_weaknames 0 : { *(.debug_weaknames) }
- .debug_funcnames 0 : { *(.debug_funcnames) }
- .debug_typenames 0 : { *(.debug_typenames) }
- .debug_varnames 0 : { *(.debug_varnames) }
+ DWARF_DEBUG
+ ELF_DETAILS
/DISCARD/ : {
*(.note.GNU-stack)
*(.branch_lt)
*(.data .data.* .gnu.linkonce.d.* .sdata*)
*(.bss .sbss .dynbss .dynsbss)
+ *(.opd)
}
}
@@ -138,18 +112,12 @@ VERSION
{
VDSO_VERSION_STRING {
global:
- /*
- * Has to be there for the kernel to find
- */
- __kernel_datapage_offset;
-
__kernel_get_syscall_map;
__kernel_gettimeofday;
__kernel_clock_gettime;
__kernel_clock_getres;
__kernel_get_tbfreq;
__kernel_sync_dicache;
- __kernel_sync_dicache_p5;
__kernel_sigtramp_rt64;
__kernel_getcpu;
__kernel_time;
@@ -157,3 +125,8 @@ VERSION
local: *;
};
}
+
+/*
+ * Make the sigreturn code visible to the kernel.
+ */
+VDSO_sigtramp_rt64 = __kernel_start_sigtramp_rt64;
diff --git a/arch/powerpc/kernel/vdso/vgettimeofday.c b/arch/powerpc/kernel/vdso/vgettimeofday.c
new file mode 100644
index 000000000000..55a287c9a736
--- /dev/null
+++ b/arch/powerpc/kernel/vdso/vgettimeofday.c
@@ -0,0 +1,49 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Powerpc userspace implementations of gettimeofday() and similar.
+ */
+#include <linux/time.h>
+#include <linux/types.h>
+
+#ifdef __powerpc64__
+int __c_kernel_clock_gettime(clockid_t clock, struct __kernel_timespec *ts,
+ const struct vdso_data *vd)
+{
+ return __cvdso_clock_gettime_data(vd, clock, ts);
+}
+
+int __c_kernel_clock_getres(clockid_t clock_id, struct __kernel_timespec *res,
+ const struct vdso_data *vd)
+{
+ return __cvdso_clock_getres_data(vd, clock_id, res);
+}
+#else
+int __c_kernel_clock_gettime(clockid_t clock, struct old_timespec32 *ts,
+ const struct vdso_data *vd)
+{
+ return __cvdso_clock_gettime32_data(vd, clock, ts);
+}
+
+int __c_kernel_clock_gettime64(clockid_t clock, struct __kernel_timespec *ts,
+ const struct vdso_data *vd)
+{
+ return __cvdso_clock_gettime_data(vd, clock, ts);
+}
+
+int __c_kernel_clock_getres(clockid_t clock_id, struct old_timespec32 *res,
+ const struct vdso_data *vd)
+{
+ return __cvdso_clock_getres_time32_data(vd, clock_id, res);
+}
+#endif
+
+int __c_kernel_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz,
+ const struct vdso_data *vd)
+{
+ return __cvdso_gettimeofday_data(vd, tv, tz);
+}
+
+__kernel_old_time_t __c_kernel_time(__kernel_old_time_t *time, const struct vdso_data *vd)
+{
+ return __cvdso_time_data(vd, time);
+}
diff --git a/arch/powerpc/kernel/vdso32/.gitignore b/arch/powerpc/kernel/vdso32/.gitignore
deleted file mode 100644
index fea5809857a5..000000000000
--- a/arch/powerpc/kernel/vdso32/.gitignore
+++ /dev/null
@@ -1,2 +0,0 @@
-vdso32.lds
-vdso32.so.dbg
diff --git a/arch/powerpc/kernel/vdso32/Makefile b/arch/powerpc/kernel/vdso32/Makefile
deleted file mode 100644
index e147bbdc12cd..000000000000
--- a/arch/powerpc/kernel/vdso32/Makefile
+++ /dev/null
@@ -1,65 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-
-# List of files in the vdso, has to be asm only for now
-
-obj-vdso32 = sigtramp.o gettimeofday.o datapage.o cacheflush.o note.o getcpu.o
-
-# Build rules
-
-ifdef CROSS32_COMPILE
- VDSOCC := $(CROSS32_COMPILE)gcc
-else
- VDSOCC := $(CC)
-endif
-
-CC32FLAGS :=
-ifdef CONFIG_PPC64
-CC32FLAGS += -m32
-endif
-
-targets := $(obj-vdso32) vdso32.so vdso32.so.dbg
-obj-vdso32 := $(addprefix $(obj)/, $(obj-vdso32))
-
-GCOV_PROFILE := n
-KCOV_INSTRUMENT := n
-UBSAN_SANITIZE := n
-
-ccflags-y := -shared -fno-common -fno-builtin -nostdlib \
- -Wl,-soname=linux-vdso32.so.1 -Wl,--hash-style=both
-asflags-y := -D__VDSO32__ -s
-
-obj-y += vdso32_wrapper.o
-extra-y += vdso32.lds
-CPPFLAGS_vdso32.lds += -P -C -Upowerpc
-
-# Force dependency (incbin is bad)
-$(obj)/vdso32_wrapper.o : $(obj)/vdso32.so
-
-# link rule for the .so file, .lds has to be first
-$(obj)/vdso32.so.dbg: $(src)/vdso32.lds $(obj-vdso32) FORCE
- $(call if_changed,vdso32ld)
-
-# strip rule for the .so file
-$(obj)/%.so: OBJCOPYFLAGS := -S
-$(obj)/%.so: $(obj)/%.so.dbg FORCE
- $(call if_changed,objcopy)
-
-# assembly rules for the .S files
-$(obj-vdso32): %.o: %.S FORCE
- $(call if_changed_dep,vdso32as)
-
-# actual build commands
-quiet_cmd_vdso32ld = VDSO32L $@
- cmd_vdso32ld = $(VDSOCC) $(c_flags) $(CC32FLAGS) -o $@ -Wl,-T$(filter %.lds,$^) $(filter %.o,$^)
-quiet_cmd_vdso32as = VDSO32A $@
- cmd_vdso32as = $(VDSOCC) $(a_flags) $(CC32FLAGS) -c -o $@ $<
-
-# install commands for the unstripped file
-quiet_cmd_vdso_install = INSTALL $@
- cmd_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/vdso/$@
-
-vdso32.so: $(obj)/vdso32.so.dbg
- @mkdir -p $(MODLIB)/vdso
- $(call cmd,vdso_install)
-
-vdso_install: vdso32.so
diff --git a/arch/powerpc/kernel/vdso32/gettimeofday.S b/arch/powerpc/kernel/vdso32/gettimeofday.S
deleted file mode 100644
index a3951567118a..000000000000
--- a/arch/powerpc/kernel/vdso32/gettimeofday.S
+++ /dev/null
@@ -1,339 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Userland implementation of gettimeofday() for 32 bits processes in a
- * ppc64 kernel for use in the vDSO
- *
- * Copyright (C) 2004 Benjamin Herrenschmuidt (benh@kernel.crashing.org,
- * IBM Corp.
- */
-#include <asm/processor.h>
-#include <asm/ppc_asm.h>
-#include <asm/vdso.h>
-#include <asm/vdso_datapage.h>
-#include <asm/asm-offsets.h>
-#include <asm/unistd.h>
-
-/* Offset for the low 32-bit part of a field of long type */
-#ifdef CONFIG_PPC64
-#define LOPART 4
-#else
-#define LOPART 0
-#endif
-
- .text
-/*
- * Exact prototype of gettimeofday
- *
- * int __kernel_gettimeofday(struct timeval *tv, struct timezone *tz);
- *
- */
-V_FUNCTION_BEGIN(__kernel_gettimeofday)
- .cfi_startproc
- mflr r12
- .cfi_register lr,r12
-
- mr. r10,r3 /* r10 saves tv */
- mr r11,r4 /* r11 saves tz */
- get_datapage r9, r0
- beq 3f
- LOAD_REG_IMMEDIATE(r7, 1000000) /* load up USEC_PER_SEC */
- bl __do_get_tspec@local /* get sec/usec from tb & kernel */
- stw r3,TVAL32_TV_SEC(r10)
- stw r4,TVAL32_TV_USEC(r10)
-
-3: cmplwi r11,0 /* check if tz is NULL */
- mtlr r12
- crclr cr0*4+so
- li r3,0
- beqlr
-
- lwz r4,CFG_TZ_MINUTEWEST(r9)/* fill tz */
- lwz r5,CFG_TZ_DSTTIME(r9)
- stw r4,TZONE_TZ_MINWEST(r11)
- stw r5,TZONE_TZ_DSTTIME(r11)
-
- blr
- .cfi_endproc
-V_FUNCTION_END(__kernel_gettimeofday)
-
-/*
- * Exact prototype of clock_gettime()
- *
- * int __kernel_clock_gettime(clockid_t clock_id, struct timespec *tp);
- *
- */
-V_FUNCTION_BEGIN(__kernel_clock_gettime)
- .cfi_startproc
- /* Check for supported clock IDs */
- cmpli cr0,r3,CLOCK_REALTIME
- cmpli cr1,r3,CLOCK_MONOTONIC
- cror cr0*4+eq,cr0*4+eq,cr1*4+eq
-
- cmpli cr5,r3,CLOCK_REALTIME_COARSE
- cmpli cr6,r3,CLOCK_MONOTONIC_COARSE
- cror cr5*4+eq,cr5*4+eq,cr6*4+eq
-
- cror cr0*4+eq,cr0*4+eq,cr5*4+eq
- bne cr0, .Lgettime_fallback
-
- mflr r12 /* r12 saves lr */
- .cfi_register lr,r12
- mr r11,r4 /* r11 saves tp */
- get_datapage r9, r0
- LOAD_REG_IMMEDIATE(r7, NSEC_PER_SEC) /* load up NSEC_PER_SEC */
- beq cr5, .Lcoarse_clocks
-.Lprecise_clocks:
- bl __do_get_tspec@local /* get sec/nsec from tb & kernel */
- bne cr1, .Lfinish /* not monotonic -> all done */
-
- /*
- * CLOCK_MONOTONIC
- */
-
- /* now we must fixup using wall to monotonic. We need to snapshot
- * that value and do the counter trick again. Fortunately, we still
- * have the counter value in r8 that was returned by __do_get_xsec.
- * At this point, r3,r4 contain our sec/nsec values, r5 and r6
- * can be used, r7 contains NSEC_PER_SEC.
- */
-
- lwz r5,(WTOM_CLOCK_SEC+LOPART)(r9)
- lwz r6,WTOM_CLOCK_NSEC(r9)
-
- /* We now have our offset in r5,r6. We create a fake dependency
- * on that value and re-check the counter
- */
- or r0,r6,r5
- xor r0,r0,r0
- add r9,r9,r0
- lwz r0,(CFG_TB_UPDATE_COUNT+LOPART)(r9)
- cmpl cr0,r8,r0 /* check if updated */
- bne- .Lprecise_clocks
- b .Lfinish_monotonic
-
- /*
- * For coarse clocks we get data directly from the vdso data page, so
- * we don't need to call __do_get_tspec, but we still need to do the
- * counter trick.
- */
-.Lcoarse_clocks:
- lwz r8,(CFG_TB_UPDATE_COUNT+LOPART)(r9)
- andi. r0,r8,1 /* pending update ? loop */
- bne- .Lcoarse_clocks
- add r9,r9,r0 /* r0 is already 0 */
-
- /*
- * CLOCK_REALTIME_COARSE, below values are needed for MONOTONIC_COARSE
- * too
- */
- lwz r3,STAMP_XTIME_SEC+LOPART(r9)
- lwz r4,STAMP_XTIME_NSEC+LOPART(r9)
- bne cr6,1f
-
- /* CLOCK_MONOTONIC_COARSE */
- lwz r5,(WTOM_CLOCK_SEC+LOPART)(r9)
- lwz r6,WTOM_CLOCK_NSEC(r9)
-
- /* check if counter has updated */
- or r0,r6,r5
-1: or r0,r0,r3
- or r0,r0,r4
- xor r0,r0,r0
- add r3,r3,r0
- lwz r0,CFG_TB_UPDATE_COUNT+LOPART(r9)
- cmpl cr0,r0,r8 /* check if updated */
- bne- .Lcoarse_clocks
-
- /* Counter has not updated, so continue calculating proper values for
- * sec and nsec if monotonic coarse, or just return with the proper
- * values for realtime.
- */
- bne cr6, .Lfinish
-
- /* Calculate and store result. Note that this mimics the C code,
- * which may cause funny results if nsec goes negative... is that
- * possible at all ?
- */
-.Lfinish_monotonic:
- add r3,r3,r5
- add r4,r4,r6
- cmpw cr0,r4,r7
- cmpwi cr1,r4,0
- blt 1f
- subf r4,r7,r4
- addi r3,r3,1
-1: bge cr1, .Lfinish
- addi r3,r3,-1
- add r4,r4,r7
-
-.Lfinish:
- stw r3,TSPC32_TV_SEC(r11)
- stw r4,TSPC32_TV_NSEC(r11)
-
- mtlr r12
- crclr cr0*4+so
- li r3,0
- blr
-
- /*
- * syscall fallback
- */
-.Lgettime_fallback:
- li r0,__NR_clock_gettime
- .cfi_restore lr
- sc
- blr
- .cfi_endproc
-V_FUNCTION_END(__kernel_clock_gettime)
-
-
-/*
- * Exact prototype of clock_getres()
- *
- * int __kernel_clock_getres(clockid_t clock_id, struct timespec *res);
- *
- */
-V_FUNCTION_BEGIN(__kernel_clock_getres)
- .cfi_startproc
- /* Check for supported clock IDs */
- cmplwi cr0, r3, CLOCK_MAX
- cmpwi cr1, r3, CLOCK_REALTIME_COARSE
- cmpwi cr7, r3, CLOCK_MONOTONIC_COARSE
- bgt cr0, 99f
- LOAD_REG_IMMEDIATE(r5, KTIME_LOW_RES)
- beq cr1, 1f
- beq cr7, 1f
-
- mflr r12
- .cfi_register lr,r12
- get_datapage r3, r0
- lwz r5, CLOCK_HRTIMER_RES(r3)
- mtlr r12
-1: li r3,0
- cmpli cr0,r4,0
- crclr cr0*4+so
- beqlr
- stw r3,TSPC32_TV_SEC(r4)
- stw r5,TSPC32_TV_NSEC(r4)
- blr
-
- /*
- * invalid clock
- */
-99:
- li r3, EINVAL
- crset so
- blr
- .cfi_endproc
-V_FUNCTION_END(__kernel_clock_getres)
-
-
-/*
- * Exact prototype of time()
- *
- * time_t time(time *t);
- *
- */
-V_FUNCTION_BEGIN(__kernel_time)
- .cfi_startproc
- mflr r12
- .cfi_register lr,r12
-
- mr r11,r3 /* r11 holds t */
- get_datapage r9, r0
-
- lwz r3,STAMP_XTIME_SEC+LOPART(r9)
-
- cmplwi r11,0 /* check if t is NULL */
- mtlr r12
- crclr cr0*4+so
- beqlr
- stw r3,0(r11) /* store result at *t */
- blr
- .cfi_endproc
-V_FUNCTION_END(__kernel_time)
-
-/*
- * This is the core of clock_gettime() and gettimeofday(),
- * it returns the current time in r3 (seconds) and r4.
- * On entry, r7 gives the resolution of r4, either USEC_PER_SEC
- * or NSEC_PER_SEC, giving r4 in microseconds or nanoseconds.
- * It expects the datapage ptr in r9 and doesn't clobber it.
- * It clobbers r0, r5 and r6.
- * On return, r8 contains the counter value that can be reused.
- * This clobbers cr0 but not any other cr field.
- */
-__do_get_tspec:
- .cfi_startproc
- /* Check for update count & load values. We use the low
- * order 32 bits of the update count
- */
-1: lwz r8,(CFG_TB_UPDATE_COUNT+LOPART)(r9)
- andi. r0,r8,1 /* pending update ? loop */
- bne- 1b
- xor r0,r8,r8 /* create dependency */
- add r9,r9,r0
-
- /* Load orig stamp (offset to TB) */
- lwz r5,CFG_TB_ORIG_STAMP(r9)
- lwz r6,(CFG_TB_ORIG_STAMP+4)(r9)
-
- /* Get a stable TB value */
-2: MFTBU(r3)
- MFTBL(r4)
- MFTBU(r0)
- cmplw cr0,r3,r0
- bne- 2b
-
- /* Subtract tb orig stamp and shift left 12 bits.
- */
- subfc r4,r6,r4
- subfe r0,r5,r3
- slwi r0,r0,12
- rlwimi. r0,r4,12,20,31
- slwi r4,r4,12
-
- /*
- * Load scale factor & do multiplication.
- * We only use the high 32 bits of the tb_to_xs value.
- * Even with a 1GHz timebase clock, the high 32 bits of
- * tb_to_xs will be at least 4 million, so the error from
- * ignoring the low 32 bits will be no more than 0.25ppm.
- * The error will just make the clock run very very slightly
- * slow until the next time the kernel updates the VDSO data,
- * at which point the clock will catch up to the kernel's value,
- * so there is no long-term error accumulation.
- */
- lwz r5,CFG_TB_TO_XS(r9) /* load values */
- mulhwu r4,r4,r5
- li r3,0
-
- beq+ 4f /* skip high part computation if 0 */
- mulhwu r3,r0,r5
- mullw r5,r0,r5
- addc r4,r4,r5
- addze r3,r3
-4:
- /* At this point, we have seconds since the xtime stamp
- * as a 32.32 fixed-point number in r3 and r4.
- * Load & add the xtime stamp.
- */
- lwz r5,STAMP_XTIME_SEC+LOPART(r9)
- lwz r6,STAMP_SEC_FRAC(r9)
- addc r4,r4,r6
- adde r3,r3,r5
-
- /* We create a fake dependency on the result in r3/r4
- * and re-check the counter
- */
- or r6,r4,r3
- xor r0,r6,r6
- add r9,r9,r0
- lwz r0,(CFG_TB_UPDATE_COUNT+LOPART)(r9)
- cmplw cr0,r8,r0 /* check if updated */
- bne- 1b
-
- mulhwu r4,r4,r7 /* convert to micro or nanoseconds */
-
- blr
- .cfi_endproc
diff --git a/arch/powerpc/kernel/vdso32/vdso32_wrapper.S b/arch/powerpc/kernel/vdso32_wrapper.S
index 3f5ef035b0a9..10f92f265d51 100644
--- a/arch/powerpc/kernel/vdso32/vdso32_wrapper.S
+++ b/arch/powerpc/kernel/vdso32_wrapper.S
@@ -7,7 +7,7 @@
.globl vdso32_start, vdso32_end
.balign PAGE_SIZE
vdso32_start:
- .incbin "arch/powerpc/kernel/vdso32/vdso32.so.dbg"
+ .incbin "arch/powerpc/kernel/vdso/vdso32.so.dbg"
.balign PAGE_SIZE
vdso32_end:
diff --git a/arch/powerpc/kernel/vdso64/.gitignore b/arch/powerpc/kernel/vdso64/.gitignore
deleted file mode 100644
index 77a0b423642c..000000000000
--- a/arch/powerpc/kernel/vdso64/.gitignore
+++ /dev/null
@@ -1,2 +0,0 @@
-vdso64.lds
-vdso64.so.dbg
diff --git a/arch/powerpc/kernel/vdso64/Makefile b/arch/powerpc/kernel/vdso64/Makefile
deleted file mode 100644
index 32ebb3522ea1..000000000000
--- a/arch/powerpc/kernel/vdso64/Makefile
+++ /dev/null
@@ -1,47 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-# List of files in the vdso, has to be asm only for now
-
-obj-vdso64 = sigtramp.o gettimeofday.o datapage.o cacheflush.o note.o getcpu.o
-
-# Build rules
-
-targets := $(obj-vdso64) vdso64.so vdso64.so.dbg
-obj-vdso64 := $(addprefix $(obj)/, $(obj-vdso64))
-
-GCOV_PROFILE := n
-KCOV_INSTRUMENT := n
-UBSAN_SANITIZE := n
-
-ccflags-y := -shared -fno-common -fno-builtin -nostdlib \
- -Wl,-soname=linux-vdso64.so.1 -Wl,--hash-style=both
-asflags-y := -D__VDSO64__ -s
-
-obj-y += vdso64_wrapper.o
-extra-y += vdso64.lds
-CPPFLAGS_vdso64.lds += -P -C -U$(ARCH)
-
-# Force dependency (incbin is bad)
-$(obj)/vdso64_wrapper.o : $(obj)/vdso64.so
-
-# link rule for the .so file, .lds has to be first
-$(obj)/vdso64.so.dbg: $(src)/vdso64.lds $(obj-vdso64) FORCE
- $(call if_changed,vdso64ld)
-
-# strip rule for the .so file
-$(obj)/%.so: OBJCOPYFLAGS := -S
-$(obj)/%.so: $(obj)/%.so.dbg FORCE
- $(call if_changed,objcopy)
-
-# actual build commands
-quiet_cmd_vdso64ld = VDSO64L $@
- cmd_vdso64ld = $(CC) $(c_flags) -o $@ -Wl,-T$(filter %.lds,$^) $(filter %.o,$^)
-
-# install commands for the unstripped file
-quiet_cmd_vdso_install = INSTALL $@
- cmd_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/vdso/$@
-
-vdso64.so: $(obj)/vdso64.so.dbg
- @mkdir -p $(MODLIB)/vdso
- $(call cmd,vdso_install)
-
-vdso_install: vdso64.so
diff --git a/arch/powerpc/kernel/vdso64/cacheflush.S b/arch/powerpc/kernel/vdso64/cacheflush.S
deleted file mode 100644
index 526f5ba2593e..000000000000
--- a/arch/powerpc/kernel/vdso64/cacheflush.S
+++ /dev/null
@@ -1,80 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * vDSO provided cache flush routines
- *
- * Copyright (C) 2004 Benjamin Herrenschmuidt (benh@kernel.crashing.org),
- * IBM Corp.
- */
-#include <asm/processor.h>
-#include <asm/ppc_asm.h>
-#include <asm/vdso.h>
-#include <asm/asm-offsets.h>
-
- .text
-
-/*
- * Default "generic" version of __kernel_sync_dicache.
- *
- * void __kernel_sync_dicache(unsigned long start, unsigned long end)
- *
- * Flushes the data cache & invalidate the instruction cache for the
- * provided range [start, end[
- */
-V_FUNCTION_BEGIN(__kernel_sync_dicache)
- .cfi_startproc
- mflr r12
- .cfi_register lr,r12
- mr r11,r3
- bl V_LOCAL_FUNC(__get_datapage)
- mtlr r12
- mr r10,r3
-
- lwz r7,CFG_DCACHE_BLOCKSZ(r10)
- addi r5,r7,-1
- andc r6,r11,r5 /* round low to line bdy */
- subf r8,r6,r4 /* compute length */
- add r8,r8,r5 /* ensure we get enough */
- lwz r9,CFG_DCACHE_LOGBLOCKSZ(r10)
- srd. r8,r8,r9 /* compute line count */
- crclr cr0*4+so
- beqlr /* nothing to do? */
- mtctr r8
-1: dcbst 0,r6
- add r6,r6,r7
- bdnz 1b
- sync
-
-/* Now invalidate the instruction cache */
-
- lwz r7,CFG_ICACHE_BLOCKSZ(r10)
- addi r5,r7,-1
- andc r6,r11,r5 /* round low to line bdy */
- subf r8,r6,r4 /* compute length */
- add r8,r8,r5
- lwz r9,CFG_ICACHE_LOGBLOCKSZ(r10)
- srd. r8,r8,r9 /* compute line count */
- crclr cr0*4+so
- beqlr /* nothing to do? */
- mtctr r8
-2: icbi 0,r6
- add r6,r6,r7
- bdnz 2b
- isync
- li r3,0
- blr
- .cfi_endproc
-V_FUNCTION_END(__kernel_sync_dicache)
-
-
-/*
- * POWER5 version of __kernel_sync_dicache
- */
-V_FUNCTION_BEGIN(__kernel_sync_dicache_p5)
- .cfi_startproc
- crclr cr0*4+so
- sync
- isync
- li r3,0
- blr
- .cfi_endproc
-V_FUNCTION_END(__kernel_sync_dicache_p5)
diff --git a/arch/powerpc/kernel/vdso64/datapage.S b/arch/powerpc/kernel/vdso64/datapage.S
deleted file mode 100644
index dc84f5ae3802..000000000000
--- a/arch/powerpc/kernel/vdso64/datapage.S
+++ /dev/null
@@ -1,84 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Access to the shared data page by the vDSO & syscall map
- *
- * Copyright (C) 2004 Benjamin Herrenschmuidt (benh@kernel.crashing.org), IBM Corp.
- */
-
-#include <asm/processor.h>
-#include <asm/ppc_asm.h>
-#include <asm/asm-offsets.h>
-#include <asm/unistd.h>
-#include <asm/vdso.h>
-
- .text
-.global __kernel_datapage_offset;
-__kernel_datapage_offset:
- .long 0
-
-V_FUNCTION_BEGIN(__get_datapage)
- .cfi_startproc
- /* We don't want that exposed or overridable as we want other objects
- * to be able to bl directly to here
- */
- .protected __get_datapage
- .hidden __get_datapage
-
- mflr r0
- .cfi_register lr,r0
-
- bcl 20,31,data_page_branch
-data_page_branch:
- mflr r3
- mtlr r0
- addi r3, r3, __kernel_datapage_offset-data_page_branch
- lwz r0,0(r3)
- .cfi_restore lr
- add r3,r0,r3
- blr
- .cfi_endproc
-V_FUNCTION_END(__get_datapage)
-
-/*
- * void *__kernel_get_syscall_map(unsigned int *syscall_count) ;
- *
- * returns a pointer to the syscall map. the map is agnostic to the
- * size of "long", unlike kernel bitops, it stores bits from top to
- * bottom so that memory actually contains a linear bitmap
- * check for syscall N by testing bit (0x80000000 >> (N & 0x1f)) of
- * 32 bits int at N >> 5.
- */
-V_FUNCTION_BEGIN(__kernel_get_syscall_map)
- .cfi_startproc
- mflr r12
- .cfi_register lr,r12
- mr r4,r3
- bl V_LOCAL_FUNC(__get_datapage)
- mtlr r12
- addi r3,r3,CFG_SYSCALL_MAP64
- cmpldi cr0,r4,0
- crclr cr0*4+so
- beqlr
- li r0,NR_syscalls
- stw r0,0(r4)
- blr
- .cfi_endproc
-V_FUNCTION_END(__kernel_get_syscall_map)
-
-
-/*
- * void unsigned long __kernel_get_tbfreq(void);
- *
- * returns the timebase frequency in HZ
- */
-V_FUNCTION_BEGIN(__kernel_get_tbfreq)
- .cfi_startproc
- mflr r12
- .cfi_register lr,r12
- bl V_LOCAL_FUNC(__get_datapage)
- ld r3,CFG_TB_TICKS_PER_SEC(r3)
- mtlr r12
- crclr cr0*4+so
- blr
- .cfi_endproc
-V_FUNCTION_END(__kernel_get_tbfreq)
diff --git a/arch/powerpc/kernel/vdso64/getcpu.S b/arch/powerpc/kernel/vdso64/getcpu.S
deleted file mode 100644
index 12bbf236cdc4..000000000000
--- a/arch/powerpc/kernel/vdso64/getcpu.S
+++ /dev/null
@@ -1,33 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- *
- * Copyright (C) IBM Corporation, 2012
- *
- * Author: Anton Blanchard <anton@au.ibm.com>
- */
-#include <asm/ppc_asm.h>
-#include <asm/vdso.h>
-
- .text
-/*
- * Exact prototype of getcpu
- *
- * int __kernel_getcpu(unsigned *cpu, unsigned *node);
- *
- */
-V_FUNCTION_BEGIN(__kernel_getcpu)
- .cfi_startproc
- mfspr r5,SPRN_SPRG_VDSO_READ
- cmpdi cr0,r3,0
- cmpdi cr1,r4,0
- clrlwi r6,r5,16
- rlwinm r7,r5,16,31-15,31-0
- beq cr0,1f
- stw r6,0(r3)
-1: beq cr1,2f
- stw r7,0(r4)
-2: crclr cr0*4+so
- li r3,0 /* always success */
- blr
- .cfi_endproc
-V_FUNCTION_END(__kernel_getcpu)
diff --git a/arch/powerpc/kernel/vdso64/gettimeofday.S b/arch/powerpc/kernel/vdso64/gettimeofday.S
deleted file mode 100644
index 1c9a04703250..000000000000
--- a/arch/powerpc/kernel/vdso64/gettimeofday.S
+++ /dev/null
@@ -1,289 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Userland implementation of gettimeofday() for 64 bits processes in a
- * ppc64 kernel for use in the vDSO
- *
- * Copyright (C) 2004 Benjamin Herrenschmuidt (benh@kernel.crashing.org),
- * IBM Corp.
- */
-#include <asm/processor.h>
-#include <asm/ppc_asm.h>
-#include <asm/vdso.h>
-#include <asm/asm-offsets.h>
-#include <asm/unistd.h>
-
- .text
-/*
- * Exact prototype of gettimeofday
- *
- * int __kernel_gettimeofday(struct timeval *tv, struct timezone *tz);
- *
- */
-V_FUNCTION_BEGIN(__kernel_gettimeofday)
- .cfi_startproc
- mflr r12
- .cfi_register lr,r12
-
- mr r11,r3 /* r11 holds tv */
- mr r10,r4 /* r10 holds tz */
- bl V_LOCAL_FUNC(__get_datapage) /* get data page */
- cmpldi r11,0 /* check if tv is NULL */
- beq 2f
- lis r7,1000000@ha /* load up USEC_PER_SEC */
- addi r7,r7,1000000@l
- bl V_LOCAL_FUNC(__do_get_tspec) /* get sec/us from tb & kernel */
- std r4,TVAL64_TV_SEC(r11) /* store sec in tv */
- std r5,TVAL64_TV_USEC(r11) /* store usec in tv */
-2: cmpldi r10,0 /* check if tz is NULL */
- beq 1f
- lwz r4,CFG_TZ_MINUTEWEST(r3)/* fill tz */
- lwz r5,CFG_TZ_DSTTIME(r3)
- stw r4,TZONE_TZ_MINWEST(r10)
- stw r5,TZONE_TZ_DSTTIME(r10)
-1: mtlr r12
- crclr cr0*4+so
- li r3,0 /* always success */
- blr
- .cfi_endproc
-V_FUNCTION_END(__kernel_gettimeofday)
-
-
-/*
- * Exact prototype of clock_gettime()
- *
- * int __kernel_clock_gettime(clockid_t clock_id, struct timespec *tp);
- *
- */
-V_FUNCTION_BEGIN(__kernel_clock_gettime)
- .cfi_startproc
- /* Check for supported clock IDs */
- cmpwi cr0,r3,CLOCK_REALTIME
- cmpwi cr1,r3,CLOCK_MONOTONIC
- cror cr0*4+eq,cr0*4+eq,cr1*4+eq
-
- cmpwi cr5,r3,CLOCK_REALTIME_COARSE
- cmpwi cr6,r3,CLOCK_MONOTONIC_COARSE
- cror cr5*4+eq,cr5*4+eq,cr6*4+eq
-
- cror cr0*4+eq,cr0*4+eq,cr5*4+eq
- bne cr0,99f
-
- mflr r12 /* r12 saves lr */
- .cfi_register lr,r12
- mr r11,r4 /* r11 saves tp */
- bl V_LOCAL_FUNC(__get_datapage) /* get data page */
- lis r7,NSEC_PER_SEC@h /* want nanoseconds */
- ori r7,r7,NSEC_PER_SEC@l
- beq cr5,70f
-50: bl V_LOCAL_FUNC(__do_get_tspec) /* get time from tb & kernel */
- bne cr1,80f /* if not monotonic, all done */
-
- /*
- * CLOCK_MONOTONIC
- */
-
- /* now we must fixup using wall to monotonic. We need to snapshot
- * that value and do the counter trick again. Fortunately, we still
- * have the counter value in r8 that was returned by __do_get_tspec.
- * At this point, r4,r5 contain our sec/nsec values.
- */
-
- ld r6,WTOM_CLOCK_SEC(r3)
- lwa r9,WTOM_CLOCK_NSEC(r3)
-
- /* We now have our result in r6,r9. We create a fake dependency
- * on that result and re-check the counter
- */
- or r0,r6,r9
- xor r0,r0,r0
- add r3,r3,r0
- ld r0,CFG_TB_UPDATE_COUNT(r3)
- cmpld cr0,r0,r8 /* check if updated */
- bne- 50b
- b 78f
-
- /*
- * For coarse clocks we get data directly from the vdso data page, so
- * we don't need to call __do_get_tspec, but we still need to do the
- * counter trick.
- */
-70: ld r8,CFG_TB_UPDATE_COUNT(r3)
- andi. r0,r8,1 /* pending update ? loop */
- bne- 70b
- add r3,r3,r0 /* r0 is already 0 */
-
- /*
- * CLOCK_REALTIME_COARSE, below values are needed for MONOTONIC_COARSE
- * too
- */
- ld r4,STAMP_XTIME_SEC(r3)
- ld r5,STAMP_XTIME_NSEC(r3)
- bne cr6,75f
-
- /* CLOCK_MONOTONIC_COARSE */
- ld r6,WTOM_CLOCK_SEC(r3)
- lwa r9,WTOM_CLOCK_NSEC(r3)
-
- /* check if counter has updated */
- or r0,r6,r9
-75: or r0,r0,r4
- or r0,r0,r5
- xor r0,r0,r0
- add r3,r3,r0
- ld r0,CFG_TB_UPDATE_COUNT(r3)
- cmpld cr0,r0,r8 /* check if updated */
- bne- 70b
-
- /* Counter has not updated, so continue calculating proper values for
- * sec and nsec if monotonic coarse, or just return with the proper
- * values for realtime.
- */
- bne cr6,80f
-
- /* Add wall->monotonic offset and check for overflow or underflow */
-78: add r4,r4,r6
- add r5,r5,r9
- cmpd cr0,r5,r7
- cmpdi cr1,r5,0
- blt 79f
- subf r5,r7,r5
- addi r4,r4,1
-79: bge cr1,80f
- addi r4,r4,-1
- add r5,r5,r7
-
-80: std r4,TSPC64_TV_SEC(r11)
- std r5,TSPC64_TV_NSEC(r11)
-
- mtlr r12
- crclr cr0*4+so
- li r3,0
- blr
-
- /*
- * syscall fallback
- */
-99:
- li r0,__NR_clock_gettime
- .cfi_restore lr
- sc
- blr
- .cfi_endproc
-V_FUNCTION_END(__kernel_clock_gettime)
-
-
-/*
- * Exact prototype of clock_getres()
- *
- * int __kernel_clock_getres(clockid_t clock_id, struct timespec *res);
- *
- */
-V_FUNCTION_BEGIN(__kernel_clock_getres)
- .cfi_startproc
- /* Check for supported clock IDs */
- cmpwi cr0,r3,CLOCK_REALTIME
- cmpwi cr1,r3,CLOCK_MONOTONIC
- cror cr0*4+eq,cr0*4+eq,cr1*4+eq
- bne cr0,99f
-
- mflr r12
- .cfi_register lr,r12
- bl V_LOCAL_FUNC(__get_datapage)
- lwz r5, CLOCK_HRTIMER_RES(r3)
- mtlr r12
- li r3,0
- cmpldi cr0,r4,0
- crclr cr0*4+so
- beqlr
- std r3,TSPC64_TV_SEC(r4)
- std r5,TSPC64_TV_NSEC(r4)
- blr
-
- /*
- * syscall fallback
- */
-99:
- li r0,__NR_clock_getres
- sc
- blr
- .cfi_endproc
-V_FUNCTION_END(__kernel_clock_getres)
-
-/*
- * Exact prototype of time()
- *
- * time_t time(time *t);
- *
- */
-V_FUNCTION_BEGIN(__kernel_time)
- .cfi_startproc
- mflr r12
- .cfi_register lr,r12
-
- mr r11,r3 /* r11 holds t */
- bl V_LOCAL_FUNC(__get_datapage)
-
- ld r4,STAMP_XTIME_SEC(r3)
-
- cmpldi r11,0 /* check if t is NULL */
- beq 2f
- std r4,0(r11) /* store result at *t */
-2: mtlr r12
- crclr cr0*4+so
- mr r3,r4
- blr
- .cfi_endproc
-V_FUNCTION_END(__kernel_time)
-
-
-/*
- * This is the core of clock_gettime() and gettimeofday(),
- * it returns the current time in r4 (seconds) and r5.
- * On entry, r7 gives the resolution of r5, either USEC_PER_SEC
- * or NSEC_PER_SEC, giving r5 in microseconds or nanoseconds.
- * It expects the datapage ptr in r3 and doesn't clobber it.
- * It clobbers r0, r6 and r9.
- * On return, r8 contains the counter value that can be reused.
- * This clobbers cr0 but not any other cr field.
- */
-V_FUNCTION_BEGIN(__do_get_tspec)
- .cfi_startproc
- /* check for update count & load values */
-1: ld r8,CFG_TB_UPDATE_COUNT(r3)
- andi. r0,r8,1 /* pending update ? loop */
- bne- 1b
- xor r0,r8,r8 /* create dependency */
- add r3,r3,r0
-
- /* Get TB & offset it. We use the MFTB macro which will generate
- * workaround code for Cell.
- */
- MFTB(r6)
- ld r9,CFG_TB_ORIG_STAMP(r3)
- subf r6,r9,r6
-
- /* Scale result */
- ld r5,CFG_TB_TO_XS(r3)
- sldi r6,r6,12 /* compute time since stamp_xtime */
- mulhdu r6,r6,r5 /* in units of 2^-32 seconds */
-
- /* Add stamp since epoch */
- ld r4,STAMP_XTIME_SEC(r3)
- lwz r5,STAMP_SEC_FRAC(r3)
- or r0,r4,r5
- or r0,r0,r6
- xor r0,r0,r0
- add r3,r3,r0
- ld r0,CFG_TB_UPDATE_COUNT(r3)
- cmpld r0,r8 /* check if updated */
- bne- 1b /* reload if so */
-
- /* convert to seconds & nanoseconds and add to stamp */
- add r6,r6,r5 /* add on fractional seconds of xtime */
- mulhwu r5,r6,r7 /* compute micro or nanoseconds and */
- srdi r6,r6,32 /* seconds since stamp_xtime */
- clrldi r5,r5,32
- add r4,r4,r6
- blr
- .cfi_endproc
-V_FUNCTION_END(__do_get_tspec)
diff --git a/arch/powerpc/kernel/vdso64/note.S b/arch/powerpc/kernel/vdso64/note.S
deleted file mode 100644
index dc2a509f7e8a..000000000000
--- a/arch/powerpc/kernel/vdso64/note.S
+++ /dev/null
@@ -1 +0,0 @@
-#include "../vdso32/note.S"
diff --git a/arch/powerpc/kernel/vdso64/vdso64_wrapper.S b/arch/powerpc/kernel/vdso64_wrapper.S
index 1d56d81fe3b3..839d1a61411d 100644
--- a/arch/powerpc/kernel/vdso64/vdso64_wrapper.S
+++ b/arch/powerpc/kernel/vdso64_wrapper.S
@@ -7,7 +7,7 @@
.globl vdso64_start, vdso64_end
.balign PAGE_SIZE
vdso64_start:
- .incbin "arch/powerpc/kernel/vdso64/vdso64.so.dbg"
+ .incbin "arch/powerpc/kernel/vdso/vdso64.so.dbg"
.balign PAGE_SIZE
vdso64_end:
diff --git a/arch/powerpc/kernel/vecemu.c b/arch/powerpc/kernel/vecemu.c
index 4acd3fb2b38e..fd9432875ebc 100644
--- a/arch/powerpc/kernel/vecemu.c
+++ b/arch/powerpc/kernel/vecemu.c
@@ -10,6 +10,7 @@
#include <asm/processor.h>
#include <asm/switch_to.h>
#include <linux/uaccess.h>
+#include <asm/inst.h>
/* Functions in vector.S */
extern void vaddfp(vector128 *dst, vector128 *a, vector128 *b);
@@ -260,21 +261,24 @@ static unsigned int rfin(unsigned int x)
int emulate_altivec(struct pt_regs *regs)
{
- unsigned int instr, i;
+ ppc_inst_t instr;
+ unsigned int i, word;
unsigned int va, vb, vc, vd;
vector128 *vrs;
- if (get_user(instr, (unsigned int __user *) regs->nip))
+ if (get_user_instr(instr, (void __user *)regs->nip))
return -EFAULT;
- if ((instr >> 26) != 4)
+
+ word = ppc_inst_val(instr);
+ if (ppc_inst_primary_opcode(instr) != 4)
return -EINVAL; /* not an altivec instruction */
- vd = (instr >> 21) & 0x1f;
- va = (instr >> 16) & 0x1f;
- vb = (instr >> 11) & 0x1f;
- vc = (instr >> 6) & 0x1f;
+ vd = (word >> 21) & 0x1f;
+ va = (word >> 16) & 0x1f;
+ vb = (word >> 11) & 0x1f;
+ vc = (word >> 6) & 0x1f;
vrs = current->thread.vr_state.vr;
- switch (instr & 0x3f) {
+ switch (word & 0x3f) {
case 10:
switch (vc) {
case 0: /* vaddfp */
diff --git a/arch/powerpc/kernel/vector.S b/arch/powerpc/kernel/vector.S
index 25c14a0981bf..5cf64740edb8 100644
--- a/arch/powerpc/kernel/vector.S
+++ b/arch/powerpc/kernel/vector.S
@@ -47,6 +47,10 @@ EXPORT_SYMBOL(store_vr_state)
*/
_GLOBAL(load_up_altivec)
mfmsr r5 /* grab the current MSR */
+#ifdef CONFIG_PPC_BOOK3S_64
+ /* interrupt doesn't set MSR[RI] and HPT can fault on current access */
+ ori r5,r5,MSR_RI
+#endif
oris r5,r5,MSR_VEC@h
MTMSRD(r5) /* enable use of AltiVec now */
isync
@@ -65,23 +69,21 @@ _GLOBAL(load_up_altivec)
1:
/* enable use of VMX after return */
#ifdef CONFIG_PPC32
- mfspr r5,SPRN_SPRG_THREAD /* current task's THREAD (phys) */
+ addi r5,r2,THREAD
oris r9,r9,MSR_VEC@h
-#ifdef CONFIG_VMAP_STACK
- tovirt(r5, r5)
-#endif
#else
ld r4,PACACURRENT(r13)
addi r5,r4,THREAD /* Get THREAD */
oris r12,r12,MSR_VEC@h
std r12,_MSR(r1)
+#ifdef CONFIG_PPC_BOOK3S_64
+ li r4,0
+ stb r4,PACASRR_VALID(r13)
+#endif
#endif
- /* Don't care if r4 overflows, this is desired behaviour */
- lbz r4,THREAD_LOAD_VEC(r5)
- addi r4,r4,1
+ li r4,1
stb r4,THREAD_LOAD_VEC(r5)
addi r6,r5,THREAD_VRSTATE
- li r4,1
li r10,VRSTATE_VSCR
stw r4,THREAD_USED_VR(r5)
lvx v0,r10,r6
@@ -89,6 +91,7 @@ _GLOBAL(load_up_altivec)
REST_32VRS(0,r4,r6)
/* restore registers and return */
blr
+_ASM_NOKPROBE_SYMBOL(load_up_altivec)
/*
* save_altivec(tsk)
@@ -127,6 +130,12 @@ _GLOBAL(load_up_vsx)
andis. r5,r12,MSR_VEC@h
beql+ load_up_altivec /* skip if already loaded */
+#ifdef CONFIG_PPC_BOOK3S_64
+ /* interrupt doesn't set MSR[RI] and HPT can fault on current access */
+ li r5,MSR_RI
+ mtmsrd r5,1
+#endif
+
ld r4,PACACURRENT(r13)
addi r4,r4,THREAD /* Get THREAD */
li r6,1
@@ -134,7 +143,9 @@ _GLOBAL(load_up_vsx)
/* enable use of VSX after return */
oris r12,r12,MSR_VSX@h
std r12,_MSR(r1)
- b fast_exception_return
+ li r4,0
+ stb r4,PACASRR_VALID(r13)
+ b fast_interrupt_return_srr
#endif /* CONFIG_VSX */
@@ -144,8 +155,8 @@ _GLOBAL(load_up_vsx)
* usage of floating-point registers. These routines must be called
* with preempt disabled.
*/
-#ifdef CONFIG_PPC32
.data
+#ifdef CONFIG_PPC32
fpzero:
.long 0
fpone:
@@ -158,18 +169,17 @@ fphalf:
lfs fr,name@l(r11)
#else
- .section ".toc","aw"
fpzero:
- .tc FD_0_0[TC],0
+ .quad 0
fpone:
- .tc FD_3ff00000_0[TC],0x3ff0000000000000 /* 1.0 */
+ .quad 0x3ff0000000000000 /* 1.0 */
fphalf:
- .tc FD_3fe00000_0[TC],0x3fe0000000000000 /* 0.5 */
+ .quad 0x3fe0000000000000 /* 0.5 */
-#define LDCONST(fr, name) \
- lfd fr,name@toc(r2)
+#define LDCONST(fr, name) \
+ addis r11,r2,name@toc@ha; \
+ lfd fr,name@toc@l(r11)
#endif
-
.text
/*
* Internal routine to enable floating point and set FPSCR to 0.
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index a32d478a7f41..7786e3ac7611 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -9,13 +9,32 @@
#define EMITS_PT_NOTE
#define RO_EXCEPTION_TABLE_ALIGN 0
+#define SOFT_MASK_TABLE(align) \
+ . = ALIGN(align); \
+ __soft_mask_table : AT(ADDR(__soft_mask_table) - LOAD_OFFSET) { \
+ __start___soft_mask_table = .; \
+ KEEP(*(__soft_mask_table)) \
+ __stop___soft_mask_table = .; \
+ }
+
+#define RESTART_TABLE(align) \
+ . = ALIGN(align); \
+ __restart_table : AT(ADDR(__restart_table) - LOAD_OFFSET) { \
+ __start___restart_table = .; \
+ KEEP(*(__restart_table)) \
+ __stop___restart_table = .; \
+ }
+
#include <asm/page.h>
#include <asm-generic/vmlinux.lds.h>
#include <asm/cache.h>
#include <asm/thread_info.h>
#define STRICT_ALIGN_SIZE (1 << CONFIG_DATA_SHIFT)
-#define ETEXT_ALIGN_SIZE (1 << CONFIG_ETEXT_SHIFT)
+
+#if STRICT_ALIGN_SIZE < PAGE_SIZE
+#error "CONFIG_DATA_SHIFT must be >= PAGE_SHIFT"
+#endif
ENTRY(_stext)
@@ -52,7 +71,7 @@ SECTIONS
.head.text : AT(ADDR(.head.text) - LOAD_OFFSET) {
#ifdef CONFIG_PPC64
KEEP(*(.head.text.first_256B));
-#ifdef CONFIG_PPC_BOOK3E
+#ifdef CONFIG_PPC_BOOK3E_64
#else
KEEP(*(.head.text.real_vectors));
*(.head.text.real_trampolines);
@@ -86,10 +105,11 @@ SECTIONS
ALIGN_FUNCTION();
#endif
/* careful! __ftr_alt_* sections need to be close to .text */
- *(.text.hot TEXT_MAIN .text.fixup .text.unlikely .fixup __ftr_alt_* .ref.text);
+ *(.text.hot .text.hot.* TEXT_MAIN .text.fixup .text.unlikely .text.unlikely.* .fixup __ftr_alt_* .ref.text);
#ifdef CONFIG_PPC64
*(.tramp.ftrace.text);
#endif
+ NOINSTR_TEXT
SCHED_TEXT
CPUIDLE_TEXT
LOCK_TEXT
@@ -106,24 +126,67 @@ SECTIONS
*(.sfpr);
MEM_KEEP(init.text)
MEM_KEEP(exit.text)
+ } :text
+
+ . = ALIGN(PAGE_SIZE);
+ _etext = .;
+ PROVIDE32 (etext = .);
+
+ /* Read-only data */
+ RO_DATA(PAGE_SIZE)
#ifdef CONFIG_PPC32
+ .sdata2 : AT(ADDR(.sdata2) - LOAD_OFFSET) {
+ *(.sdata2)
+ }
+#endif
+
+ .data.rel.ro : AT(ADDR(.data.rel.ro) - LOAD_OFFSET) {
+ *(.data.rel.ro*)
+ }
+
+ .branch_lt : AT(ADDR(.branch_lt) - LOAD_OFFSET) {
+ *(.branch_lt)
+ }
+
+#ifdef CONFIG_PPC32
+ .got1 : AT(ADDR(.got1) - LOAD_OFFSET) {
*(.got1)
+ }
+ .got2 : AT(ADDR(.got2) - LOAD_OFFSET) {
__got2_start = .;
*(.got2)
__got2_end = .;
-#endif /* CONFIG_PPC32 */
+ }
+ .got : AT(ADDR(.got) - LOAD_OFFSET) {
+ *(.got)
+ *(.got.plt)
+ }
+ .plt : AT(ADDR(.plt) - LOAD_OFFSET) {
+ /* XXX: is .plt (and .got.plt) required? */
+ *(.plt)
+ }
- } :text
+#else /* CONFIG_PPC32 */
+ .toc1 : AT(ADDR(.toc1) - LOAD_OFFSET) {
+ *(.toc1)
+ }
- . = ALIGN(ETEXT_ALIGN_SIZE);
- _etext = .;
- PROVIDE32 (etext = .);
+ .got : AT(ADDR(.got) - LOAD_OFFSET) ALIGN(256) {
+ *(.got .toc)
+ }
- /* Read-only data */
- RO_DATA(PAGE_SIZE)
+ SOFT_MASK_TABLE(8)
+ RESTART_TABLE(8)
+
+#ifdef CONFIG_PPC64_ELF_ABI_V1
+ .opd : AT(ADDR(.opd) - LOAD_OFFSET) {
+ __start_opd = .;
+ KEEP(*(.opd))
+ __end_opd = .;
+ }
+#endif
-#ifdef CONFIG_PPC64
. = ALIGN(8);
__stf_entry_barrier_fixup : AT(ADDR(__stf_entry_barrier_fixup) - LOAD_OFFSET) {
__start___stf_entry_barrier_fixup = .;
@@ -132,6 +195,27 @@ SECTIONS
}
. = ALIGN(8);
+ __uaccess_flush_fixup : AT(ADDR(__uaccess_flush_fixup) - LOAD_OFFSET) {
+ __start___uaccess_flush_fixup = .;
+ *(__uaccess_flush_fixup)
+ __stop___uaccess_flush_fixup = .;
+ }
+
+ . = ALIGN(8);
+ __entry_flush_fixup : AT(ADDR(__entry_flush_fixup) - LOAD_OFFSET) {
+ __start___entry_flush_fixup = .;
+ *(__entry_flush_fixup)
+ __stop___entry_flush_fixup = .;
+ }
+
+ . = ALIGN(8);
+ __scv_entry_flush_fixup : AT(ADDR(__scv_entry_flush_fixup) - LOAD_OFFSET) {
+ __start___scv_entry_flush_fixup = .;
+ *(__scv_entry_flush_fixup)
+ __stop___scv_entry_flush_fixup = .;
+ }
+
+ . = ALIGN(8);
__stf_exit_barrier_fixup : AT(ADDR(__stf_exit_barrier_fixup) - LOAD_OFFSET) {
__start___stf_exit_barrier_fixup = .;
*(__stf_exit_barrier_fixup)
@@ -144,7 +228,7 @@ SECTIONS
*(__rfi_flush_fixup)
__stop___rfi_flush_fixup = .;
}
-#endif /* CONFIG_PPC64 */
+#endif /* CONFIG_PPC32 */
#ifdef CONFIG_PPC_BARRIER_NOSPEC
. = ALIGN(8);
@@ -155,7 +239,7 @@ SECTIONS
}
#endif /* CONFIG_PPC_BARRIER_NOSPEC */
-#ifdef CONFIG_PPC_FSL_BOOK3E
+#ifdef CONFIG_PPC_E500
. = ALIGN(8);
__spec_btb_flush_fixup : AT(ADDR(__spec_btb_flush_fixup) - LOAD_OFFSET) {
__start__btb_flush_fixup = .;
@@ -164,15 +248,26 @@ SECTIONS
}
#endif
+ /*
+ * Various code relies on __init_begin being at the strict RWX boundary.
+ */
+ . = ALIGN(STRICT_ALIGN_SIZE);
+ __srwx_boundary = .;
+ __end_rodata = .;
+ __init_begin = .;
+
/*
* Init sections discarded at runtime
*/
- . = ALIGN(STRICT_ALIGN_SIZE);
- __init_begin = .;
- . = ALIGN(PAGE_SIZE);
.init.text : AT(ADDR(.init.text) - LOAD_OFFSET) {
_sinittext = .;
INIT_TEXT
+
+ /*
+ *.init.text might be RO so we must ensure this section ends on
+ * a page boundary.
+ */
+ . = ALIGN(PAGE_SIZE);
_einittext = .;
#ifdef CONFIG_PPC64
*(.tramp.ftrace.init);
@@ -186,21 +281,9 @@ SECTIONS
EXIT_TEXT
}
- .init.data : AT(ADDR(.init.data) - LOAD_OFFSET) {
- INIT_DATA
- }
-
- .init.setup : AT(ADDR(.init.setup) - LOAD_OFFSET) {
- INIT_SETUP(16)
- }
+ . = ALIGN(PAGE_SIZE);
- .initcall.init : AT(ADDR(.initcall.init) - LOAD_OFFSET) {
- INIT_CALLS
- }
-
- .con_initcall.init : AT(ADDR(.con_initcall.init) - LOAD_OFFSET) {
- CON_INITCALL
- }
+ INIT_DATA_SECTION(16)
. = ALIGN(8);
__ftr_fixup : AT(ADDR(__ftr_fixup) - LOAD_OFFSET) {
@@ -228,9 +311,6 @@ SECTIONS
__stop___fw_ftr_fixup = .;
}
#endif
- .init.ramfs : AT(ADDR(.init.ramfs) - LOAD_OFFSET) {
- INIT_RAM_FS
- }
PERCPU_SECTION(L1_CACHE_BYTES)
@@ -244,9 +324,7 @@ SECTIONS
. = ALIGN(8);
.dynsym : AT(ADDR(.dynsym) - LOAD_OFFSET)
{
-#ifdef CONFIG_PPC32
__dynamic_symtab = .;
-#endif
*(.dynsym)
}
.dynstr : AT(ADDR(.dynstr) - LOAD_OFFSET) { *(.dynstr) }
@@ -256,6 +334,7 @@ SECTIONS
*(.dynamic)
}
.hash : AT(ADDR(.hash) - LOAD_OFFSET) { *(.hash) }
+ .gnu.hash : AT(ADDR(.gnu.hash) - LOAD_OFFSET) { *(.gnu.hash) }
.interp : AT(ADDR(.interp) - LOAD_OFFSET) { *(.interp) }
.rela.dyn : AT(ADDR(.rela.dyn) - LOAD_OFFSET)
{
@@ -281,53 +360,14 @@ SECTIONS
. = ALIGN(PAGE_SIZE);
_sdata = .;
-#ifdef CONFIG_PPC32
.data : AT(ADDR(.data) - LOAD_OFFSET) {
DATA_DATA
-#ifdef CONFIG_UBSAN
- *(.data..Lubsan_data*)
- *(.data..Lubsan_type*)
-#endif
*(.data.rel*)
+#ifdef CONFIG_PPC32
*(SDATA_MAIN)
- *(.sdata2)
- *(.got.plt) *(.got)
- *(.plt)
- *(.branch_lt)
- }
-#else
- .data : AT(ADDR(.data) - LOAD_OFFSET) {
- DATA_DATA
- *(.data.rel*)
- *(.toc1)
- *(.branch_lt)
- }
-
-#ifdef CONFIG_DEBUG_INFO_BTF
- .BTF : AT(ADDR(.BTF) - LOAD_OFFSET) {
- *(.BTF)
- }
#endif
-
- .opd : AT(ADDR(.opd) - LOAD_OFFSET) {
- __start_opd = .;
- KEEP(*(.opd))
- __end_opd = .;
}
- . = ALIGN(256);
- .got : AT(ADDR(.got) - LOAD_OFFSET) {
- __toc_start = .;
-#ifndef CONFIG_RELOCATABLE
- __prom_init_toc_start = .;
- arch/powerpc/kernel/prom_init.o*(.toc .got)
- __prom_init_toc_end = .;
-#endif
- *(.got)
- *(.toc)
- }
-#endif
-
/* The initial task and kernel stack */
INIT_TASK_DATA_SECTION(THREAD_ALIGN)
@@ -364,9 +404,8 @@ SECTIONS
_end = . ;
PROVIDE32 (end = .);
- STABS_DEBUG
-
DWARF_DEBUG
+ ELF_DETAILS
DISCARDS
/DISCARD/ : {
diff --git a/arch/powerpc/kernel/watchdog.c b/arch/powerpc/kernel/watchdog.c
index af3c15a1d41e..dbcc4a793f0b 100644
--- a/arch/powerpc/kernel/watchdog.c
+++ b/arch/powerpc/kernel/watchdog.c
@@ -24,9 +24,12 @@
#include <linux/kdebug.h>
#include <linux/sched/debug.h>
#include <linux/delay.h>
+#include <linux/processor.h>
#include <linux/smp.h>
+#include <asm/interrupt.h>
#include <asm/paca.h>
+#include <asm/nmi.h>
/*
* The powerpc watchdog ensures that each CPU is able to service timers.
@@ -53,7 +56,7 @@
* solved by also having a SMP watchdog where all CPUs check all other
* CPUs heartbeat.
*
- * The SMP checker can detect lockups on other CPUs. A gobal "pending"
+ * The SMP checker can detect lockups on other CPUs. A global "pending"
* cpumask is kept, containing all CPUs which enable the watchdog. Each
* CPU clears their pending bit in their heartbeat timer. When the bitmask
* becomes empty, the last CPU to clear its pending bit updates a global
@@ -82,10 +85,41 @@ static DEFINE_PER_CPU(u64, wd_timer_tb);
/* SMP checker bits */
static unsigned long __wd_smp_lock;
+static unsigned long __wd_reporting;
+static unsigned long __wd_nmi_output;
static cpumask_t wd_smp_cpus_pending;
static cpumask_t wd_smp_cpus_stuck;
static u64 wd_smp_last_reset_tb;
+#ifdef CONFIG_PPC_PSERIES
+static u64 wd_timeout_pct;
+#endif
+
+/*
+ * Try to take the exclusive watchdog action / NMI IPI / printing lock.
+ * wd_smp_lock must be held. If this fails, we should return and wait
+ * for the watchdog to kick in again (or another CPU to trigger it).
+ *
+ * Importantly, if hardlockup_panic is set, wd_try_report failure should
+ * not delay the panic, because whichever other CPU is reporting will
+ * call panic.
+ */
+static bool wd_try_report(void)
+{
+ if (__wd_reporting)
+ return false;
+ __wd_reporting = 1;
+ return true;
+}
+
+/* End printing after successful wd_try_report. wd_smp_lock not required. */
+static void wd_end_reporting(void)
+{
+ smp_mb(); /* End printing "critical section" */
+ WARN_ON_ONCE(__wd_reporting == 0);
+ WRITE_ONCE(__wd_reporting, 0);
+}
+
static inline void wd_smp_lock(unsigned long *flags)
{
/*
@@ -125,108 +159,182 @@ static void wd_lockup_ipi(struct pt_regs *regs)
else
dump_stack();
+ /*
+ * __wd_nmi_output must be set after we printk from NMI context.
+ *
+ * printk from NMI context defers printing to the console to irq_work.
+ * If that NMI was taken in some code that is hard-locked, then irqs
+ * are disabled so irq_work will never fire. That can result in the
+ * hard lockup messages being delayed (indefinitely, until something
+ * else kicks the console drivers).
+ *
+ * Setting __wd_nmi_output will cause another CPU to notice and kick
+ * the console drivers for us.
+ *
+ * xchg is not needed here (it could be a smp_mb and store), but xchg
+ * gives the memory ordering and atomicity required.
+ */
+ xchg(&__wd_nmi_output, 1);
+
/* Do not panic from here because that can recurse into NMI IPI layer */
}
-static void set_cpumask_stuck(const struct cpumask *cpumask, u64 tb)
+static bool set_cpu_stuck(int cpu)
{
- cpumask_or(&wd_smp_cpus_stuck, &wd_smp_cpus_stuck, cpumask);
- cpumask_andnot(&wd_smp_cpus_pending, &wd_smp_cpus_pending, cpumask);
+ cpumask_set_cpu(cpu, &wd_smp_cpus_stuck);
+ cpumask_clear_cpu(cpu, &wd_smp_cpus_pending);
+ /*
+ * See wd_smp_clear_cpu_pending()
+ */
+ smp_mb();
if (cpumask_empty(&wd_smp_cpus_pending)) {
- wd_smp_last_reset_tb = tb;
+ wd_smp_last_reset_tb = get_tb();
cpumask_andnot(&wd_smp_cpus_pending,
&wd_cpus_enabled,
&wd_smp_cpus_stuck);
+ return true;
}
-}
-static void set_cpu_stuck(int cpu, u64 tb)
-{
- set_cpumask_stuck(cpumask_of(cpu), tb);
+ return false;
}
-static void watchdog_smp_panic(int cpu, u64 tb)
+static void watchdog_smp_panic(int cpu)
{
+ static cpumask_t wd_smp_cpus_ipi; // protected by reporting
unsigned long flags;
+ u64 tb, last_reset;
int c;
wd_smp_lock(&flags);
/* Double check some things under lock */
- if ((s64)(tb - wd_smp_last_reset_tb) < (s64)wd_smp_panic_timeout_tb)
+ tb = get_tb();
+ last_reset = wd_smp_last_reset_tb;
+ if ((s64)(tb - last_reset) < (s64)wd_smp_panic_timeout_tb)
goto out;
if (cpumask_test_cpu(cpu, &wd_smp_cpus_pending))
goto out;
- if (cpumask_weight(&wd_smp_cpus_pending) == 0)
+ if (!wd_try_report())
+ goto out;
+ for_each_online_cpu(c) {
+ if (!cpumask_test_cpu(c, &wd_smp_cpus_pending))
+ continue;
+ if (c == cpu)
+ continue; // should not happen
+
+ __cpumask_set_cpu(c, &wd_smp_cpus_ipi);
+ if (set_cpu_stuck(c))
+ break;
+ }
+ if (cpumask_empty(&wd_smp_cpus_ipi)) {
+ wd_end_reporting();
goto out;
+ }
+ wd_smp_unlock(&flags);
pr_emerg("CPU %d detected hard LOCKUP on other CPUs %*pbl\n",
- cpu, cpumask_pr_args(&wd_smp_cpus_pending));
+ cpu, cpumask_pr_args(&wd_smp_cpus_ipi));
pr_emerg("CPU %d TB:%lld, last SMP heartbeat TB:%lld (%lldms ago)\n",
- cpu, tb, wd_smp_last_reset_tb,
- tb_to_ns(tb - wd_smp_last_reset_tb) / 1000000);
+ cpu, tb, last_reset, tb_to_ns(tb - last_reset) / 1000000);
if (!sysctl_hardlockup_all_cpu_backtrace) {
/*
* Try to trigger the stuck CPUs, unless we are going to
* get a backtrace on all of them anyway.
*/
- for_each_cpu(c, &wd_smp_cpus_pending) {
- if (c == cpu)
- continue;
+ for_each_cpu(c, &wd_smp_cpus_ipi) {
smp_send_nmi_ipi(c, wd_lockup_ipi, 1000000);
+ __cpumask_clear_cpu(c, &wd_smp_cpus_ipi);
}
- }
-
- /* Take the stuck CPUs out of the watch group */
- set_cpumask_stuck(&wd_smp_cpus_pending, tb);
-
- wd_smp_unlock(&flags);
-
- printk_safe_flush();
- /*
- * printk_safe_flush() seems to require another print
- * before anything actually goes out to console.
- */
- if (sysctl_hardlockup_all_cpu_backtrace)
+ } else {
trigger_allbutself_cpu_backtrace();
+ cpumask_clear(&wd_smp_cpus_ipi);
+ }
if (hardlockup_panic)
nmi_panic(NULL, "Hard LOCKUP");
+ wd_end_reporting();
+
return;
out:
wd_smp_unlock(&flags);
}
-static void wd_smp_clear_cpu_pending(int cpu, u64 tb)
+static void wd_smp_clear_cpu_pending(int cpu)
{
if (!cpumask_test_cpu(cpu, &wd_smp_cpus_pending)) {
if (unlikely(cpumask_test_cpu(cpu, &wd_smp_cpus_stuck))) {
struct pt_regs *regs = get_irq_regs();
unsigned long flags;
- wd_smp_lock(&flags);
-
pr_emerg("CPU %d became unstuck TB:%lld\n",
- cpu, tb);
+ cpu, get_tb());
print_irqtrace_events(current);
if (regs)
show_regs(regs);
else
dump_stack();
+ wd_smp_lock(&flags);
cpumask_clear_cpu(cpu, &wd_smp_cpus_stuck);
wd_smp_unlock(&flags);
+ } else {
+ /*
+ * The last CPU to clear pending should have reset the
+ * watchdog so we generally should not find it empty
+ * here if our CPU was clear. However it could happen
+ * due to a rare race with another CPU taking the
+ * last CPU out of the mask concurrently.
+ *
+ * We can't add a warning for it. But just in case
+ * there is a problem with the watchdog that is causing
+ * the mask to not be reset, try to kick it along here.
+ */
+ if (unlikely(cpumask_empty(&wd_smp_cpus_pending)))
+ goto none_pending;
}
return;
}
+
+ /*
+ * All other updates to wd_smp_cpus_pending are performed under
+ * wd_smp_lock. All of them are atomic except the case where the
+ * mask becomes empty and is reset. This will not happen here because
+ * cpu was tested to be in the bitmap (above), and a CPU only clears
+ * its own bit. _Except_ in the case where another CPU has detected a
+ * hard lockup on our CPU and takes us out of the pending mask. So in
+ * normal operation there will be no race here, no problem.
+ *
+ * In the lockup case, this atomic clear-bit vs a store that refills
+ * other bits in the accessed word will not be a problem. The bit clear
+ * is atomic so it will not cause the store to get lost, and the store
+ * will never set this bit so it will not overwrite the bit clear. The
+ * only way for a stuck CPU to return to the pending bitmap is to
+ * become unstuck itself.
+ */
cpumask_clear_cpu(cpu, &wd_smp_cpus_pending);
+
+ /*
+ * Order the store to clear pending with the load(s) to check all
+ * words in the pending mask to check they are all empty. This orders
+ * with the same barrier on another CPU. This prevents two CPUs
+ * clearing the last 2 pending bits, but neither seeing the other's
+ * store when checking if the mask is empty, and missing an empty
+ * mask, which ends with a false positive.
+ */
+ smp_mb();
if (cpumask_empty(&wd_smp_cpus_pending)) {
unsigned long flags;
+none_pending:
+ /*
+ * Double check under lock because more than one CPU could see
+ * a clear mask with the lockless check after clearing their
+ * pending bits.
+ */
wd_smp_lock(&flags);
if (cpumask_empty(&wd_smp_cpus_pending)) {
- wd_smp_last_reset_tb = tb;
+ wd_smp_last_reset_tb = get_tb();
cpumask_andnot(&wd_smp_cpus_pending,
&wd_cpus_enabled,
&wd_smp_cpus_stuck);
@@ -241,33 +349,60 @@ static void watchdog_timer_interrupt(int cpu)
per_cpu(wd_timer_tb, cpu) = tb;
- wd_smp_clear_cpu_pending(cpu, tb);
+ wd_smp_clear_cpu_pending(cpu);
if ((s64)(tb - wd_smp_last_reset_tb) >= (s64)wd_smp_panic_timeout_tb)
- watchdog_smp_panic(cpu, tb);
+ watchdog_smp_panic(cpu);
+
+ if (__wd_nmi_output && xchg(&__wd_nmi_output, 0)) {
+ /*
+ * Something has called printk from NMI context. It might be
+ * stuck, so this triggers a flush that will get that
+ * printk output to the console.
+ *
+ * See wd_lockup_ipi.
+ */
+ printk_trigger_flush();
+ }
}
-void soft_nmi_interrupt(struct pt_regs *regs)
+DEFINE_INTERRUPT_HANDLER_NMI(soft_nmi_interrupt)
{
unsigned long flags;
int cpu = raw_smp_processor_id();
u64 tb;
- if (!cpumask_test_cpu(cpu, &wd_cpus_enabled))
- return;
+ /* should only arrive from kernel, with irqs disabled */
+ WARN_ON_ONCE(!arch_irq_disabled_regs(regs));
- nmi_enter();
+ if (!cpumask_test_cpu(cpu, &wd_cpus_enabled))
+ return 0;
__this_cpu_inc(irq_stat.soft_nmi_irqs);
tb = get_tb();
if (tb - per_cpu(wd_timer_tb, cpu) >= wd_panic_timeout_tb) {
+ /*
+ * Taking wd_smp_lock here means it is a soft-NMI lock, which
+ * means we can't take any regular or irqsafe spin locks while
+ * holding this lock. This is why timers can't printk while
+ * holding the lock.
+ */
wd_smp_lock(&flags);
if (cpumask_test_cpu(cpu, &wd_smp_cpus_stuck)) {
wd_smp_unlock(&flags);
- goto out;
+ return 0;
+ }
+ if (!wd_try_report()) {
+ wd_smp_unlock(&flags);
+ /* Couldn't report, try again in 100ms */
+ mtspr(SPRN_DEC, 100 * tb_ticks_per_usec * 1000);
+ return 0;
}
- set_cpu_stuck(cpu, tb);
+
+ set_cpu_stuck(cpu);
+
+ wd_smp_unlock(&flags);
pr_emerg("CPU %d self-detected hard LOCKUP @ %pS\n",
cpu, (void *)regs->nip);
@@ -278,19 +413,25 @@ void soft_nmi_interrupt(struct pt_regs *regs)
print_irqtrace_events(current);
show_regs(regs);
- wd_smp_unlock(&flags);
+ xchg(&__wd_nmi_output, 1); // see wd_lockup_ipi
if (sysctl_hardlockup_all_cpu_backtrace)
trigger_allbutself_cpu_backtrace();
if (hardlockup_panic)
nmi_panic(regs, "Hard LOCKUP");
+
+ wd_end_reporting();
}
+ /*
+ * We are okay to change DEC in soft_nmi_interrupt because the masked
+ * handler has marked a DEC as pending, so the timer interrupt will be
+ * replayed as soon as local irqs are enabled again.
+ */
if (wd_panic_timeout_tb < 0x7fffffff)
mtspr(SPRN_DEC, wd_panic_timeout_tb);
-out:
- nmi_exit();
+ return 0;
}
static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
@@ -314,11 +455,15 @@ void arch_touch_nmi_watchdog(void)
{
unsigned long ticks = tb_ticks_per_usec * wd_timer_period_ms * 1000;
int cpu = smp_processor_id();
- u64 tb = get_tb();
+ u64 tb;
+ if (!cpumask_test_cpu(cpu, &watchdog_cpumask))
+ return;
+
+ tb = get_tb();
if (tb - per_cpu(wd_timer_tb, cpu) >= ticks) {
per_cpu(wd_timer_tb, cpu) = tb;
- wd_smp_clear_cpu_pending(cpu, tb);
+ wd_smp_clear_cpu_pending(cpu);
}
}
EXPORT_SYMBOL(arch_touch_nmi_watchdog);
@@ -376,7 +521,7 @@ static void stop_watchdog(void *arg)
cpumask_clear_cpu(cpu, &wd_cpus_enabled);
wd_smp_unlock(&flags);
- wd_smp_clear_cpu_pending(cpu, get_tb());
+ wd_smp_clear_cpu_pending(cpu);
}
static int stop_watchdog_on_cpu(unsigned int cpu)
@@ -386,7 +531,13 @@ static int stop_watchdog_on_cpu(unsigned int cpu)
static void watchdog_calc_timeouts(void)
{
- wd_panic_timeout_tb = watchdog_thresh * ppc_tb_freq;
+ u64 threshold = watchdog_thresh;
+
+#ifdef CONFIG_PPC_PSERIES
+ threshold += (READ_ONCE(wd_timeout_pct) * threshold) / 100;
+#endif
+
+ wd_panic_timeout_tb = threshold * ppc_tb_freq;
/* Have the SMP detector trigger a bit later */
wd_smp_panic_timeout_tb = wd_panic_timeout_tb * 3 / 2;
@@ -429,3 +580,12 @@ int __init watchdog_nmi_probe(void)
}
return 0;
}
+
+#ifdef CONFIG_PPC_PSERIES
+void watchdog_nmi_set_timeout_pct(u64 pct)
+{
+ pr_info("Set the NMI watchdog timeout factor to %llu%%\n", pct);
+ WRITE_ONCE(wd_timeout_pct, pct);
+ lockup_detector_reconfigure();
+}
+#endif
diff --git a/arch/powerpc/kexec/Makefile b/arch/powerpc/kexec/Makefile
index 378f6108a414..0c2abe7f9908 100644
--- a/arch/powerpc/kexec/Makefile
+++ b/arch/powerpc/kexec/Makefile
@@ -3,23 +3,15 @@
# Makefile for the linux kernel.
#
-# Avoid clang warnings around longjmp/setjmp declarations
-CFLAGS_crash.o += -ffreestanding
-
obj-y += core.o crash.o core_$(BITS).o
obj-$(CONFIG_PPC32) += relocate_32.o
-obj-$(CONFIG_KEXEC_FILE) += file_load.o elf_$(BITS).o
-
-ifdef CONFIG_HAVE_IMA_KEXEC
-ifdef CONFIG_IMA
-obj-y += ima.o
-endif
-endif
-
+obj-$(CONFIG_KEXEC_FILE) += file_load.o ranges.o file_load_$(BITS).o elf_$(BITS).o
# Disable GCOV, KCOV & sanitizers in odd or sensitive code
GCOV_PROFILE_core_$(BITS).o := n
KCOV_INSTRUMENT_core_$(BITS).o := n
UBSAN_SANITIZE_core_$(BITS).o := n
+KASAN_SANITIZE_core.o := n
+KASAN_SANITIZE_core_$(BITS) := n
diff --git a/arch/powerpc/kexec/core.c b/arch/powerpc/kexec/core.c
index 078fe3d76feb..de64c7962991 100644
--- a/arch/powerpc/kexec/core.c
+++ b/arch/powerpc/kexec/core.c
@@ -18,8 +18,9 @@
#include <asm/kdump.h>
#include <asm/machdep.h>
#include <asm/pgalloc.h>
-#include <asm/prom.h>
#include <asm/sections.h>
+#include <asm/setup.h>
+#include <asm/firmware.h>
void machine_kexec_mask_interrupts(void) {
unsigned int i;
@@ -48,19 +49,6 @@ void machine_crash_shutdown(struct pt_regs *regs)
default_machine_crash_shutdown(regs);
}
-/*
- * Do what every setup is needed on image and the
- * reboot code buffer to allow us to avoid allocations
- * later.
- */
-int machine_kexec_prepare(struct kimage *image)
-{
- if (ppc_md.machine_kexec_prepare)
- return ppc_md.machine_kexec_prepare(image);
- else
- return default_machine_kexec_prepare(image);
-}
-
void machine_kexec_cleanup(struct kimage *image)
{
}
@@ -68,11 +56,11 @@ void machine_kexec_cleanup(struct kimage *image)
void arch_crash_save_vmcoreinfo(void)
{
-#ifdef CONFIG_NEED_MULTIPLE_NODES
+#ifdef CONFIG_NUMA
VMCOREINFO_SYMBOL(node_data);
VMCOREINFO_LENGTH(node_data, MAX_NUMNODES);
#endif
-#ifndef CONFIG_NEED_MULTIPLE_NODES
+#ifndef CONFIG_NUMA
VMCOREINFO_SYMBOL(contig_page_data);
#endif
#if defined(CONFIG_PPC64) && defined(CONFIG_SPARSEMEM_VMEMMAP)
@@ -115,11 +103,12 @@ void machine_kexec(struct kimage *image)
void __init reserve_crashkernel(void)
{
- unsigned long long crash_size, crash_base;
+ unsigned long long crash_size, crash_base, total_mem_sz;
int ret;
+ total_mem_sz = memory_limit ? memory_limit : memblock_phys_mem_size();
/* use common parsing */
- ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
+ ret = parse_crashkernel(boot_command_line, total_mem_sz,
&crash_size, &crash_base);
if (ret == 0 && crash_size > 0) {
crashk_res.start = crash_base;
@@ -146,11 +135,18 @@ void __init reserve_crashkernel(void)
if (!crashk_res.start) {
#ifdef CONFIG_PPC64
/*
- * On 64bit we split the RMO in half but cap it at half of
- * a small SLB (128MB) since the crash kernel needs to place
- * itself and some stacks to be in the first segment.
+ * On the LPAR platform, place the crash kernel at the middle of
+ * the RMA size (capped at 512MB) so that the crash kernel has
+ * enough space to place itself and some stack in the first
+ * segment, while the normal kernel still has enough space to
+ * allocate memory for essential system resources in the first
+ * segment. Keep the crash kernel start at a 128MB offset on
+ * other platforms.
*/
- crashk_res.start = min(0x8000000ULL, (ppc64_rma_size / 2));
+ if (firmware_has_feature(FW_FEATURE_LPAR))
+ crashk_res.start = min_t(u64, ppc64_rma_size / 2, SZ_512M);
+ else
+ crashk_res.start = min_t(u64, ppc64_rma_size / 2, SZ_128M);
#else
crashk_res.start = KDUMP_KERNELBASE;
#endif
@@ -178,6 +174,7 @@ void __init reserve_crashkernel(void)
/* Crash kernel trumps memory limit */
if (memory_limit && memory_limit <= crashk_res.end) {
memory_limit = crashk_res.end + 1;
+ total_mem_sz = memory_limit;
printk("Adjusted memory limit for crashkernel, now 0x%llx\n",
memory_limit);
}
@@ -186,7 +183,7 @@ void __init reserve_crashkernel(void)
"for crashkernel (System RAM: %ldMB)\n",
(unsigned long)(crash_size >> 20),
(unsigned long)(crashk_res.start >> 20),
- (unsigned long)(memblock_phys_mem_size() >> 20));
+ (unsigned long)(total_mem_sz >> 20));
if (!memblock_is_region_memory(crashk_res.start, crash_size) ||
memblock_reserve(crashk_res.start, crash_size)) {
@@ -196,7 +193,7 @@ void __init reserve_crashkernel(void)
}
}
-int overlaps_crashkernel(unsigned long start, unsigned long size)
+int __init overlaps_crashkernel(unsigned long start, unsigned long size)
{
return (start + size) > crashk_res.start && start <= crashk_res.end;
}
diff --git a/arch/powerpc/kexec/core_32.c b/arch/powerpc/kexec/core_32.c
index bf9f1f906d64..c95f96850c9e 100644
--- a/arch/powerpc/kexec/core_32.c
+++ b/arch/powerpc/kexec/core_32.c
@@ -55,7 +55,7 @@ void default_machine_kexec(struct kimage *image)
reboot_code_buffer + KEXEC_CONTROL_PAGE_SIZE);
printk(KERN_INFO "Bye!\n");
- if (!IS_ENABLED(CONFIG_FSL_BOOKE) && !IS_ENABLED(CONFIG_44x))
+ if (!IS_ENABLED(CONFIG_PPC_85xx) && !IS_ENABLED(CONFIG_44x))
relocate_new_kernel(page_list, reboot_code_buffer_phys, image->start);
/* now call it */
@@ -63,7 +63,7 @@ void default_machine_kexec(struct kimage *image)
(*rnk)(page_list, reboot_code_buffer_phys, image->start);
}
-int default_machine_kexec_prepare(struct kimage *image)
+int machine_kexec_prepare(struct kimage *image)
{
return 0;
}
diff --git a/arch/powerpc/kexec/core_64.c b/arch/powerpc/kexec/core_64.c
index 04a7cba58eff..a79e28c91e2b 100644
--- a/arch/powerpc/kexec/core_64.c
+++ b/arch/powerpc/kexec/core_64.c
@@ -16,6 +16,7 @@
#include <linux/kernel.h>
#include <linux/cpu.h>
#include <linux/hardirq.h>
+#include <linux/of.h>
#include <asm/page.h>
#include <asm/current.h>
@@ -25,14 +26,12 @@
#include <asm/paca.h>
#include <asm/mmu.h>
#include <asm/sections.h> /* _end */
-#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/hw_breakpoint.h>
-#include <asm/asm-prototypes.h>
#include <asm/svm.h>
#include <asm/ultravisor.h>
-int default_machine_kexec_prepare(struct kimage *image)
+int machine_kexec_prepare(struct kimage *image)
{
int i;
unsigned long begin, end; /* limits of segment */
@@ -64,15 +63,18 @@ int default_machine_kexec_prepare(struct kimage *image)
begin = image->segment[i].mem;
end = begin + image->segment[i].memsz;
- if ((begin < high) && (end > low))
+ if ((begin < high) && (end > low)) {
+ of_node_put(node);
return -ETXTBSY;
+ }
}
}
return 0;
}
-static void copy_segments(unsigned long ind)
+/* Called during kexec sequence with MMU off */
+static notrace void copy_segments(unsigned long ind)
{
unsigned long entry;
unsigned long *ptr;
@@ -105,7 +107,8 @@ static void copy_segments(unsigned long ind)
}
}
-void kexec_copy_flush(struct kimage *image)
+/* Called during kexec sequence with MMU off */
+notrace void kexec_copy_flush(struct kimage *image)
{
long i, nr_segments = image->nr_segments;
struct kexec_segment ranges[KEXEC_SEGMENT_MAX];
@@ -152,6 +155,8 @@ static void kexec_smp_down(void *arg)
if (ppc_md.kexec_cpu_down)
ppc_md.kexec_cpu_down(0, 1);
+ reset_sprs();
+
kexec_smp_wait();
/* NOTREACHED */
}
@@ -212,7 +217,7 @@ static void wake_offline_cpus(void)
if (!cpu_online(cpu)) {
printk(KERN_INFO "kexec: Waking offline cpu %d.\n",
cpu);
- WARN_ON(cpu_up(cpu));
+ WARN_ON(add_cpu(cpu));
}
}
}
@@ -285,7 +290,7 @@ static union thread_union kexec_stack __init_task_data =
* For similar reasons to the stack above, the kexecing CPU needs to be on a
* static PACA; we switch to kexec_paca.
*/
-struct paca_struct kexec_paca;
+static struct paca_struct kexec_paca;
/* Our assembly helper, in misc_64.S */
extern void kexec_sequence(void *newstack, unsigned long start,
@@ -355,7 +360,7 @@ void default_machine_kexec(struct kimage *image)
* the RMA. On BookE there is no real MMU off mode, so we have to
* keep it enabled as well (but then we have bolted TLB entries).
*/
-#ifdef CONFIG_PPC_BOOK3E
+#ifdef CONFIG_PPC_BOOK3E_64
copy_with_mmu_off = false;
#else
copy_with_mmu_off = radix_enabled() ||
@@ -372,7 +377,7 @@ void default_machine_kexec(struct kimage *image)
/* NOTREACHED */
}
-#ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_PPC_64S_HASH_MMU
/* Values we need to export to the second kernel via the device tree. */
static unsigned long htab_base;
static unsigned long htab_size;
@@ -401,7 +406,7 @@ static int __init export_htab_values(void)
if (!node)
return -ENODEV;
- /* remove any stale propertys so ours can be found */
+ /* remove any stale properties so ours can be found */
of_remove_property(node, of_find_property(node, htab_base_prop.name, NULL));
of_remove_property(node, of_find_property(node, htab_size_prop.name, NULL));
@@ -414,4 +419,4 @@ static int __init export_htab_values(void)
return 0;
}
late_initcall(export_htab_values);
-#endif /* CONFIG_PPC_BOOK3S_64 */
+#endif /* CONFIG_PPC_64S_HASH_MMU */
diff --git a/arch/powerpc/kexec/crash.c b/arch/powerpc/kexec/crash.c
index d488311efab1..252724ed666a 100644
--- a/arch/powerpc/kexec/crash.c
+++ b/arch/powerpc/kexec/crash.c
@@ -20,10 +20,10 @@
#include <asm/processor.h>
#include <asm/machdep.h>
#include <asm/kexec.h>
-#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/setjmp.h>
#include <asm/debug.h>
+#include <asm/interrupt.h>
/*
* The primary CPU waits a while for all secondary CPUs to enter. This is to
@@ -40,6 +40,14 @@
#define REAL_MODE_TIMEOUT 10000
static int time_to_dump;
+
+/*
+ * In case of system reset, secondary CPUs enter crash_kexec_secondary without
+ * having to send an IPI explicitly. So, indicate if the crash is via
+ * system reset to avoid sending another IPI.
+ */
+static int is_via_system_reset;
+
/*
* crash_wake_offline should be set to 1 by platforms that intend to wake
* up offline cpus prior to jumping to a kdump kernel. Currently powernv
@@ -101,11 +109,11 @@ void crash_ipi_callback(struct pt_regs *regs)
/* NOTREACHED */
}
-static void crash_kexec_prepare_cpus(int cpu)
+static void crash_kexec_prepare_cpus(void)
{
unsigned int msecs;
- unsigned int ncpus = num_online_cpus() - 1;/* Excluding the panic cpu */
- int tries = 0;
+ volatile unsigned int ncpus = num_online_cpus() - 1;/* Excluding the panic cpu */
+ volatile int tries = 0;
int (*old_handler)(struct pt_regs *regs);
printk(KERN_EMERG "Sending IPI to other CPUs\n");
@@ -113,7 +121,15 @@ static void crash_kexec_prepare_cpus(int cpu)
if (crash_wake_offline)
ncpus = num_present_cpus() - 1;
- crash_send_ipi(crash_ipi_callback);
+ /*
+ * If we came in via system reset, secondaries enter via crash_kexec_secondary().
+ * So, wait a while for the secondary CPUs to enter for that case.
+ * Else, send IPI to all other CPUs.
+ */
+ if (is_via_system_reset)
+ mdelay(PRIMARY_TIMEOUT);
+ else
+ crash_send_ipi(crash_ipi_callback);
smp_wmb();
again:
@@ -202,7 +218,7 @@ void crash_kexec_secondary(struct pt_regs *regs)
#else /* ! CONFIG_SMP */
-static void crash_kexec_prepare_cpus(int cpu)
+static void crash_kexec_prepare_cpus(void)
{
/*
* move the secondaries to us so that we can copy
@@ -224,7 +240,7 @@ void crash_kexec_secondary(struct pt_regs *regs)
/* wait for all the CPUs to hit real mode but timeout if they don't come in */
#if defined(CONFIG_SMP) && defined(CONFIG_PPC64)
-static void __maybe_unused crash_kexec_wait_realmode(int cpu)
+noinstr static void __maybe_unused crash_kexec_wait_realmode(int cpu)
{
unsigned int msecs;
int i;
@@ -248,6 +264,32 @@ static void __maybe_unused crash_kexec_wait_realmode(int cpu)
static inline void crash_kexec_wait_realmode(int cpu) {}
#endif /* CONFIG_SMP && CONFIG_PPC64 */
+void crash_kexec_prepare(void)
+{
+ /* Avoid hardlocking with irresponsive CPU holding logbuf_lock */
+ printk_deferred_enter();
+
+ /*
+ * This function is only called after the system
+ * has panicked or is otherwise in a critical state.
+ * The minimum amount of code to allow a kexec'd kernel
+ * to run successfully needs to happen here.
+ *
+ * In practice this means stopping other cpus in
+ * an SMP system.
+ * The kernel is broken so disable interrupts.
+ */
+ hard_irq_disable();
+
+ /*
+ * Make a note of crashing cpu. Will be used in machine_kexec
+ * such that another IPI will not be sent.
+ */
+ crashing_cpu = smp_processor_id();
+
+ crash_kexec_prepare_cpus();
+}
+
/*
* Register a function to be called on shutdown. Only use this if you
* can't reset your device in the second kernel.
@@ -311,32 +353,10 @@ void default_machine_crash_shutdown(struct pt_regs *regs)
unsigned int i;
int (*old_handler)(struct pt_regs *regs);
- /*
- * This function is only called after the system
- * has panicked or is otherwise in a critical state.
- * The minimum amount of code to allow a kexec'd kernel
- * to run successfully needs to happen here.
- *
- * In practice this means stopping other cpus in
- * an SMP system.
- * The kernel is broken so disable interrupts.
- */
- hard_irq_disable();
-
- /*
- * Make a note of crashing cpu. Will be used in machine_kexec
- * such that another IPI will not be sent.
- */
- crashing_cpu = smp_processor_id();
-
- /*
- * If we came in via system reset, wait a while for the secondary
- * CPUs to enter.
- */
- if (TRAP(regs) == 0x100)
- mdelay(PRIMARY_TIMEOUT);
+ if (TRAP(regs) == INTERRUPT_SYSTEM_RESET)
+ is_via_system_reset = 1;
- crash_kexec_prepare_cpus(crashing_cpu);
+ crash_smp_send_stop();
crash_save_cpu(regs, crashing_cpu);
diff --git a/arch/powerpc/kexec/elf_64.c b/arch/powerpc/kexec/elf_64.c
index 3072fd6dbe94..eeb258002d1e 100644
--- a/arch/powerpc/kexec/elf_64.c
+++ b/arch/powerpc/kexec/elf_64.c
@@ -19,6 +19,7 @@
#include <linux/kexec.h>
#include <linux/libfdt.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/slab.h>
#include <linux/types.h>
@@ -29,12 +30,12 @@ static void *elf64_load(struct kimage *image, char *kernel_buf,
unsigned long cmdline_len)
{
int ret;
- unsigned int fdt_size;
unsigned long kernel_load_addr;
unsigned long initrd_load_addr = 0, fdt_load_addr;
void *fdt;
const void *slave_code;
struct elfhdr ehdr;
+ char *modified_cmdline = NULL;
struct kexec_elf_info elf_info;
struct kexec_buf kbuf = { .image = image, .buf_min = 0,
.buf_max = ppc64_rma_size };
@@ -44,7 +45,15 @@ static void *elf64_load(struct kimage *image, char *kernel_buf,
ret = kexec_build_elf_info(kernel_buf, kernel_len, &ehdr, &elf_info);
if (ret)
- goto out;
+ return ERR_PTR(ret);
+
+ if (image->type == KEXEC_TYPE_CRASH) {
+ /* min & max buffer values for kdump case */
+ kbuf.buf_min = pbuf.buf_min = crashk_res.start;
+ kbuf.buf_max = pbuf.buf_max =
+ ((crashk_res.end < ppc64_rma_size) ?
+ crashk_res.end : (ppc64_rma_size - 1));
+ }
ret = kexec_elf_load(image, &ehdr, &elf_info, &kbuf, &kernel_load_addr);
if (ret)
@@ -60,6 +69,25 @@ static void *elf64_load(struct kimage *image, char *kernel_buf,
pr_debug("Loaded purgatory at 0x%lx\n", pbuf.mem);
+ /* Load additional segments needed for panic kernel */
+ if (image->type == KEXEC_TYPE_CRASH) {
+ ret = load_crashdump_segments_ppc64(image, &kbuf);
+ if (ret) {
+ pr_err("Failed to load kdump kernel segments\n");
+ goto out;
+ }
+
+ /* Setup cmdline for kdump kernel case */
+ modified_cmdline = setup_kdump_cmdline(image, cmdline,
+ cmdline_len);
+ if (!modified_cmdline) {
+ pr_err("Setting up cmdline for kdump kernel failed\n");
+ ret = -EINVAL;
+ goto out;
+ }
+ cmdline = modified_cmdline;
+ }
+
if (initrd != NULL) {
kbuf.buffer = initrd;
kbuf.bufsz = kbuf.memsz = initrd_len;
@@ -74,49 +102,53 @@ static void *elf64_load(struct kimage *image, char *kernel_buf,
pr_debug("Loaded initrd at 0x%lx\n", initrd_load_addr);
}
- fdt_size = fdt_totalsize(initial_boot_params) * 2;
- fdt = kmalloc(fdt_size, GFP_KERNEL);
+ fdt = of_kexec_alloc_and_setup_fdt(image, initrd_load_addr,
+ initrd_len, cmdline,
+ kexec_extra_fdt_size_ppc64(image));
if (!fdt) {
- pr_err("Not enough memory for the device tree.\n");
- ret = -ENOMEM;
- goto out;
- }
- ret = fdt_open_into(initial_boot_params, fdt, fdt_size);
- if (ret < 0) {
pr_err("Error setting up the new device tree.\n");
ret = -EINVAL;
goto out;
}
- ret = setup_new_fdt(image, fdt, initrd_load_addr, initrd_len, cmdline);
+ ret = setup_new_fdt_ppc64(image, fdt, initrd_load_addr,
+ initrd_len, cmdline);
if (ret)
- goto out;
+ goto out_free_fdt;
fdt_pack(fdt);
kbuf.buffer = fdt;
- kbuf.bufsz = kbuf.memsz = fdt_size;
+ kbuf.bufsz = kbuf.memsz = fdt_totalsize(fdt);
kbuf.buf_align = PAGE_SIZE;
kbuf.top_down = true;
kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
ret = kexec_add_buffer(&kbuf);
if (ret)
- goto out;
+ goto out_free_fdt;
+
+ /* FDT will be freed in arch_kimage_file_post_load_cleanup */
+ image->arch.fdt = fdt;
+
fdt_load_addr = kbuf.mem;
pr_debug("Loaded device tree at 0x%lx\n", fdt_load_addr);
slave_code = elf_info.buffer + elf_info.proghdrs[0].p_offset;
- ret = setup_purgatory(image, slave_code, fdt, kernel_load_addr,
- fdt_load_addr);
+ ret = setup_purgatory_ppc64(image, slave_code, fdt, kernel_load_addr,
+ fdt_load_addr);
if (ret)
pr_err("Error setting up the purgatory.\n");
+ goto out;
+
+out_free_fdt:
+ kvfree(fdt);
out:
+ kfree(modified_cmdline);
kexec_free_elf_info(&elf_info);
- /* Make kimage_file_post_load_cleanup free the fdt buffer for us. */
- return ret ? ERR_PTR(ret) : fdt;
+ return ret ? ERR_PTR(ret) : NULL;
}
const struct kexec_file_ops kexec_elf64_ops = {
diff --git a/arch/powerpc/kexec/file_load.c b/arch/powerpc/kexec/file_load.c
index 143c91724617..4284f76cbef5 100644
--- a/arch/powerpc/kexec/file_load.c
+++ b/arch/powerpc/kexec/file_load.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * ppc64 code to implement the kexec_file_load syscall
+ * powerpc code to implement the kexec_file_load syscall
*
* Copyright (C) 2004 Adam Litke (agl@us.ibm.com)
* Copyright (C) 2004 IBM Corp.
@@ -18,23 +18,44 @@
#include <linux/kexec.h>
#include <linux/of_fdt.h>
#include <linux/libfdt.h>
-#include <asm/ima.h>
+#include <asm/setup.h>
-#define SLAVE_CODE_SIZE 256
+#define SLAVE_CODE_SIZE 256 /* First 0x100 bytes */
-const struct kexec_file_ops * const kexec_file_loaders[] = {
- &kexec_elf64_ops,
- NULL
-};
-
-int arch_kexec_kernel_image_probe(struct kimage *image, void *buf,
- unsigned long buf_len)
+/**
+ * setup_kdump_cmdline - Prepend "elfcorehdr=<addr> " to command line
+ * of kdump kernel for exporting the core.
+ * @image: Kexec image
+ * @cmdline: Command line parameters to update.
+ * @cmdline_len: Length of the cmdline parameters.
+ *
+ * kdump segment must be setup before calling this function.
+ *
+ * Returns new cmdline buffer for kdump kernel on success, NULL otherwise.
+ */
+char *setup_kdump_cmdline(struct kimage *image, char *cmdline,
+ unsigned long cmdline_len)
{
- /* We don't support crash kernels yet. */
- if (image->type == KEXEC_TYPE_CRASH)
- return -EOPNOTSUPP;
+ int elfcorehdr_strlen;
+ char *cmdline_ptr;
+
+ cmdline_ptr = kzalloc(COMMAND_LINE_SIZE, GFP_KERNEL);
+ if (!cmdline_ptr)
+ return NULL;
+
+ elfcorehdr_strlen = sprintf(cmdline_ptr, "elfcorehdr=0x%lx ",
+ image->elf_load_addr);
- return kexec_image_probe_default(image, buf, buf_len);
+ if (elfcorehdr_strlen + cmdline_len > COMMAND_LINE_SIZE) {
+ pr_err("Appending elfcorehdr=<addr> exceeds cmdline size\n");
+ kfree(cmdline_ptr);
+ return NULL;
+ }
+
+ memcpy(cmdline_ptr + elfcorehdr_strlen, cmdline, cmdline_len);
+ // Ensure it's nul terminated
+ cmdline_ptr[COMMAND_LINE_SIZE - 1] = '\0';
+ return cmdline_ptr;
}
/**
@@ -86,169 +107,3 @@ int setup_purgatory(struct kimage *image, const void *slave_code,
return 0;
}
-
-/**
- * delete_fdt_mem_rsv - delete memory reservation with given address and size
- *
- * Return: 0 on success, or negative errno on error.
- */
-int delete_fdt_mem_rsv(void *fdt, unsigned long start, unsigned long size)
-{
- int i, ret, num_rsvs = fdt_num_mem_rsv(fdt);
-
- for (i = 0; i < num_rsvs; i++) {
- uint64_t rsv_start, rsv_size;
-
- ret = fdt_get_mem_rsv(fdt, i, &rsv_start, &rsv_size);
- if (ret) {
- pr_err("Malformed device tree.\n");
- return -EINVAL;
- }
-
- if (rsv_start == start && rsv_size == size) {
- ret = fdt_del_mem_rsv(fdt, i);
- if (ret) {
- pr_err("Error deleting device tree reservation.\n");
- return -EINVAL;
- }
-
- return 0;
- }
- }
-
- return -ENOENT;
-}
-
-/*
- * setup_new_fdt - modify /chosen and memory reservation for the next kernel
- * @image: kexec image being loaded.
- * @fdt: Flattened device tree for the next kernel.
- * @initrd_load_addr: Address where the next initrd will be loaded.
- * @initrd_len: Size of the next initrd, or 0 if there will be none.
- * @cmdline: Command line for the next kernel, or NULL if there will
- * be none.
- *
- * Return: 0 on success, or negative errno on error.
- */
-int setup_new_fdt(const struct kimage *image, void *fdt,
- unsigned long initrd_load_addr, unsigned long initrd_len,
- const char *cmdline)
-{
- int ret, chosen_node;
- const void *prop;
-
- /* Remove memory reservation for the current device tree. */
- ret = delete_fdt_mem_rsv(fdt, __pa(initial_boot_params),
- fdt_totalsize(initial_boot_params));
- if (ret == 0)
- pr_debug("Removed old device tree reservation.\n");
- else if (ret != -ENOENT)
- return ret;
-
- chosen_node = fdt_path_offset(fdt, "/chosen");
- if (chosen_node == -FDT_ERR_NOTFOUND) {
- chosen_node = fdt_add_subnode(fdt, fdt_path_offset(fdt, "/"),
- "chosen");
- if (chosen_node < 0) {
- pr_err("Error creating /chosen.\n");
- return -EINVAL;
- }
- } else if (chosen_node < 0) {
- pr_err("Malformed device tree: error reading /chosen.\n");
- return -EINVAL;
- }
-
- /* Did we boot using an initrd? */
- prop = fdt_getprop(fdt, chosen_node, "linux,initrd-start", NULL);
- if (prop) {
- uint64_t tmp_start, tmp_end, tmp_size;
-
- tmp_start = fdt64_to_cpu(*((const fdt64_t *) prop));
-
- prop = fdt_getprop(fdt, chosen_node, "linux,initrd-end", NULL);
- if (!prop) {
- pr_err("Malformed device tree.\n");
- return -EINVAL;
- }
- tmp_end = fdt64_to_cpu(*((const fdt64_t *) prop));
-
- /*
- * kexec reserves exact initrd size, while firmware may
- * reserve a multiple of PAGE_SIZE, so check for both.
- */
- tmp_size = tmp_end - tmp_start;
- ret = delete_fdt_mem_rsv(fdt, tmp_start, tmp_size);
- if (ret == -ENOENT)
- ret = delete_fdt_mem_rsv(fdt, tmp_start,
- round_up(tmp_size, PAGE_SIZE));
- if (ret == 0)
- pr_debug("Removed old initrd reservation.\n");
- else if (ret != -ENOENT)
- return ret;
-
- /* If there's no new initrd, delete the old initrd's info. */
- if (initrd_len == 0) {
- ret = fdt_delprop(fdt, chosen_node,
- "linux,initrd-start");
- if (ret) {
- pr_err("Error deleting linux,initrd-start.\n");
- return -EINVAL;
- }
-
- ret = fdt_delprop(fdt, chosen_node, "linux,initrd-end");
- if (ret) {
- pr_err("Error deleting linux,initrd-end.\n");
- return -EINVAL;
- }
- }
- }
-
- if (initrd_len) {
- ret = fdt_setprop_u64(fdt, chosen_node,
- "linux,initrd-start",
- initrd_load_addr);
- if (ret < 0)
- goto err;
-
- /* initrd-end is the first address after the initrd image. */
- ret = fdt_setprop_u64(fdt, chosen_node, "linux,initrd-end",
- initrd_load_addr + initrd_len);
- if (ret < 0)
- goto err;
-
- ret = fdt_add_mem_rsv(fdt, initrd_load_addr, initrd_len);
- if (ret) {
- pr_err("Error reserving initrd memory: %s\n",
- fdt_strerror(ret));
- return -EINVAL;
- }
- }
-
- if (cmdline != NULL) {
- ret = fdt_setprop_string(fdt, chosen_node, "bootargs", cmdline);
- if (ret < 0)
- goto err;
- } else {
- ret = fdt_delprop(fdt, chosen_node, "bootargs");
- if (ret && ret != -FDT_ERR_NOTFOUND) {
- pr_err("Error deleting bootargs.\n");
- return -EINVAL;
- }
- }
-
- ret = setup_ima_buffer(image, fdt, chosen_node);
- if (ret) {
- pr_err("Error setting up the new device tree.\n");
- return ret;
- }
-
- ret = fdt_setprop(fdt, chosen_node, "linux,booted-from-kexec", NULL, 0);
- if (ret)
- goto err;
-
- return 0;
-
-err:
- pr_err("Error setting up the new device tree.\n");
- return -EINVAL;
-}
diff --git a/arch/powerpc/kexec/file_load_64.c b/arch/powerpc/kexec/file_load_64.c
new file mode 100644
index 000000000000..349a781cea0b
--- /dev/null
+++ b/arch/powerpc/kexec/file_load_64.c
@@ -0,0 +1,1287 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * ppc64 code to implement the kexec_file_load syscall
+ *
+ * Copyright (C) 2004 Adam Litke (agl@us.ibm.com)
+ * Copyright (C) 2004 IBM Corp.
+ * Copyright (C) 2004,2005 Milton D Miller II, IBM Corporation
+ * Copyright (C) 2005 R Sharada (sharada@in.ibm.com)
+ * Copyright (C) 2006 Mohan Kumar M (mohan@in.ibm.com)
+ * Copyright (C) 2020 IBM Corporation
+ *
+ * Based on kexec-tools' kexec-ppc64.c, kexec-elf-rel-ppc64.c, fs2dt.c.
+ * Heavily modified for the kernel by
+ * Hari Bathini, IBM Corporation.
+ */
+
+#include <linux/kexec.h>
+#include <linux/of_fdt.h>
+#include <linux/libfdt.h>
+#include <linux/of_device.h>
+#include <linux/memblock.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <asm/setup.h>
+#include <asm/drmem.h>
+#include <asm/firmware.h>
+#include <asm/kexec_ranges.h>
+#include <asm/crashdump-ppc64.h>
+
+struct umem_info {
+ u64 *buf; /* data buffer for usable-memory property */
+ u32 size; /* size allocated for the data buffer */
+ u32 max_entries; /* maximum no. of entries */
+ u32 idx; /* index of current entry */
+
+ /* usable memory ranges to look up */
+ unsigned int nr_ranges;
+ const struct crash_mem_range *ranges;
+};
+
+const struct kexec_file_ops * const kexec_file_loaders[] = {
+ &kexec_elf64_ops,
+ NULL
+};
+
+/**
+ * get_exclude_memory_ranges - Get exclude memory ranges. This list includes
+ * regions like opal/rtas, tce-table, initrd,
+ * kernel, htab which should be avoided while
+ * setting up kexec load segments.
+ * @mem_ranges: Range list to add the memory ranges to.
+ *
+ * Returns 0 on success, negative errno on error.
+ */
+static int get_exclude_memory_ranges(struct crash_mem **mem_ranges)
+{
+ int ret;
+
+ ret = add_tce_mem_ranges(mem_ranges);
+ if (ret)
+ goto out;
+
+ ret = add_initrd_mem_range(mem_ranges);
+ if (ret)
+ goto out;
+
+ ret = add_htab_mem_range(mem_ranges);
+ if (ret)
+ goto out;
+
+ ret = add_kernel_mem_range(mem_ranges);
+ if (ret)
+ goto out;
+
+ ret = add_rtas_mem_range(mem_ranges);
+ if (ret)
+ goto out;
+
+ ret = add_opal_mem_range(mem_ranges);
+ if (ret)
+ goto out;
+
+ ret = add_reserved_mem_ranges(mem_ranges);
+ if (ret)
+ goto out;
+
+ /* exclude memory ranges should be sorted for easy lookup */
+ sort_memory_ranges(*mem_ranges, true);
+out:
+ if (ret)
+ pr_err("Failed to setup exclude memory ranges\n");
+ return ret;
+}
+
+/**
+ * get_usable_memory_ranges - Get usable memory ranges. This list includes
+ * regions like crashkernel, opal/rtas & tce-table,
+ * that kdump kernel could use.
+ * @mem_ranges: Range list to add the memory ranges to.
+ *
+ * Returns 0 on success, negative errno on error.
+ */
+static int get_usable_memory_ranges(struct crash_mem **mem_ranges)
+{
+ int ret;
+
+ /*
+ * Early boot failure observed on guests when low memory (first memory
+ * block?) is not added to usable memory. So, add [0, crashk_res.end]
+ * instead of [crashk_res.start, crashk_res.end] to workaround it.
+ * Also, crashed kernel's memory must be added to reserve map to
+ * avoid kdump kernel from using it.
+ */
+ ret = add_mem_range(mem_ranges, 0, crashk_res.end + 1);
+ if (ret)
+ goto out;
+
+ ret = add_rtas_mem_range(mem_ranges);
+ if (ret)
+ goto out;
+
+ ret = add_opal_mem_range(mem_ranges);
+ if (ret)
+ goto out;
+
+ ret = add_tce_mem_ranges(mem_ranges);
+out:
+ if (ret)
+ pr_err("Failed to setup usable memory ranges\n");
+ return ret;
+}
+
+/**
+ * get_crash_memory_ranges - Get crash memory ranges. This list includes
+ * first/crashing kernel's memory regions that
+ * would be exported via an elfcore.
+ * @mem_ranges: Range list to add the memory ranges to.
+ *
+ * Returns 0 on success, negative errno on error.
+ */
+static int get_crash_memory_ranges(struct crash_mem **mem_ranges)
+{
+ phys_addr_t base, end;
+ struct crash_mem *tmem;
+ u64 i;
+ int ret;
+
+ for_each_mem_range(i, &base, &end) {
+ u64 size = end - base;
+
+ /* Skip backup memory region, which needs a separate entry */
+ if (base == BACKUP_SRC_START) {
+ if (size > BACKUP_SRC_SIZE) {
+ base = BACKUP_SRC_END + 1;
+ size -= BACKUP_SRC_SIZE;
+ } else
+ continue;
+ }
+
+ ret = add_mem_range(mem_ranges, base, size);
+ if (ret)
+ goto out;
+
+ /* Try merging adjacent ranges before reallocation attempt */
+ if ((*mem_ranges)->nr_ranges == (*mem_ranges)->max_nr_ranges)
+ sort_memory_ranges(*mem_ranges, true);
+ }
+
+ /* Reallocate memory ranges if there is no space to split ranges */
+ tmem = *mem_ranges;
+ if (tmem && (tmem->nr_ranges == tmem->max_nr_ranges)) {
+ tmem = realloc_mem_ranges(mem_ranges);
+ if (!tmem)
+ goto out;
+ }
+
+ /* Exclude crashkernel region */
+ ret = crash_exclude_mem_range(tmem, crashk_res.start, crashk_res.end);
+ if (ret)
+ goto out;
+
+ /*
+ * FIXME: For now, stay in parity with kexec-tools but if RTAS/OPAL
+ * regions are exported to save their context at the time of
+ * crash, they should actually be backed up just like the
+ * first 64K bytes of memory.
+ */
+ ret = add_rtas_mem_range(mem_ranges);
+ if (ret)
+ goto out;
+
+ ret = add_opal_mem_range(mem_ranges);
+ if (ret)
+ goto out;
+
+ /* create a separate program header for the backup region */
+ ret = add_mem_range(mem_ranges, BACKUP_SRC_START, BACKUP_SRC_SIZE);
+ if (ret)
+ goto out;
+
+ sort_memory_ranges(*mem_ranges, false);
+out:
+ if (ret)
+ pr_err("Failed to setup crash memory ranges\n");
+ return ret;
+}
+
+/**
+ * get_reserved_memory_ranges - Get reserve memory ranges. This list includes
+ * memory regions that should be added to the
+ * memory reserve map to ensure the region is
+ * protected from any mischief.
+ * @mem_ranges: Range list to add the memory ranges to.
+ *
+ * Returns 0 on success, negative errno on error.
+ */
+static int get_reserved_memory_ranges(struct crash_mem **mem_ranges)
+{
+ int ret;
+
+ ret = add_rtas_mem_range(mem_ranges);
+ if (ret)
+ goto out;
+
+ ret = add_tce_mem_ranges(mem_ranges);
+ if (ret)
+ goto out;
+
+ ret = add_reserved_mem_ranges(mem_ranges);
+out:
+ if (ret)
+ pr_err("Failed to setup reserved memory ranges\n");
+ return ret;
+}
+
+/**
+ * __locate_mem_hole_top_down - Looks top down for a large enough memory hole
+ * in the memory regions between buf_min & buf_max
+ * for the buffer. If found, sets kbuf->mem.
+ * @kbuf: Buffer contents and memory parameters.
+ * @buf_min: Minimum address for the buffer.
+ * @buf_max: Maximum address for the buffer.
+ *
+ * Returns 0 on success, negative errno on error.
+ */
+static int __locate_mem_hole_top_down(struct kexec_buf *kbuf,
+ u64 buf_min, u64 buf_max)
+{
+ int ret = -EADDRNOTAVAIL;
+ phys_addr_t start, end;
+ u64 i;
+
+ for_each_mem_range_rev(i, &start, &end) {
+ /*
+ * memblock uses [start, end) convention while it is
+ * [start, end] here. Fix the off-by-one to have the
+ * same convention.
+ */
+ end -= 1;
+
+ if (start > buf_max)
+ continue;
+
+ /* Memory hole not found */
+ if (end < buf_min)
+ break;
+
+ /* Adjust memory region based on the given range */
+ if (start < buf_min)
+ start = buf_min;
+ if (end > buf_max)
+ end = buf_max;
+
+ start = ALIGN(start, kbuf->buf_align);
+ if (start < end && (end - start + 1) >= kbuf->memsz) {
+ /* Suitable memory range found. Set kbuf->mem */
+ kbuf->mem = ALIGN_DOWN(end - kbuf->memsz + 1,
+ kbuf->buf_align);
+ ret = 0;
+ break;
+ }
+ }
+
+ return ret;
+}
+
+/**
+ * locate_mem_hole_top_down_ppc64 - Skip special memory regions to find a
+ * suitable buffer with top down approach.
+ * @kbuf: Buffer contents and memory parameters.
+ * @buf_min: Minimum address for the buffer.
+ * @buf_max: Maximum address for the buffer.
+ * @emem: Exclude memory ranges.
+ *
+ * Returns 0 on success, negative errno on error.
+ */
+static int locate_mem_hole_top_down_ppc64(struct kexec_buf *kbuf,
+ u64 buf_min, u64 buf_max,
+ const struct crash_mem *emem)
+{
+ int i, ret = 0, err = -EADDRNOTAVAIL;
+ u64 start, end, tmin, tmax;
+
+ tmax = buf_max;
+ for (i = (emem->nr_ranges - 1); i >= 0; i--) {
+ start = emem->ranges[i].start;
+ end = emem->ranges[i].end;
+
+ if (start > tmax)
+ continue;
+
+ if (end < tmax) {
+ tmin = (end < buf_min ? buf_min : end + 1);
+ ret = __locate_mem_hole_top_down(kbuf, tmin, tmax);
+ if (!ret)
+ return 0;
+ }
+
+ tmax = start - 1;
+
+ if (tmax < buf_min) {
+ ret = err;
+ break;
+ }
+ ret = 0;
+ }
+
+ if (!ret) {
+ tmin = buf_min;
+ ret = __locate_mem_hole_top_down(kbuf, tmin, tmax);
+ }
+ return ret;
+}
+
+/**
+ * __locate_mem_hole_bottom_up - Looks bottom up for a large enough memory hole
+ * in the memory regions between buf_min & buf_max
+ * for the buffer. If found, sets kbuf->mem.
+ * @kbuf: Buffer contents and memory parameters.
+ * @buf_min: Minimum address for the buffer.
+ * @buf_max: Maximum address for the buffer.
+ *
+ * Returns 0 on success, negative errno on error.
+ */
+static int __locate_mem_hole_bottom_up(struct kexec_buf *kbuf,
+ u64 buf_min, u64 buf_max)
+{
+ int ret = -EADDRNOTAVAIL;
+ phys_addr_t start, end;
+ u64 i;
+
+ for_each_mem_range(i, &start, &end) {
+ /*
+ * memblock uses [start, end) convention while it is
+ * [start, end] here. Fix the off-by-one to have the
+ * same convention.
+ */
+ end -= 1;
+
+ if (end < buf_min)
+ continue;
+
+ /* Memory hole not found */
+ if (start > buf_max)
+ break;
+
+ /* Adjust memory region based on the given range */
+ if (start < buf_min)
+ start = buf_min;
+ if (end > buf_max)
+ end = buf_max;
+
+ start = ALIGN(start, kbuf->buf_align);
+ if (start < end && (end - start + 1) >= kbuf->memsz) {
+ /* Suitable memory range found. Set kbuf->mem */
+ kbuf->mem = start;
+ ret = 0;
+ break;
+ }
+ }
+
+ return ret;
+}
+
+/**
+ * locate_mem_hole_bottom_up_ppc64 - Skip special memory regions to find a
+ * suitable buffer with bottom up approach.
+ * @kbuf: Buffer contents and memory parameters.
+ * @buf_min: Minimum address for the buffer.
+ * @buf_max: Maximum address for the buffer.
+ * @emem: Exclude memory ranges.
+ *
+ * Returns 0 on success, negative errno on error.
+ */
+static int locate_mem_hole_bottom_up_ppc64(struct kexec_buf *kbuf,
+ u64 buf_min, u64 buf_max,
+ const struct crash_mem *emem)
+{
+ int i, ret = 0, err = -EADDRNOTAVAIL;
+ u64 start, end, tmin, tmax;
+
+ tmin = buf_min;
+ for (i = 0; i < emem->nr_ranges; i++) {
+ start = emem->ranges[i].start;
+ end = emem->ranges[i].end;
+
+ if (end < tmin)
+ continue;
+
+ if (start > tmin) {
+ tmax = (start > buf_max ? buf_max : start - 1);
+ ret = __locate_mem_hole_bottom_up(kbuf, tmin, tmax);
+ if (!ret)
+ return 0;
+ }
+
+ tmin = end + 1;
+
+ if (tmin > buf_max) {
+ ret = err;
+ break;
+ }
+ ret = 0;
+ }
+
+ if (!ret) {
+ tmax = buf_max;
+ ret = __locate_mem_hole_bottom_up(kbuf, tmin, tmax);
+ }
+ return ret;
+}
+
+/**
+ * check_realloc_usable_mem - Reallocate buffer if it can't accommodate entries
+ * @um_info: Usable memory buffer and ranges info.
+ * @cnt: No. of entries to accommodate.
+ *
+ * Frees up the old buffer if memory reallocation fails.
+ *
+ * Returns buffer on success, NULL on error.
+ */
+static u64 *check_realloc_usable_mem(struct umem_info *um_info, int cnt)
+{
+ u32 new_size;
+ u64 *tbuf;
+
+ if ((um_info->idx + cnt) <= um_info->max_entries)
+ return um_info->buf;
+
+ new_size = um_info->size + MEM_RANGE_CHUNK_SZ;
+ tbuf = krealloc(um_info->buf, new_size, GFP_KERNEL);
+ if (tbuf) {
+ um_info->buf = tbuf;
+ um_info->size = new_size;
+ um_info->max_entries = (um_info->size / sizeof(u64));
+ }
+
+ return tbuf;
+}
+
+/**
+ * add_usable_mem - Add the usable memory ranges within the given memory range
+ * to the buffer
+ * @um_info: Usable memory buffer and ranges info.
+ * @base: Base address of memory range to look for.
+ * @end: End address of memory range to look for.
+ *
+ * Returns 0 on success, negative errno on error.
+ */
+static int add_usable_mem(struct umem_info *um_info, u64 base, u64 end)
+{
+ u64 loc_base, loc_end;
+ bool add;
+ int i;
+
+ for (i = 0; i < um_info->nr_ranges; i++) {
+ add = false;
+ loc_base = um_info->ranges[i].start;
+ loc_end = um_info->ranges[i].end;
+ if (loc_base >= base && loc_end <= end)
+ add = true;
+ else if (base < loc_end && end > loc_base) {
+ if (loc_base < base)
+ loc_base = base;
+ if (loc_end > end)
+ loc_end = end;
+ add = true;
+ }
+
+ if (add) {
+ if (!check_realloc_usable_mem(um_info, 2))
+ return -ENOMEM;
+
+ um_info->buf[um_info->idx++] = cpu_to_be64(loc_base);
+ um_info->buf[um_info->idx++] =
+ cpu_to_be64(loc_end - loc_base + 1);
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * kdump_setup_usable_lmb - This is a callback function that gets called by
+ * walk_drmem_lmbs for every LMB to set its
+ * usable memory ranges.
+ * @lmb: LMB info.
+ * @usm: linux,drconf-usable-memory property value.
+ * @data: Pointer to usable memory buffer and ranges info.
+ *
+ * Returns 0 on success, negative errno on error.
+ */
+static int kdump_setup_usable_lmb(struct drmem_lmb *lmb, const __be32 **usm,
+ void *data)
+{
+ struct umem_info *um_info;
+ int tmp_idx, ret;
+ u64 base, end;
+
+ /*
+ * kdump load isn't supported on kernels already booted with
+ * linux,drconf-usable-memory property.
+ */
+ if (*usm) {
+ pr_err("linux,drconf-usable-memory property already exists!");
+ return -EINVAL;
+ }
+
+ um_info = data;
+ tmp_idx = um_info->idx;
+ if (!check_realloc_usable_mem(um_info, 1))
+ return -ENOMEM;
+
+ um_info->idx++;
+ base = lmb->base_addr;
+ end = base + drmem_lmb_size() - 1;
+ ret = add_usable_mem(um_info, base, end);
+ if (!ret) {
+ /*
+ * Update the no. of ranges added. Two entries (base & size)
+ * for every range added.
+ */
+ um_info->buf[tmp_idx] =
+ cpu_to_be64((um_info->idx - tmp_idx - 1) / 2);
+ }
+
+ return ret;
+}
+
+#define NODE_PATH_LEN 256
+/**
+ * add_usable_mem_property - Add usable memory property for the given
+ * memory node.
+ * @fdt: Flattened device tree for the kdump kernel.
+ * @dn: Memory node.
+ * @um_info: Usable memory buffer and ranges info.
+ *
+ * Returns 0 on success, negative errno on error.
+ */
+static int add_usable_mem_property(void *fdt, struct device_node *dn,
+ struct umem_info *um_info)
+{
+ int n_mem_addr_cells, n_mem_size_cells, node;
+ char path[NODE_PATH_LEN];
+ int i, len, ranges, ret;
+ const __be32 *prop;
+ u64 base, end;
+
+ of_node_get(dn);
+
+ if (snprintf(path, NODE_PATH_LEN, "%pOF", dn) > (NODE_PATH_LEN - 1)) {
+ pr_err("Buffer (%d) too small for memory node: %pOF\n",
+ NODE_PATH_LEN, dn);
+ return -EOVERFLOW;
+ }
+ pr_debug("Memory node path: %s\n", path);
+
+ /* Now that we know the path, find its offset in kdump kernel's fdt */
+ node = fdt_path_offset(fdt, path);
+ if (node < 0) {
+ pr_err("Malformed device tree: error reading %s\n", path);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* Get the address & size cells */
+ n_mem_addr_cells = of_n_addr_cells(dn);
+ n_mem_size_cells = of_n_size_cells(dn);
+ pr_debug("address cells: %d, size cells: %d\n", n_mem_addr_cells,
+ n_mem_size_cells);
+
+ um_info->idx = 0;
+ if (!check_realloc_usable_mem(um_info, 2)) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ prop = of_get_property(dn, "reg", &len);
+ if (!prop || len <= 0) {
+ ret = 0;
+ goto out;
+ }
+
+ /*
+ * The "reg" property is a sequence of (addr, size) tuples, each
+ * describing a memory range.
+ */
+ ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);
+
+ for (i = 0; i < ranges; i++) {
+ base = of_read_number(prop, n_mem_addr_cells);
+ prop += n_mem_addr_cells;
+ end = base + of_read_number(prop, n_mem_size_cells) - 1;
+ prop += n_mem_size_cells;
+
+ ret = add_usable_mem(um_info, base, end);
+ if (ret)
+ goto out;
+ }
+
+ /*
+ * No usable memory for the kdump kernel was found in this memory node.
+ * Write a (0, 0) tuple in the linux,usable-memory property so that
+ * this region is ignored.
+ */
+ if (um_info->idx == 0) {
+ um_info->buf[0] = 0;
+ um_info->buf[1] = 0;
+ um_info->idx = 2;
+ }
+
+ ret = fdt_setprop(fdt, node, "linux,usable-memory", um_info->buf,
+ (um_info->idx * sizeof(u64)));
+
+out:
+ of_node_put(dn);
+ return ret;
+}
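
Each "reg" entry is n_mem_addr_cells + n_mem_size_cells 32-bit cells, so the
range count falls directly out of the property length. A hedged sketch of the
same arithmetic over a raw cell array (in the spirit of of_read_number(); the
cells are assumed already converted to host byte order):

#include <stdint.h>

/* Combine 'cells' 32-bit cells into one number. */
static uint64_t read_cells(const uint32_t *prop, int cells)
{
	uint64_t val = 0;

	while (cells--)
		val = (val << 32) | *prop++;
	return val;
}

/* Number of (addr, size) tuples in a "reg" property that is 'len' bytes long. */
static int reg_nr_ranges(int len, int addr_cells, int size_cells)
{
	return (len / 4) / (addr_cells + size_cells);
}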
+
+
+/**
+ * update_usable_mem_fdt - Updates kdump kernel's fdt with linux,usable-memory
+ * and linux,drconf-usable-memory DT properties as
+ * appropriate to restrict its memory usage.
+ * @fdt: Flattened device tree for the kdump kernel.
+ * @usable_mem: Usable memory ranges for kdump kernel.
+ *
+ * Returns 0 on success, negative errno on error.
+ */
+static int update_usable_mem_fdt(void *fdt, struct crash_mem *usable_mem)
+{
+ struct umem_info um_info;
+ struct device_node *dn;
+ int node, ret = 0;
+
+ if (!usable_mem) {
+ pr_err("Usable memory ranges for kdump kernel not found\n");
+ return -ENOENT;
+ }
+
+ node = fdt_path_offset(fdt, "/ibm,dynamic-reconfiguration-memory");
+ if (node == -FDT_ERR_NOTFOUND)
+ pr_debug("No dynamic reconfiguration memory found\n");
+ else if (node < 0) {
+ pr_err("Malformed device tree: error reading /ibm,dynamic-reconfiguration-memory.\n");
+ return -EINVAL;
+ }
+
+ um_info.buf = NULL;
+ um_info.size = 0;
+ um_info.max_entries = 0;
+ um_info.idx = 0;
+ /* Memory ranges to look up */
+ um_info.ranges = &(usable_mem->ranges[0]);
+ um_info.nr_ranges = usable_mem->nr_ranges;
+
+ dn = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
+ if (dn) {
+ ret = walk_drmem_lmbs(dn, &um_info, kdump_setup_usable_lmb);
+ of_node_put(dn);
+
+ if (ret) {
+ pr_err("Could not setup linux,drconf-usable-memory property for kdump\n");
+ goto out;
+ }
+
+ ret = fdt_setprop(fdt, node, "linux,drconf-usable-memory",
+ um_info.buf, (um_info.idx * sizeof(u64)));
+ if (ret) {
+ pr_err("Failed to update fdt with linux,drconf-usable-memory property");
+ goto out;
+ }
+ }
+
+ /*
+ * Walk through each memory node and set linux,usable-memory property
+ * for the corresponding node in kdump kernel's fdt.
+ */
+ for_each_node_by_type(dn, "memory") {
+ ret = add_usable_mem_property(fdt, dn, &um_info);
+ if (ret) {
+ pr_err("Failed to set linux,usable-memory property for %s node",
+ dn->full_name);
+ of_node_put(dn);
+ goto out;
+ }
+ }
+
+out:
+ kfree(um_info.buf);
+ return ret;
+}
+
+/**
+ * load_backup_segment - Locate a memory hole to place the backup region.
+ * @image: Kexec image.
+ * @kbuf: Buffer contents and memory parameters.
+ *
+ * Returns 0 on success, negative errno on error.
+ */
+static int load_backup_segment(struct kimage *image, struct kexec_buf *kbuf)
+{
+ void *buf;
+ int ret;
+
+ /*
+ * Set up a source buffer for the backup segment.
+ *
+ * A source buffer has no meaning for the backup region, as its data
+ * is copied from the backup source by purgatory after a crash.
+ * But since the segment loading code doesn't recognize such segments,
+ * set up a dummy source buffer to keep it happy for now.
+ */
+ buf = vzalloc(BACKUP_SRC_SIZE);
+ if (!buf)
+ return -ENOMEM;
+
+ kbuf->buffer = buf;
+ kbuf->mem = KEXEC_BUF_MEM_UNKNOWN;
+ kbuf->bufsz = kbuf->memsz = BACKUP_SRC_SIZE;
+ kbuf->top_down = false;
+
+ ret = kexec_add_buffer(kbuf);
+ if (ret) {
+ vfree(buf);
+ return ret;
+ }
+
+ image->arch.backup_buf = buf;
+ image->arch.backup_start = kbuf->mem;
+ return 0;
+}
+
+/**
+ * update_backup_region_phdr - Update backup region's offset for the core to
+ * export the region appropriately.
+ * @image: Kexec image.
+ * @ehdr: ELF core header.
+ *
+ * Assumes an exclusive program header is set up for the backup region
+ * in the ELF headers.
+ *
+ * Returns nothing.
+ */
+static void update_backup_region_phdr(struct kimage *image, Elf64_Ehdr *ehdr)
+{
+ Elf64_Phdr *phdr;
+ unsigned int i;
+
+ phdr = (Elf64_Phdr *)(ehdr + 1);
+ for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
+ if (phdr->p_paddr == BACKUP_SRC_START) {
+ phdr->p_offset = image->arch.backup_start;
+ pr_debug("Backup region offset updated to 0x%lx\n",
+ image->arch.backup_start);
+ return;
+ }
+ }
+}
+
+/**
+ * load_elfcorehdr_segment - Set up crash memory ranges and initialize the
+ * elfcorehdr segment needed to load the kdump kernel.
+ * @image: Kexec image.
+ * @kbuf: Buffer contents and memory parameters.
+ *
+ * Returns 0 on success, negative errno on error.
+ */
+static int load_elfcorehdr_segment(struct kimage *image, struct kexec_buf *kbuf)
+{
+ struct crash_mem *cmem = NULL;
+ unsigned long headers_sz;
+ void *headers = NULL;
+ int ret;
+
+ ret = get_crash_memory_ranges(&cmem);
+ if (ret)
+ goto out;
+
+ /* Setup elfcorehdr segment */
+ ret = crash_prepare_elf64_headers(cmem, false, &headers, &headers_sz);
+ if (ret) {
+ pr_err("Failed to prepare elf headers for the core\n");
+ goto out;
+ }
+
+ /* Fix the offset for backup region in the ELF header */
+ update_backup_region_phdr(image, headers);
+
+ kbuf->buffer = headers;
+ kbuf->mem = KEXEC_BUF_MEM_UNKNOWN;
+ kbuf->bufsz = kbuf->memsz = headers_sz;
+ kbuf->top_down = false;
+
+ ret = kexec_add_buffer(kbuf);
+ if (ret) {
+ vfree(headers);
+ goto out;
+ }
+
+ image->elf_load_addr = kbuf->mem;
+ image->elf_headers_sz = headers_sz;
+ image->elf_headers = headers;
+out:
+ kfree(cmem);
+ return ret;
+}
+
+/**
+ * load_crashdump_segments_ppc64 - Initialize the additional segments needed
+ * to load the kdump kernel.
+ * @image: Kexec image.
+ * @kbuf: Buffer contents and memory parameters.
+ *
+ * Returns 0 on success, negative errno on error.
+ */
+int load_crashdump_segments_ppc64(struct kimage *image,
+ struct kexec_buf *kbuf)
+{
+ int ret;
+
+ /* Load backup segment - first 64K bytes of the crashing kernel */
+ ret = load_backup_segment(image, kbuf);
+ if (ret) {
+ pr_err("Failed to load backup segment\n");
+ return ret;
+ }
+ pr_debug("Loaded the backup region at 0x%lx\n", kbuf->mem);
+
+ /* Load elfcorehdr segment - to export crashing kernel's vmcore */
+ ret = load_elfcorehdr_segment(image, kbuf);
+ if (ret) {
+ pr_err("Failed to load elfcorehdr segment\n");
+ return ret;
+ }
+ pr_debug("Loaded elf core header at 0x%lx, bufsz=0x%lx memsz=0x%lx\n",
+ image->elf_load_addr, kbuf->bufsz, kbuf->memsz);
+
+ return 0;
+}
+
+/**
+ * setup_purgatory_ppc64 - Initialize PPC64-specific purgatory global
+ * variables and call setup_purgatory() to initialize
+ * the common global variables.
+ * @image: kexec image.
+ * @slave_code: Slave code for the purgatory.
+ * @fdt: Flattened device tree for the next kernel.
+ * @kernel_load_addr: Address where the kernel is loaded.
+ * @fdt_load_addr: Address where the flattened device tree is loaded.
+ *
+ * Returns 0 on success, negative errno on error.
+ */
+int setup_purgatory_ppc64(struct kimage *image, const void *slave_code,
+ const void *fdt, unsigned long kernel_load_addr,
+ unsigned long fdt_load_addr)
+{
+ struct device_node *dn = NULL;
+ int ret;
+
+ ret = setup_purgatory(image, slave_code, fdt, kernel_load_addr,
+ fdt_load_addr);
+ if (ret)
+ goto out;
+
+ if (image->type == KEXEC_TYPE_CRASH) {
+ u32 my_run_at_load = 1;
+
+ /*
+ * Tell relocatable kernel to run at load address
+ * via the word meant for that at 0x5c.
+ */
+ ret = kexec_purgatory_get_set_symbol(image, "run_at_load",
+ &my_run_at_load,
+ sizeof(my_run_at_load),
+ false);
+ if (ret)
+ goto out;
+ }
+
+ /* Tell purgatory where to look for backup region */
+ ret = kexec_purgatory_get_set_symbol(image, "backup_start",
+ &image->arch.backup_start,
+ sizeof(image->arch.backup_start),
+ false);
+ if (ret)
+ goto out;
+
+ /* Setup OPAL base & entry values */
+ dn = of_find_node_by_path("/ibm,opal");
+ if (dn) {
+ u64 val;
+
+ of_property_read_u64(dn, "opal-base-address", &val);
+ ret = kexec_purgatory_get_set_symbol(image, "opal_base", &val,
+ sizeof(val), false);
+ if (ret)
+ goto out;
+
+ of_property_read_u64(dn, "opal-entry-address", &val);
+ ret = kexec_purgatory_get_set_symbol(image, "opal_entry", &val,
+ sizeof(val), false);
+ }
+out:
+ if (ret)
+ pr_err("Failed to setup purgatory symbols");
+ of_node_put(dn);
+ return ret;
+}
+
+/**
+ * kexec_extra_fdt_size_ppc64 - Return the estimated additional size needed to
+ * set up the FDT for the kexec/kdump kernel.
+ * @image: kexec image being loaded.
+ *
+ * Returns the estimated extra size needed for kexec/kdump kernel FDT.
+ */
+unsigned int kexec_extra_fdt_size_ppc64(struct kimage *image)
+{
+ u64 usm_entries;
+
+ if (image->type != KEXEC_TYPE_CRASH)
+ return 0;
+
+ /*
+ * For the kdump kernel, account for the linux,usable-memory and
+ * linux,drconf-usable-memory properties. Get an approximate count of
+ * the usable memory entries and use it for the FDT size estimation.
+ */
+ usm_entries = ((memblock_end_of_DRAM() / drmem_lmb_size()) +
+ (2 * (resource_size(&crashk_res) / drmem_lmb_size())));
+ return (unsigned int)(usm_entries * sizeof(u64));
+}
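
To get a feel for the estimate (numbers purely illustrative): with 1 TiB of
RAM, a 256 MiB LMB size and a 4 GiB crashkernel reservation, usm_entries is
1 TiB/256 MiB + 2 * (4 GiB/256 MiB) = 4096 + 32 = 4128 entries, i.e. roughly
32 KiB of extra FDT space at 8 bytes per entry.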
+
+/**
+ * add_node_props - Reads properties from a device node structure and adds
+ * them to the fdt.
+ * @fdt: Flattened device tree of the kernel
+ * @node_offset: offset of the node to add a property at
+ * @dn: device node pointer
+ *
+ * Returns 0 on success, negative errno on error.
+ */
+static int add_node_props(void *fdt, int node_offset, const struct device_node *dn)
+{
+ int ret = 0;
+ struct property *pp;
+
+ if (!dn)
+ return -EINVAL;
+
+ for_each_property_of_node(dn, pp) {
+ ret = fdt_setprop(fdt, node_offset, pp->name, pp->value, pp->length);
+ if (ret < 0) {
+ pr_err("Unable to add %s property: %s\n", pp->name, fdt_strerror(ret));
+ return ret;
+ }
+ }
+ return ret;
+}
+
+/**
+ * update_cpus_node - Update cpus node of flattened device tree using of_root
+ * device node.
+ * @fdt: Flattened device tree of the kernel.
+ *
+ * Returns 0 on success, negative errno on error.
+ */
+static int update_cpus_node(void *fdt)
+{
+ struct device_node *cpus_node, *dn;
+ int cpus_offset, cpus_subnode_offset, ret = 0;
+
+ cpus_offset = fdt_path_offset(fdt, "/cpus");
+ if (cpus_offset < 0 && cpus_offset != -FDT_ERR_NOTFOUND) {
+ pr_err("Malformed device tree: error reading /cpus node: %s\n",
+ fdt_strerror(cpus_offset));
+ return cpus_offset;
+ }
+
+ if (cpus_offset > 0) {
+ ret = fdt_del_node(fdt, cpus_offset);
+ if (ret < 0) {
+ pr_err("Error deleting /cpus node: %s\n", fdt_strerror(ret));
+ return -EINVAL;
+ }
+ }
+
+ /* Add cpus node to fdt */
+ cpus_offset = fdt_add_subnode(fdt, fdt_path_offset(fdt, "/"), "cpus");
+ if (cpus_offset < 0) {
+ pr_err("Error creating /cpus node: %s\n", fdt_strerror(cpus_offset));
+ return -EINVAL;
+ }
+
+ /* Add cpus node properties */
+ cpus_node = of_find_node_by_path("/cpus");
+ ret = add_node_props(fdt, cpus_offset, cpus_node);
+ of_node_put(cpus_node);
+ if (ret < 0)
+ return ret;
+
+ /* Loop through all subnodes of cpus and add them to fdt */
+ for_each_node_by_type(dn, "cpu") {
+ cpus_subnode_offset = fdt_add_subnode(fdt, cpus_offset, dn->full_name);
+ if (cpus_subnode_offset < 0) {
+ pr_err("Unable to add %s subnode: %s\n", dn->full_name,
+ fdt_strerror(cpus_subnode_offset));
+ ret = cpus_subnode_offset;
+ goto out;
+ }
+
+ ret = add_node_props(fdt, cpus_subnode_offset, dn);
+ if (ret < 0)
+ goto out;
+ }
+out:
+ of_node_put(dn);
+ return ret;
+}
+
+static int copy_property(void *fdt, int node_offset, const struct device_node *dn,
+ const char *propname)
+{
+ const void *prop, *fdtprop;
+ int len = 0, fdtlen = 0;
+
+ prop = of_get_property(dn, propname, &len);
+ fdtprop = fdt_getprop(fdt, node_offset, propname, &fdtlen);
+
+ if (fdtprop && !prop)
+ return fdt_delprop(fdt, node_offset, propname);
+ else if (prop)
+ return fdt_setprop(fdt, node_offset, propname, prop, len);
+ else
+ return -FDT_ERR_NOTFOUND;
+}
+
+static int update_pci_dma_nodes(void *fdt, const char *dmapropname)
+{
+ struct device_node *dn;
+ int pci_offset, root_offset, ret = 0;
+
+ if (!firmware_has_feature(FW_FEATURE_LPAR))
+ return 0;
+
+ root_offset = fdt_path_offset(fdt, "/");
+ for_each_node_with_property(dn, dmapropname) {
+ pci_offset = fdt_subnode_offset(fdt, root_offset, of_node_full_name(dn));
+ if (pci_offset < 0)
+ continue;
+
+ ret = copy_property(fdt, pci_offset, dn, "ibm,dma-window");
+ if (ret < 0)
+ break;
+ ret = copy_property(fdt, pci_offset, dn, dmapropname);
+ if (ret < 0)
+ break;
+ }
+
+ return ret;
+}
+
+/**
+ * setup_new_fdt_ppc64 - Update the flattened device tree of the kernel
+ * being loaded.
+ * @image: kexec image being loaded.
+ * @fdt: Flattened device tree for the next kernel.
+ * @initrd_load_addr: Address where the next initrd will be loaded.
+ * @initrd_len: Size of the next initrd, or 0 if there will be none.
+ * @cmdline: Command line for the next kernel, or NULL if there will
+ * be none.
+ *
+ * Returns 0 on success, negative errno on error.
+ */
+int setup_new_fdt_ppc64(const struct kimage *image, void *fdt,
+ unsigned long initrd_load_addr,
+ unsigned long initrd_len, const char *cmdline)
+{
+ struct crash_mem *umem = NULL, *rmem = NULL;
+ int i, nr_ranges, ret;
+
+ /*
+ * Restrict memory usage for kdump kernel by setting up
+ * usable memory ranges and memory reserve map.
+ */
+ if (image->type == KEXEC_TYPE_CRASH) {
+ ret = get_usable_memory_ranges(&umem);
+ if (ret)
+ goto out;
+
+ ret = update_usable_mem_fdt(fdt, umem);
+ if (ret) {
+ pr_err("Error setting up usable-memory property for kdump kernel\n");
+ goto out;
+ }
+
+ /*
+ * Ensure we don't touch crashed kernel's memory except the
+ * first 64K of RAM, which will be backed up.
+ */
+ ret = fdt_add_mem_rsv(fdt, BACKUP_SRC_END + 1,
+ crashk_res.start - BACKUP_SRC_SIZE);
+ if (ret) {
+ pr_err("Error reserving crash memory: %s\n",
+ fdt_strerror(ret));
+ goto out;
+ }
+
+ /* Ensure backup region is not used by kdump/capture kernel */
+ ret = fdt_add_mem_rsv(fdt, image->arch.backup_start,
+ BACKUP_SRC_SIZE);
+ if (ret) {
+ pr_err("Error reserving memory for backup: %s\n",
+ fdt_strerror(ret));
+ goto out;
+ }
+ }
+
+ /* Update cpus node information to account for hotplug CPUs. */
+ ret = update_cpus_node(fdt);
+ if (ret < 0)
+ goto out;
+
+#define DIRECT64_PROPNAME "linux,direct64-ddr-window-info"
+#define DMA64_PROPNAME "linux,dma64-ddr-window-info"
+ ret = update_pci_dma_nodes(fdt, DIRECT64_PROPNAME);
+ if (ret < 0)
+ goto out;
+
+ ret = update_pci_dma_nodes(fdt, DMA64_PROPNAME);
+ if (ret < 0)
+ goto out;
+#undef DMA64_PROPNAME
+#undef DIRECT64_PROPNAME
+
+ /* Update memory reserve map */
+ ret = get_reserved_memory_ranges(&rmem);
+ if (ret)
+ goto out;
+
+ nr_ranges = rmem ? rmem->nr_ranges : 0;
+ for (i = 0; i < nr_ranges; i++) {
+ u64 base, size;
+
+ base = rmem->ranges[i].start;
+ size = rmem->ranges[i].end - base + 1;
+ ret = fdt_add_mem_rsv(fdt, base, size);
+ if (ret) {
+ pr_err("Error updating memory reserve map: %s\n",
+ fdt_strerror(ret));
+ goto out;
+ }
+ }
+
+out:
+ kfree(rmem);
+ kfree(umem);
+ return ret;
+}
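
The reserve-map update is a straight loop over (start, end) pairs. A sketch of
the same loop against raw libfdt, for illustration only (assuming inclusive end
bounds as in crash_mem):

#include <stdint.h>
#include <libfdt.h>

struct range {
	uint64_t start, end;	/* inclusive bounds, as in crash_mem_range */
};

/* Add each range to the fdt's memory reserve map as a (base, size) entry. */
static int reserve_ranges(void *fdt, const struct range *r, int nr)
{
	int i, ret;

	for (i = 0; i < nr; i++) {
		ret = fdt_add_mem_rsv(fdt, r[i].start, r[i].end - r[i].start + 1);
		if (ret)
			return ret;	/* negative libfdt error code */
	}
	return 0;
}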
+
+/**
+ * arch_kexec_locate_mem_hole - Skip special memory regions like rtas, opal,
+ * tce-table and reserved-ranges (the exclude
+ * memory ranges), as they can't be used for a
+ * kexec segment buffer. Sets kbuf->mem when a
+ * suitable memory hole is found.
+ * @kbuf: Buffer contents and memory parameters.
+ *
+ * Assumes minimum of PAGE_SIZE alignment for kbuf->memsz & kbuf->buf_align.
+ *
+ * Returns 0 on success, negative errno on error.
+ */
+int arch_kexec_locate_mem_hole(struct kexec_buf *kbuf)
+{
+ struct crash_mem **emem;
+ u64 buf_min, buf_max;
+ int ret;
+
+ /* Look up the exclude ranges list while locating the memory hole */
+ emem = &(kbuf->image->arch.exclude_ranges);
+ if (!(*emem) || ((*emem)->nr_ranges == 0)) {
+ pr_warn("No exclude range list. Using the default locate mem hole method\n");
+ return kexec_locate_mem_hole(kbuf);
+ }
+
+ buf_min = kbuf->buf_min;
+ buf_max = kbuf->buf_max;
+ /* Segments for kdump kernel should be within crashkernel region */
+ if (kbuf->image->type == KEXEC_TYPE_CRASH) {
+ buf_min = (buf_min < crashk_res.start ?
+ crashk_res.start : buf_min);
+ buf_max = (buf_max > crashk_res.end ?
+ crashk_res.end : buf_max);
+ }
+
+ if (buf_min > buf_max) {
+ pr_err("Invalid buffer min and/or max values\n");
+ return -EINVAL;
+ }
+
+ if (kbuf->top_down)
+ ret = locate_mem_hole_top_down_ppc64(kbuf, buf_min, buf_max,
+ *emem);
+ else
+ ret = locate_mem_hole_bottom_up_ppc64(kbuf, buf_min, buf_max,
+ *emem);
+
+ /* Add the allocated buffer to the exclude list for the next lookup */
+ if (!ret) {
+ add_mem_range(emem, kbuf->mem, kbuf->memsz);
+ sort_memory_ranges(*emem, true);
+ } else {
+ pr_err("Failed to locate memory buffer of size %lu\n",
+ kbuf->memsz);
+ }
+ return ret;
+}
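
For kdump, the search window is simply clamped to the crashkernel reservation
before the hole lookup. The clamp in isolation (a sketch, not kernel code):

#include <stdbool.h>
#include <stdint.h>

/* Clamp the search window [*min, *max] to [res_start, res_end]. */
static bool clamp_window(uint64_t *min, uint64_t *max,
			 uint64_t res_start, uint64_t res_end)
{
	if (*min < res_start)
		*min = res_start;
	if (*max > res_end)
		*max = res_end;
	return *min <= *max;	/* false if the window is now empty */
}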
+
+/**
+ * arch_kexec_kernel_image_probe - Does the additional handling needed to
+ * set up kexec segments.
+ * @image: kexec image being loaded.
+ * @buf: Buffer pointing to elf data.
+ * @buf_len: Length of the buffer.
+ *
+ * Returns 0 on success, negative errno on error.
+ */
+int arch_kexec_kernel_image_probe(struct kimage *image, void *buf,
+ unsigned long buf_len)
+{
+ int ret;
+
+ /* Get exclude memory ranges needed for setting up kexec segments */
+ ret = get_exclude_memory_ranges(&(image->arch.exclude_ranges));
+ if (ret) {
+ pr_err("Failed to setup exclude memory ranges for buffer lookup\n");
+ return ret;
+ }
+
+ return kexec_image_probe_default(image, buf, buf_len);
+}
+
+/**
+ * arch_kimage_file_post_load_cleanup - Frees up all the allocations done
+ * while loading the image.
+ * @image: kexec image being loaded.
+ *
+ * Returns 0 on success, negative errno on error.
+ */
+int arch_kimage_file_post_load_cleanup(struct kimage *image)
+{
+ kfree(image->arch.exclude_ranges);
+ image->arch.exclude_ranges = NULL;
+
+ vfree(image->arch.backup_buf);
+ image->arch.backup_buf = NULL;
+
+ vfree(image->elf_headers);
+ image->elf_headers = NULL;
+ image->elf_headers_sz = 0;
+
+ kvfree(image->arch.fdt);
+ image->arch.fdt = NULL;
+
+ return kexec_image_post_load_cleanup_default(image);
+}
diff --git a/arch/powerpc/kexec/ima.c b/arch/powerpc/kexec/ima.c
deleted file mode 100644
index 720e50e490b6..000000000000
--- a/arch/powerpc/kexec/ima.c
+++ /dev/null
@@ -1,219 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (C) 2016 IBM Corporation
- *
- * Authors:
- * Thiago Jung Bauermann <bauerman@linux.vnet.ibm.com>
- */
-
-#include <linux/slab.h>
-#include <linux/kexec.h>
-#include <linux/of.h>
-#include <linux/memblock.h>
-#include <linux/libfdt.h>
-
-static int get_addr_size_cells(int *addr_cells, int *size_cells)
-{
- struct device_node *root;
-
- root = of_find_node_by_path("/");
- if (!root)
- return -EINVAL;
-
- *addr_cells = of_n_addr_cells(root);
- *size_cells = of_n_size_cells(root);
-
- of_node_put(root);
-
- return 0;
-}
-
-static int do_get_kexec_buffer(const void *prop, int len, unsigned long *addr,
- size_t *size)
-{
- int ret, addr_cells, size_cells;
-
- ret = get_addr_size_cells(&addr_cells, &size_cells);
- if (ret)
- return ret;
-
- if (len < 4 * (addr_cells + size_cells))
- return -ENOENT;
-
- *addr = of_read_number(prop, addr_cells);
- *size = of_read_number(prop + 4 * addr_cells, size_cells);
-
- return 0;
-}
-
-/**
- * ima_get_kexec_buffer - get IMA buffer from the previous kernel
- * @addr: On successful return, set to point to the buffer contents.
- * @size: On successful return, set to the buffer size.
- *
- * Return: 0 on success, negative errno on error.
- */
-int ima_get_kexec_buffer(void **addr, size_t *size)
-{
- int ret, len;
- unsigned long tmp_addr;
- size_t tmp_size;
- const void *prop;
-
- prop = of_get_property(of_chosen, "linux,ima-kexec-buffer", &len);
- if (!prop)
- return -ENOENT;
-
- ret = do_get_kexec_buffer(prop, len, &tmp_addr, &tmp_size);
- if (ret)
- return ret;
-
- *addr = __va(tmp_addr);
- *size = tmp_size;
-
- return 0;
-}
-
-/**
- * ima_free_kexec_buffer - free memory used by the IMA buffer
- */
-int ima_free_kexec_buffer(void)
-{
- int ret;
- unsigned long addr;
- size_t size;
- struct property *prop;
-
- prop = of_find_property(of_chosen, "linux,ima-kexec-buffer", NULL);
- if (!prop)
- return -ENOENT;
-
- ret = do_get_kexec_buffer(prop->value, prop->length, &addr, &size);
- if (ret)
- return ret;
-
- ret = of_remove_property(of_chosen, prop);
- if (ret)
- return ret;
-
- return memblock_free(addr, size);
-
-}
-
-/**
- * remove_ima_buffer - remove the IMA buffer property and reservation from @fdt
- *
- * The IMA measurement buffer is of no use to a subsequent kernel, so we always
- * remove it from the device tree.
- */
-void remove_ima_buffer(void *fdt, int chosen_node)
-{
- int ret, len;
- unsigned long addr;
- size_t size;
- const void *prop;
-
- prop = fdt_getprop(fdt, chosen_node, "linux,ima-kexec-buffer", &len);
- if (!prop)
- return;
-
- ret = do_get_kexec_buffer(prop, len, &addr, &size);
- fdt_delprop(fdt, chosen_node, "linux,ima-kexec-buffer");
- if (ret)
- return;
-
- ret = delete_fdt_mem_rsv(fdt, addr, size);
- if (!ret)
- pr_debug("Removed old IMA buffer reservation.\n");
-}
-
-#ifdef CONFIG_IMA_KEXEC
-/**
- * arch_ima_add_kexec_buffer - do arch-specific steps to add the IMA buffer
- *
- * Architectures should use this function to pass on the IMA buffer
- * information to the next kernel.
- *
- * Return: 0 on success, negative errno on error.
- */
-int arch_ima_add_kexec_buffer(struct kimage *image, unsigned long load_addr,
- size_t size)
-{
- image->arch.ima_buffer_addr = load_addr;
- image->arch.ima_buffer_size = size;
-
- return 0;
-}
-
-static int write_number(void *p, u64 value, int cells)
-{
- if (cells == 1) {
- u32 tmp;
-
- if (value > U32_MAX)
- return -EINVAL;
-
- tmp = cpu_to_be32(value);
- memcpy(p, &tmp, sizeof(tmp));
- } else if (cells == 2) {
- u64 tmp;
-
- tmp = cpu_to_be64(value);
- memcpy(p, &tmp, sizeof(tmp));
- } else
- return -EINVAL;
-
- return 0;
-}
-
-/**
- * setup_ima_buffer - add IMA buffer information to the fdt
- * @image: kexec image being loaded.
- * @fdt: Flattened device tree for the next kernel.
- * @chosen_node: Offset to the chosen node.
- *
- * Return: 0 on success, or negative errno on error.
- */
-int setup_ima_buffer(const struct kimage *image, void *fdt, int chosen_node)
-{
- int ret, addr_cells, size_cells, entry_size;
- u8 value[16];
-
- remove_ima_buffer(fdt, chosen_node);
- if (!image->arch.ima_buffer_size)
- return 0;
-
- ret = get_addr_size_cells(&addr_cells, &size_cells);
- if (ret)
- return ret;
-
- entry_size = 4 * (addr_cells + size_cells);
-
- if (entry_size > sizeof(value))
- return -EINVAL;
-
- ret = write_number(value, image->arch.ima_buffer_addr, addr_cells);
- if (ret)
- return ret;
-
- ret = write_number(value + 4 * addr_cells, image->arch.ima_buffer_size,
- size_cells);
- if (ret)
- return ret;
-
- ret = fdt_setprop(fdt, chosen_node, "linux,ima-kexec-buffer", value,
- entry_size);
- if (ret < 0)
- return -EINVAL;
-
- ret = fdt_add_mem_rsv(fdt, image->arch.ima_buffer_addr,
- image->arch.ima_buffer_size);
- if (ret)
- return -EINVAL;
-
- pr_debug("IMA buffer at 0x%llx, size = 0x%zx\n",
- image->arch.ima_buffer_addr, image->arch.ima_buffer_size);
-
- return 0;
-}
-#endif /* CONFIG_IMA_KEXEC */
diff --git a/arch/powerpc/kexec/ranges.c b/arch/powerpc/kexec/ranges.c
new file mode 100644
index 000000000000..563e9989a5bf
--- /dev/null
+++ b/arch/powerpc/kexec/ranges.c
@@ -0,0 +1,412 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * powerpc code to implement the kexec_file_load syscall
+ *
+ * Copyright (C) 2004 Adam Litke (agl@us.ibm.com)
+ * Copyright (C) 2004 IBM Corp.
+ * Copyright (C) 2004,2005 Milton D Miller II, IBM Corporation
+ * Copyright (C) 2005 R Sharada (sharada@in.ibm.com)
+ * Copyright (C) 2006 Mohan Kumar M (mohan@in.ibm.com)
+ * Copyright (C) 2020 IBM Corporation
+ *
+ * Based on kexec-tools' kexec-ppc64.c, fs2dt.c.
+ * Heavily modified for the kernel by
+ * Hari Bathini, IBM Corporation.
+ */
+
+#define pr_fmt(fmt) "kexec ranges: " fmt
+
+#include <linux/sort.h>
+#include <linux/kexec.h>
+#include <linux/of_device.h>
+#include <linux/slab.h>
+#include <asm/sections.h>
+#include <asm/kexec_ranges.h>
+
+/**
+ * get_max_nr_ranges - Get the max no. of ranges a crash_mem structure
+ * could hold, given the size allocated for it.
+ * @size: Allocation size of crash_mem structure.
+ *
+ * Returns the maximum no. of ranges.
+ */
+static inline unsigned int get_max_nr_ranges(size_t size)
+{
+ return ((size - sizeof(struct crash_mem)) /
+ sizeof(struct crash_mem_range));
+}
+
+/**
+ * get_mem_rngs_size - Get the allocated size of mem_rngs based on
+ * max_nr_ranges and chunk size.
+ * @mem_rngs: Memory ranges.
+ *
+ * Returns the maximum size of @mem_rngs.
+ */
+static inline size_t get_mem_rngs_size(struct crash_mem *mem_rngs)
+{
+ size_t size;
+
+ if (!mem_rngs)
+ return 0;
+
+ size = (sizeof(struct crash_mem) +
+ (mem_rngs->max_nr_ranges * sizeof(struct crash_mem_range)));
+
+ /*
+ * Memory is allocated in multiples of MEM_RANGE_CHUNK_SZ,
+ * so align up to get the actual allocated length.
+ */
+ return ALIGN(size, MEM_RANGE_CHUNK_SZ);
+}
+
+/**
+ * __add_mem_range - add a memory range to memory ranges list.
+ * @mem_ranges: Range list to add the memory range to.
+ * @base: Base address of the range to add.
+ * @size: Size of the memory range to add.
+ *
+ * (Re)allocates memory, if needed.
+ *
+ * Returns 0 on success, negative errno on error.
+ */
+static int __add_mem_range(struct crash_mem **mem_ranges, u64 base, u64 size)
+{
+ struct crash_mem *mem_rngs = *mem_ranges;
+
+ if (!mem_rngs || (mem_rngs->nr_ranges == mem_rngs->max_nr_ranges)) {
+ mem_rngs = realloc_mem_ranges(mem_ranges);
+ if (!mem_rngs)
+ return -ENOMEM;
+ }
+
+ mem_rngs->ranges[mem_rngs->nr_ranges].start = base;
+ mem_rngs->ranges[mem_rngs->nr_ranges].end = base + size - 1;
+ pr_debug("Added memory range [%#016llx - %#016llx] at index %d\n",
+ base, base + size - 1, mem_rngs->nr_ranges);
+ mem_rngs->nr_ranges++;
+ return 0;
+}
+
+/**
+ * __merge_memory_ranges - Merges the given memory ranges list.
+ * @mem_rngs: Range list to merge.
+ *
+ * Assumes a sorted range list.
+ *
+ * Returns nothing.
+ */
+static void __merge_memory_ranges(struct crash_mem *mem_rngs)
+{
+ struct crash_mem_range *ranges;
+ int i, idx;
+
+ if (!mem_rngs)
+ return;
+
+ idx = 0;
+ ranges = &(mem_rngs->ranges[0]);
+ for (i = 1; i < mem_rngs->nr_ranges; i++) {
+ if (ranges[i].start <= (ranges[i-1].end + 1))
+ ranges[idx].end = ranges[i].end;
+ else {
+ idx++;
+ if (i == idx)
+ continue;
+
+ ranges[idx] = ranges[i];
+ }
+ }
+ mem_rngs->nr_ranges = idx + 1;
+}
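
A compact standalone version of the same merge over a sorted array, for
illustration only (it also handles ranges nested inside the current run):

#include <stdint.h>

struct rng {
	uint64_t start, end;	/* inclusive bounds */
};

/* Merge overlapping/adjacent entries of a sorted array in place; return new count. */
static int merge_sorted(struct rng *r, int n)
{
	int i, idx = 0;

	for (i = 1; i < n; i++) {
		if (r[i].start <= r[idx].end + 1) {
			if (r[i].end > r[idx].end)
				r[idx].end = r[i].end;	/* extend the current run */
		} else {
			r[++idx] = r[i];		/* start a new run */
		}
	}
	return n ? idx + 1 : 0;
}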
+
+/* cmp_func_t callback to sort ranges with sort() */
+static int rngcmp(const void *_x, const void *_y)
+{
+ const struct crash_mem_range *x = _x, *y = _y;
+
+ if (x->start > y->start)
+ return 1;
+ if (x->start < y->start)
+ return -1;
+ return 0;
+}
+
+/**
+ * sort_memory_ranges - Sorts the given memory ranges list.
+ * @mem_rngs: Range list to sort.
+ * @merge: If true, merge the list after sorting.
+ *
+ * Returns nothing.
+ */
+void sort_memory_ranges(struct crash_mem *mem_rngs, bool merge)
+{
+ int i;
+
+ if (!mem_rngs)
+ return;
+
+ /* Sort the ranges in-place */
+ sort(&(mem_rngs->ranges[0]), mem_rngs->nr_ranges,
+ sizeof(mem_rngs->ranges[0]), rngcmp, NULL);
+
+ if (merge)
+ __merge_memory_ranges(mem_rngs);
+
+ /* For debugging purposes */
+ pr_debug("Memory ranges:\n");
+ for (i = 0; i < mem_rngs->nr_ranges; i++) {
+ pr_debug("\t[%03d][%#016llx - %#016llx]\n", i,
+ mem_rngs->ranges[i].start,
+ mem_rngs->ranges[i].end);
+ }
+}
+
+/**
+ * realloc_mem_ranges - reallocate mem_ranges with size incremented
+ * by MEM_RANGE_CHUNK_SZ. Frees up the old memory,
+ * if memory allocation fails.
+ * @mem_ranges: Memory ranges to reallocate.
+ *
+ * Returns pointer to reallocated memory on success, NULL otherwise.
+ */
+struct crash_mem *realloc_mem_ranges(struct crash_mem **mem_ranges)
+{
+ struct crash_mem *mem_rngs = *mem_ranges;
+ unsigned int nr_ranges;
+ size_t size;
+
+ size = get_mem_rngs_size(mem_rngs);
+ nr_ranges = mem_rngs ? mem_rngs->nr_ranges : 0;
+
+ size += MEM_RANGE_CHUNK_SZ;
+ mem_rngs = krealloc(*mem_ranges, size, GFP_KERNEL);
+ if (!mem_rngs) {
+ kfree(*mem_ranges);
+ *mem_ranges = NULL;
+ return NULL;
+ }
+
+ mem_rngs->nr_ranges = nr_ranges;
+ mem_rngs->max_nr_ranges = get_max_nr_ranges(size);
+ *mem_ranges = mem_rngs;
+
+ return mem_rngs;
+}
+
+/**
+ * add_mem_range - Updates an existing memory range if there is an overlap;
+ * otherwise, adds a new memory range.
+ * @mem_ranges: Range list to add the memory range to.
+ * @base: Base address of the range to add.
+ * @size: Size of the memory range to add.
+ *
+ * (Re)allocates memory, if needed.
+ *
+ * Returns 0 on success, negative errno on error.
+ */
+int add_mem_range(struct crash_mem **mem_ranges, u64 base, u64 size)
+{
+ struct crash_mem *mem_rngs = *mem_ranges;
+ u64 mstart, mend, end;
+ unsigned int i;
+
+ if (!size)
+ return 0;
+
+ end = base + size - 1;
+
+ if (!mem_rngs || !(mem_rngs->nr_ranges))
+ return __add_mem_range(mem_ranges, base, size);
+
+ for (i = 0; i < mem_rngs->nr_ranges; i++) {
+ mstart = mem_rngs->ranges[i].start;
+ mend = mem_rngs->ranges[i].end;
+ if (base < mend && end > mstart) {
+ if (base < mstart)
+ mem_rngs->ranges[i].start = base;
+ if (end > mend)
+ mem_rngs->ranges[i].end = end;
+ return 0;
+ }
+ }
+
+ return __add_mem_range(mem_ranges, base, size);
+}
+
+/**
+ * add_tce_mem_ranges - Adds tce-table range to the given memory ranges list.
+ * @mem_ranges: Range list to add the memory range(s) to.
+ *
+ * Returns 0 on success, negative errno on error.
+ */
+int add_tce_mem_ranges(struct crash_mem **mem_ranges)
+{
+ struct device_node *dn = NULL;
+ int ret = 0;
+
+ for_each_node_by_type(dn, "pci") {
+ u64 base;
+ u32 size;
+
+ ret = of_property_read_u64(dn, "linux,tce-base", &base);
+ ret |= of_property_read_u32(dn, "linux,tce-size", &size);
+ if (ret) {
+ /*
+ * It is ok to have pci nodes without a tce table. So, ignore
+ * the error when the property does not exist.
+ */
+ if (ret == -EINVAL) {
+ ret = 0;
+ continue;
+ }
+ break;
+ }
+
+ ret = add_mem_range(mem_ranges, base, size);
+ if (ret)
+ break;
+ }
+
+ of_node_put(dn);
+ return ret;
+}
+
+/**
+ * add_initrd_mem_range - Adds initrd range to the given memory ranges list,
+ * if the initrd was retained.
+ * @mem_ranges: Range list to add the memory range to.
+ *
+ * Returns 0 on success, negative errno on error.
+ */
+int add_initrd_mem_range(struct crash_mem **mem_ranges)
+{
+ u64 base, end;
+ int ret;
+
+ /* This range is meaningful only if the initrd was retained */
+ if (!strstr(saved_command_line, "retain_initrd"))
+ return 0;
+
+ ret = of_property_read_u64(of_chosen, "linux,initrd-start", &base);
+ ret |= of_property_read_u64(of_chosen, "linux,initrd-end", &end);
+ if (!ret)
+ ret = add_mem_range(mem_ranges, base, end - base + 1);
+
+ return ret;
+}
+
+#ifdef CONFIG_PPC_64S_HASH_MMU
+/**
+ * add_htab_mem_range - Adds htab range to the given memory ranges list,
+ * if it exists.
+ * @mem_ranges: Range list to add the memory range to.
+ *
+ * Returns 0 on success, negative errno on error.
+ */
+int add_htab_mem_range(struct crash_mem **mem_ranges)
+{
+ if (!htab_address)
+ return 0;
+
+ return add_mem_range(mem_ranges, __pa(htab_address), htab_size_bytes);
+}
+#endif
+
+/**
+ * add_kernel_mem_range - Adds kernel text region to the given
+ * memory ranges list.
+ * @mem_ranges: Range list to add the memory range to.
+ *
+ * Returns 0 on success, negative errno on error.
+ */
+int add_kernel_mem_range(struct crash_mem **mem_ranges)
+{
+ return add_mem_range(mem_ranges, 0, __pa(_end));
+}
+
+/**
+ * add_rtas_mem_range - Adds RTAS region to the given memory ranges list.
+ * @mem_ranges: Range list to add the memory range to.
+ *
+ * Returns 0 on success, negative errno on error.
+ */
+int add_rtas_mem_range(struct crash_mem **mem_ranges)
+{
+ struct device_node *dn;
+ u32 base, size;
+ int ret = 0;
+
+ dn = of_find_node_by_path("/rtas");
+ if (!dn)
+ return 0;
+
+ ret = of_property_read_u32(dn, "linux,rtas-base", &base);
+ ret |= of_property_read_u32(dn, "rtas-size", &size);
+ if (!ret)
+ ret = add_mem_range(mem_ranges, base, size);
+
+ of_node_put(dn);
+ return ret;
+}
+
+/**
+ * add_opal_mem_range - Adds OPAL region to the given memory ranges list.
+ * @mem_ranges: Range list to add the memory range to.
+ *
+ * Returns 0 on success, negative errno on error.
+ */
+int add_opal_mem_range(struct crash_mem **mem_ranges)
+{
+ struct device_node *dn;
+ u64 base, size;
+ int ret;
+
+ dn = of_find_node_by_path("/ibm,opal");
+ if (!dn)
+ return 0;
+
+ ret = of_property_read_u64(dn, "opal-base-address", &base);
+ ret |= of_property_read_u64(dn, "opal-runtime-size", &size);
+ if (!ret)
+ ret = add_mem_range(mem_ranges, base, size);
+
+ of_node_put(dn);
+ return ret;
+}
+
+/**
+ * add_reserved_mem_ranges - Adds "/reserved-ranges" regions exported by f/w
+ * to the given memory ranges list.
+ * @mem_ranges: Range list to add the memory ranges to.
+ *
+ * Returns 0 on success, negative errno on error.
+ */
+int add_reserved_mem_ranges(struct crash_mem **mem_ranges)
+{
+ int n_mem_addr_cells, n_mem_size_cells, i, len, cells, ret = 0;
+ const __be32 *prop;
+
+ prop = of_get_property(of_root, "reserved-ranges", &len);
+ if (!prop)
+ return 0;
+
+ n_mem_addr_cells = of_n_addr_cells(of_root);
+ n_mem_size_cells = of_n_size_cells(of_root);
+ cells = n_mem_addr_cells + n_mem_size_cells;
+
+ /* Each reserved range is an (address,size) pair */
+ for (i = 0; i < (len / (sizeof(u32) * cells)); i++) {
+ u64 base, size;
+
+ base = of_read_number(prop + (i * cells), n_mem_addr_cells);
+ size = of_read_number(prop + (i * cells) + n_mem_addr_cells,
+ n_mem_size_cells);
+
+ ret = add_mem_range(mem_ranges, base, size);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
diff --git a/arch/powerpc/kexec/relocate_32.S b/arch/powerpc/kexec/relocate_32.S
index 61946c19e07c..d9f0dd9b34ff 100644
--- a/arch/powerpc/kexec/relocate_32.S
+++ b/arch/powerpc/kexec/relocate_32.S
@@ -25,14 +25,14 @@ relocate_new_kernel:
/* r4 = reboot_code_buffer */
/* r5 = start_address */
-#ifdef CONFIG_FSL_BOOKE
+#ifdef CONFIG_PPC_85xx
mr r29, r3
mr r30, r4
mr r31, r5
#define ENTRY_MAPPING_KEXEC_SETUP
-#include <kernel/fsl_booke_entry_mapping.S>
+#include <kernel/85xx_entry_mapping.S>
#undef ENTRY_MAPPING_KEXEC_SETUP
mr r3, r29
@@ -93,7 +93,7 @@ wmmucr:
* Invalidate all the TLB entries except the current entry
* where we are running from
*/
- bl 0f /* Find our address */
+ bcl 20,31,$+4 /* Find our address */
0: mflr r5 /* Make it accessible */
tlbsx r23,0,r5 /* Find entry we are in */
li r4,0 /* Start at TLB entry 0 */
@@ -158,7 +158,7 @@ write_out:
/* Switch to other address space in MSR */
insrwi r9, r7, 1, 26 /* Set MSR[IS] = r7 */
- bl 1f
+ bcl 20,31,$+4
1: mflr r8
addi r8, r8, (2f-1b) /* Find the target offset */
@@ -202,7 +202,7 @@ next_tlb:
li r9,0
insrwi r9, r7, 1, 26 /* Set MSR[IS] = r7 */
- bl 1f
+ bcl 20,31,$+4
1: mflr r8
and r8, r8, r11 /* Get our offset within page */
addi r8, r8, (2f-1b)
@@ -240,7 +240,7 @@ setup_map_47x:
sync
/* Find the entry we are running from */
- bl 2f
+ bcl 20,31,$+4
2: mflr r23
tlbsx r23, 0, r23
tlbre r24, r23, 0 /* TLB Word 0 */
@@ -296,7 +296,7 @@ clear_utlb_entry:
/* Update the msr to the new TS */
insrwi r5, r7, 1, 26
- bl 1f
+ bcl 20,31,$+4
1: mflr r6
addi r6, r6, (2f-1b)
@@ -355,7 +355,7 @@ write_utlb:
/* Defaults to 256M */
lis r10, 0x1000
- bl 1f
+ bcl 20,31,$+4
1: mflr r4
addi r4, r4, (2f-1b) /* virtual address of 2f */
diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig
index 711fca9bc6f0..a9f57dad6d91 100644
--- a/arch/powerpc/kvm/Kconfig
+++ b/arch/powerpc/kvm/Kconfig
@@ -7,7 +7,7 @@ source "virt/kvm/Kconfig"
menuconfig VIRTUALIZATION
bool "Virtualization"
- ---help---
+ help
Say Y here to get to see options for using your Linux host to run
other operating systems inside virtual machines (guests).
This option alone does not add any kernel code.
@@ -26,6 +26,7 @@ config KVM
select KVM_VFIO
select IRQ_BYPASS_MANAGER
select HAVE_KVM_IRQ_BYPASS
+ select INTERVAL_TREE
config KVM_BOOK3S_HANDLER
bool
@@ -38,7 +39,6 @@ config KVM_BOOK3S_32_HANDLER
config KVM_BOOK3S_64_HANDLER
bool
select KVM_BOOK3S_HANDLER
- select PPC_DAWR_FORCE_ENABLE
config KVM_BOOK3S_PR_POSSIBLE
bool
@@ -51,10 +51,12 @@ config KVM_BOOK3S_HV_POSSIBLE
config KVM_BOOK3S_32
tristate "KVM support for PowerPC book3s_32 processors"
depends on PPC_BOOK3S_32 && !SMP && !PTE_64BIT
+ depends on !CONTEXT_TRACKING_USER
select KVM
select KVM_BOOK3S_32_HANDLER
select KVM_BOOK3S_PR_POSSIBLE
- ---help---
+ select PPC_FPU
+ help
Support running unmodified book3s_32 guest kernels
in virtual machines on book3s_32 host processors.
@@ -69,8 +71,9 @@ config KVM_BOOK3S_64
select KVM_BOOK3S_64_HANDLER
select KVM
select KVM_BOOK3S_PR_POSSIBLE if !KVM_BOOK3S_HV_POSSIBLE
+ select PPC_64S_HASH_MMU
select SPAPR_TCE_IOMMU if IOMMU_SUPPORT && (PPC_PSERIES || PPC_POWERNV)
- ---help---
+ help
Support running unmodified book3s_64 and book3s_32 guest kernels
in virtual machines on book3s_64 host processors.
@@ -85,7 +88,7 @@ config KVM_BOOK3S_64_HV
select KVM_BOOK3S_HV_POSSIBLE
select MMU_NOTIFIER
select CMA
- ---help---
+ help
Support running unmodified book3s_64 guest kernels in
virtual machines on POWER7 and newer processors that have
hypervisor mode available to the host.
@@ -103,23 +106,50 @@ config KVM_BOOK3S_64_HV
config KVM_BOOK3S_64_PR
tristate "KVM support without using hypervisor mode in host"
depends on KVM_BOOK3S_64
+ depends on !CONTEXT_TRACKING_USER
select KVM_BOOK3S_PR_POSSIBLE
- ---help---
+ help
Support running guest kernels in virtual machines on processors
without using hypervisor mode in the host, by running the
guest in user mode (problem state) and emulating all
privileged instructions and registers.
+ This is only available for hash MMU mode and only supports
+ guests that use hash MMU mode.
+
This is not as fast as using hypervisor mode, but works on
machines where hypervisor mode is not available or not usable,
and can emulate processors that are different from the host
processor, including emulating 32-bit processors on a 64-bit
host.
+ Selecting this option will cause the SCV facility to be
+ disabled when the kernel is booted on the pseries platform in
+ hash MMU mode (regardless of PR VMs running). When any PR VMs
+ are running, "AIL" mode is disabled, which may slow interrupts
+ and system calls on the host.
+
config KVM_BOOK3S_HV_EXIT_TIMING
- bool "Detailed timing for hypervisor real-mode code"
+ bool
+
+config KVM_BOOK3S_HV_P9_TIMING
+ bool "Detailed timing for the P9 entry point"
+ select KVM_BOOK3S_HV_EXIT_TIMING
depends on KVM_BOOK3S_HV_POSSIBLE && DEBUG_FS
- ---help---
+ help
+ Calculate time taken for each vcpu during vcpu entry and
+ exit, time spent inside the guest and time spent handling
+ hypercalls and page faults. The total, minimum and maximum
+ times in nanoseconds together with the number of executions
+ are reported in debugfs in kvm/vm#/vcpu#/timings.
+
+ If unsure, say N.
+
+config KVM_BOOK3S_HV_P8_TIMING
+ bool "Detailed timing for hypervisor real-mode code (for POWER8)"
+ select KVM_BOOK3S_HV_EXIT_TIMING
+ depends on KVM_BOOK3S_HV_POSSIBLE && DEBUG_FS && !KVM_BOOK3S_HV_P9_TIMING
+ help
Calculate time taken for each vcpu in the real-mode guest entry,
exit, and interrupt handling code, plus time spent in the guest
and in nap mode due to idle (cede) while other threads are still
@@ -130,13 +160,28 @@ config KVM_BOOK3S_HV_EXIT_TIMING
If unsure, say N.
+config KVM_BOOK3S_HV_NESTED_PMU_WORKAROUND
+ bool "Nested L0 host workaround for L1 KVM host PMU handling bug" if EXPERT
+ depends on KVM_BOOK3S_HV_POSSIBLE
+ default !EXPERT
+ help
+ Old nested HV capable Linux guests have a bug where they don't
+ reflect the PMU in-use status of their L2 guest to the L0 host
+ while the L2 PMU registers are live. This can result in loss
+ of L2 PMU register state, causing perf to not work correctly in
+ L2 guests.
+
+ Selecting this option for the L0 host implements a workaround for
+ those buggy L1s which saves the L2 state, at the cost of performance
+ in all nested-capable guest entry/exit.
+
config KVM_BOOKE_HV
bool
config KVM_EXIT_TIMING
bool "Detailed exit timing"
depends on KVM_E500V2 || KVM_E500MC
- ---help---
+ help
Calculate elapsed time for every exit/enter cycle. A per-vcpu
report is available in debugfs kvm/vm#_vcpu#_timing.
The overhead is relatively small, however it is not recommended for
@@ -146,11 +191,12 @@ config KVM_EXIT_TIMING
config KVM_E500V2
bool "KVM support for PowerPC E500v2 processors"
- depends on E500 && !PPC_E500MC
+ depends on PPC_E500 && !PPC_E500MC
+ depends on !CONTEXT_TRACKING_USER
select KVM
select KVM_MMIO
select MMU_NOTIFIER
- ---help---
+ help
Support running unmodified E500 guest kernels in virtual machines on
E500v2 host processors.
@@ -162,11 +208,12 @@ config KVM_E500V2
config KVM_E500MC
bool "KVM support for PowerPC E500MC/E5500/E6500 processors"
depends on PPC_E500MC
+ depends on !CONTEXT_TRACKING_USER
select KVM
select KVM_MMIO
select KVM_BOOKE_HV
select MMU_NOTIFIER
- ---help---
+ help
Support running unmodified E500MC/E5500/E6500 guest kernels in
virtual machines on E500MC/E5500/E6500 host processors.
@@ -177,7 +224,7 @@ config KVM_E500MC
config KVM_MPIC
bool "KVM in-kernel MPIC emulation"
- depends on KVM && E500
+ depends on KVM && PPC_E500
select HAVE_KVM_IRQCHIP
select HAVE_KVM_IRQFD
select HAVE_KVM_IRQ_ROUTING
@@ -194,7 +241,7 @@ config KVM_XICS
select HAVE_KVM_IRQCHIP
select HAVE_KVM_IRQFD
default y
- ---help---
+ help
Include support for the XICS (eXternal Interrupt Controller
Specification) interrupt controller architecture used on
IBM POWER (pSeries) servers.
@@ -204,6 +251,4 @@ config KVM_XIVE
default y
depends on KVM_XICS && PPC_XIVE_NATIVE && KVM_BOOK3S_HV_POSSIBLE
-source "drivers/vhost/Kconfig"
-
endif # VIRTUALIZATION
diff --git a/arch/powerpc/kvm/Makefile b/arch/powerpc/kvm/Makefile
index 2bfeaa13befb..5319d889b184 100644
--- a/arch/powerpc/kvm/Makefile
+++ b/arch/powerpc/kvm/Makefile
@@ -4,11 +4,8 @@
#
ccflags-y := -Ivirt/kvm -Iarch/powerpc/kvm
-KVM := ../../../virt/kvm
-common-objs-y = $(KVM)/kvm_main.o $(KVM)/eventfd.o
-common-objs-$(CONFIG_KVM_VFIO) += $(KVM)/vfio.o
-common-objs-$(CONFIG_KVM_MMIO) += $(KVM)/coalesced_mmio.o
+include $(srctree)/virt/kvm/Makefile.kvm
common-objs-y += powerpc.o emulate_loadstore.o
obj-$(CONFIG_KVM_EXIT_TIMING) += timing.o
@@ -40,9 +37,6 @@ kvm-e500mc-objs := \
e500_emulate.o
kvm-objs-$(CONFIG_KVM_E500MC) := $(kvm-e500mc-objs)
-kvm-book3s_64-builtin-objs-$(CONFIG_SPAPR_TCE_IOMMU) := \
- book3s_64_vio_hv.o
-
kvm-pr-y := \
fpu.o \
emulate.o \
@@ -57,6 +51,7 @@ kvm-pr-y := \
book3s_32_mmu.o
kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_HANDLER) += \
+ book3s_64_entry.o \
tm.o
ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
@@ -78,7 +73,7 @@ kvm-hv-$(CONFIG_PPC_TRANSACTIONAL_MEM) += \
book3s_hv_tm.o
kvm-book3s_64-builtin-xics-objs-$(CONFIG_KVM_XICS) := \
- book3s_hv_rm_xics.o book3s_hv_rm_xive.o
+ book3s_hv_rm_xics.o
kvm-book3s_64-builtin-tm-objs-$(CONFIG_PPC_TRANSACTIONAL_MEM) += \
book3s_hv_tm_builtin.o
@@ -86,10 +81,12 @@ kvm-book3s_64-builtin-tm-objs-$(CONFIG_PPC_TRANSACTIONAL_MEM) += \
ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_HANDLER) += \
book3s_hv_hmi.o \
+ book3s_hv_p9_entry.o \
book3s_hv_rmhandlers.o \
book3s_hv_rm_mmu.o \
book3s_hv_ras.o \
book3s_hv_builtin.o \
+ book3s_hv_p9_perf.o \
$(kvm-book3s_64-builtin-tm-objs-y) \
$(kvm-book3s_64-builtin-xics-objs-y)
endif
@@ -123,9 +120,8 @@ kvm-book3s_32-objs := \
kvm-objs-$(CONFIG_KVM_BOOK3S_32) := $(kvm-book3s_32-objs)
kvm-objs-$(CONFIG_KVM_MPIC) += mpic.o
-kvm-objs-$(CONFIG_HAVE_KVM_IRQ_ROUTING) += $(KVM)/irqchip.o
-kvm-objs := $(kvm-objs-m) $(kvm-objs-y)
+kvm-y += $(kvm-objs-m) $(kvm-objs-y)
obj-$(CONFIG_KVM_E500V2) += kvm.o
obj-$(CONFIG_KVM_E500MC) += kvm.o
@@ -136,3 +132,8 @@ obj-$(CONFIG_KVM_BOOK3S_64_PR) += kvm-pr.o
obj-$(CONFIG_KVM_BOOK3S_64_HV) += kvm-hv.o
obj-y += $(kvm-book3s_64-builtin-objs-y)
+
+# KVM does a lot in real-mode, and 64-bit Book3S KASAN doesn't support that
+ifdef CONFIG_PPC_BOOK3S_64
+KASAN_SANITIZE := n
+endif
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index d07a8e12fa15..6d525285dbe8 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -36,42 +36,63 @@
#include "book3s.h"
#include "trace.h"
-#define VM_STAT(x, ...) offsetof(struct kvm, stat.x), KVM_STAT_VM, ## __VA_ARGS__
-#define VCPU_STAT(x, ...) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU, ## __VA_ARGS__
-
/* #define EXIT_DEBUG */
-struct kvm_stats_debugfs_item debugfs_entries[] = {
- { "exits", VCPU_STAT(sum_exits) },
- { "mmio", VCPU_STAT(mmio_exits) },
- { "sig", VCPU_STAT(signal_exits) },
- { "sysc", VCPU_STAT(syscall_exits) },
- { "inst_emu", VCPU_STAT(emulated_inst_exits) },
- { "dec", VCPU_STAT(dec_exits) },
- { "ext_intr", VCPU_STAT(ext_intr_exits) },
- { "queue_intr", VCPU_STAT(queue_intr) },
- { "halt_poll_success_ns", VCPU_STAT(halt_poll_success_ns) },
- { "halt_poll_fail_ns", VCPU_STAT(halt_poll_fail_ns) },
- { "halt_wait_ns", VCPU_STAT(halt_wait_ns) },
- { "halt_successful_poll", VCPU_STAT(halt_successful_poll), },
- { "halt_attempted_poll", VCPU_STAT(halt_attempted_poll), },
- { "halt_successful_wait", VCPU_STAT(halt_successful_wait) },
- { "halt_poll_invalid", VCPU_STAT(halt_poll_invalid) },
- { "halt_wakeup", VCPU_STAT(halt_wakeup) },
- { "pf_storage", VCPU_STAT(pf_storage) },
- { "sp_storage", VCPU_STAT(sp_storage) },
- { "pf_instruc", VCPU_STAT(pf_instruc) },
- { "sp_instruc", VCPU_STAT(sp_instruc) },
- { "ld", VCPU_STAT(ld) },
- { "ld_slow", VCPU_STAT(ld_slow) },
- { "st", VCPU_STAT(st) },
- { "st_slow", VCPU_STAT(st_slow) },
- { "pthru_all", VCPU_STAT(pthru_all) },
- { "pthru_host", VCPU_STAT(pthru_host) },
- { "pthru_bad_aff", VCPU_STAT(pthru_bad_aff) },
- { "largepages_2M", VM_STAT(num_2M_pages, .mode = 0444) },
- { "largepages_1G", VM_STAT(num_1G_pages, .mode = 0444) },
- { NULL }
+const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
+ KVM_GENERIC_VM_STATS(),
+ STATS_DESC_ICOUNTER(VM, num_2M_pages),
+ STATS_DESC_ICOUNTER(VM, num_1G_pages)
+};
+
+const struct kvm_stats_header kvm_vm_stats_header = {
+ .name_size = KVM_STATS_NAME_SIZE,
+ .num_desc = ARRAY_SIZE(kvm_vm_stats_desc),
+ .id_offset = sizeof(struct kvm_stats_header),
+ .desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
+ .data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
+ sizeof(kvm_vm_stats_desc),
+};
+
+const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
+ KVM_GENERIC_VCPU_STATS(),
+ STATS_DESC_COUNTER(VCPU, sum_exits),
+ STATS_DESC_COUNTER(VCPU, mmio_exits),
+ STATS_DESC_COUNTER(VCPU, signal_exits),
+ STATS_DESC_COUNTER(VCPU, light_exits),
+ STATS_DESC_COUNTER(VCPU, itlb_real_miss_exits),
+ STATS_DESC_COUNTER(VCPU, itlb_virt_miss_exits),
+ STATS_DESC_COUNTER(VCPU, dtlb_real_miss_exits),
+ STATS_DESC_COUNTER(VCPU, dtlb_virt_miss_exits),
+ STATS_DESC_COUNTER(VCPU, syscall_exits),
+ STATS_DESC_COUNTER(VCPU, isi_exits),
+ STATS_DESC_COUNTER(VCPU, dsi_exits),
+ STATS_DESC_COUNTER(VCPU, emulated_inst_exits),
+ STATS_DESC_COUNTER(VCPU, dec_exits),
+ STATS_DESC_COUNTER(VCPU, ext_intr_exits),
+ STATS_DESC_COUNTER(VCPU, halt_successful_wait),
+ STATS_DESC_COUNTER(VCPU, dbell_exits),
+ STATS_DESC_COUNTER(VCPU, gdbell_exits),
+ STATS_DESC_COUNTER(VCPU, ld),
+ STATS_DESC_COUNTER(VCPU, st),
+ STATS_DESC_COUNTER(VCPU, pf_storage),
+ STATS_DESC_COUNTER(VCPU, pf_instruc),
+ STATS_DESC_COUNTER(VCPU, sp_storage),
+ STATS_DESC_COUNTER(VCPU, sp_instruc),
+ STATS_DESC_COUNTER(VCPU, queue_intr),
+ STATS_DESC_COUNTER(VCPU, ld_slow),
+ STATS_DESC_COUNTER(VCPU, st_slow),
+ STATS_DESC_COUNTER(VCPU, pthru_all),
+ STATS_DESC_COUNTER(VCPU, pthru_host),
+ STATS_DESC_COUNTER(VCPU, pthru_bad_aff)
+};
+
+const struct kvm_stats_header kvm_vcpu_stats_header = {
+ .name_size = KVM_STATS_NAME_SIZE,
+ .num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
+ .id_offset = sizeof(struct kvm_stats_header),
+ .desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
+ .data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
+ sizeof(kvm_vcpu_stats_desc),
};
static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
@@ -174,6 +195,12 @@ void kvmppc_core_queue_machine_check(struct kvm_vcpu *vcpu, ulong flags)
}
EXPORT_SYMBOL_GPL(kvmppc_core_queue_machine_check);
+void kvmppc_core_queue_syscall(struct kvm_vcpu *vcpu)
+{
+ kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_SYSCALL, 0);
+}
+EXPORT_SYMBOL(kvmppc_core_queue_syscall);
+
void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags)
{
/* might as well deliver this straight away */
@@ -561,12 +588,12 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id,
@@ -758,9 +785,9 @@ void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
}
EXPORT_SYMBOL_GPL(kvmppc_set_msr);
-int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
+int kvmppc_vcpu_run(struct kvm_vcpu *vcpu)
{
- return vcpu->kvm->arch.kvm_ops->vcpu_run(kvm_run, vcpu);
+ return vcpu->kvm->arch.kvm_ops->vcpu_run(vcpu);
}
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
@@ -799,21 +826,19 @@ int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
return vcpu->kvm->arch.kvm_ops->check_requests(vcpu);
}
-int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
+void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
- return kvm->arch.kvm_ops->get_dirty_log(kvm, log);
+
}
-void kvmppc_core_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
- struct kvm_memory_slot *dont)
+int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
- kvm->arch.kvm_ops->free_memslot(free, dont);
+ return kvm->arch.kvm_ops->get_dirty_log(kvm, log);
}
-int kvmppc_core_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
- unsigned long npages)
+void kvmppc_core_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
{
- return kvm->arch.kvm_ops->create_memslot(slot, npages);
+ kvm->arch.kvm_ops->free_memslot(slot);
}
void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
@@ -822,45 +847,39 @@ void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
}
int kvmppc_core_prepare_memory_region(struct kvm *kvm,
- struct kvm_memory_slot *memslot,
- const struct kvm_userspace_memory_region *mem)
+ const struct kvm_memory_slot *old,
+ struct kvm_memory_slot *new,
+ enum kvm_mr_change change)
{
- return kvm->arch.kvm_ops->prepare_memory_region(kvm, memslot, mem);
+ return kvm->arch.kvm_ops->prepare_memory_region(kvm, old, new, change);
}
void kvmppc_core_commit_memory_region(struct kvm *kvm,
- const struct kvm_userspace_memory_region *mem,
- const struct kvm_memory_slot *old,
+ struct kvm_memory_slot *old,
const struct kvm_memory_slot *new,
enum kvm_mr_change change)
{
- kvm->arch.kvm_ops->commit_memory_region(kvm, mem, old, new, change);
-}
-
-int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
-{
- return kvm->arch.kvm_ops->unmap_hva_range(kvm, start, end);
+ kvm->arch.kvm_ops->commit_memory_region(kvm, old, new, change);
}
-int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
+bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
{
- return kvm->arch.kvm_ops->age_hva(kvm, start, end);
+ return kvm->arch.kvm_ops->unmap_gfn_range(kvm, range);
}
-int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
+bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
- return kvm->arch.kvm_ops->test_age_hva(kvm, hva);
+ return kvm->arch.kvm_ops->age_gfn(kvm, range);
}
-int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
+bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
- kvm->arch.kvm_ops->set_spte_hva(kvm, hva, pte);
- return 0;
+ return kvm->arch.kvm_ops->test_age_gfn(kvm, range);
}
-void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
+bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
- vcpu->kvm->arch.kvm_ops->mmu_destroy(vcpu);
+ return kvm->arch.kvm_ops->set_spte_gfn(kvm, range);
}
int kvmppc_core_init_vm(struct kvm *kvm)
@@ -886,13 +905,15 @@ void kvmppc_core_destroy_vm(struct kvm *kvm)
#ifdef CONFIG_KVM_XICS
/*
- * Free the XIVE devices which are not directly freed by the
+ * Free the XIVE and XICS devices which are not directly freed by the
* device 'release' method
*/
kfree(kvm->arch.xive_devices.native);
kvm->arch.xive_devices.native = NULL;
kfree(kvm->arch.xive_devices.xics_on_xive);
kvm->arch.xive_devices.xics_on_xive = NULL;
+ kfree(kvm->arch.xics_device);
+ kvm->arch.xics_device = NULL;
#endif /* CONFIG_KVM_XICS */
}
@@ -1051,13 +1072,10 @@ static int kvmppc_book3s_init(void)
#ifdef CONFIG_KVM_XICS
#ifdef CONFIG_KVM_XIVE
if (xics_on_xive()) {
- kvmppc_xive_init_module();
kvm_register_device_ops(&kvm_xive_ops, KVM_DEV_TYPE_XICS);
- if (kvmppc_xive_native_supported()) {
- kvmppc_xive_native_init_module();
+ if (kvmppc_xive_native_supported())
kvm_register_device_ops(&kvm_xive_native_ops,
KVM_DEV_TYPE_XIVE);
- }
} else
#endif
kvm_register_device_ops(&kvm_xics_ops, KVM_DEV_TYPE_XICS);
@@ -1067,12 +1085,6 @@ static int kvmppc_book3s_init(void)
static void kvmppc_book3s_exit(void)
{
-#ifdef CONFIG_KVM_XICS
- if (xics_on_xive()) {
- kvmppc_xive_exit_module();
- kvmppc_xive_native_exit_module();
- }
-#endif
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
kvmppc_book3s_exit_pr();
#endif
diff --git a/arch/powerpc/kvm/book3s.h b/arch/powerpc/kvm/book3s.h
index 3a4613985949..58391b4b32ed 100644
--- a/arch/powerpc/kvm/book3s.h
+++ b/arch/powerpc/kvm/book3s.h
@@ -9,22 +9,22 @@
extern void kvmppc_core_flush_memslot_hv(struct kvm *kvm,
struct kvm_memory_slot *memslot);
-extern int kvm_unmap_hva_range_hv(struct kvm *kvm, unsigned long start,
- unsigned long end);
-extern int kvm_age_hva_hv(struct kvm *kvm, unsigned long start,
- unsigned long end);
-extern int kvm_test_age_hva_hv(struct kvm *kvm, unsigned long hva);
-extern void kvm_set_spte_hva_hv(struct kvm *kvm, unsigned long hva, pte_t pte);
+extern bool kvm_unmap_gfn_range_hv(struct kvm *kvm, struct kvm_gfn_range *range);
+extern bool kvm_age_gfn_hv(struct kvm *kvm, struct kvm_gfn_range *range);
+extern bool kvm_test_age_gfn_hv(struct kvm *kvm, struct kvm_gfn_range *range);
+extern bool kvm_set_spte_gfn_hv(struct kvm *kvm, struct kvm_gfn_range *range);
+extern int kvmppc_mmu_init_pr(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_destroy_pr(struct kvm_vcpu *vcpu);
-extern int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
+extern int kvmppc_core_emulate_op_pr(struct kvm_vcpu *vcpu,
unsigned int inst, int *advance);
extern int kvmppc_core_emulate_mtspr_pr(struct kvm_vcpu *vcpu,
int sprn, ulong spr_val);
extern int kvmppc_core_emulate_mfspr_pr(struct kvm_vcpu *vcpu,
int sprn, ulong *spr_val);
extern int kvmppc_book3s_init_pr(void);
-extern void kvmppc_book3s_exit_pr(void);
+void kvmppc_book3s_exit_pr(void);
+extern int kvmppc_handle_exit_pr(struct kvm_vcpu *vcpu, unsigned int exit_nr);
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
extern void kvmppc_emulate_tabort(struct kvm_vcpu *vcpu, int ra_val);
diff --git a/arch/powerpc/kvm/book3s_32_mmu.c b/arch/powerpc/kvm/book3s_32_mmu.c
index f21e73492ce3..0215f32932a9 100644
--- a/arch/powerpc/kvm/book3s_32_mmu.c
+++ b/arch/powerpc/kvm/book3s_32_mmu.c
@@ -234,7 +234,7 @@ static int kvmppc_mmu_book3s_32_xlate_pte(struct kvm_vcpu *vcpu, gva_t eaddr,
case 2:
case 6:
pte->may_write = true;
- /* fall through */
+ fallthrough;
case 3:
case 5:
case 7:
@@ -337,7 +337,7 @@ static void kvmppc_mmu_book3s_32_mtsrin(struct kvm_vcpu *vcpu, u32 srnum,
static void kvmppc_mmu_book3s_32_tlbie(struct kvm_vcpu *vcpu, ulong ea, bool large)
{
- int i;
+ unsigned long i;
struct kvm_vcpu *v;
/* flush this VA on all cpus */
diff --git a/arch/powerpc/kvm/book3s_32_mmu_host.c b/arch/powerpc/kvm/book3s_32_mmu_host.c
index d4cb3bcf41b6..4b3a8d80cfa3 100644
--- a/arch/powerpc/kvm/book3s_32_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_32_mmu_host.c
@@ -353,10 +353,7 @@ void kvmppc_mmu_destroy_pr(struct kvm_vcpu *vcpu)
preempt_enable();
}
-/* From mm/mmu_context_hash32.c */
-#define CTX_TO_VSID(c, id) ((((c) * (897 * 16)) + (id * 0x111)) & 0xffffff)
-
-int kvmppc_mmu_init(struct kvm_vcpu *vcpu)
+int kvmppc_mmu_init_pr(struct kvm_vcpu *vcpu)
{
struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
int err;
diff --git a/arch/powerpc/kvm/book3s_32_sr.S b/arch/powerpc/kvm/book3s_32_sr.S
index e3ab9df6cf19..6cfcd20d4668 100644
--- a/arch/powerpc/kvm/book3s_32_sr.S
+++ b/arch/powerpc/kvm/book3s_32_sr.S
@@ -122,11 +122,27 @@
/* 0x0 - 0xb */
- /* 'current->mm' needs to be in r4 */
- tophys(r4, r2)
- lwz r4, MM(r4)
- tophys(r4, r4)
- /* This only clobbers r0, r3, r4 and r5 */
+ /* switch_mmu_context() needs paging, let's enable it */
+ mfmsr r9
+ ori r11, r9, MSR_DR
+ mtmsr r11
+ sync
+
+ /* switch_mmu_context() clobbers r12, rescue it */
+ SAVE_GPR(12, r1)
+
+ /* Calling switch_mmu_context(<inv>, current->mm, <inv>); */
+ lwz r4, MM(r2)
bl switch_mmu_context
+ /* restore r12 */
+ REST_GPR(12, r1)
+
+ /* Disable paging again */
+ mfmsr r9
+ li r6, MSR_DR
+ andc r9, r9, r6
+ mtmsr r9
+ sync
+
.endm
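Loosely, the new sequence above does the following in C terms (illustrative only; the real code stays in assembly and additionally saves and restores r12 around the call):

unsigned long msr = mfmsr();

mtmsr(msr | MSR_DR);				/* enable data relocation; the asm follows this with a sync */
switch_mmu_context(NULL, current->mm, NULL);	/* only the mm argument is meaningful here */
mtmsr(mfmsr() & ~MSR_DR);			/* back to real mode, again followed by a sync */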
diff --git a/arch/powerpc/kvm/book3s_64_entry.S b/arch/powerpc/kvm/book3s_64_entry.S
new file mode 100644
index 000000000000..6c2b1d17cb63
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_64_entry.S
@@ -0,0 +1,429 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#include <asm/asm-offsets.h>
+#include <asm/cache.h>
+#include <asm/code-patching-asm.h>
+#include <asm/exception-64s.h>
+#include <asm/export.h>
+#include <asm/kvm_asm.h>
+#include <asm/kvm_book3s_asm.h>
+#include <asm/mmu.h>
+#include <asm/ppc_asm.h>
+#include <asm/ptrace.h>
+#include <asm/reg.h>
+#include <asm/ultravisor-api.h>
+
+/*
+ * These are branched to from interrupt handlers in exception-64s.S which set
+ * IKVM_REAL or IKVM_VIRT, if HSTATE_IN_GUEST was found to be non-zero.
+ */
+
+/*
+ * This is an hcall, so the register convention is as described in
+ * Documentation/powerpc/papr_hcalls.rst.
+ *
+ * This may also be a syscall from PR-KVM userspace that is to be
+ * reflected to the PR guest kernel, so registers may be set up for
+ * a system call rather than hcall. We don't currently clobber
+ * anything here, but the 0xc00 handler has already clobbered CTR
+ * and CR0, so PR-KVM can not support a guest kernel that preserves
+ * those registers across its system calls.
+ *
+ * The state of registers is as for kvmppc_interrupt, except CFAR is not
+ * saved, R13 is not in SCRATCH0, and R10 does not contain the trap.
+ */
+.global kvmppc_hcall
+.balign IFETCH_ALIGN_BYTES
+kvmppc_hcall:
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+ lbz r10,HSTATE_IN_GUEST(r13)
+ cmpwi r10,KVM_GUEST_MODE_HV_P9
+ beq kvmppc_p9_exit_hcall
+#endif
+ ld r10,PACA_EXGEN+EX_R13(r13)
+ SET_SCRATCH0(r10)
+ li r10,0xc00
+ /* Now we look like kvmppc_interrupt */
+ li r11,PACA_EXGEN
+ b .Lgot_save_area
+
+/*
+ * KVM interrupt entry occurs after GEN_INT_ENTRY runs, and follows that
+ * call convention:
+ *
+ * guest R9-R13, CTR, CFAR, PPR saved in PACA EX_xxx save area
+ * guest (H)DAR, (H)DSISR are also in the save area for relevant interrupts
+ * guest R13 also saved in SCRATCH0
+ * R13 = PACA
+ * R11 = (H)SRR0
+ * R12 = (H)SRR1
+ * R9 = guest CR
+ * PPR is set to medium
+ *
+ * With the addition for KVM:
+ * R10 = trap vector
+ */
+.global kvmppc_interrupt
+.balign IFETCH_ALIGN_BYTES
+kvmppc_interrupt:
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+ std r10,HSTATE_SCRATCH0(r13)
+ lbz r10,HSTATE_IN_GUEST(r13)
+ cmpwi r10,KVM_GUEST_MODE_HV_P9
+ beq kvmppc_p9_exit_interrupt
+ ld r10,HSTATE_SCRATCH0(r13)
+#endif
+ li r11,PACA_EXGEN
+ cmpdi r10,0x200
+ bgt+ .Lgot_save_area
+ li r11,PACA_EXMC
+ beq .Lgot_save_area
+ li r11,PACA_EXNMI
+.Lgot_save_area:
+ add r11,r11,r13
+BEGIN_FTR_SECTION
+ ld r12,EX_CFAR(r11)
+ std r12,HSTATE_CFAR(r13)
+END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
+ ld r12,EX_CTR(r11)
+ mtctr r12
+BEGIN_FTR_SECTION
+ ld r12,EX_PPR(r11)
+ std r12,HSTATE_PPR(r13)
+END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
+ ld r12,EX_R12(r11)
+ std r12,HSTATE_SCRATCH0(r13)
+ sldi r12,r9,32
+ or r12,r12,r10
+ ld r9,EX_R9(r11)
+ ld r10,EX_R10(r11)
+ ld r11,EX_R11(r11)
+
+ /*
+ * Hcalls and other interrupts come here after normalising register
+ * contents and save locations:
+ *
+ * R12 = (guest CR << 32) | interrupt vector
+ * R13 = PACA
+ * guest R12 saved in shadow HSTATE_SCRATCH0
+ * guest R13 saved in SPRN_SCRATCH0
+ */
+ std r9,HSTATE_SCRATCH2(r13)
+ lbz r9,HSTATE_IN_GUEST(r13)
+ cmpwi r9,KVM_GUEST_MODE_SKIP
+ beq- .Lmaybe_skip
+.Lno_skip:
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
+ cmpwi r9,KVM_GUEST_MODE_GUEST
+ beq kvmppc_interrupt_pr
+#endif
+ b kvmppc_interrupt_hv
+#else
+ b kvmppc_interrupt_pr
+#endif
+
+/*
+ * "Skip" interrupts are part of a trick KVM uses a with hash guests to load
+ * the faulting instruction in guest memory from the hypervisor without
+ * walking page tables.
+ *
+ * When the guest takes a fault that requires the hypervisor to load the
+ * instruction (e.g., MMIO emulation), KVM is running in real-mode with HV=1
+ * and the guest MMU context loaded. It sets KVM_GUEST_MODE_SKIP, and sets
+ * MSR[DR]=1 while leaving MSR[IR]=0, so it continues to fetch HV instructions
+ * but loads and stores will access the guest context. This is used to load
+ * the faulting instruction using the faulting guest effective address.
+ *
+ * However the guest context may not be able to translate, or it may cause a
+ * machine check or other issue, which results in a fault in the host
+ * (even with KVM-HV).
+ *
+ * These faults come here because KVM_GUEST_MODE_SKIP was set, so if they
+ * are (or are likely) caused by that load, the instruction is skipped by
+ * simply returning with the PC advanced by 4. The caller then notices the
+ * load did not execute and falls back to the slow path, which walks the
+ * page tables to read guest memory.
+ */
+.Lmaybe_skip:
+ cmpwi r12,BOOK3S_INTERRUPT_MACHINE_CHECK
+ beq 1f
+ cmpwi r12,BOOK3S_INTERRUPT_DATA_STORAGE
+ beq 1f
+ cmpwi r12,BOOK3S_INTERRUPT_DATA_SEGMENT
+ beq 1f
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+ /* HSRR interrupts get 2 added to interrupt number */
+ cmpwi r12,BOOK3S_INTERRUPT_H_DATA_STORAGE | 0x2
+ beq 2f
+#endif
+ b .Lno_skip
+1: mfspr r9,SPRN_SRR0
+ addi r9,r9,4
+ mtspr SPRN_SRR0,r9
+ ld r12,HSTATE_SCRATCH0(r13)
+ ld r9,HSTATE_SCRATCH2(r13)
+ GET_SCRATCH0(r13)
+ RFI_TO_KERNEL
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+2: mfspr r9,SPRN_HSRR0
+ addi r9,r9,4
+ mtspr SPRN_HSRR0,r9
+ ld r12,HSTATE_SCRATCH0(r13)
+ ld r9,HSTATE_SCRATCH2(r13)
+ GET_SCRATCH0(r13)
+ HRFI_TO_KERNEL
+#endif
+
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+
+/* Stack frame offsets for kvmppc_p9_enter_guest */
+#define SFS (144 + STACK_FRAME_MIN_SIZE)
+#define STACK_SLOT_NVGPRS (SFS - 144) /* 18 gprs */
+
+/*
+ * void kvmppc_p9_enter_guest(struct kvm_vcpu *vcpu);
+ *
+ * Enter the guest on an ISAv3.0 or later system.
+ */
+.balign IFETCH_ALIGN_BYTES
+_GLOBAL(kvmppc_p9_enter_guest)
+EXPORT_SYMBOL_GPL(kvmppc_p9_enter_guest)
+ mflr r0
+ std r0,PPC_LR_STKOFF(r1)
+ stdu r1,-SFS(r1)
+
+ std r1,HSTATE_HOST_R1(r13)
+
+ mfcr r4
+ stw r4,SFS+8(r1)
+
+ reg = 14
+ .rept 18
+ std reg,STACK_SLOT_NVGPRS + ((reg - 14) * 8)(r1)
+ reg = reg + 1
+ .endr
+
+ ld r4,VCPU_LR(r3)
+ mtlr r4
+ ld r4,VCPU_CTR(r3)
+ mtctr r4
+ ld r4,VCPU_XER(r3)
+ mtspr SPRN_XER,r4
+
+ ld r1,VCPU_CR(r3)
+
+BEGIN_FTR_SECTION
+ ld r4,VCPU_CFAR(r3)
+ mtspr SPRN_CFAR,r4
+END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
+BEGIN_FTR_SECTION
+ ld r4,VCPU_PPR(r3)
+ mtspr SPRN_PPR,r4
+END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
+
+ reg = 4
+ .rept 28
+ ld reg,__VCPU_GPR(reg)(r3)
+ reg = reg + 1
+ .endr
+
+ ld r4,VCPU_KVM(r3)
+ lbz r4,KVM_SECURE_GUEST(r4)
+ cmpdi r4,0
+ ld r4,VCPU_GPR(R4)(r3)
+ bne .Lret_to_ultra
+
+ mtcr r1
+
+ ld r0,VCPU_GPR(R0)(r3)
+ ld r1,VCPU_GPR(R1)(r3)
+ ld r2,VCPU_GPR(R2)(r3)
+ ld r3,VCPU_GPR(R3)(r3)
+
+ HRFI_TO_GUEST
+ b .
+
+ /*
+ * Use UV_RETURN ultracall to return control back to the Ultravisor
+ * after processing a hypercall or interrupt that was forwarded
+ * (a.k.a. reflected) to the Hypervisor.
+ *
+ * All registers have already been reloaded except the ucall requires:
+ * R0 = hcall result
+ * R2 = SRR1, so UV can detect a synthesized interrupt (if any)
+ * R3 = UV_RETURN
+ */
+.Lret_to_ultra:
+ mtcr r1
+ ld r1,VCPU_GPR(R1)(r3)
+
+ ld r0,VCPU_GPR(R3)(r3)
+ mfspr r2,SPRN_SRR1
+ LOAD_REG_IMMEDIATE(r3, UV_RETURN)
+ sc 2
+
+/*
+ * kvmppc_p9_exit_hcall and kvmppc_p9_exit_interrupt are branched to from
+ * above if the interrupt was taken for a guest that was entered via
+ * kvmppc_p9_enter_guest().
+ *
+ * The exit code recovers the host stack and vcpu pointer, saves all guest GPRs
+ * and CR, LR, XER as well as guest MSR and NIA into the VCPU, then re-
+ * establishes the host stack and registers to return from the
+ * kvmppc_p9_enter_guest() function, which saves CTR and other guest registers
+ * (SPRs and FP, VEC, etc).
+ */
+.balign IFETCH_ALIGN_BYTES
+kvmppc_p9_exit_hcall:
+ mfspr r11,SPRN_SRR0
+ mfspr r12,SPRN_SRR1
+ li r10,0xc00
+ std r10,HSTATE_SCRATCH0(r13)
+
+.balign IFETCH_ALIGN_BYTES
+kvmppc_p9_exit_interrupt:
+ /*
+ * If set to KVM_GUEST_MODE_HV_P9 but we're still in the
+ * hypervisor, that means we can't return from the entry stack.
+ */
+ rldicl. r10,r12,64-MSR_HV_LG,63
+ bne- kvmppc_p9_bad_interrupt
+
+ std r1,HSTATE_SCRATCH1(r13)
+ std r3,HSTATE_SCRATCH2(r13)
+ ld r1,HSTATE_HOST_R1(r13)
+ ld r3,HSTATE_KVM_VCPU(r13)
+
+ std r9,VCPU_CR(r3)
+
+1:
+ std r11,VCPU_PC(r3)
+ std r12,VCPU_MSR(r3)
+
+ reg = 14
+ .rept 18
+ std reg,__VCPU_GPR(reg)(r3)
+ reg = reg + 1
+ .endr
+
+ /* r1, r3, r9-r13 are saved to vcpu by C code */
+ std r0,VCPU_GPR(R0)(r3)
+ std r2,VCPU_GPR(R2)(r3)
+ reg = 4
+ .rept 5
+ std reg,__VCPU_GPR(reg)(r3)
+ reg = reg + 1
+ .endr
+
+ LOAD_PACA_TOC()
+
+ mflr r4
+ std r4,VCPU_LR(r3)
+ mfspr r4,SPRN_XER
+ std r4,VCPU_XER(r3)
+
+ reg = 14
+ .rept 18
+ ld reg,STACK_SLOT_NVGPRS + ((reg - 14) * 8)(r1)
+ reg = reg + 1
+ .endr
+
+ lwz r4,SFS+8(r1)
+ mtcr r4
+
+ /*
+ * Flush the link stack here, before executing the first blr on the
+ * way out of the guest.
+ *
+ * The link stack won't match coming out of the guest anyway so the
+ * only cost is the flush itself. The call clobbers r0.
+ */
+1: nop
+ patch_site 1b patch__call_kvm_flush_link_stack_p9
+
+ addi r1,r1,SFS
+ ld r0,PPC_LR_STKOFF(r1)
+ mtlr r0
+ blr
+
+/*
+ * Took an interrupt somewhere right before HRFID to guest, so registers are
+ * in a bad way. Restore just enough state to hopefully run host virtual-mode
+ * code and the Linux interrupt handler (SRESET or MCE) to print something useful.
+ *
+ * We could be really clever and save all host registers in known locations
+ * before setting HSTATE_IN_GUEST, then restoring them all here, and setting
+ * return address to a fixup that sets them up again. But that's a lot of
+ * effort for a small bit of code. Lots of other things to do first.
+ */
+kvmppc_p9_bad_interrupt:
+BEGIN_MMU_FTR_SECTION
+ /*
+ * Hash host doesn't try to recover MMU (requires host SLB reload)
+ */
+ b .
+END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_RADIX)
+ /*
+ * Clean up guest registers to give host a chance to run.
+ */
+ li r10,0
+ mtspr SPRN_AMR,r10
+ mtspr SPRN_IAMR,r10
+ mtspr SPRN_CIABR,r10
+ mtspr SPRN_DAWRX0,r10
+BEGIN_FTR_SECTION
+ mtspr SPRN_DAWRX1,r10
+END_FTR_SECTION_IFSET(CPU_FTR_DAWR1)
+
+ /*
+ * Switch to host MMU mode (don't have the real host PID but we aren't
+ * going back to userspace).
+ */
+ hwsync
+ isync
+
+ mtspr SPRN_PID,r10
+
+ ld r10, HSTATE_KVM_VCPU(r13)
+ ld r10, VCPU_KVM(r10)
+ lwz r10, KVM_HOST_LPID(r10)
+ mtspr SPRN_LPID,r10
+
+ ld r10, HSTATE_KVM_VCPU(r13)
+ ld r10, VCPU_KVM(r10)
+ ld r10, KVM_HOST_LPCR(r10)
+ mtspr SPRN_LPCR,r10
+
+ isync
+
+ /*
+ * Set GUEST_MODE_NONE so the handler won't branch to KVM, and clear
+ * MSR_RI in r12 ([H]SRR1) so the handler won't try to return.
+ */
+ li r10,KVM_GUEST_MODE_NONE
+ stb r10,HSTATE_IN_GUEST(r13)
+ li r10,MSR_RI
+ andc r12,r12,r10
+
+ /*
+ * Go back to interrupt handler. MCE and SRESET have their specific
+ * PACA save area so they should be used directly. They set up their
+ * own stack. The other handlers all use EXGEN. They will use the
+ * guest r1 if it looks like a kernel stack, so just load the
+ * emergency stack and go to program check for all other interrupts.
+ */
+ ld r10,HSTATE_SCRATCH0(r13)
+ cmpwi r10,BOOK3S_INTERRUPT_MACHINE_CHECK
+ beq .Lcall_machine_check_common
+
+ cmpwi r10,BOOK3S_INTERRUPT_SYSTEM_RESET
+ beq .Lcall_system_reset_common
+
+ b .
+
+.Lcall_machine_check_common:
+ b machine_check_common
+
+.Lcall_system_reset_common:
+ b system_reset_common
+#endif
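A rough sketch of how the P9 entry point above might be driven from C (simplified and not taken from this patch; the real caller also loads guest SPRs and FP/VEC state before the call, and the trap retrieval below is illustrative):

static int p9_run_guest_once(struct kvm_vcpu *vcpu)
{
	int trap;

	local_paca->kvm_hstate.in_guest = KVM_GUEST_MODE_HV_P9;
	kvmppc_p9_enter_guest(vcpu);	/* returns via kvmppc_p9_exit_hcall/interrupt */
	local_paca->kvm_hstate.in_guest = KVM_GUEST_MODE_NONE;

	/* the exit path stashed the interrupt vector in HSTATE_SCRATCH0 */
	trap = local_paca->kvm_hstate.scratch0 & ~0x2;
	return trap;
}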
diff --git a/arch/powerpc/kvm/book3s_64_mmu.c b/arch/powerpc/kvm/book3s_64_mmu.c
index 599133256a95..61290282fd9e 100644
--- a/arch/powerpc/kvm/book3s_64_mmu.c
+++ b/arch/powerpc/kvm/book3s_64_mmu.c
@@ -196,7 +196,7 @@ static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
hva_t ptegp;
u64 pteg[16];
u64 avpn = 0;
- u64 v, r;
+ u64 r;
u64 v_val, v_mask;
u64 eaddr_mask;
int i;
@@ -285,7 +285,6 @@ do_second:
goto do_second;
}
- v = be64_to_cpu(pteg[i]);
r = be64_to_cpu(pteg[i+1]);
pp = (r & HPTE_R_PP) | key;
if (r & HPTE_R_PP0)
@@ -311,7 +310,7 @@ do_second:
case 2:
case 6:
gpte->may_write = true;
- /* fall through */
+ fallthrough;
case 3:
case 5:
case 7:
@@ -531,7 +530,7 @@ static void kvmppc_mmu_book3s_64_tlbie(struct kvm_vcpu *vcpu, ulong va,
bool large)
{
u64 mask = 0xFFFFFFFFFULL;
- long i;
+ unsigned long i;
struct kvm_vcpu *v;
dprintk("KVM MMU: tlbie(0x%lx)\n", va);
diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c
index 044dd49eeb9d..bc6a381b5346 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_host.c
@@ -8,6 +8,7 @@
*/
#include <linux/kvm_host.h>
+#include <linux/pkeys.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
@@ -89,7 +90,7 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte,
unsigned long pfn;
/* used to check for invalidations in progress */
- mmu_seq = kvm->mmu_notifier_seq;
+ mmu_seq = kvm->mmu_invalidate_seq;
smp_rmb();
/* Get host physical address for gpa */
@@ -133,6 +134,7 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte,
else
kvmppc_mmu_flush_icache(pfn);
+ rflags |= pte_to_hpte_pkey_bits(0, HPTE_USE_KERNEL_KEY);
rflags = (rflags & ~HPTE_R_WIMG) | orig_pte->wimg;
/*
@@ -149,7 +151,7 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte,
cpte = kvmppc_mmu_hpte_cache_next(vcpu);
spin_lock(&kvm->mmu_lock);
- if (!cpte || mmu_notifier_retry(kvm, mmu_seq)) {
+ if (!cpte || mmu_invalidate_retry(kvm, mmu_seq)) {
r = -EAGAIN;
goto out_unlock;
}
@@ -226,7 +228,7 @@ static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
struct kvmppc_sid_map *map;
struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
u16 sid_map_mask;
- static int backwards_map = 0;
+ static int backwards_map;
if (kvmppc_get_msr(vcpu) & MSR_PR)
gvsid |= VSID_PR;
@@ -384,7 +386,7 @@ void kvmppc_mmu_destroy_pr(struct kvm_vcpu *vcpu)
__destroy_context(to_book3s(vcpu)->context_id[0]);
}
-int kvmppc_mmu_init(struct kvm_vcpu *vcpu)
+int kvmppc_mmu_init_pr(struct kvm_vcpu *vcpu)
{
struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
int err;
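The mmu_notifier_seq to mmu_invalidate_seq renames above all follow the same generic KVM pattern for racing against invalidations; a condensed sketch of that pattern, with the translation work and error handling omitted:

	unsigned long mmu_seq = kvm->mmu_invalidate_seq;
	smp_rmb();				/* read the sequence before translating */

	/* ... resolve hva -> pfn outside mmu_lock ... */

	spin_lock(&kvm->mmu_lock);
	if (mmu_invalidate_retry(kvm, mmu_seq)) {
		/* an invalidation ran in between; back out and let the caller retry */
		spin_unlock(&kvm->mmu_lock);
		return -EAGAIN;
	}
	/* ... install the new translation while still holding mmu_lock ... */
	spin_unlock(&kvm->mmu_lock);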
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index 6c372f5c61b6..e9744b41a226 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -27,6 +27,7 @@
#include <asm/cputable.h>
#include <asm/pte-walk.h>
+#include "book3s.h"
#include "trace_hv.h"
//#define DEBUG_RESIZE_HPT 1
@@ -57,7 +58,7 @@ struct kvm_resize_hpt {
/* Possible values and their usage:
* <0 an error occurred during allocation,
* -EBUSY allocation is in the progress,
- * 0 allocation made successfuly.
+ * 0 allocation made successfully.
*/
int error;
@@ -255,22 +256,34 @@ void kvmppc_map_vrma(struct kvm_vcpu *vcpu, struct kvm_memory_slot *memslot,
int kvmppc_mmu_hv_init(void)
{
- unsigned long host_lpid, rsvd_lpid;
+ unsigned long nr_lpids;
if (!mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE))
return -EINVAL;
- /* POWER7 has 10-bit LPIDs (12-bit in POWER8) */
- host_lpid = 0;
- if (cpu_has_feature(CPU_FTR_HVMODE))
- host_lpid = mfspr(SPRN_LPID);
- rsvd_lpid = LPID_RSVD;
+ if (cpu_has_feature(CPU_FTR_HVMODE)) {
+ if (WARN_ON(mfspr(SPRN_LPID) != 0))
+ return -EINVAL;
+ nr_lpids = 1UL << mmu_lpid_bits;
+ } else {
+ nr_lpids = 1UL << KVM_MAX_NESTED_GUESTS_SHIFT;
+ }
- kvmppc_init_lpid(rsvd_lpid + 1);
+ if (!cpu_has_feature(CPU_FTR_ARCH_300)) {
+ /* POWER7 has 10-bit LPIDs, POWER8 has 12-bit LPIDs */
+ if (cpu_has_feature(CPU_FTR_ARCH_207S))
+ WARN_ON(nr_lpids != 1UL << 12);
+ else
+ WARN_ON(nr_lpids != 1UL << 10);
- kvmppc_claim_lpid(host_lpid);
- /* rsvd_lpid is reserved for use in partition switching */
- kvmppc_claim_lpid(rsvd_lpid);
+ /*
+ * Reserve the last implemented LPID for use in partition
+ * switching on POWER7 and POWER8.
+ */
+ nr_lpids -= 1;
+ }
+
+ kvmppc_init_lpid(nr_lpids);
return 0;
}
@@ -281,11 +294,10 @@ static long kvmppc_virtmode_do_h_enter(struct kvm *kvm, unsigned long flags,
{
long ret;
- /* Protect linux PTE lookup from page table destruction */
- rcu_read_lock_sched(); /* this disables preemption too */
+ preempt_disable();
ret = kvmppc_do_h_enter(kvm, flags, pte_index, pteh, ptel,
kvm->mm->pgd, false, pte_idx_ret);
- rcu_read_unlock_sched();
+ preempt_enable();
if (ret == H_TOO_HARD) {
/* this can't happen */
pr_err("KVM: Oops, kvmppc_h_enter returned too hard!\n");
@@ -413,7 +425,7 @@ static int instruction_is_store(unsigned int instr)
return (instr & mask) != 0;
}
-int kvmppc_hv_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu,
+int kvmppc_hv_emulate_mmio(struct kvm_vcpu *vcpu,
unsigned long gpa, gva_t ea, int is_store)
{
u32 last_inst;
@@ -473,10 +485,10 @@ int kvmppc_hv_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu,
vcpu->arch.paddr_accessed = gpa;
vcpu->arch.vaddr_accessed = ea;
- return kvmppc_emulate_mmio(run, vcpu);
+ return kvmppc_emulate_mmio(vcpu);
}
-int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
+int kvmppc_book3s_hv_page_fault(struct kvm_vcpu *vcpu,
unsigned long ea, unsigned long dsisr)
{
struct kvm *kvm = vcpu->kvm;
@@ -485,21 +497,21 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
__be64 *hptep;
unsigned long mmu_seq, psize, pte_size;
unsigned long gpa_base, gfn_base;
- unsigned long gpa, gfn, hva, pfn;
+ unsigned long gpa, gfn, hva, pfn, hpa;
struct kvm_memory_slot *memslot;
unsigned long *rmap;
struct revmap_entry *rev;
- struct page *page, *pages[1];
- long index, ret, npages;
+ struct page *page;
+ long index, ret;
bool is_ci;
- unsigned int writing, write_ok;
- struct vm_area_struct *vma;
+ bool writing, write_ok;
+ unsigned int shift;
unsigned long rcbits;
long mmio_update;
- struct mm_struct *mm;
+ pte_t pte, *ptep;
if (kvm_is_radix(kvm))
- return kvmppc_book3s_radix_page_fault(run, vcpu, ea, dsisr);
+ return kvmppc_book3s_radix_page_fault(vcpu, ea, dsisr);
/*
* Real-mode code has already searched the HPT and found the
@@ -519,7 +531,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
gpa_base = r & HPTE_R_RPN & ~(psize - 1);
gfn_base = gpa_base >> PAGE_SHIFT;
gpa = gpa_base | (ea & (psize - 1));
- return kvmppc_hv_emulate_mmio(run, vcpu, gpa, ea,
+ return kvmppc_hv_emulate_mmio(vcpu, gpa, ea,
dsisr & DSISR_ISSTORE);
}
}
@@ -555,7 +567,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
/* No memslot means it's an emulated MMIO region */
if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
- return kvmppc_hv_emulate_mmio(run, vcpu, gpa, ea,
+ return kvmppc_hv_emulate_mmio(vcpu, gpa, ea,
dsisr & DSISR_ISSTORE);
/*
@@ -566,63 +578,67 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
return -EFAULT;
/* used to check for invalidations in progress */
- mmu_seq = kvm->mmu_notifier_seq;
+ mmu_seq = kvm->mmu_invalidate_seq;
smp_rmb();
ret = -EFAULT;
- is_ci = false;
- pfn = 0;
page = NULL;
- mm = kvm->mm;
- pte_size = PAGE_SIZE;
writing = (dsisr & DSISR_ISSTORE) != 0;
/* If writing != 0, then the HPTE must allow writing, if we get here */
write_ok = writing;
hva = gfn_to_hva_memslot(memslot, gfn);
- npages = get_user_pages_fast(hva, 1, writing ? FOLL_WRITE : 0, pages);
- if (npages < 1) {
- /* Check if it's an I/O mapping */
- down_read(&mm->mmap_sem);
- vma = find_vma(mm, hva);
- if (vma && vma->vm_start <= hva && hva + psize <= vma->vm_end &&
- (vma->vm_flags & VM_PFNMAP)) {
- pfn = vma->vm_pgoff +
- ((hva - vma->vm_start) >> PAGE_SHIFT);
- pte_size = psize;
- is_ci = pte_ci(__pte((pgprot_val(vma->vm_page_prot))));
- write_ok = vma->vm_flags & VM_WRITE;
- }
- up_read(&mm->mmap_sem);
- if (!pfn)
- goto out_put;
+
+ /*
+ * Do a fast check first, since __gfn_to_pfn_memslot doesn't
+ * do it with !atomic && !async, which is how we call it.
+ * We always ask for write permission since the common case
+ * is that the page is writable.
+ */
+ if (get_user_page_fast_only(hva, FOLL_WRITE, &page)) {
+ write_ok = true;
} else {
- page = pages[0];
- pfn = page_to_pfn(page);
- if (PageHuge(page)) {
- page = compound_head(page);
- pte_size <<= compound_order(page);
- }
- /* if the guest wants write access, see if that is OK */
- if (!writing && hpte_is_writable(r)) {
- pte_t *ptep, pte;
- unsigned long flags;
- /*
- * We need to protect against page table destruction
- * hugepage split and collapse.
- */
- local_irq_save(flags);
- ptep = find_current_mm_pte(mm->pgd, hva, NULL, NULL);
- if (ptep) {
- pte = kvmppc_read_update_linux_pte(ptep, 1);
- if (__pte_write(pte))
- write_ok = 1;
- }
- local_irq_restore(flags);
+ /* Call KVM generic code to do the slow-path check */
+ pfn = __gfn_to_pfn_memslot(memslot, gfn, false, NULL,
+ writing, &write_ok, NULL);
+ if (is_error_noslot_pfn(pfn))
+ return -EFAULT;
+ page = NULL;
+ if (pfn_valid(pfn)) {
+ page = pfn_to_page(pfn);
+ if (PageReserved(page))
+ page = NULL;
}
}
+ /*
+ * Read the PTE from the process' radix tree and use that
+ * so we get the shift and attribute bits.
+ */
+ spin_lock(&kvm->mmu_lock);
+ ptep = find_kvm_host_pte(kvm, mmu_seq, hva, &shift);
+ pte = __pte(0);
+ if (ptep)
+ pte = READ_ONCE(*ptep);
+ spin_unlock(&kvm->mmu_lock);
+ /*
+ * If the PTE disappeared temporarily due to a THP
+ * collapse, just return and let the guest try again.
+ */
+ if (!pte_present(pte)) {
+ if (page)
+ put_page(page);
+ return RESUME_GUEST;
+ }
+ hpa = pte_pfn(pte) << PAGE_SHIFT;
+ pte_size = PAGE_SIZE;
+ if (shift)
+ pte_size = 1ul << shift;
+ is_ci = pte_ci(pte);
+
if (psize > pte_size)
goto out_put;
+ if (pte_size > psize)
+ hpa |= hva & (pte_size - psize);
/* Check WIMG vs. the actual page we're accessing */
if (!hpte_cache_flags_ok(r, is_ci)) {
@@ -636,14 +652,13 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
}
/*
- * Set the HPTE to point to pfn.
- * Since the pfn is at PAGE_SIZE granularity, make sure we
+ * Set the HPTE to point to hpa.
+ * Since the hpa is at PAGE_SIZE granularity, make sure we
* don't mask out lower-order bits if psize < PAGE_SIZE.
*/
if (psize < PAGE_SIZE)
psize = PAGE_SIZE;
- r = (r & HPTE_R_KEY_HI) | (r & ~(HPTE_R_PP0 - psize)) |
- ((pfn << PAGE_SHIFT) & ~(psize - 1));
+ r = (r & HPTE_R_KEY_HI) | (r & ~(HPTE_R_PP0 - psize)) | hpa;
if (hpte_is_writable(r) && !write_ok)
r = hpte_make_readonly(r);
ret = RESUME_GUEST;
@@ -678,7 +693,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
/* Check if we might have been invalidated; let the guest retry if so */
ret = RESUME_GUEST;
- if (mmu_notifier_retry(vcpu->kvm, mmu_seq)) {
+ if (mmu_invalidate_retry(vcpu->kvm, mmu_seq)) {
unlock_rmap(rmap);
goto out_unlock;
}
@@ -708,20 +723,13 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
asm volatile("ptesync" : : : "memory");
preempt_enable();
if (page && hpte_is_writable(r))
- SetPageDirty(page);
+ set_page_dirty_lock(page);
out_put:
trace_kvm_page_fault_exit(vcpu, hpte, ret);
- if (page) {
- /*
- * We drop pages[0] here, not page because page might
- * have been set to the head page of a compound, but
- * we have to drop the reference on the correct tail
- * page to match the get inside gup()
- */
- put_page(pages[0]);
- }
+ if (page)
+ put_page(page);
return ret;
out_unlock:
@@ -734,11 +742,11 @@ void kvmppc_rmap_reset(struct kvm *kvm)
{
struct kvm_memslots *slots;
struct kvm_memory_slot *memslot;
- int srcu_idx;
+ int srcu_idx, bkt;
srcu_idx = srcu_read_lock(&kvm->srcu);
slots = kvm_memslots(kvm);
- kvm_for_each_memslot(memslot, slots) {
+ kvm_for_each_memslot(memslot, bkt, slots) {
/* Mutual exclusion with kvm_unmap_hva_range etc. */
spin_lock(&kvm->mmu_lock);
/*
@@ -752,51 +760,6 @@ void kvmppc_rmap_reset(struct kvm *kvm)
srcu_read_unlock(&kvm->srcu, srcu_idx);
}
-typedef int (*hva_handler_fn)(struct kvm *kvm, struct kvm_memory_slot *memslot,
- unsigned long gfn);
-
-static int kvm_handle_hva_range(struct kvm *kvm,
- unsigned long start,
- unsigned long end,
- hva_handler_fn handler)
-{
- int ret;
- int retval = 0;
- struct kvm_memslots *slots;
- struct kvm_memory_slot *memslot;
-
- slots = kvm_memslots(kvm);
- kvm_for_each_memslot(memslot, slots) {
- unsigned long hva_start, hva_end;
- gfn_t gfn, gfn_end;
-
- hva_start = max(start, memslot->userspace_addr);
- hva_end = min(end, memslot->userspace_addr +
- (memslot->npages << PAGE_SHIFT));
- if (hva_start >= hva_end)
- continue;
- /*
- * {gfn(page) | page intersects with [hva_start, hva_end)} =
- * {gfn, gfn+1, ..., gfn_end-1}.
- */
- gfn = hva_to_gfn_memslot(hva_start, memslot);
- gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);
-
- for (; gfn < gfn_end; ++gfn) {
- ret = handler(kvm, memslot, gfn);
- retval |= ret;
- }
- }
-
- return retval;
-}
-
-static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
- hva_handler_fn handler)
-{
- return kvm_handle_hva_range(kvm, hva, hva + 1, handler);
-}
-
/* Must be called with both HPTE and rmap locked */
static void kvmppc_unmap_hpte(struct kvm *kvm, unsigned long i,
struct kvm_memory_slot *memslot,
@@ -840,8 +803,8 @@ static void kvmppc_unmap_hpte(struct kvm *kvm, unsigned long i,
}
}
-static int kvm_unmap_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot,
- unsigned long gfn)
+static void kvm_unmap_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot,
+ unsigned long gfn)
{
unsigned long i;
__be64 *hptep;
@@ -874,16 +837,21 @@ static int kvm_unmap_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot,
unlock_rmap(rmapp);
__unlock_hpte(hptep, be64_to_cpu(hptep[0]));
}
- return 0;
}
-int kvm_unmap_hva_range_hv(struct kvm *kvm, unsigned long start, unsigned long end)
+bool kvm_unmap_gfn_range_hv(struct kvm *kvm, struct kvm_gfn_range *range)
{
- hva_handler_fn handler;
+ gfn_t gfn;
- handler = kvm_is_radix(kvm) ? kvm_unmap_radix : kvm_unmap_rmapp;
- kvm_handle_hva_range(kvm, start, end, handler);
- return 0;
+ if (kvm_is_radix(kvm)) {
+ for (gfn = range->start; gfn < range->end; gfn++)
+ kvm_unmap_radix(kvm, range->slot, gfn);
+ } else {
+ for (gfn = range->start; gfn < range->end; gfn++)
+ kvm_unmap_rmapp(kvm, range->slot, gfn);
+ }
+
+ return false;
}
void kvmppc_core_flush_memslot_hv(struct kvm *kvm,
@@ -913,13 +881,13 @@ void kvmppc_core_flush_memslot_hv(struct kvm *kvm,
}
}
-static int kvm_age_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot,
- unsigned long gfn)
+static bool kvm_age_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot,
+ unsigned long gfn)
{
struct revmap_entry *rev = kvm->arch.hpt.rev;
unsigned long head, i, j;
__be64 *hptep;
- int ret = 0;
+ bool ret = false;
unsigned long *rmapp;
rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];
@@ -927,7 +895,7 @@ static int kvm_age_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot,
lock_rmap(rmapp);
if (*rmapp & KVMPPC_RMAP_REFERENCED) {
*rmapp &= ~KVMPPC_RMAP_REFERENCED;
- ret = 1;
+ ret = true;
}
if (!(*rmapp & KVMPPC_RMAP_PRESENT)) {
unlock_rmap(rmapp);
@@ -959,7 +927,7 @@ static int kvm_age_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot,
rev[i].guest_rpte |= HPTE_R_R;
note_hpte_modification(kvm, &rev[i]);
}
- ret = 1;
+ ret = true;
}
__unlock_hpte(hptep, be64_to_cpu(hptep[0]));
} while ((i = j) != head);
@@ -968,26 +936,34 @@ static int kvm_age_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot,
return ret;
}
-int kvm_age_hva_hv(struct kvm *kvm, unsigned long start, unsigned long end)
+bool kvm_age_gfn_hv(struct kvm *kvm, struct kvm_gfn_range *range)
{
- hva_handler_fn handler;
+ gfn_t gfn;
+ bool ret = false;
- handler = kvm_is_radix(kvm) ? kvm_age_radix : kvm_age_rmapp;
- return kvm_handle_hva_range(kvm, start, end, handler);
+ if (kvm_is_radix(kvm)) {
+ for (gfn = range->start; gfn < range->end; gfn++)
+ ret |= kvm_age_radix(kvm, range->slot, gfn);
+ } else {
+ for (gfn = range->start; gfn < range->end; gfn++)
+ ret |= kvm_age_rmapp(kvm, range->slot, gfn);
+ }
+
+ return ret;
}
-static int kvm_test_age_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot,
- unsigned long gfn)
+static bool kvm_test_age_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot,
+ unsigned long gfn)
{
struct revmap_entry *rev = kvm->arch.hpt.rev;
unsigned long head, i, j;
unsigned long *hp;
- int ret = 1;
+ bool ret = true;
unsigned long *rmapp;
rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];
if (*rmapp & KVMPPC_RMAP_REFERENCED)
- return 1;
+ return true;
lock_rmap(rmapp);
if (*rmapp & KVMPPC_RMAP_REFERENCED)
@@ -1002,27 +978,33 @@ static int kvm_test_age_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot,
goto out;
} while ((i = j) != head);
}
- ret = 0;
+ ret = false;
out:
unlock_rmap(rmapp);
return ret;
}
-int kvm_test_age_hva_hv(struct kvm *kvm, unsigned long hva)
+bool kvm_test_age_gfn_hv(struct kvm *kvm, struct kvm_gfn_range *range)
{
- hva_handler_fn handler;
+ WARN_ON(range->start + 1 != range->end);
- handler = kvm_is_radix(kvm) ? kvm_test_age_radix : kvm_test_age_rmapp;
- return kvm_handle_hva(kvm, hva, handler);
+ if (kvm_is_radix(kvm))
+ return kvm_test_age_radix(kvm, range->slot, range->start);
+ else
+ return kvm_test_age_rmapp(kvm, range->slot, range->start);
}
-void kvm_set_spte_hva_hv(struct kvm *kvm, unsigned long hva, pte_t pte)
+bool kvm_set_spte_gfn_hv(struct kvm *kvm, struct kvm_gfn_range *range)
{
- hva_handler_fn handler;
+ WARN_ON(range->start + 1 != range->end);
+
+ if (kvm_is_radix(kvm))
+ kvm_unmap_radix(kvm, range->slot, range->start);
+ else
+ kvm_unmap_rmapp(kvm, range->slot, range->start);
- handler = kvm_is_radix(kvm) ? kvm_unmap_radix : kvm_unmap_rmapp;
- kvm_handle_hva(kvm, hva, handler);
+ return false;
}
static int vcpus_running(struct kvm *kvm)
@@ -2138,9 +2120,8 @@ static const struct file_operations debugfs_htab_fops = {
void kvmppc_mmu_debugfs_init(struct kvm *kvm)
{
- kvm->arch.htab_dentry = debugfs_create_file("htab", 0400,
- kvm->arch.debugfs_dir, kvm,
- &debugfs_htab_fops);
+ debugfs_create_file("htab", 0400, kvm->debugfs_dentry, kvm,
+ &debugfs_htab_fops);
}
void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu)
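Condensed from the rewritten fault path in kvmppc_book3s_hv_page_fault() above, the fast/slow page lookup now looks roughly like this (a sketch only; the real code goes on to read the host PTE under mmu_lock to get the final hpa and page size):

	struct page *page = NULL;
	bool write_ok = writing;
	kvm_pfn_t pfn;

	if (get_user_page_fast_only(hva, FOLL_WRITE, &page)) {
		/* fast path: got a writable page without taking any locks */
		write_ok = true;
		pfn = page_to_pfn(page);
	} else {
		/* slow path: generic KVM resolves the pfn and reports writability */
		pfn = __gfn_to_pfn_memslot(memslot, gfn, false, NULL,
					   writing, &write_ok, NULL);
		if (is_error_noslot_pfn(pfn))
			return -EFAULT;
		page = NULL;
		if (pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
			page = pfn_to_page(pfn);
	}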
diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c
index 803940d79b73..5d5e12f3bf86 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_radix.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c
@@ -11,16 +11,18 @@
#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <linux/debugfs.h>
+#include <linux/pgtable.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/page.h>
#include <asm/mmu.h>
-#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/pte-walk.h>
#include <asm/ultravisor.h>
#include <asm/kvm_book3s_uvmem.h>
+#include <asm/plpar_wrappers.h>
+#include <asm/firmware.h>
/*
* Supported radix tree geometry.
@@ -33,14 +35,18 @@ unsigned long __kvmhv_copy_tofrom_guest_radix(int lpid, int pid,
gva_t eaddr, void *to, void *from,
unsigned long n)
{
- int uninitialized_var(old_pid), old_lpid;
+ int old_pid, old_lpid;
unsigned long quadrant, ret = n;
bool is_load = !!to;
/* Can't access quadrants 1 or 2 in non-HV mode, call the HV to do it */
if (kvmhv_on_pseries())
return plpar_hcall_norets(H_COPY_TOFROM_GUEST, lpid, pid, eaddr,
- __pa(to), __pa(from), n);
+ (to != NULL) ? __pa(to): 0,
+ (from != NULL) ? __pa(from): 0, n);
+
+ if (eaddr & (0xFFFUL << 52))
+ return ret;
quadrant = 1;
if (!pid)
@@ -52,6 +58,8 @@ unsigned long __kvmhv_copy_tofrom_guest_radix(int lpid, int pid,
preempt_disable();
+ asm volatile("hwsync" ::: "memory");
+ isync();
/* switch the lpid first to avoid running host with unallocated pid */
old_lpid = mfspr(SPRN_LPID);
if (old_lpid != lpid)
@@ -63,11 +71,15 @@ unsigned long __kvmhv_copy_tofrom_guest_radix(int lpid, int pid,
}
isync();
+ pagefault_disable();
if (is_load)
- ret = probe_user_read(to, (const void __user *)from, n);
+ ret = __copy_from_user_inatomic(to, (const void __user *)from, n);
else
- ret = probe_user_write((void __user *)to, from, n);
+ ret = __copy_to_user_inatomic((void __user *)to, from, n);
+ pagefault_enable();
+ asm volatile("hwsync" ::: "memory");
+ isync();
/* switch the pid first to avoid running host with unallocated pid */
if (quadrant == 1 && pid != old_pid)
mtspr(SPRN_PID, old_pid);
@@ -79,7 +91,6 @@ unsigned long __kvmhv_copy_tofrom_guest_radix(int lpid, int pid,
return ret;
}
-EXPORT_SYMBOL_GPL(__kvmhv_copy_tofrom_guest_radix);
static long kvmhv_copy_tofrom_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr,
void *to, void *from, unsigned long n)
@@ -115,14 +126,12 @@ long kvmhv_copy_from_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr, void *to,
return ret;
}
-EXPORT_SYMBOL_GPL(kvmhv_copy_from_guest_radix);
long kvmhv_copy_to_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr, void *from,
unsigned long n)
{
return kvmhv_copy_tofrom_guest_radix(vcpu, eaddr, NULL, from, n);
}
-EXPORT_SYMBOL_GPL(kvmhv_copy_to_guest_radix);
int kvmppc_mmu_walk_radix_tree(struct kvm_vcpu *vcpu, gva_t eaddr,
struct kvmppc_pte *gpte, u64 root,
@@ -160,7 +169,10 @@ int kvmppc_mmu_walk_radix_tree(struct kvm_vcpu *vcpu, gva_t eaddr,
return -EINVAL;
/* Read the entry from guest memory */
addr = base + (index * sizeof(rpte));
+
+ kvm_vcpu_srcu_read_lock(vcpu);
ret = kvm_read_guest(kvm, addr, &rpte, sizeof(rpte));
+ kvm_vcpu_srcu_read_unlock(vcpu);
if (ret) {
if (pte_ret_p)
*pte_ret_p = addr;
@@ -236,7 +248,9 @@ int kvmppc_mmu_radix_translate_table(struct kvm_vcpu *vcpu, gva_t eaddr,
/* Read the table to find the root of the radix tree */
ptbl = (table & PRTB_MASK) + (table_index * sizeof(entry));
+ kvm_vcpu_srcu_read_lock(vcpu);
ret = kvm_read_guest(kvm, ptbl, &entry, sizeof(entry));
+ kvm_vcpu_srcu_read_unlock(vcpu);
if (ret)
return ret;
@@ -313,9 +327,19 @@ void kvmppc_radix_tlbie_page(struct kvm *kvm, unsigned long addr,
}
psi = shift_to_mmu_psize(pshift);
- rb = addr | (mmu_get_ap(psi) << PPC_BITLSHIFT(58));
- rc = plpar_hcall_norets(H_TLB_INVALIDATE, H_TLBIE_P1_ENC(0, 0, 1),
- lpid, rb);
+
+ if (!firmware_has_feature(FW_FEATURE_RPT_INVALIDATE)) {
+ rb = addr | (mmu_get_ap(psi) << PPC_BITLSHIFT(58));
+ rc = plpar_hcall_norets(H_TLB_INVALIDATE, H_TLBIE_P1_ENC(0, 0, 1),
+ lpid, rb);
+ } else {
+ rc = pseries_rpt_invalidate(lpid, H_RPTI_TARGET_CMMU,
+ H_RPTI_TYPE_NESTED |
+ H_RPTI_TYPE_TLB,
+ psize_to_rpti_pgsize(psi),
+ addr, addr + psize);
+ }
+
if (rc)
pr_err("KVM: TLB page invalidation hcall failed, rc=%ld\n", rc);
}
@@ -329,8 +353,14 @@ static void kvmppc_radix_flush_pwc(struct kvm *kvm, unsigned int lpid)
return;
}
- rc = plpar_hcall_norets(H_TLB_INVALIDATE, H_TLBIE_P1_ENC(1, 0, 1),
- lpid, TLBIEL_INVAL_SET_LPID);
+ if (!firmware_has_feature(FW_FEATURE_RPT_INVALIDATE))
+ rc = plpar_hcall_norets(H_TLB_INVALIDATE, H_TLBIE_P1_ENC(1, 0, 1),
+ lpid, TLBIEL_INVAL_SET_LPID);
+ else
+ rc = pseries_rpt_invalidate(lpid, H_RPTI_TARGET_CMMU,
+ H_RPTI_TYPE_NESTED |
+ H_RPTI_TYPE_PWC, H_RPTI_PAGE_ALL,
+ 0, -1UL);
if (rc)
pr_err("KVM: TLB PWC invalidation hcall failed, rc=%ld\n", rc);
}
@@ -342,7 +372,7 @@ static unsigned long kvmppc_radix_update_pte(struct kvm *kvm, pte_t *ptep,
return __radix_pte_update(ptep, clr, set);
}
-void kvmppc_radix_set_pte_at(struct kvm *kvm, unsigned long addr,
+static void kvmppc_radix_set_pte_at(struct kvm *kvm, unsigned long addr,
pte_t *ptep, pte_t pte)
{
radix__set_pte_at(kvm->mm, addr, ptep, pte, 0);
@@ -353,7 +383,13 @@ static struct kmem_cache *kvm_pmd_cache;
static pte_t *kvmppc_pte_alloc(void)
{
- return kmem_cache_alloc(kvm_pte_cache, GFP_KERNEL);
+ pte_t *pte;
+
+ pte = kmem_cache_alloc(kvm_pte_cache, GFP_KERNEL);
+ /* pmd_populate() will only reference _pa(pte). */
+ kmemleak_ignore(pte);
+
+ return pte;
}
static void kvmppc_pte_free(pte_t *ptep)
@@ -363,7 +399,13 @@ static void kvmppc_pte_free(pte_t *ptep)
static pmd_t *kvmppc_pmd_alloc(void)
{
- return kmem_cache_alloc(kvm_pmd_cache, GFP_KERNEL);
+ pmd_t *pmd;
+
+ pmd = kmem_cache_alloc(kvm_pmd_cache, GFP_KERNEL);
+ /* pud_populate() will only reference _pa(pmd). */
+ kmemleak_ignore(pmd);
+
+ return pmd;
}
static void kvmppc_pmd_free(pmd_t *pmdp)
@@ -417,15 +459,19 @@ void kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte, unsigned long gpa,
* Callers are responsible for flushing the PWC.
*
* When page tables are being unmapped/freed as part of page fault path
- * (full == false), ptes are not expected. There is code to unmap them
- * and emit a warning if encountered, but there may already be data
- * corruption due to the unexpected mappings.
+ * (full == false), valid ptes are generally not expected; however, there
+ * is one situation where they arise, which is when dirty page logging is
+ * turned off for a memslot while the VM is running. The new memslot
+ * becomes visible to page faults before the memslot commit function
+ * gets to flush the memslot, which can lead to a 2MB page mapping being
+ * installed for a guest physical address where there are already 64kB
+ * (or 4kB) mappings (of sub-pages of the same 2MB page).
*/
static void kvmppc_unmap_free_pte(struct kvm *kvm, pte_t *pte, bool full,
unsigned int lpid)
{
if (full) {
- memset(pte, 0, sizeof(long) << PTE_INDEX_SIZE);
+ memset(pte, 0, sizeof(long) << RADIX_PTE_INDEX_SIZE);
} else {
pte_t *p = pte;
unsigned long it;
@@ -433,7 +479,6 @@ static void kvmppc_unmap_free_pte(struct kvm *kvm, pte_t *pte, bool full,
for (it = 0; it < PTRS_PER_PTE; ++it, ++p) {
if (pte_val(*p) == 0)
continue;
- WARN_ON_ONCE(1);
kvmppc_unmap_pte(kvm, p,
pte_pfn(*p) << PAGE_SHIFT,
PAGE_SHIFT, NULL, lpid);
@@ -499,13 +544,14 @@ void kvmppc_free_pgtable_radix(struct kvm *kvm, pgd_t *pgd, unsigned int lpid)
unsigned long ig;
for (ig = 0; ig < PTRS_PER_PGD; ++ig, ++pgd) {
+ p4d_t *p4d = p4d_offset(pgd, 0);
pud_t *pud;
- if (!pgd_present(*pgd))
+ if (!p4d_present(*p4d))
continue;
- pud = pud_offset(pgd, 0);
+ pud = pud_offset(p4d, 0);
kvmppc_unmap_free_pud(kvm, pud, lpid);
- pgd_clear(pgd);
+ p4d_clear(p4d);
}
}
@@ -566,6 +612,7 @@ int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte,
unsigned long *rmapp, struct rmap_nested **n_rmap)
{
pgd_t *pgd;
+ p4d_t *p4d;
pud_t *pud, *new_pud = NULL;
pmd_t *pmd, *new_pmd = NULL;
pte_t *ptep, *new_ptep = NULL;
@@ -573,9 +620,11 @@ int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte,
/* Traverse the guest's 2nd-level tree, allocate new levels needed */
pgd = pgtable + pgd_index(gpa);
+ p4d = p4d_offset(pgd, gpa);
+
pud = NULL;
- if (pgd_present(*pgd))
- pud = pud_offset(pgd, gpa);
+ if (p4d_present(*p4d))
+ pud = pud_offset(p4d, gpa);
else
new_pud = pud_alloc_one(kvm->mm, gpa);
@@ -591,18 +640,18 @@ int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte,
/* Check if we might have been invalidated; let the guest retry if so */
spin_lock(&kvm->mmu_lock);
ret = -EAGAIN;
- if (mmu_notifier_retry(kvm, mmu_seq))
+ if (mmu_invalidate_retry(kvm, mmu_seq))
goto out_unlock;
/* Now traverse again under the lock and change the tree */
ret = -ENOMEM;
- if (pgd_none(*pgd)) {
+ if (p4d_none(*p4d)) {
if (!new_pud)
goto out_unlock;
- pgd_populate(kvm->mm, pgd, new_pud);
+ p4d_populate(kvm->mm, p4d, new_pud);
new_pud = NULL;
}
- pud = pud_offset(pgd, gpa);
+ pud = pud_offset(p4d, gpa);
if (pud_is_leaf(*pud)) {
unsigned long hgpa = gpa & PUD_MASK;
@@ -735,7 +784,7 @@ int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte,
return ret;
}
-bool kvmppc_hv_handle_set_rc(struct kvm *kvm, pgd_t *pgtable, bool writing,
+bool kvmppc_hv_handle_set_rc(struct kvm *kvm, bool nested, bool writing,
unsigned long gpa, unsigned int lpid)
{
unsigned long pgflags;
@@ -750,12 +799,12 @@ bool kvmppc_hv_handle_set_rc(struct kvm *kvm, pgd_t *pgtable, bool writing,
pgflags = _PAGE_ACCESSED;
if (writing)
pgflags |= _PAGE_DIRTY;
- /*
- * We are walking the secondary (partition-scoped) page table here.
- * We can do this without disabling irq because the Linux MM
- * subsystem doesn't do THP splits and collapses on this tree.
- */
- ptep = __find_linux_pte(pgtable, gpa, NULL, &shift);
+
+ if (nested)
+ ptep = find_kvm_nested_guest_pte(kvm, lpid, gpa, &shift);
+ else
+ ptep = find_kvm_secondary_pte(kvm, gpa, &shift);
+
if (ptep && pte_present(*ptep) && (!writing || pte_write(*ptep))) {
kvmppc_radix_update_pte(kvm, ptep, 0, pgflags, gpa, shift);
return true;
@@ -781,7 +830,7 @@ int kvmppc_book3s_instantiate_page(struct kvm_vcpu *vcpu,
bool large_enable;
/* used to check for invalidations in progress */
- mmu_seq = kvm->mmu_notifier_seq;
+ mmu_seq = kvm->mmu_invalidate_seq;
smp_rmb();
/*
@@ -791,14 +840,14 @@ int kvmppc_book3s_instantiate_page(struct kvm_vcpu *vcpu,
* is that the page is writable.
*/
hva = gfn_to_hva_memslot(memslot, gfn);
- if (!kvm_ro && __get_user_pages_fast(hva, 1, 1, &page) == 1) {
+ if (!kvm_ro && get_user_page_fast_only(hva, FOLL_WRITE, &page)) {
upgrade_write = true;
} else {
unsigned long pfn;
/* Call KVM generic code to do the slow-path check */
pfn = __gfn_to_pfn_memslot(memslot, gfn, false, NULL,
- writing, upgrade_p);
+ writing, upgrade_p, NULL);
if (is_error_noslot_pfn(pfn))
return -EFAULT;
page = NULL;
@@ -813,20 +862,21 @@ int kvmppc_book3s_instantiate_page(struct kvm_vcpu *vcpu,
* Read the PTE from the process' radix tree and use that
* so we get the shift and attribute bits.
*/
- local_irq_disable();
- ptep = __find_linux_pte(vcpu->arch.pgdir, hva, NULL, &shift);
+ spin_lock(&kvm->mmu_lock);
+ ptep = find_kvm_host_pte(kvm, mmu_seq, hva, &shift);
+ pte = __pte(0);
+ if (ptep)
+ pte = READ_ONCE(*ptep);
+ spin_unlock(&kvm->mmu_lock);
/*
* If the PTE disappeared temporarily due to a THP
* collapse, just return and let the guest try again.
*/
- if (!ptep) {
- local_irq_enable();
+ if (!pte_present(pte)) {
if (page)
put_page(page);
return RESUME_GUEST;
}
- pte = *ptep;
- local_irq_enable();
/* If we're logging dirty pages, always map single pages */
large_enable = !(memslot->flags & KVM_MEM_LOG_DIRTY_PAGES);
@@ -886,7 +936,7 @@ int kvmppc_book3s_instantiate_page(struct kvm_vcpu *vcpu,
return ret;
}
-int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
+int kvmppc_book3s_radix_page_fault(struct kvm_vcpu *vcpu,
unsigned long ea, unsigned long dsisr)
{
struct kvm *kvm = vcpu->kvm;
@@ -932,7 +982,7 @@ int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
kvmppc_core_queue_data_storage(vcpu, ea, dsisr);
return RESUME_GUEST;
}
- return kvmppc_hv_emulate_mmio(run, vcpu, gpa, ea, writing);
+ return kvmppc_hv_emulate_mmio(vcpu, gpa, ea, writing);
}
if (memslot->flags & KVM_MEM_READONLY) {
@@ -948,8 +998,8 @@ int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
/* Failed to set the reference/change bits */
if (dsisr & DSISR_SET_RC) {
spin_lock(&kvm->mmu_lock);
- if (kvmppc_hv_handle_set_rc(kvm, kvm->arch.pgtable,
- writing, gpa, kvm->arch.lpid))
+ if (kvmppc_hv_handle_set_rc(kvm, false, writing,
+ gpa, kvm->arch.lpid))
dsisr &= ~DSISR_SET_RC;
spin_unlock(&kvm->mmu_lock);
@@ -968,8 +1018,8 @@ int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
}
/* Called with kvm->mmu_lock held */
-int kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
- unsigned long gfn)
+void kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
+ unsigned long gfn)
{
pte_t *ptep;
unsigned long gpa = gfn << PAGE_SHIFT;
@@ -977,30 +1027,29 @@ int kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE) {
uv_page_inval(kvm->arch.lpid, gpa, PAGE_SHIFT);
- return 0;
+ return;
}
- ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
+ ptep = find_kvm_secondary_pte(kvm, gpa, &shift);
if (ptep && pte_present(*ptep))
kvmppc_unmap_pte(kvm, ptep, gpa, shift, memslot,
kvm->arch.lpid);
- return 0;
}
/* Called with kvm->mmu_lock held */
-int kvm_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
- unsigned long gfn)
+bool kvm_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
+ unsigned long gfn)
{
pte_t *ptep;
unsigned long gpa = gfn << PAGE_SHIFT;
unsigned int shift;
- int ref = 0;
+ bool ref = false;
unsigned long old, *rmapp;
if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE)
return ref;
- ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
+ ptep = find_kvm_secondary_pte(kvm, gpa, &shift);
if (ptep && pte_present(*ptep) && pte_young(*ptep)) {
old = kvmppc_radix_update_pte(kvm, ptep, _PAGE_ACCESSED, 0,
gpa, shift);
@@ -1010,26 +1059,27 @@ int kvm_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
kvmhv_update_nest_rmap_rc_list(kvm, rmapp, _PAGE_ACCESSED, 0,
old & PTE_RPN_MASK,
1UL << shift);
- ref = 1;
+ ref = true;
}
return ref;
}
/* Called with kvm->mmu_lock held */
-int kvm_test_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
- unsigned long gfn)
+bool kvm_test_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
+ unsigned long gfn)
+
{
pte_t *ptep;
unsigned long gpa = gfn << PAGE_SHIFT;
unsigned int shift;
- int ref = 0;
+ bool ref = false;
if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE)
return ref;
- ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
+ ptep = find_kvm_secondary_pte(kvm, gpa, &shift);
if (ptep && pte_present(*ptep) && pte_young(*ptep))
- ref = 1;
+ ref = true;
return ref;
}
@@ -1039,7 +1089,7 @@ static int kvm_radix_test_clear_dirty(struct kvm *kvm,
{
unsigned long gfn = memslot->base_gfn + pagenum;
unsigned long gpa = gfn << PAGE_SHIFT;
- pte_t *ptep;
+ pte_t *ptep, pte;
unsigned int shift;
int ret = 0;
unsigned long old, *rmapp;
@@ -1047,12 +1097,35 @@ static int kvm_radix_test_clear_dirty(struct kvm *kvm,
if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE)
return ret;
- ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
- if (ptep && pte_present(*ptep) && pte_dirty(*ptep)) {
- ret = 1;
- if (shift)
- ret = 1 << (shift - PAGE_SHIFT);
+ /*
+ * For performance reasons we don't hold kvm->mmu_lock while walking the
+ * partition scoped table.
+ */
+ ptep = find_kvm_secondary_pte_unlocked(kvm, gpa, &shift);
+ if (!ptep)
+ return 0;
+
+ pte = READ_ONCE(*ptep);
+ if (pte_present(pte) && pte_dirty(pte)) {
spin_lock(&kvm->mmu_lock);
+ /*
+ * Recheck the pte now that mmu_lock is held
+ */
+ if (pte_val(pte) != pte_val(*ptep)) {
+ /*
+ * We have KVM_MEM_LOG_DIRTY_PAGES enabled. Hence we can
+ * only find PAGE_SIZE pte entries here. We can continue
+ * to use the pte addr returned by the above page table
+ * walk.
+ */
+ if (!pte_present(*ptep) || !pte_dirty(*ptep)) {
+ spin_unlock(&kvm->mmu_lock);
+ return 0;
+ }
+ }
+
+ ret = 1;
+ VM_BUG_ON(shift);
old = kvmppc_radix_update_pte(kvm, ptep, _PAGE_DIRTY, 0,
gpa, shift);
kvmppc_radix_tlbie_page(kvm, gpa, shift, kvm->arch.lpid);
@@ -1108,12 +1181,17 @@ void kvmppc_radix_flush_memslot(struct kvm *kvm,
gpa = memslot->base_gfn << PAGE_SHIFT;
spin_lock(&kvm->mmu_lock);
for (n = memslot->npages; n; --n) {
- ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
+ ptep = find_kvm_secondary_pte(kvm, gpa, &shift);
if (ptep && pte_present(*ptep))
kvmppc_unmap_pte(kvm, ptep, gpa, shift, memslot,
kvm->arch.lpid);
gpa += PAGE_SIZE;
}
+ /*
+ * Increase the mmu notifier sequence number to prevent any page
+ * fault that read the memslot earlier from writing a PTE.
+ */
+ kvm->mmu_invalidate_seq++;
spin_unlock(&kvm->mmu_lock);
}
@@ -1219,7 +1297,8 @@ static ssize_t debugfs_radix_read(struct file *file, char __user *buf,
unsigned long gpa;
pgd_t *pgt;
struct kvm_nested_guest *nested;
- pgd_t pgd, *pgdp;
+ pgd_t *pgdp;
+ p4d_t p4d, *p4dp;
pud_t pud, *pudp;
pmd_t pmd, *pmdp;
pte_t *ptep;
@@ -1292,13 +1371,14 @@ static ssize_t debugfs_radix_read(struct file *file, char __user *buf,
}
pgdp = pgt + pgd_index(gpa);
- pgd = READ_ONCE(*pgdp);
- if (!(pgd_val(pgd) & _PAGE_PRESENT)) {
- gpa = (gpa & PGDIR_MASK) + PGDIR_SIZE;
+ p4dp = p4d_offset(pgdp, gpa);
+ p4d = READ_ONCE(*p4dp);
+ if (!(p4d_val(p4d) & _PAGE_PRESENT)) {
+ gpa = (gpa & P4D_MASK) + P4D_SIZE;
continue;
}
- pudp = pud_offset(&pgd, gpa);
+ pudp = pud_offset(&p4d, gpa);
pud = READ_ONCE(*pudp);
if (!(pud_val(pud) & _PAGE_PRESENT)) {
gpa = (gpa & PUD_MASK) + PUD_SIZE;
@@ -1376,9 +1456,8 @@ static const struct file_operations debugfs_radix_fops = {
void kvmhv_radix_debugfs_init(struct kvm *kvm)
{
- kvm->arch.radix_dentry = debugfs_create_file("radix", 0400,
- kvm->arch.debugfs_dir, kvm,
- &debugfs_radix_fops);
+ debugfs_create_file("radix", 0400, kvm->debugfs_dentry, kvm,
+ &debugfs_radix_fops);
}
int kvmppc_radix_init(void)
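The pgd-to-p4d conversion above inserts one level between pgd and pud; a minimal sketch of the resulting walk over the partition-scoped table (leaf handling and allocation omitted, as kvmppc_create_pte() above does them separately):

	pgd_t *pgd = pgtable + pgd_index(gpa);
	p4d_t *p4d = p4d_offset(pgd, gpa);
	pud_t *pud = NULL;
	pmd_t *pmd = NULL;

	if (p4d_present(*p4d))
		pud = pud_offset(p4d, gpa);
	if (pud && pud_present(*pud) && !pud_is_leaf(*pud))
		pmd = pmd_offset(pud, gpa);
	/* ... then pte_offset_kernel(pmd, gpa) for the leaf level ... */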
diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c
index ee6c103bb7d5..40864373ef87 100644
--- a/arch/powerpc/kvm/book3s_64_vio.c
+++ b/arch/powerpc/kvm/book3s_64_vio.c
@@ -27,12 +27,23 @@
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
-#include <asm/kvm_host.h>
#include <asm/udbg.h>
#include <asm/iommu.h>
#include <asm/tce.h>
#include <asm/mmu_context.h>
+static struct kvmppc_spapr_tce_table *kvmppc_find_table(struct kvm *kvm,
+ unsigned long liobn)
+{
+ struct kvmppc_spapr_tce_table *stt;
+
+ list_for_each_entry_lockless(stt, &kvm->arch.spapr_tce_tables, list)
+ if (stt->liobn == liobn)
+ return stt;
+
+ return NULL;
+}
+
static unsigned long kvmppc_tce_pages(unsigned long iommu_pages)
{
return ALIGN(iommu_pages * sizeof(u64), PAGE_SIZE) / PAGE_SIZE;
@@ -74,6 +85,7 @@ extern void kvm_spapr_tce_release_iommu_group(struct kvm *kvm,
struct kvmppc_spapr_tce_iommu_table *stit, *tmp;
struct iommu_table_group *table_group = NULL;
+ rcu_read_lock();
list_for_each_entry_rcu(stt, &kvm->arch.spapr_tce_tables, list) {
table_group = iommu_group_get_iommudata(grp);
@@ -88,7 +100,9 @@ extern void kvm_spapr_tce_release_iommu_group(struct kvm *kvm,
kref_put(&stit->kref, kvm_spapr_tce_liobn_put);
}
}
+ cond_resched_rcu();
}
+ rcu_read_unlock();
}
extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
@@ -106,12 +120,14 @@ extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
if (!f.file)
return -EBADF;
+ rcu_read_lock();
list_for_each_entry_rcu(stt, &kvm->arch.spapr_tce_tables, list) {
if (stt == f.file->private_data) {
found = true;
break;
}
}
+ rcu_read_unlock();
fdput(f);
@@ -144,6 +160,7 @@ extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
if (!tbl)
return -EINVAL;
+ rcu_read_lock();
list_for_each_entry_rcu(stit, &stt->iommu_tables, next) {
if (tbl != stit->tbl)
continue;
@@ -151,14 +168,17 @@ extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
if (!kref_get_unless_zero(&stit->kref)) {
/* stit is being destroyed */
iommu_tce_table_put(tbl);
+ rcu_read_unlock();
return -ENOTTY;
}
/*
* The table is already known to this KVM, we just increased
* its KVM reference counter and can return.
*/
+ rcu_read_unlock();
return 0;
}
+ rcu_read_unlock();
stit = kzalloc(sizeof(*stit), GFP_KERNEL);
if (!stit) {
@@ -275,7 +295,7 @@ long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
struct kvmppc_spapr_tce_table *siter;
struct mm_struct *mm = kvm->mm;
unsigned long npages, size = args->size;
- int ret = -ENOMEM;
+ int ret;
if (!args->size || args->page_shift < 12 || args->page_shift > 34 ||
(args->offset + args->size > (ULLONG_MAX >> args->page_shift)))
@@ -287,8 +307,7 @@ long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
return ret;
ret = -ENOMEM;
- stt = kzalloc(sizeof(*stt) + npages * sizeof(struct page *),
- GFP_KERNEL);
+ stt = kzalloc(struct_size(stt, pages, npages), GFP_KERNEL | __GFP_NOWARN);
if (!stt)
goto fail_acct;
@@ -338,7 +357,7 @@ static long kvmppc_tce_to_ua(struct kvm *kvm, unsigned long tce,
unsigned long gfn = tce >> PAGE_SHIFT;
struct kvm_memory_slot *memslot;
- memslot = search_memslots(kvm_memslots(kvm), gfn);
+ memslot = __gfn_to_memslot(kvm_memslots(kvm), gfn);
if (!memslot)
return -EINVAL;
@@ -366,18 +385,19 @@ static long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *stt,
if (kvmppc_tce_to_ua(stt->kvm, tce, &ua))
return H_TOO_HARD;
+ rcu_read_lock();
list_for_each_entry_rcu(stit, &stt->iommu_tables, next) {
unsigned long hpa = 0;
struct mm_iommu_table_group_mem_t *mem;
long shift = stit->tbl->it_page_shift;
mem = mm_iommu_lookup(stt->kvm->mm, ua, 1ULL << shift);
- if (!mem)
- return H_TOO_HARD;
-
- if (mm_iommu_ua_to_hpa(mem, ua, shift, &hpa))
+ if (!mem || mm_iommu_ua_to_hpa(mem, ua, shift, &hpa)) {
+ rcu_read_unlock();
return H_TOO_HARD;
+ }
}
+ rcu_read_unlock();
return H_SUCCESS;
}
@@ -412,13 +432,19 @@ static void kvmppc_tce_put(struct kvmppc_spapr_tce_table *stt,
tbl[idx % TCES_PER_PAGE] = tce;
}
-static void kvmppc_clear_tce(struct mm_struct *mm, struct iommu_table *tbl,
- unsigned long entry)
+static void kvmppc_clear_tce(struct mm_struct *mm, struct kvmppc_spapr_tce_table *stt,
+ struct iommu_table *tbl, unsigned long entry)
{
- unsigned long hpa = 0;
- enum dma_data_direction dir = DMA_NONE;
+ unsigned long i;
+ unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
+ unsigned long io_entry = entry << (stt->page_shift - tbl->it_page_shift);
+
+ for (i = 0; i < subpages; ++i) {
+ unsigned long hpa = 0;
+ enum dma_data_direction dir = DMA_NONE;
- iommu_tce_xchg_no_kill(mm, tbl, entry, &hpa, &dir);
+ iommu_tce_xchg_no_kill(mm, tbl, io_entry + i, &hpa, &dir);
+ }
}
static long kvmppc_tce_iommu_mapped_dec(struct kvm *kvm,
@@ -477,10 +503,12 @@ static long kvmppc_tce_iommu_unmap(struct kvm *kvm,
break;
}
+ iommu_tce_kill(tbl, io_entry, subpages);
+
return ret;
}
-long kvmppc_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
+static long kvmppc_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
unsigned long entry, unsigned long ua,
enum dma_data_direction dir)
{
@@ -536,6 +564,8 @@ static long kvmppc_tce_iommu_map(struct kvm *kvm,
break;
}
+ iommu_tce_kill(tbl, io_entry, subpages);
+
return ret;
}
@@ -582,10 +612,9 @@ long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
ret = kvmppc_tce_iommu_map(vcpu->kvm, stt, stit->tbl,
entry, ua, dir);
- iommu_tce_kill(stit->tbl, entry, 1);
if (ret != H_SUCCESS) {
- kvmppc_clear_tce(vcpu->kvm->mm, stit->tbl, entry);
+ kvmppc_clear_tce(vcpu->kvm->mm, stt, stit->tbl, entry);
goto unlock_exit;
}
}
@@ -661,13 +690,13 @@ long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
*/
if (get_user(tce, tces + i)) {
ret = H_TOO_HARD;
- goto invalidate_exit;
+ goto unlock_exit;
}
tce = be64_to_cpu(tce);
if (kvmppc_tce_to_ua(vcpu->kvm, tce, &ua)) {
ret = H_PARAMETER;
- goto invalidate_exit;
+ goto unlock_exit;
}
list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
@@ -676,19 +705,15 @@ long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
iommu_tce_direction(tce));
if (ret != H_SUCCESS) {
- kvmppc_clear_tce(vcpu->kvm->mm, stit->tbl,
- entry);
- goto invalidate_exit;
+ kvmppc_clear_tce(vcpu->kvm->mm, stt, stit->tbl,
+ entry + i);
+ goto unlock_exit;
}
}
kvmppc_tce_put(stt, entry + i, tce);
}
-invalidate_exit:
- list_for_each_entry_lockless(stit, &stt->iommu_tables, next)
- iommu_tce_kill(stit->tbl, entry, npages);
-
unlock_exit:
srcu_read_unlock(&vcpu->kvm->srcu, idx);
@@ -727,20 +752,47 @@ long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu,
continue;
if (ret == H_TOO_HARD)
- goto invalidate_exit;
+ return ret;
WARN_ON_ONCE(1);
- kvmppc_clear_tce(vcpu->kvm->mm, stit->tbl, entry);
+ kvmppc_clear_tce(vcpu->kvm->mm, stt, stit->tbl, entry + i);
}
}
for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift))
kvmppc_tce_put(stt, ioba >> stt->page_shift, tce_value);
-invalidate_exit:
- list_for_each_entry_lockless(stit, &stt->iommu_tables, next)
- iommu_tce_kill(stit->tbl, ioba >> stt->page_shift, npages);
-
return ret;
}
EXPORT_SYMBOL_GPL(kvmppc_h_stuff_tce);
+
+long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
+ unsigned long ioba)
+{
+ struct kvmppc_spapr_tce_table *stt;
+ long ret;
+ unsigned long idx;
+ struct page *page;
+ u64 *tbl;
+
+ stt = kvmppc_find_table(vcpu->kvm, liobn);
+ if (!stt)
+ return H_TOO_HARD;
+
+ ret = kvmppc_ioba_validate(stt, ioba, 1);
+ if (ret != H_SUCCESS)
+ return ret;
+
+ idx = (ioba >> stt->page_shift) - stt->offset;
+ page = stt->pages[idx / TCES_PER_PAGE];
+ if (!page) {
+ vcpu->arch.regs.gpr[4] = 0;
+ return H_SUCCESS;
+ }
+ tbl = (u64 *)page_address(page);
+
+ vcpu->arch.regs.gpr[4] = tbl[idx % TCES_PER_PAGE];
+
+ return H_SUCCESS;
+}
+EXPORT_SYMBOL_GPL(kvmppc_h_get_tce);
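As a hedged illustration of the index math in kvmppc_h_get_tce() above, with example values chosen for the sketch rather than taken from the patch:

/* Illustrative sketch only: assume ioba = 0x30000, stt->page_shift = 12,
 * stt->offset = 0 and TCES_PER_PAGE = 512 (8-byte TCEs in a 4K backing page).
 */
unsigned long idx     = (0x30000UL >> 12) - 0;  /* 0x30 = 48, the 48th TCE in the window */
unsigned long sttpage = idx / 512;              /* backing page 0 holds TCEs 0..511 */
unsigned long slot    = idx % 512;              /* slot 48 within that page */
/* The TCE is read from page_address(stt->pages[sttpage])[slot] and returned
 * to the guest in GPR4, as the hcall convention above shows.
 */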
diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c
deleted file mode 100644
index ab6eeb8e753e..000000000000
--- a/arch/powerpc/kvm/book3s_64_vio_hv.c
+++ /dev/null
@@ -1,684 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- *
- * Copyright 2010 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
- * Copyright 2011 David Gibson, IBM Corporation <dwg@au1.ibm.com>
- * Copyright 2016 Alexey Kardashevskiy, IBM Corporation <aik@au1.ibm.com>
- */
-
-#include <linux/types.h>
-#include <linux/string.h>
-#include <linux/kvm.h>
-#include <linux/kvm_host.h>
-#include <linux/highmem.h>
-#include <linux/gfp.h>
-#include <linux/slab.h>
-#include <linux/hugetlb.h>
-#include <linux/list.h>
-#include <linux/stringify.h>
-
-#include <asm/kvm_ppc.h>
-#include <asm/kvm_book3s.h>
-#include <asm/book3s/64/mmu-hash.h>
-#include <asm/mmu_context.h>
-#include <asm/hvcall.h>
-#include <asm/synch.h>
-#include <asm/ppc-opcode.h>
-#include <asm/kvm_host.h>
-#include <asm/udbg.h>
-#include <asm/iommu.h>
-#include <asm/tce.h>
-#include <asm/pte-walk.h>
-
-#ifdef CONFIG_BUG
-
-#define WARN_ON_ONCE_RM(condition) ({ \
- static bool __section(.data.unlikely) __warned; \
- int __ret_warn_once = !!(condition); \
- \
- if (unlikely(__ret_warn_once && !__warned)) { \
- __warned = true; \
- pr_err("WARN_ON_ONCE_RM: (%s) at %s:%u\n", \
- __stringify(condition), \
- __func__, __LINE__); \
- dump_stack(); \
- } \
- unlikely(__ret_warn_once); \
-})
-
-#else
-
-#define WARN_ON_ONCE_RM(condition) ({ \
- int __ret_warn_on = !!(condition); \
- unlikely(__ret_warn_on); \
-})
-
-#endif
-
-/*
- * Finds a TCE table descriptor by LIOBN.
- *
- * WARNING: This will be called in real or virtual mode on HV KVM and virtual
- * mode on PR KVM
- */
-struct kvmppc_spapr_tce_table *kvmppc_find_table(struct kvm *kvm,
- unsigned long liobn)
-{
- struct kvmppc_spapr_tce_table *stt;
-
- list_for_each_entry_lockless(stt, &kvm->arch.spapr_tce_tables, list)
- if (stt->liobn == liobn)
- return stt;
-
- return NULL;
-}
-EXPORT_SYMBOL_GPL(kvmppc_find_table);
-
-#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
-static long kvmppc_rm_tce_to_ua(struct kvm *kvm, unsigned long tce,
- unsigned long *ua, unsigned long **prmap)
-{
- unsigned long gfn = tce >> PAGE_SHIFT;
- struct kvm_memory_slot *memslot;
-
- memslot = search_memslots(kvm_memslots_raw(kvm), gfn);
- if (!memslot)
- return -EINVAL;
-
- *ua = __gfn_to_hva_memslot(memslot, gfn) |
- (tce & ~(PAGE_MASK | TCE_PCI_READ | TCE_PCI_WRITE));
-
- if (prmap)
- *prmap = &memslot->arch.rmap[gfn - memslot->base_gfn];
-
- return 0;
-}
-
-/*
- * Validates TCE address.
- * At the moment flags and page mask are validated.
- * As the host kernel does not access those addresses (just puts them
- * to the table and user space is supposed to process them), we can skip
- * checking other things (such as TCE is a guest RAM address or the page
- * was actually allocated).
- */
-static long kvmppc_rm_tce_validate(struct kvmppc_spapr_tce_table *stt,
- unsigned long tce)
-{
- unsigned long gpa = tce & ~(TCE_PCI_READ | TCE_PCI_WRITE);
- enum dma_data_direction dir = iommu_tce_direction(tce);
- struct kvmppc_spapr_tce_iommu_table *stit;
- unsigned long ua = 0;
-
- /* Allow userspace to poison TCE table */
- if (dir == DMA_NONE)
- return H_SUCCESS;
-
- if (iommu_tce_check_gpa(stt->page_shift, gpa))
- return H_PARAMETER;
-
- if (kvmppc_rm_tce_to_ua(stt->kvm, tce, &ua, NULL))
- return H_TOO_HARD;
-
- list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
- unsigned long hpa = 0;
- struct mm_iommu_table_group_mem_t *mem;
- long shift = stit->tbl->it_page_shift;
-
- mem = mm_iommu_lookup_rm(stt->kvm->mm, ua, 1ULL << shift);
- if (!mem)
- return H_TOO_HARD;
-
- if (mm_iommu_ua_to_hpa_rm(mem, ua, shift, &hpa))
- return H_TOO_HARD;
- }
-
- return H_SUCCESS;
-}
-
-/* Note on the use of page_address() in real mode,
- *
- * It is safe to use page_address() in real mode on ppc64 because
- * page_address() is always defined as lowmem_page_address()
- * which returns __va(PFN_PHYS(page_to_pfn(page))) which is arithmetic
- * operation and does not access page struct.
- *
- * Theoretically page_address() could be defined different
- * but either WANT_PAGE_VIRTUAL or HASHED_PAGE_VIRTUAL
- * would have to be enabled.
- * WANT_PAGE_VIRTUAL is never enabled on ppc32/ppc64,
- * HASHED_PAGE_VIRTUAL could be enabled for ppc32 only and only
- * if CONFIG_HIGHMEM is defined. As CONFIG_SPARSEMEM_VMEMMAP
- * is not expected to be enabled on ppc32, page_address()
- * is safe for ppc32 as well.
- *
- * WARNING: This will be called in real-mode on HV KVM and virtual
- * mode on PR KVM
- */
-static u64 *kvmppc_page_address(struct page *page)
-{
-#if defined(HASHED_PAGE_VIRTUAL) || defined(WANT_PAGE_VIRTUAL)
-#error TODO: fix to avoid page_address() here
-#endif
- return (u64 *) page_address(page);
-}
-
-/*
- * Handles TCE requests for emulated devices.
- * Puts guest TCE values to the table and expects user space to convert them.
- * Cannot fail so kvmppc_rm_tce_validate must be called before it.
- */
-static void kvmppc_rm_tce_put(struct kvmppc_spapr_tce_table *stt,
- unsigned long idx, unsigned long tce)
-{
- struct page *page;
- u64 *tbl;
-
- idx -= stt->offset;
- page = stt->pages[idx / TCES_PER_PAGE];
- /*
- * page must not be NULL in real mode,
- * kvmppc_rm_ioba_validate() must have taken care of this.
- */
- WARN_ON_ONCE_RM(!page);
- tbl = kvmppc_page_address(page);
-
- tbl[idx % TCES_PER_PAGE] = tce;
-}
-
-/*
- * TCEs pages are allocated in kvmppc_rm_tce_put() which won't be able to do so
- * in real mode.
- * Check if kvmppc_rm_tce_put() can succeed in real mode, i.e. a TCEs page is
- * allocated or not required (when clearing a tce entry).
- */
-static long kvmppc_rm_ioba_validate(struct kvmppc_spapr_tce_table *stt,
- unsigned long ioba, unsigned long npages, bool clearing)
-{
- unsigned long i, idx, sttpage, sttpages;
- unsigned long ret = kvmppc_ioba_validate(stt, ioba, npages);
-
- if (ret)
- return ret;
- /*
- * clearing==true says kvmppc_rm_tce_put won't be allocating pages
- * for empty tces.
- */
- if (clearing)
- return H_SUCCESS;
-
- idx = (ioba >> stt->page_shift) - stt->offset;
- sttpage = idx / TCES_PER_PAGE;
- sttpages = _ALIGN_UP(idx % TCES_PER_PAGE + npages, TCES_PER_PAGE) /
- TCES_PER_PAGE;
- for (i = sttpage; i < sttpage + sttpages; ++i)
- if (!stt->pages[i])
- return H_TOO_HARD;
-
- return H_SUCCESS;
-}
-
-static long iommu_tce_xchg_no_kill_rm(struct mm_struct *mm,
- struct iommu_table *tbl,
- unsigned long entry, unsigned long *hpa,
- enum dma_data_direction *direction)
-{
- long ret;
-
- ret = tbl->it_ops->xchg_no_kill(tbl, entry, hpa, direction, true);
-
- if (!ret && ((*direction == DMA_FROM_DEVICE) ||
- (*direction == DMA_BIDIRECTIONAL))) {
- __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry);
- /*
- * kvmppc_rm_tce_iommu_do_map() updates the UA cache after
- * calling this so we still get here a valid UA.
- */
- if (pua && *pua)
- mm_iommu_ua_mark_dirty_rm(mm, be64_to_cpu(*pua));
- }
-
- return ret;
-}
-
-extern void iommu_tce_kill_rm(struct iommu_table *tbl,
- unsigned long entry, unsigned long pages)
-{
- if (tbl->it_ops->tce_kill)
- tbl->it_ops->tce_kill(tbl, entry, pages, true);
-}
-
-static void kvmppc_rm_clear_tce(struct kvm *kvm, struct iommu_table *tbl,
- unsigned long entry)
-{
- unsigned long hpa = 0;
- enum dma_data_direction dir = DMA_NONE;
-
- iommu_tce_xchg_no_kill_rm(kvm->mm, tbl, entry, &hpa, &dir);
-}
-
-static long kvmppc_rm_tce_iommu_mapped_dec(struct kvm *kvm,
- struct iommu_table *tbl, unsigned long entry)
-{
- struct mm_iommu_table_group_mem_t *mem = NULL;
- const unsigned long pgsize = 1ULL << tbl->it_page_shift;
- __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry);
-
- if (!pua)
- /* it_userspace allocation might be delayed */
- return H_TOO_HARD;
-
- mem = mm_iommu_lookup_rm(kvm->mm, be64_to_cpu(*pua), pgsize);
- if (!mem)
- return H_TOO_HARD;
-
- mm_iommu_mapped_dec(mem);
-
- *pua = cpu_to_be64(0);
-
- return H_SUCCESS;
-}
-
-static long kvmppc_rm_tce_iommu_do_unmap(struct kvm *kvm,
- struct iommu_table *tbl, unsigned long entry)
-{
- enum dma_data_direction dir = DMA_NONE;
- unsigned long hpa = 0;
- long ret;
-
- if (iommu_tce_xchg_no_kill_rm(kvm->mm, tbl, entry, &hpa, &dir))
- /*
- * real mode xchg can fail if struct page crosses
- * a page boundary
- */
- return H_TOO_HARD;
-
- if (dir == DMA_NONE)
- return H_SUCCESS;
-
- ret = kvmppc_rm_tce_iommu_mapped_dec(kvm, tbl, entry);
- if (ret)
- iommu_tce_xchg_no_kill_rm(kvm->mm, tbl, entry, &hpa, &dir);
-
- return ret;
-}
-
-static long kvmppc_rm_tce_iommu_unmap(struct kvm *kvm,
- struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl,
- unsigned long entry)
-{
- unsigned long i, ret = H_SUCCESS;
- unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
- unsigned long io_entry = entry * subpages;
-
- for (i = 0; i < subpages; ++i) {
- ret = kvmppc_rm_tce_iommu_do_unmap(kvm, tbl, io_entry + i);
- if (ret != H_SUCCESS)
- break;
- }
-
- return ret;
-}
-
-static long kvmppc_rm_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
- unsigned long entry, unsigned long ua,
- enum dma_data_direction dir)
-{
- long ret;
- unsigned long hpa = 0;
- __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry);
- struct mm_iommu_table_group_mem_t *mem;
-
- if (!pua)
- /* it_userspace allocation might be delayed */
- return H_TOO_HARD;
-
- mem = mm_iommu_lookup_rm(kvm->mm, ua, 1ULL << tbl->it_page_shift);
- if (!mem)
- return H_TOO_HARD;
-
- if (WARN_ON_ONCE_RM(mm_iommu_ua_to_hpa_rm(mem, ua, tbl->it_page_shift,
- &hpa)))
- return H_TOO_HARD;
-
- if (WARN_ON_ONCE_RM(mm_iommu_mapped_inc(mem)))
- return H_TOO_HARD;
-
- ret = iommu_tce_xchg_no_kill_rm(kvm->mm, tbl, entry, &hpa, &dir);
- if (ret) {
- mm_iommu_mapped_dec(mem);
- /*
- * real mode xchg can fail if struct page crosses
- * a page boundary
- */
- return H_TOO_HARD;
- }
-
- if (dir != DMA_NONE)
- kvmppc_rm_tce_iommu_mapped_dec(kvm, tbl, entry);
-
- *pua = cpu_to_be64(ua);
-
- return 0;
-}
-
-static long kvmppc_rm_tce_iommu_map(struct kvm *kvm,
- struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl,
- unsigned long entry, unsigned long ua,
- enum dma_data_direction dir)
-{
- unsigned long i, pgoff, ret = H_SUCCESS;
- unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
- unsigned long io_entry = entry * subpages;
-
- for (i = 0, pgoff = 0; i < subpages;
- ++i, pgoff += IOMMU_PAGE_SIZE(tbl)) {
-
- ret = kvmppc_rm_tce_iommu_do_map(kvm, tbl,
- io_entry + i, ua + pgoff, dir);
- if (ret != H_SUCCESS)
- break;
- }
-
- return ret;
-}
-
-long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
- unsigned long ioba, unsigned long tce)
-{
- struct kvmppc_spapr_tce_table *stt;
- long ret;
- struct kvmppc_spapr_tce_iommu_table *stit;
- unsigned long entry, ua = 0;
- enum dma_data_direction dir;
-
- /* udbg_printf("H_PUT_TCE(): liobn=0x%lx ioba=0x%lx, tce=0x%lx\n", */
- /* liobn, ioba, tce); */
-
- /* For radix, we might be in virtual mode, so punt */
- if (kvm_is_radix(vcpu->kvm))
- return H_TOO_HARD;
-
- stt = kvmppc_find_table(vcpu->kvm, liobn);
- if (!stt)
- return H_TOO_HARD;
-
- ret = kvmppc_rm_ioba_validate(stt, ioba, 1, tce == 0);
- if (ret != H_SUCCESS)
- return ret;
-
- ret = kvmppc_rm_tce_validate(stt, tce);
- if (ret != H_SUCCESS)
- return ret;
-
- dir = iommu_tce_direction(tce);
- if ((dir != DMA_NONE) && kvmppc_rm_tce_to_ua(vcpu->kvm, tce, &ua, NULL))
- return H_PARAMETER;
-
- entry = ioba >> stt->page_shift;
-
- list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
- if (dir == DMA_NONE)
- ret = kvmppc_rm_tce_iommu_unmap(vcpu->kvm, stt,
- stit->tbl, entry);
- else
- ret = kvmppc_rm_tce_iommu_map(vcpu->kvm, stt,
- stit->tbl, entry, ua, dir);
-
- iommu_tce_kill_rm(stit->tbl, entry, 1);
-
- if (ret != H_SUCCESS) {
- kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, entry);
- return ret;
- }
- }
-
- kvmppc_rm_tce_put(stt, entry, tce);
-
- return H_SUCCESS;
-}
-
-static long kvmppc_rm_ua_to_hpa(struct kvm_vcpu *vcpu,
- unsigned long ua, unsigned long *phpa)
-{
- pte_t *ptep, pte;
- unsigned shift = 0;
-
- /*
- * Called in real mode with MSR_EE = 0. We are safe here.
- * It is ok to do the lookup with arch.pgdir here, because
- * we are doing this on secondary cpus and current task there
- * is not the hypervisor. Also this is safe against THP in the
- * host, because an IPI to primary thread will wait for the secondary
- * to exit which will again result in the below page table walk

- * to finish.
- */
- ptep = __find_linux_pte(vcpu->arch.pgdir, ua, NULL, &shift);
- if (!ptep || !pte_present(*ptep))
- return -ENXIO;
- pte = *ptep;
-
- if (!shift)
- shift = PAGE_SHIFT;
-
- /* Avoid handling anything potentially complicated in realmode */
- if (shift > PAGE_SHIFT)
- return -EAGAIN;
-
- if (!pte_young(pte))
- return -EAGAIN;
-
- *phpa = (pte_pfn(pte) << PAGE_SHIFT) | (ua & ((1ULL << shift) - 1)) |
- (ua & ~PAGE_MASK);
-
- return 0;
-}
-
-long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
- unsigned long liobn, unsigned long ioba,
- unsigned long tce_list, unsigned long npages)
-{
- struct kvmppc_spapr_tce_table *stt;
- long i, ret = H_SUCCESS;
- unsigned long tces, entry, ua = 0;
- unsigned long *rmap = NULL;
- bool prereg = false;
- struct kvmppc_spapr_tce_iommu_table *stit;
-
- /* For radix, we might be in virtual mode, so punt */
- if (kvm_is_radix(vcpu->kvm))
- return H_TOO_HARD;
-
- stt = kvmppc_find_table(vcpu->kvm, liobn);
- if (!stt)
- return H_TOO_HARD;
-
- entry = ioba >> stt->page_shift;
- /*
- * The spec says that the maximum size of the list is 512 TCEs
- * so the whole table addressed resides in 4K page
- */
- if (npages > 512)
- return H_PARAMETER;
-
- if (tce_list & (SZ_4K - 1))
- return H_PARAMETER;
-
- ret = kvmppc_rm_ioba_validate(stt, ioba, npages, false);
- if (ret != H_SUCCESS)
- return ret;
-
- if (mm_iommu_preregistered(vcpu->kvm->mm)) {
- /*
- * We get here if guest memory was pre-registered which
- * is normally VFIO case and gpa->hpa translation does not
- * depend on hpt.
- */
- struct mm_iommu_table_group_mem_t *mem;
-
- if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce_list, &ua, NULL))
- return H_TOO_HARD;
-
- mem = mm_iommu_lookup_rm(vcpu->kvm->mm, ua, IOMMU_PAGE_SIZE_4K);
- if (mem)
- prereg = mm_iommu_ua_to_hpa_rm(mem, ua,
- IOMMU_PAGE_SHIFT_4K, &tces) == 0;
- }
-
- if (!prereg) {
- /*
- * This is usually a case of a guest with emulated devices only
- * when TCE list is not in preregistered memory.
- * We do not require memory to be preregistered in this case
- * so lock rmap and do __find_linux_pte_or_hugepte().
- */
- if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce_list, &ua, &rmap))
- return H_TOO_HARD;
-
- rmap = (void *) vmalloc_to_phys(rmap);
- if (WARN_ON_ONCE_RM(!rmap))
- return H_TOO_HARD;
-
- /*
- * Synchronize with the MMU notifier callbacks in
- * book3s_64_mmu_hv.c (kvm_unmap_hva_range_hv etc.).
- * While we have the rmap lock, code running on other CPUs
- * cannot finish unmapping the host real page that backs
- * this guest real page, so we are OK to access the host
- * real page.
- */
- lock_rmap(rmap);
- if (kvmppc_rm_ua_to_hpa(vcpu, ua, &tces)) {
- ret = H_TOO_HARD;
- goto unlock_exit;
- }
- }
-
- for (i = 0; i < npages; ++i) {
- unsigned long tce = be64_to_cpu(((u64 *)tces)[i]);
-
- ret = kvmppc_rm_tce_validate(stt, tce);
- if (ret != H_SUCCESS)
- goto unlock_exit;
- }
-
- for (i = 0; i < npages; ++i) {
- unsigned long tce = be64_to_cpu(((u64 *)tces)[i]);
-
- ua = 0;
- if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce, &ua, NULL)) {
- ret = H_PARAMETER;
- goto invalidate_exit;
- }
-
- list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
- ret = kvmppc_rm_tce_iommu_map(vcpu->kvm, stt,
- stit->tbl, entry + i, ua,
- iommu_tce_direction(tce));
-
- if (ret != H_SUCCESS) {
- kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl,
- entry);
- goto invalidate_exit;
- }
- }
-
- kvmppc_rm_tce_put(stt, entry + i, tce);
- }
-
-invalidate_exit:
- list_for_each_entry_lockless(stit, &stt->iommu_tables, next)
- iommu_tce_kill_rm(stit->tbl, entry, npages);
-
-unlock_exit:
- if (rmap)
- unlock_rmap(rmap);
-
- return ret;
-}
-
-long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
- unsigned long liobn, unsigned long ioba,
- unsigned long tce_value, unsigned long npages)
-{
- struct kvmppc_spapr_tce_table *stt;
- long i, ret;
- struct kvmppc_spapr_tce_iommu_table *stit;
-
- /* For radix, we might be in virtual mode, so punt */
- if (kvm_is_radix(vcpu->kvm))
- return H_TOO_HARD;
-
- stt = kvmppc_find_table(vcpu->kvm, liobn);
- if (!stt)
- return H_TOO_HARD;
-
- ret = kvmppc_rm_ioba_validate(stt, ioba, npages, tce_value == 0);
- if (ret != H_SUCCESS)
- return ret;
-
- /* Check permission bits only to allow userspace poison TCE for debug */
- if (tce_value & (TCE_PCI_WRITE | TCE_PCI_READ))
- return H_PARAMETER;
-
- list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
- unsigned long entry = ioba >> stt->page_shift;
-
- for (i = 0; i < npages; ++i) {
- ret = kvmppc_rm_tce_iommu_unmap(vcpu->kvm, stt,
- stit->tbl, entry + i);
-
- if (ret == H_SUCCESS)
- continue;
-
- if (ret == H_TOO_HARD)
- goto invalidate_exit;
-
- WARN_ON_ONCE_RM(1);
- kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, entry);
- }
- }
-
- for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift))
- kvmppc_rm_tce_put(stt, ioba >> stt->page_shift, tce_value);
-
-invalidate_exit:
- list_for_each_entry_lockless(stit, &stt->iommu_tables, next)
- iommu_tce_kill_rm(stit->tbl, ioba >> stt->page_shift, npages);
-
- return ret;
-}
-
-/* This can be called in either virtual mode or real mode */
-long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
- unsigned long ioba)
-{
- struct kvmppc_spapr_tce_table *stt;
- long ret;
- unsigned long idx;
- struct page *page;
- u64 *tbl;
-
- stt = kvmppc_find_table(vcpu->kvm, liobn);
- if (!stt)
- return H_TOO_HARD;
-
- ret = kvmppc_ioba_validate(stt, ioba, 1);
- if (ret != H_SUCCESS)
- return ret;
-
- idx = (ioba >> stt->page_shift) - stt->offset;
- page = stt->pages[idx / TCES_PER_PAGE];
- if (!page) {
- vcpu->arch.regs.gpr[4] = 0;
- return H_SUCCESS;
- }
- tbl = (u64 *)page_address(page);
-
- vcpu->arch.regs.gpr[4] = tbl[idx % TCES_PER_PAGE];
-
- return H_SUCCESS;
-}
-EXPORT_SYMBOL_GPL(kvmppc_h_get_tce);
-
-#endif /* KVM_BOOK3S_HV_POSSIBLE */
diff --git a/arch/powerpc/kvm/book3s_emulate.c b/arch/powerpc/kvm/book3s_emulate.c
index dad71d276b91..5bbfb2eed127 100644
--- a/arch/powerpc/kvm/book3s_emulate.c
+++ b/arch/powerpc/kvm/book3s_emulate.c
@@ -61,10 +61,6 @@
#define SPRN_GQR6 918
#define SPRN_GQR7 919
-/* Book3S_32 defines mfsrin(v) - but that messes up our abstract
- * function pointers, so let's just disable the define. */
-#undef mfsrin
-
enum priv_level {
PRIV_PROBLEM = 0,
PRIV_SUPER = 1,
@@ -235,7 +231,7 @@ void kvmppc_emulate_tabort(struct kvm_vcpu *vcpu, int ra_val)
#endif
-int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
+int kvmppc_core_emulate_op_pr(struct kvm_vcpu *vcpu,
unsigned int inst, int *advance)
{
int emulated = EMULATE_DONE;
@@ -272,7 +268,7 @@ int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
/*
* add rules to fit in ISA specification regarding TM
- * state transistion in TM disable/Suspended state,
+ * state transition in TM disable/Suspended state,
* and target TM state is TM inactive(00) state. (the
* change should be suppressed).
*/
@@ -371,13 +367,13 @@ int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
if (kvmppc_h_pr(vcpu, cmd) == EMULATE_DONE)
break;
- run->papr_hcall.nr = cmd;
+ vcpu->run->papr_hcall.nr = cmd;
for (i = 0; i < 9; ++i) {
ulong gpr = kvmppc_get_gpr(vcpu, 4 + i);
- run->papr_hcall.args[i] = gpr;
+ vcpu->run->papr_hcall.args[i] = gpr;
}
- run->exit_reason = KVM_EXIT_PAPR_HCALL;
+ vcpu->run->exit_reason = KVM_EXIT_PAPR_HCALL;
vcpu->arch.hcall_needed = 1;
emulated = EMULATE_EXIT_USER;
break;
@@ -629,7 +625,7 @@ int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
}
if (emulated == EMULATE_FAIL)
- emulated = kvmppc_emulate_paired_single(run, vcpu);
+ emulated = kvmppc_emulate_paired_single(vcpu);
return emulated;
}
@@ -840,6 +836,9 @@ int kvmppc_core_emulate_mtspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
case SPRN_MMCR1:
case SPRN_MMCR2:
case SPRN_UMMCR2:
+ case SPRN_UAMOR:
+ case SPRN_IAMR:
+ case SPRN_AMR:
#endif
break;
unprivileged:
@@ -1004,6 +1003,9 @@ int kvmppc_core_emulate_mfspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val
case SPRN_MMCR2:
case SPRN_UMMCR2:
case SPRN_TIR:
+ case SPRN_UAMOR:
+ case SPRN_IAMR:
+ case SPRN_AMR:
#endif
*spr_val = 0;
break;
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 2cefd071b848..6ba68dd6190b 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -42,6 +42,7 @@
#include <linux/module.h>
#include <linux/compiler.h>
#include <linux/of.h>
+#include <linux/irqdomain.h>
#include <asm/ftrace.h>
#include <asm/reg.h>
@@ -53,11 +54,13 @@
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <linux/uaccess.h>
+#include <asm/interrupt.h>
#include <asm/io.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu_context.h>
#include <asm/lppaca.h>
+#include <asm/pmc.h>
#include <asm/processor.h>
#include <asm/cputhreads.h>
#include <asm/page.h>
@@ -72,11 +75,13 @@
#include <asm/xics.h>
#include <asm/xive.h>
#include <asm/hw_breakpoint.h>
-#include <asm/kvm_host.h>
#include <asm/kvm_book3s_uvmem.h>
#include <asm/ultravisor.h>
+#include <asm/dtl.h>
+#include <asm/plpar_wrappers.h>
#include "book3s.h"
+#include "book3s_hv.h"
#define CREATE_TRACE_POINTS
#include "trace_hv.h"
@@ -102,16 +107,12 @@ static int target_smt_mode;
module_param(target_smt_mode, int, 0644);
MODULE_PARM_DESC(target_smt_mode, "Target threads per core (0 = max)");
-static bool indep_threads_mode = true;
-module_param(indep_threads_mode, bool, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(indep_threads_mode, "Independent-threads mode (only on POWER9)");
-
static bool one_vm_per_core;
module_param(one_vm_per_core, bool, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(one_vm_per_core, "Only run vCPUs from the same VM on a core (requires indep_threads_mode=N)");
+MODULE_PARM_DESC(one_vm_per_core, "Only run vCPUs from the same VM on a core (requires POWER8 or older)");
#ifdef CONFIG_KVM_XICS
-static struct kernel_param_ops module_param_ops = {
+static const struct kernel_param_ops module_param_ops = {
.set = param_set_int,
.get = param_get_int,
};
@@ -128,14 +129,6 @@ static bool nested = true;
module_param(nested, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(nested, "Enable nested virtualization (only on POWER9)");
-static inline bool nesting_enabled(struct kvm *kvm)
-{
- return kvm->arch.nested_enable && kvm_is_radix(kvm);
-}
-
-/* If set, the threads on each CPU core have to be in the same MMU mode */
-static bool no_mixing_hpt_and_radix;
-
static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu);
/*
@@ -231,13 +224,18 @@ static bool kvmppc_ipi_thread(int cpu)
static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu)
{
int cpu;
- struct swait_queue_head *wqp;
+ struct rcuwait *waitp;
- wqp = kvm_arch_vcpu_wq(vcpu);
- if (swq_has_sleeper(wqp)) {
- swake_up_one(wqp);
- ++vcpu->stat.halt_wakeup;
- }
+ /*
+ * rcuwait_wake_up contains smp_mb() which orders prior stores that
+ * create pending work vs below loads of cpu fields. The other side
+ * is the barrier in vcpu run that orders setting the cpu fields vs
+ * testing for pending work.
+ */
+
+ waitp = kvm_arch_vcpu_get_wait(vcpu);
+ if (rcuwait_wake_up(waitp))
+ ++vcpu->stat.generic.halt_wakeup;
cpu = READ_ONCE(vcpu->arch.thread_cpu);
if (cpu >= 0 && kvmppc_ipi_thread(cpu))
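A hedged sketch of the barrier pairing the comment above relies on (simplified, names assumed for illustration, not code from the patch):

/* kicker (this function)                    vcpu run loop (other side)
 * ------------------------                  ---------------------------
 * store pending work (e.g. prodded = 1)     store the cpu fields (thread_cpu)
 * smp_mb()  -- inside rcuwait_wake_up()     smp_mb() -- barrier in vcpu run
 * load thread_cpu for the IPI               load pending work and handle it
 *
 * With both barriers present, at least one side observes the other's store,
 * so a kick cannot be lost between the wakeup and the IPI path.
 */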
@@ -251,6 +249,7 @@ static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu)
/*
* We use the vcpu_load/put functions to measure stolen time.
+ *
* Stolen time is counted as time when either the vcpu is able to
* run as part of a virtual core, but the task running the vcore
* is preempted or sleeping, or when the vcpu needs something done
@@ -280,24 +279,34 @@ static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu)
* lock. The stolen times are measured in units of timebase ticks.
* (Note that the != TB_NIL checks below are purely defensive;
* they should never fail.)
+ *
+ * The POWER9 path is simpler, one vcpu per virtual core so the
+ * former case does not exist. If a vcpu is preempted when it is
+ * BUSY_IN_HOST and not ceded or otherwise blocked, then accumulate
+ * the stolen cycles in busy_stolen. RUNNING is not a preemptible
+ * state in the P9 path.
*/
-static void kvmppc_core_start_stolen(struct kvmppc_vcore *vc)
+static void kvmppc_core_start_stolen(struct kvmppc_vcore *vc, u64 tb)
{
unsigned long flags;
+ WARN_ON_ONCE(cpu_has_feature(CPU_FTR_ARCH_300));
+
spin_lock_irqsave(&vc->stoltb_lock, flags);
- vc->preempt_tb = mftb();
+ vc->preempt_tb = tb;
spin_unlock_irqrestore(&vc->stoltb_lock, flags);
}
-static void kvmppc_core_end_stolen(struct kvmppc_vcore *vc)
+static void kvmppc_core_end_stolen(struct kvmppc_vcore *vc, u64 tb)
{
unsigned long flags;
+ WARN_ON_ONCE(cpu_has_feature(CPU_FTR_ARCH_300));
+
spin_lock_irqsave(&vc->stoltb_lock, flags);
if (vc->preempt_tb != TB_NIL) {
- vc->stolen_tb += mftb() - vc->preempt_tb;
+ vc->stolen_tb += tb - vc->preempt_tb;
vc->preempt_tb = TB_NIL;
}
spin_unlock_irqrestore(&vc->stoltb_lock, flags);
@@ -307,6 +316,18 @@ static void kvmppc_core_vcpu_load_hv(struct kvm_vcpu *vcpu, int cpu)
{
struct kvmppc_vcore *vc = vcpu->arch.vcore;
unsigned long flags;
+ u64 now;
+
+ if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+ if (vcpu->arch.busy_preempt != TB_NIL) {
+ WARN_ON_ONCE(vcpu->arch.state != KVMPPC_VCPU_BUSY_IN_HOST);
+ vc->stolen_tb += mftb() - vcpu->arch.busy_preempt;
+ vcpu->arch.busy_preempt = TB_NIL;
+ }
+ return;
+ }
+
+ now = mftb();
/*
* We can test vc->runner without taking the vcore lock,
@@ -315,12 +336,12 @@ static void kvmppc_core_vcpu_load_hv(struct kvm_vcpu *vcpu, int cpu)
* ever sets it to NULL.
*/
if (vc->runner == vcpu && vc->vcore_state >= VCORE_SLEEPING)
- kvmppc_core_end_stolen(vc);
+ kvmppc_core_end_stolen(vc, now);
spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST &&
vcpu->arch.busy_preempt != TB_NIL) {
- vcpu->arch.busy_stolen += mftb() - vcpu->arch.busy_preempt;
+ vcpu->arch.busy_stolen += now - vcpu->arch.busy_preempt;
vcpu->arch.busy_preempt = TB_NIL;
}
spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags);
@@ -330,13 +351,32 @@ static void kvmppc_core_vcpu_put_hv(struct kvm_vcpu *vcpu)
{
struct kvmppc_vcore *vc = vcpu->arch.vcore;
unsigned long flags;
+ u64 now;
+
+ if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+ /*
+ * In the P9 path, RUNNABLE is not preemptible
+ * (nor takes host interrupts)
+ */
+ WARN_ON_ONCE(vcpu->arch.state == KVMPPC_VCPU_RUNNABLE);
+ /*
+ * Account stolen time when preempted while the vcpu task is
+ * running in the kernel (but not in qemu, which is INACTIVE).
+ */
+ if (task_is_running(current) &&
+ vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST)
+ vcpu->arch.busy_preempt = mftb();
+ return;
+ }
+
+ now = mftb();
if (vc->runner == vcpu && vc->vcore_state >= VCORE_SLEEPING)
- kvmppc_core_start_stolen(vc);
+ kvmppc_core_start_stolen(vc, now);
spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST)
- vcpu->arch.busy_preempt = mftb();
+ vcpu->arch.busy_preempt = now;
spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags);
}
@@ -346,7 +386,7 @@ static void kvmppc_set_pvr_hv(struct kvm_vcpu *vcpu, u32 pvr)
}
/* Dummy value used in computing PCR value below */
-#define PCR_ARCH_300 (PCR_ARCH_207 << 1)
+#define PCR_ARCH_31 (PCR_ARCH_300 << 1)
static int kvmppc_set_arch_compat(struct kvm_vcpu *vcpu, u32 arch_compat)
{
@@ -354,7 +394,9 @@ static int kvmppc_set_arch_compat(struct kvm_vcpu *vcpu, u32 arch_compat)
struct kvmppc_vcore *vc = vcpu->arch.vcore;
/* We can (emulate) our own architecture version and anything older */
- if (cpu_has_feature(CPU_FTR_ARCH_300))
+ if (cpu_has_feature(CPU_FTR_ARCH_31))
+ host_pcr_bit = PCR_ARCH_31;
+ else if (cpu_has_feature(CPU_FTR_ARCH_300))
host_pcr_bit = PCR_ARCH_300;
else if (cpu_has_feature(CPU_FTR_ARCH_207S))
host_pcr_bit = PCR_ARCH_207;
@@ -380,6 +422,9 @@ static int kvmppc_set_arch_compat(struct kvm_vcpu *vcpu, u32 arch_compat)
case PVR_ARCH_300:
guest_pcr_bit = PCR_ARCH_300;
break;
+ case PVR_ARCH_31:
+ guest_pcr_bit = PCR_ARCH_31;
+ break;
default:
return -EINVAL;
}
@@ -676,6 +721,8 @@ static u64 vcore_stolen_time(struct kvmppc_vcore *vc, u64 now)
u64 p;
unsigned long flags;
+ WARN_ON_ONCE(cpu_has_feature(CPU_FTR_ARCH_300));
+
spin_lock_irqsave(&vc->stoltb_lock, flags);
p = vc->stolen_tb;
if (vc->vcore_state != VCORE_INACTIVE &&
@@ -685,19 +732,55 @@ static u64 vcore_stolen_time(struct kvmppc_vcore *vc, u64 now)
return p;
}
-static void kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu,
- struct kvmppc_vcore *vc)
+static void __kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu,
+ struct lppaca *vpa,
+ unsigned int pcpu, u64 now,
+ unsigned long stolen)
{
struct dtl_entry *dt;
+
+ dt = vcpu->arch.dtl_ptr;
+
+ if (!dt)
+ return;
+
+ dt->dispatch_reason = 7;
+ dt->preempt_reason = 0;
+ dt->processor_id = cpu_to_be16(pcpu + vcpu->arch.ptid);
+ dt->enqueue_to_dispatch_time = cpu_to_be32(stolen);
+ dt->ready_to_enqueue_time = 0;
+ dt->waiting_to_ready_time = 0;
+ dt->timebase = cpu_to_be64(now);
+ dt->fault_addr = 0;
+ dt->srr0 = cpu_to_be64(kvmppc_get_pc(vcpu));
+ dt->srr1 = cpu_to_be64(vcpu->arch.shregs.msr);
+
+ ++dt;
+ if (dt == vcpu->arch.dtl.pinned_end)
+ dt = vcpu->arch.dtl.pinned_addr;
+ vcpu->arch.dtl_ptr = dt;
+ /* order writing *dt vs. writing vpa->dtl_idx */
+ smp_wmb();
+ vpa->dtl_idx = cpu_to_be64(++vcpu->arch.dtl_index);
+
+ /* vcpu->arch.dtl.dirty is set by the caller */
+}
+
+static void kvmppc_update_vpa_dispatch(struct kvm_vcpu *vcpu,
+ struct kvmppc_vcore *vc)
+{
struct lppaca *vpa;
unsigned long stolen;
unsigned long core_stolen;
u64 now;
unsigned long flags;
- dt = vcpu->arch.dtl_ptr;
vpa = vcpu->arch.vpa.pinned_addr;
+ if (!vpa)
+ return;
+
now = mftb();
+
core_stolen = vcore_stolen_time(vc, now);
stolen = core_stolen - vcpu->arch.stolen_logged;
vcpu->arch.stolen_logged = core_stolen;
@@ -705,23 +788,35 @@ static void kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu,
stolen += vcpu->arch.busy_stolen;
vcpu->arch.busy_stolen = 0;
spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags);
- if (!dt || !vpa)
+
+ vpa->enqueue_dispatch_tb = cpu_to_be64(be64_to_cpu(vpa->enqueue_dispatch_tb) + stolen);
+
+ __kvmppc_create_dtl_entry(vcpu, vpa, vc->pcpu, now + vc->tb_offset, stolen);
+
+ vcpu->arch.vpa.dirty = true;
+}
+
+static void kvmppc_update_vpa_dispatch_p9(struct kvm_vcpu *vcpu,
+ struct kvmppc_vcore *vc,
+ u64 now)
+{
+ struct lppaca *vpa;
+ unsigned long stolen;
+ unsigned long stolen_delta;
+
+ vpa = vcpu->arch.vpa.pinned_addr;
+ if (!vpa)
return;
- memset(dt, 0, sizeof(struct dtl_entry));
- dt->dispatch_reason = 7;
- dt->processor_id = cpu_to_be16(vc->pcpu + vcpu->arch.ptid);
- dt->timebase = cpu_to_be64(now + vc->tb_offset);
- dt->enqueue_to_dispatch_time = cpu_to_be32(stolen);
- dt->srr0 = cpu_to_be64(kvmppc_get_pc(vcpu));
- dt->srr1 = cpu_to_be64(vcpu->arch.shregs.msr);
- ++dt;
- if (dt == vcpu->arch.dtl.pinned_end)
- dt = vcpu->arch.dtl.pinned_addr;
- vcpu->arch.dtl_ptr = dt;
- /* order writing *dt vs. writing vpa->dtl_idx */
- smp_wmb();
- vpa->dtl_idx = cpu_to_be64(++vcpu->arch.dtl_index);
- vcpu->arch.dtl.dirty = true;
+
+ stolen = vc->stolen_tb;
+ stolen_delta = stolen - vcpu->arch.stolen_logged;
+ vcpu->arch.stolen_logged = stolen;
+
+ vpa->enqueue_dispatch_tb = cpu_to_be64(stolen);
+
+ __kvmppc_create_dtl_entry(vcpu, vpa, vc->pcpu, now, stolen_delta);
+
+ vcpu->arch.vpa.dirty = true;
}
/* See if there is a doorbell interrupt pending for a vcpu */
@@ -732,6 +827,8 @@ static bool kvmppc_doorbell_pending(struct kvm_vcpu *vcpu)
if (vcpu->arch.doorbell_request)
return true;
+ if (cpu_has_feature(CPU_FTR_ARCH_300))
+ return false;
/*
* Ensure that the read of vcore->dpdes comes after the read
* of vcpu->doorbell_request. This barrier matches the
@@ -770,7 +867,7 @@ static int kvmppc_h_set_mode(struct kvm_vcpu *vcpu, unsigned long mflags,
return H_P3;
vcpu->arch.ciabr = value1;
return H_SUCCESS;
- case H_SET_MODE_RESOURCE_SET_DAWR:
+ case H_SET_MODE_RESOURCE_SET_DAWR0:
if (!kvmppc_power8_compatible(vcpu))
return H_P2;
if (!ppc_breakpoint_available())
@@ -779,12 +876,32 @@ static int kvmppc_h_set_mode(struct kvm_vcpu *vcpu, unsigned long mflags,
return H_UNSUPPORTED_FLAG_START;
if (value2 & DABRX_HYP)
return H_P4;
- vcpu->arch.dawr = value1;
- vcpu->arch.dawrx = value2;
+ vcpu->arch.dawr0 = value1;
+ vcpu->arch.dawrx0 = value2;
+ return H_SUCCESS;
+ case H_SET_MODE_RESOURCE_SET_DAWR1:
+ if (!kvmppc_power8_compatible(vcpu))
+ return H_P2;
+ if (!ppc_breakpoint_available())
+ return H_P2;
+ if (!cpu_has_feature(CPU_FTR_DAWR1))
+ return H_P2;
+ if (!vcpu->kvm->arch.dawr1_enabled)
+ return H_FUNCTION;
+ if (mflags)
+ return H_UNSUPPORTED_FLAG_START;
+ if (value2 & DABRX_HYP)
+ return H_P4;
+ vcpu->arch.dawr1 = value1;
+ vcpu->arch.dawrx1 = value2;
return H_SUCCESS;
case H_SET_MODE_RESOURCE_ADDR_TRANS_MODE:
- /* KVM does not support mflags=2 (AIL=2) */
- if (mflags != 0 && mflags != 3)
+ /*
+ * KVM does not support mflags=2 (AIL=2) and AIL=1 is reserved.
+ * Keep this in synch with kvmppc_filter_guest_lpcr_hv.
+ */
+ if (cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG) &&
+ kvmhv_vcpu_is_radix(vcpu) && mflags == 3)
return H_UNSUPPORTED_FLAG_START;
return H_TOO_HARD;
default:
@@ -876,14 +993,19 @@ static int kvm_arch_vcpu_yield_to(struct kvm_vcpu *target)
* H_SUCCESS if the source vcore wasn't idle (e.g. if it may
* have useful work to do and should not confer) so we don't
* recheck that here.
+ *
+ * In the case of the P9 single vcpu per vcore case, the real
+ * mode handler is not called but no other threads are in the
+ * source vcore.
*/
-
- spin_lock(&vcore->lock);
- if (target->arch.state == KVMPPC_VCPU_RUNNABLE &&
- vcore->vcore_state != VCORE_INACTIVE &&
- vcore->runner)
- target = vcore->runner;
- spin_unlock(&vcore->lock);
+ if (!cpu_has_feature(CPU_FTR_ARCH_300)) {
+ spin_lock(&vcore->lock);
+ if (target->arch.state == KVMPPC_VCPU_RUNNABLE &&
+ vcore->vcore_state != VCORE_INACTIVE &&
+ vcore->runner)
+ target = vcore->runner;
+ spin_unlock(&vcore->lock);
+ }
return kvm_vcpu_yield_to(target);
}
@@ -901,8 +1023,71 @@ static int kvmppc_get_yield_count(struct kvm_vcpu *vcpu)
return yield_count;
}
+/*
+ * H_RPT_INVALIDATE hcall handler for nested guests.
+ *
+ * Handles only nested process-scoped invalidation requests in L0.
+ */
+static int kvmppc_nested_h_rpt_invalidate(struct kvm_vcpu *vcpu)
+{
+ unsigned long type = kvmppc_get_gpr(vcpu, 6);
+ unsigned long pid, pg_sizes, start, end;
+
+ /*
+ * The partition-scoped invalidations aren't handled here in L0.
+ */
+ if (type & H_RPTI_TYPE_NESTED)
+ return RESUME_HOST;
+
+ pid = kvmppc_get_gpr(vcpu, 4);
+ pg_sizes = kvmppc_get_gpr(vcpu, 7);
+ start = kvmppc_get_gpr(vcpu, 8);
+ end = kvmppc_get_gpr(vcpu, 9);
+
+ do_h_rpt_invalidate_prt(pid, vcpu->arch.nested->shadow_lpid,
+ type, pg_sizes, start, end);
+
+ kvmppc_set_gpr(vcpu, 3, H_SUCCESS);
+ return RESUME_GUEST;
+}
+
+static long kvmppc_h_rpt_invalidate(struct kvm_vcpu *vcpu,
+ unsigned long id, unsigned long target,
+ unsigned long type, unsigned long pg_sizes,
+ unsigned long start, unsigned long end)
+{
+ if (!kvm_is_radix(vcpu->kvm))
+ return H_UNSUPPORTED;
+
+ if (end < start)
+ return H_P5;
+
+ /*
+ * Partition-scoped invalidation for nested guests.
+ */
+ if (type & H_RPTI_TYPE_NESTED) {
+ if (!nesting_enabled(vcpu->kvm))
+ return H_FUNCTION;
+
+ /* Support only cores as target */
+ if (target != H_RPTI_TARGET_CMMU)
+ return H_P2;
+
+ return do_h_rpt_invalidate_pat(vcpu, id, type, pg_sizes,
+ start, end);
+ }
+
+ /*
+ * Process-scoped invalidation for L1 guests.
+ */
+ do_h_rpt_invalidate_prt(id, vcpu->kvm->arch.lpid,
+ type, pg_sizes, start, end);
+ return H_SUCCESS;
+}
+
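For reference, a hedged summary of the register convention this handler relies on, as wired up further down in kvmppc_pseries_do_hcall() in this same patch (not taken from external documentation):

/* H_RPT_INVALIDATE arguments as passed by the dispatch code below:
 *   r4 = id (LPID for nested, PID for process-scoped), r5 = target,
 *   r6 = type flags, r7 = page sizes, r8 = start, r9 = end.
 */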
int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
{
+ struct kvm *kvm = vcpu->kvm;
unsigned long req = kvmppc_get_gpr(vcpu, 3);
unsigned long target, ret = H_SUCCESS;
int yield_count;
@@ -914,17 +1099,63 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
return RESUME_HOST;
switch (req) {
+ case H_REMOVE:
+ ret = kvmppc_h_remove(vcpu, kvmppc_get_gpr(vcpu, 4),
+ kvmppc_get_gpr(vcpu, 5),
+ kvmppc_get_gpr(vcpu, 6));
+ if (ret == H_TOO_HARD)
+ return RESUME_HOST;
+ break;
+ case H_ENTER:
+ ret = kvmppc_h_enter(vcpu, kvmppc_get_gpr(vcpu, 4),
+ kvmppc_get_gpr(vcpu, 5),
+ kvmppc_get_gpr(vcpu, 6),
+ kvmppc_get_gpr(vcpu, 7));
+ if (ret == H_TOO_HARD)
+ return RESUME_HOST;
+ break;
+ case H_READ:
+ ret = kvmppc_h_read(vcpu, kvmppc_get_gpr(vcpu, 4),
+ kvmppc_get_gpr(vcpu, 5));
+ if (ret == H_TOO_HARD)
+ return RESUME_HOST;
+ break;
+ case H_CLEAR_MOD:
+ ret = kvmppc_h_clear_mod(vcpu, kvmppc_get_gpr(vcpu, 4),
+ kvmppc_get_gpr(vcpu, 5));
+ if (ret == H_TOO_HARD)
+ return RESUME_HOST;
+ break;
+ case H_CLEAR_REF:
+ ret = kvmppc_h_clear_ref(vcpu, kvmppc_get_gpr(vcpu, 4),
+ kvmppc_get_gpr(vcpu, 5));
+ if (ret == H_TOO_HARD)
+ return RESUME_HOST;
+ break;
+ case H_PROTECT:
+ ret = kvmppc_h_protect(vcpu, kvmppc_get_gpr(vcpu, 4),
+ kvmppc_get_gpr(vcpu, 5),
+ kvmppc_get_gpr(vcpu, 6));
+ if (ret == H_TOO_HARD)
+ return RESUME_HOST;
+ break;
+ case H_BULK_REMOVE:
+ ret = kvmppc_h_bulk_remove(vcpu);
+ if (ret == H_TOO_HARD)
+ return RESUME_HOST;
+ break;
+
case H_CEDE:
break;
case H_PROD:
target = kvmppc_get_gpr(vcpu, 4);
- tvcpu = kvmppc_find_vcpu(vcpu->kvm, target);
+ tvcpu = kvmppc_find_vcpu(kvm, target);
if (!tvcpu) {
ret = H_PARAMETER;
break;
}
tvcpu->arch.prodded = 1;
- smp_mb();
+ smp_mb(); /* This orders prodded store vs ceded load */
if (tvcpu->arch.ceded)
kvmppc_fast_vcpu_kick_hv(tvcpu);
break;
@@ -932,7 +1163,7 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
target = kvmppc_get_gpr(vcpu, 4);
if (target == -1)
break;
- tvcpu = kvmppc_find_vcpu(vcpu->kvm, target);
+ tvcpu = kvmppc_find_vcpu(kvm, target);
if (!tvcpu) {
ret = H_PARAMETER;
break;
@@ -948,12 +1179,12 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
kvmppc_get_gpr(vcpu, 6));
break;
case H_RTAS:
- if (list_empty(&vcpu->kvm->arch.rtas_tokens))
+ if (list_empty(&kvm->arch.rtas_tokens))
return RESUME_HOST;
- idx = srcu_read_lock(&vcpu->kvm->srcu);
+ idx = srcu_read_lock(&kvm->srcu);
rc = kvmppc_rtas_hcall(vcpu);
- srcu_read_unlock(&vcpu->kvm->srcu, idx);
+ srcu_read_unlock(&kvm->srcu, idx);
if (rc == -ENOENT)
return RESUME_HOST;
@@ -1034,18 +1265,26 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
break;
#endif
case H_RANDOM:
- if (!powernv_get_random_long(&vcpu->arch.regs.gpr[4]))
+ if (!arch_get_random_seed_longs(&vcpu->arch.regs.gpr[4], 1))
ret = H_HARDWARE;
break;
+ case H_RPT_INVALIDATE:
+ ret = kvmppc_h_rpt_invalidate(vcpu, kvmppc_get_gpr(vcpu, 4),
+ kvmppc_get_gpr(vcpu, 5),
+ kvmppc_get_gpr(vcpu, 6),
+ kvmppc_get_gpr(vcpu, 7),
+ kvmppc_get_gpr(vcpu, 8),
+ kvmppc_get_gpr(vcpu, 9));
+ break;
case H_SET_PARTITION_TABLE:
ret = H_FUNCTION;
- if (nesting_enabled(vcpu->kvm))
+ if (nesting_enabled(kvm))
ret = kvmhv_set_partition_table(vcpu);
break;
case H_ENTER_NESTED:
ret = H_FUNCTION;
- if (!nesting_enabled(vcpu->kvm))
+ if (!nesting_enabled(kvm))
break;
ret = kvmhv_enter_nested_guest(vcpu);
if (ret == H_INTERRUPT) {
@@ -1060,12 +1299,12 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
break;
case H_TLB_INVALIDATE:
ret = H_FUNCTION;
- if (nesting_enabled(vcpu->kvm))
+ if (nesting_enabled(kvm))
ret = kvmhv_do_nested_tlbie(vcpu);
break;
case H_COPY_TOFROM_GUEST:
ret = H_FUNCTION;
- if (nesting_enabled(vcpu->kvm))
+ if (nesting_enabled(kvm))
ret = kvmhv_copy_tofrom_guest_nested(vcpu);
break;
case H_PAGE_INIT:
@@ -1074,42 +1313,59 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
kvmppc_get_gpr(vcpu, 6));
break;
case H_SVM_PAGE_IN:
- ret = kvmppc_h_svm_page_in(vcpu->kvm,
- kvmppc_get_gpr(vcpu, 4),
- kvmppc_get_gpr(vcpu, 5),
- kvmppc_get_gpr(vcpu, 6));
+ ret = H_UNSUPPORTED;
+ if (kvmppc_get_srr1(vcpu) & MSR_S)
+ ret = kvmppc_h_svm_page_in(kvm,
+ kvmppc_get_gpr(vcpu, 4),
+ kvmppc_get_gpr(vcpu, 5),
+ kvmppc_get_gpr(vcpu, 6));
break;
case H_SVM_PAGE_OUT:
- ret = kvmppc_h_svm_page_out(vcpu->kvm,
- kvmppc_get_gpr(vcpu, 4),
- kvmppc_get_gpr(vcpu, 5),
- kvmppc_get_gpr(vcpu, 6));
+ ret = H_UNSUPPORTED;
+ if (kvmppc_get_srr1(vcpu) & MSR_S)
+ ret = kvmppc_h_svm_page_out(kvm,
+ kvmppc_get_gpr(vcpu, 4),
+ kvmppc_get_gpr(vcpu, 5),
+ kvmppc_get_gpr(vcpu, 6));
break;
case H_SVM_INIT_START:
- ret = kvmppc_h_svm_init_start(vcpu->kvm);
+ ret = H_UNSUPPORTED;
+ if (kvmppc_get_srr1(vcpu) & MSR_S)
+ ret = kvmppc_h_svm_init_start(kvm);
break;
case H_SVM_INIT_DONE:
- ret = kvmppc_h_svm_init_done(vcpu->kvm);
+ ret = H_UNSUPPORTED;
+ if (kvmppc_get_srr1(vcpu) & MSR_S)
+ ret = kvmppc_h_svm_init_done(kvm);
break;
case H_SVM_INIT_ABORT:
- ret = kvmppc_h_svm_init_abort(vcpu->kvm);
+ /*
+ * Even if that call is made by the Ultravisor, the SRR1 value
+ * is the guest context one, with the secure bit clear as it has
+ * not yet been secured. So we can't check it here.
+ * Instead the kvm->arch.secure_guest flag is checked inside
+ * kvmppc_h_svm_init_abort().
+ */
+ ret = kvmppc_h_svm_init_abort(kvm);
break;
default:
return RESUME_HOST;
}
+ WARN_ON_ONCE(ret == H_TOO_HARD);
kvmppc_set_gpr(vcpu, 3, ret);
vcpu->arch.hcall_needed = 0;
return RESUME_GUEST;
}
/*
- * Handle H_CEDE in the nested virtualization case where we haven't
- * called the real-mode hcall handlers in book3s_hv_rmhandlers.S.
+ * Handle H_CEDE in the P9 path where we don't call the real-mode hcall
+ * handlers in book3s_hv_rmhandlers.S.
+ *
* This has to be done early, not in kvmppc_pseries_do_hcall(), so
* that the cede logic in kvmppc_run_single_vcpu() works properly.
*/
-static void kvmppc_nested_cede(struct kvm_vcpu *vcpu)
+static void kvmppc_cede(struct kvm_vcpu *vcpu)
{
vcpu->arch.shregs.msr |= MSR_EE;
vcpu->arch.ceded = 1;
@@ -1129,6 +1385,12 @@ static int kvmppc_hcall_impl_hv(unsigned long cmd)
case H_CONFER:
case H_REGISTER_VPA:
case H_SET_MODE:
+#ifdef CONFIG_SPAPR_TCE_IOMMU
+ case H_GET_TCE:
+ case H_PUT_TCE:
+ case H_PUT_TCE_INDIRECT:
+ case H_STUFF_TCE:
+#endif
case H_LOGICAL_CI_LOAD:
case H_LOGICAL_CI_STORE:
#ifdef CONFIG_KVM_XICS
@@ -1140,6 +1402,7 @@ static int kvmppc_hcall_impl_hv(unsigned long cmd)
case H_XIRR_X:
#endif
case H_PAGE_INIT:
+ case H_RPT_INVALIDATE:
return 1;
}
@@ -1147,8 +1410,7 @@ static int kvmppc_hcall_impl_hv(unsigned long cmd)
return kvmppc_hcall_impl_hv_realmode(cmd);
}
-static int kvmppc_emulate_debug_inst(struct kvm_run *run,
- struct kvm_vcpu *vcpu)
+static int kvmppc_emulate_debug_inst(struct kvm_vcpu *vcpu)
{
u32 last_inst;
@@ -1162,8 +1424,8 @@ static int kvmppc_emulate_debug_inst(struct kvm_run *run,
}
if (last_inst == KVMPPC_INST_SW_BREAKPOINT) {
- run->exit_reason = KVM_EXIT_DEBUG;
- run->debug.arch.address = kvmppc_get_pc(vcpu);
+ vcpu->run->exit_reason = KVM_EXIT_DEBUG;
+ vcpu->run->debug.arch.address = kvmppc_get_pc(vcpu);
return RESUME_HOST;
} else {
kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
@@ -1224,9 +1486,9 @@ static int kvmppc_emulate_doorbell_instr(struct kvm_vcpu *vcpu)
switch (get_xop(inst)) {
case OP_31_XOP_MSGSNDP:
arg = kvmppc_get_gpr(vcpu, rb);
- if (((arg >> 27) & 0xf) != PPC_DBELL_SERVER)
+ if (((arg >> 27) & 0x1f) != PPC_DBELL_SERVER)
break;
- arg &= 0x3f;
+ arg &= 0x7f;
if (arg >= kvm->arch.emul_smt_mode)
break;
tvcpu = kvmppc_find_vcpu(kvm, vcpu->vcpu_id - thr + arg);
@@ -1239,7 +1501,7 @@ static int kvmppc_emulate_doorbell_instr(struct kvm_vcpu *vcpu)
break;
case OP_31_XOP_MSGCLRP:
arg = kvmppc_get_gpr(vcpu, rb);
- if (((arg >> 27) & 0xf) != PPC_DBELL_SERVER)
+ if (((arg >> 27) & 0x1f) != PPC_DBELL_SERVER)
break;
vcpu->arch.vcore->dpdes = 0;
vcpu->arch.doorbell_request = 0;
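A hedged decode of the msgsndp/msgclrp RB operand as the widened masks above treat it (field positions inferred from this hunk, illustrative only):

/* Illustrative restatement of the handler's own reads: */
unsigned long arg     = kvmppc_get_gpr(vcpu, rb);  /* doorbell operand */
unsigned long msgtype = (arg >> 27) & 0x1f;        /* must equal PPC_DBELL_SERVER */
unsigned long target  = arg & 0x7f;                /* thread id, checked against kvm->arch.emul_smt_mode */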
@@ -1264,9 +1526,47 @@ static int kvmppc_emulate_doorbell_instr(struct kvm_vcpu *vcpu)
return RESUME_GUEST;
}
-static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
+/*
+ * If the lppaca had pmcregs_in_use clear when we exited the guest, then
+ * HFSCR_PM is cleared for next entry. If the guest then tries to access
+ * the PMU SPRs, we get this facility unavailable interrupt. Putting HFSCR_PM
+ * back in the guest HFSCR will cause the next entry to load the PMU SPRs and
+ * allow the guest access to continue.
+ */
+static int kvmppc_pmu_unavailable(struct kvm_vcpu *vcpu)
+{
+ if (!(vcpu->arch.hfscr_permitted & HFSCR_PM))
+ return EMULATE_FAIL;
+
+ vcpu->arch.hfscr |= HFSCR_PM;
+
+ return RESUME_GUEST;
+}
+
+static int kvmppc_ebb_unavailable(struct kvm_vcpu *vcpu)
+{
+ if (!(vcpu->arch.hfscr_permitted & HFSCR_EBB))
+ return EMULATE_FAIL;
+
+ vcpu->arch.hfscr |= HFSCR_EBB;
+
+ return RESUME_GUEST;
+}
+
+static int kvmppc_tm_unavailable(struct kvm_vcpu *vcpu)
+{
+ if (!(vcpu->arch.hfscr_permitted & HFSCR_TM))
+ return EMULATE_FAIL;
+
+ vcpu->arch.hfscr |= HFSCR_TM;
+
+ return RESUME_GUEST;
+}
+
+static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
struct task_struct *tsk)
{
+ struct kvm_run *run = vcpu->run;
int r = RESUME_HOST;
vcpu->stat.sum_exits++;
@@ -1293,6 +1593,10 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
run->ready_for_interrupt_injection = 1;
switch (vcpu->arch.trap) {
/* We're good on these - the host merely wanted to get our attention */
+ case BOOK3S_INTERRUPT_NESTED_HV_DECREMENTER:
+ WARN_ON_ONCE(1); /* Should never happen */
+ vcpu->arch.trap = BOOK3S_INTERRUPT_HV_DECREMENTER;
+ fallthrough;
case BOOK3S_INTERRUPT_HV_DECREMENTER:
vcpu->stat.dec_exits++;
r = RESUME_GUEST;
@@ -1309,9 +1613,15 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
case BOOK3S_INTERRUPT_SYSTEM_RESET:
r = RESUME_GUEST;
break;
- case BOOK3S_INTERRUPT_MACHINE_CHECK:
- /* Print the MCE event to host console. */
- machine_check_print_event_info(&vcpu->arch.mce_evt, false, true);
+ case BOOK3S_INTERRUPT_MACHINE_CHECK: {
+ static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
+ DEFAULT_RATELIMIT_BURST);
+ /*
+ * Print the MCE event to host console. Ratelimit so the guest
+ * can't flood the host log.
+ */
+ if (__ratelimit(&rs))
+ machine_check_print_event_info(&vcpu->arch.mce_evt, false, true);
/*
* If the guest can do FWNMI, exit to userspace so it can
@@ -1339,6 +1649,7 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
r = RESUME_HOST;
break;
+ }
case BOOK3S_INTERRUPT_PROGRAM:
{
ulong flags;
@@ -1355,13 +1666,39 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
}
case BOOK3S_INTERRUPT_SYSCALL:
{
- /* hcall - punt to userspace */
int i;
- /* hypercall with MSR_PR has already been handled in rmode,
- * and never reaches here.
- */
+ if (unlikely(vcpu->arch.shregs.msr & MSR_PR)) {
+ /*
+ * Guest userspace executed sc 1. This can only be
+ * reached by the P9 path because the old path
+ * handles this case in realmode hcall handlers.
+ */
+ if (!kvmhv_vcpu_is_radix(vcpu)) {
+ /*
+ * A guest could be running PR KVM, so this
+ * may be a PR KVM hcall. It must be reflected
+ * to the guest kernel as a sc interrupt.
+ */
+ kvmppc_core_queue_syscall(vcpu);
+ } else {
+ /*
+ * Radix guests can not run PR KVM or nested HV
+ * hash guests which might run PR KVM, so this
+ * is always a privilege fault. Send a program
+ * check to guest kernel.
+ */
+ kvmppc_core_queue_program(vcpu, SRR1_PROGPRIV);
+ }
+ r = RESUME_GUEST;
+ break;
+ }
+ /*
+ * hcall - gather args and set exit_reason. This will next be
+ * handled by kvmppc_pseries_do_hcall which may be able to deal
+ * with it and resume guest, or may punt to userspace.
+ */
run->papr_hcall.nr = kvmppc_get_gpr(vcpu, 3);
for (i = 0; i < 9; ++i)
run->papr_hcall.args[i] = kvmppc_get_gpr(vcpu, 4 + i);
@@ -1374,20 +1711,103 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
* We get these next two if the guest accesses a page which it thinks
* it has mapped but which is not actually present, either because
* it is for an emulated I/O device or because the corresponding
- * host page has been paged out. Any other HDSI/HISI interrupts
- * have been handled already.
+ * host page has been paged out.
+ *
+ * Any other HDSI/HISI interrupts have been handled already for P7/8
+ * guests. For POWER9 hash guests not using rmhandlers, basic hash
+ * fault handling is done here.
*/
- case BOOK3S_INTERRUPT_H_DATA_STORAGE:
- r = RESUME_PAGE_FAULT;
+ case BOOK3S_INTERRUPT_H_DATA_STORAGE: {
+ unsigned long vsid;
+ long err;
+
+ if (cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG) &&
+ unlikely(vcpu->arch.fault_dsisr == HDSISR_CANARY)) {
+ r = RESUME_GUEST; /* Just retry if it's the canary */
+ break;
+ }
+
+ if (kvm_is_radix(vcpu->kvm) || !cpu_has_feature(CPU_FTR_ARCH_300)) {
+ /*
+ * Radix doesn't require anything, and pre-ISAv3.0 hash
+ * already attempted to handle this in rmhandlers. The
+ * hash fault handling below is v3 only (it uses ASDR
+ * via fault_gpa).
+ */
+ r = RESUME_PAGE_FAULT;
+ break;
+ }
+
+ if (!(vcpu->arch.fault_dsisr & (DSISR_NOHPTE | DSISR_PROTFAULT))) {
+ kvmppc_core_queue_data_storage(vcpu,
+ vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
+ r = RESUME_GUEST;
+ break;
+ }
+
+ if (!(vcpu->arch.shregs.msr & MSR_DR))
+ vsid = vcpu->kvm->arch.vrma_slb_v;
+ else
+ vsid = vcpu->arch.fault_gpa;
+
+ err = kvmppc_hpte_hv_fault(vcpu, vcpu->arch.fault_dar,
+ vsid, vcpu->arch.fault_dsisr, true);
+ if (err == 0) {
+ r = RESUME_GUEST;
+ } else if (err == -1 || err == -2) {
+ r = RESUME_PAGE_FAULT;
+ } else {
+ kvmppc_core_queue_data_storage(vcpu,
+ vcpu->arch.fault_dar, err);
+ r = RESUME_GUEST;
+ }
break;
- case BOOK3S_INTERRUPT_H_INST_STORAGE:
+ }
+ case BOOK3S_INTERRUPT_H_INST_STORAGE: {
+ unsigned long vsid;
+ long err;
+
vcpu->arch.fault_dar = kvmppc_get_pc(vcpu);
vcpu->arch.fault_dsisr = vcpu->arch.shregs.msr &
DSISR_SRR1_MATCH_64S;
- if (vcpu->arch.shregs.msr & HSRR1_HISI_WRITE)
- vcpu->arch.fault_dsisr |= DSISR_ISSTORE;
- r = RESUME_PAGE_FAULT;
+ if (kvm_is_radix(vcpu->kvm) || !cpu_has_feature(CPU_FTR_ARCH_300)) {
+ /*
+ * Radix doesn't require anything, and pre-ISAv3.0 hash
+ * already attempted to handle this in rmhandlers. The
+ * hash fault handling below is v3 only (it uses ASDR
+ * via fault_gpa).
+ */
+ if (vcpu->arch.shregs.msr & HSRR1_HISI_WRITE)
+ vcpu->arch.fault_dsisr |= DSISR_ISSTORE;
+ r = RESUME_PAGE_FAULT;
+ break;
+ }
+
+ if (!(vcpu->arch.fault_dsisr & SRR1_ISI_NOPT)) {
+ kvmppc_core_queue_inst_storage(vcpu,
+ vcpu->arch.fault_dsisr);
+ r = RESUME_GUEST;
+ break;
+ }
+
+ if (!(vcpu->arch.shregs.msr & MSR_IR))
+ vsid = vcpu->kvm->arch.vrma_slb_v;
+ else
+ vsid = vcpu->arch.fault_gpa;
+
+ err = kvmppc_hpte_hv_fault(vcpu, vcpu->arch.fault_dar,
+ vsid, vcpu->arch.fault_dsisr, false);
+ if (err == 0) {
+ r = RESUME_GUEST;
+ } else if (err == -1) {
+ r = RESUME_PAGE_FAULT;
+ } else {
+ kvmppc_core_queue_inst_storage(vcpu, err);
+ r = RESUME_GUEST;
+ }
break;
+ }
+
/*
* This occurs if the guest executes an illegal instruction.
* If the guest debug is disabled, generate a program interrupt
@@ -1401,12 +1821,27 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
swab32(vcpu->arch.emul_inst) :
vcpu->arch.emul_inst;
if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) {
- r = kvmppc_emulate_debug_inst(run, vcpu);
+ r = kvmppc_emulate_debug_inst(vcpu);
} else {
kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
r = RESUME_GUEST;
}
break;
+
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ case BOOK3S_INTERRUPT_HV_SOFTPATCH:
+ /*
+ * This occurs for various TM-related instructions that
+ * we need to emulate on POWER9 DD2.2. We have already
+ * handled the cases where the guest was in real-suspend
+ * mode and was transitioning to transactional state.
+ */
+ r = kvmhv_p9_tm_emulation(vcpu);
+ if (r != -1)
+ break;
+ fallthrough; /* go to facility unavailable handler */
+#endif
+
/*
* This occurs if the guest (kernel or userspace), does something that
* is prohibited by HFSCR.
@@ -1414,28 +1849,26 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
* to emulate.
* Otherwise, we just generate a program interrupt to the guest.
*/
- case BOOK3S_INTERRUPT_H_FAC_UNAVAIL:
+ case BOOK3S_INTERRUPT_H_FAC_UNAVAIL: {
+ u64 cause = vcpu->arch.hfscr >> 56;
+
r = EMULATE_FAIL;
- if (((vcpu->arch.hfscr >> 56) == FSCR_MSGP_LG) &&
- cpu_has_feature(CPU_FTR_ARCH_300))
- r = kvmppc_emulate_doorbell_instr(vcpu);
+ if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+ if (cause == FSCR_MSGP_LG)
+ r = kvmppc_emulate_doorbell_instr(vcpu);
+ if (cause == FSCR_PM_LG)
+ r = kvmppc_pmu_unavailable(vcpu);
+ if (cause == FSCR_EBB_LG)
+ r = kvmppc_ebb_unavailable(vcpu);
+ if (cause == FSCR_TM_LG)
+ r = kvmppc_tm_unavailable(vcpu);
+ }
if (r == EMULATE_FAIL) {
kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
r = RESUME_GUEST;
}
break;
-
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
- case BOOK3S_INTERRUPT_HV_SOFTPATCH:
- /*
- * This occurs for various TM-related instructions that
- * we need to emulate on POWER9 DD2.2. We have already
- * handled the cases where the guest was in real-suspend
- * mode and was transitioning to transactional state.
- */
- r = kvmhv_p9_tm_emulation(vcpu);
- break;
-#endif
+ }
case BOOK3S_INTERRUPT_HV_RM_HARD:
r = RESUME_PASSTHROUGH;
@@ -1453,7 +1886,7 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
return r;
}
-static int kvmppc_handle_nested_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
+static int kvmppc_handle_nested_exit(struct kvm_vcpu *vcpu)
{
int r;
int srcu_idx;
@@ -1491,6 +1924,12 @@ static int kvmppc_handle_nested_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
vcpu->stat.ext_intr_exits++;
r = RESUME_GUEST;
break;
+ /* These need to go to the nested HV */
+ case BOOK3S_INTERRUPT_NESTED_HV_DECREMENTER:
+ vcpu->arch.trap = BOOK3S_INTERRUPT_HV_DECREMENTER;
+ vcpu->stat.dec_exits++;
+ r = RESUME_HOST;
+ break;
/* SR/HMI/PMI are HV interrupts that host has handled. Resume guest.*/
case BOOK3S_INTERRUPT_HMI:
case BOOK3S_INTERRUPT_PERFMON:
@@ -1498,11 +1937,16 @@ static int kvmppc_handle_nested_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
r = RESUME_GUEST;
break;
case BOOK3S_INTERRUPT_MACHINE_CHECK:
+ {
+ static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
+ DEFAULT_RATELIMIT_BURST);
/* Pass the machine check to the L1 guest */
r = RESUME_HOST;
/* Print the MCE event to host console. */
- machine_check_print_event_info(&vcpu->arch.mce_evt, false, true);
+ if (__ratelimit(&rs))
+ machine_check_print_event_info(&vcpu->arch.mce_evt, false, true);
break;
+ }
/*
* We get these next two if the guest accesses a page which it thinks
* it has mapped but which is not actually present, either because
@@ -1511,7 +1955,7 @@ static int kvmppc_handle_nested_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
*/
case BOOK3S_INTERRUPT_H_DATA_STORAGE:
srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
- r = kvmhv_nested_page_fault(run, vcpu);
+ r = kvmhv_nested_page_fault(vcpu);
srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
break;
case BOOK3S_INTERRUPT_H_INST_STORAGE:
@@ -1521,7 +1965,7 @@ static int kvmppc_handle_nested_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
if (vcpu->arch.shregs.msr & HSRR1_HISI_WRITE)
vcpu->arch.fault_dsisr |= DSISR_ISSTORE;
srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
- r = kvmhv_nested_page_fault(run, vcpu);
+ r = kvmhv_nested_page_fault(vcpu);
srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
break;
@@ -1534,15 +1978,64 @@ static int kvmppc_handle_nested_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
* mode and was transitioning to transactional state.
*/
r = kvmhv_p9_tm_emulation(vcpu);
- break;
+ if (r != -1)
+ break;
+ fallthrough; /* go to facility unavailable handler */
#endif
+ case BOOK3S_INTERRUPT_H_FAC_UNAVAIL: {
+ u64 cause = vcpu->arch.hfscr >> 56;
+
+ /*
+ * Only pass HFU interrupts to the L1 if the facility is
+ * permitted but disabled by the L1's HFSCR, otherwise
+ * the interrupt does not make sense to the L1 so turn
+ * it into a HEAI.
+ */
+ if (!(vcpu->arch.hfscr_permitted & (1UL << cause)) ||
+ (vcpu->arch.nested_hfscr & (1UL << cause))) {
+ vcpu->arch.trap = BOOK3S_INTERRUPT_H_EMUL_ASSIST;
+
+ /*
+ * If the fetch failed, return to guest and
+ * try executing it again.
+ */
+ r = kvmppc_get_last_inst(vcpu, INST_GENERIC,
+ &vcpu->arch.emul_inst);
+ if (r != EMULATE_DONE)
+ r = RESUME_GUEST;
+ else
+ r = RESUME_HOST;
+ } else {
+ r = RESUME_HOST;
+ }
+
+ break;
+ }
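The cause decoding in the H_FAC_UNAVAIL case relies on the HFSCR layout: the interrupt cause is reported in the top byte, and its value is the bit position of the matching facility-enable bit. A minimal sketch of that check (hfu_goes_to_l1() and its parameters are illustrative, not kernel names):

#include <linux/types.h>

/* HFSCR: facility enables in the low bits, the fault cause in bits 63:56. */
static bool hfu_goes_to_l1(u64 hfscr, u64 permitted, u64 l1_hfscr)
{
	u64 cause = hfscr >> 56;	/* facility number that faulted */
	u64 bit = 1UL << cause;		/* corresponding enable bit */

	/* Forward to L1 only if L0 permits the facility but L1 disabled it. */
	return (permitted & bit) && !(l1_hfscr & bit);
}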
+
case BOOK3S_INTERRUPT_HV_RM_HARD:
vcpu->arch.trap = 0;
r = RESUME_GUEST;
if (!xics_on_xive())
kvmppc_xics_rm_complete(vcpu, 0);
break;
+ case BOOK3S_INTERRUPT_SYSCALL:
+ {
+ unsigned long req = kvmppc_get_gpr(vcpu, 3);
+
+ /*
+ * The H_RPT_INVALIDATE hcalls issued by nested
+ * guests for process-scoped invalidations when
+ * GTSE=0 are handled here in L0.
+ */
+ if (req == H_RPT_INVALIDATE) {
+ r = kvmppc_nested_h_rpt_invalidate(vcpu);
+ break;
+ }
+
+ r = RESUME_HOST;
+ break;
+ }
default:
r = RESUME_HOST;
break;
@@ -1588,6 +2081,49 @@ static int kvm_arch_vcpu_ioctl_set_sregs_hv(struct kvm_vcpu *vcpu,
return 0;
}
+/*
+ * Enforce limits on guest LPCR values based on hardware availability,
+ * guest configuration, and possibly hypervisor support and security
+ * concerns.
+ */
+unsigned long kvmppc_filter_lpcr_hv(struct kvm *kvm, unsigned long lpcr)
+{
+ /* LPCR_TC only applies to HPT guests */
+ if (kvm_is_radix(kvm))
+ lpcr &= ~LPCR_TC;
+
+ /* On POWER8 and above, userspace can modify AIL */
+ if (!cpu_has_feature(CPU_FTR_ARCH_207S))
+ lpcr &= ~LPCR_AIL;
+ if ((lpcr & LPCR_AIL) != LPCR_AIL_3)
+ lpcr &= ~LPCR_AIL; /* LPCR[AIL]=1/2 is disallowed */
+ /*
+ * On some POWER9s we force AIL off for radix guests to prevent
+ * executing in MSR[HV]=1 mode with the MMU enabled and PIDR set to the
+ * guest PID, which can result in Q0 translations with LPID=0 PID=PIDR
+ * being cached, which the host TLB management does not expect.
+ */
+ if (kvm_is_radix(kvm) && cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG))
+ lpcr &= ~LPCR_AIL;
+
+ /*
+ * On POWER9, allow userspace to enable large decrementer for the
+ * guest, whether or not the host has it enabled.
+ */
+ if (!cpu_has_feature(CPU_FTR_ARCH_300))
+ lpcr &= ~LPCR_LD;
+
+ return lpcr;
+}
+
+static void verify_lpcr(struct kvm *kvm, unsigned long lpcr)
+{
+ if (lpcr != kvmppc_filter_lpcr_hv(kvm, lpcr)) {
+ WARN_ONCE(1, "lpcr 0x%lx differs from filtered 0x%lx\n",
+ lpcr, kvmppc_filter_lpcr_hv(kvm, lpcr));
+ }
+}
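A minimal sketch of how the filter is meant to be applied when userspace updates LPCR (this mirrors the kvmppc_set_lpcr() change further down; example_update_lpcr() is illustrative and omits the vcore locking):

static void example_update_lpcr(struct kvm *kvm, struct kvmppc_vcore *vc,
				unsigned long requested, unsigned long mask)
{
	/* Merge the userspace-writable bits, then let the filter veto them. */
	vc->lpcr = kvmppc_filter_lpcr_hv(kvm, (vc->lpcr & ~mask) | (requested & mask));
}

verify_lpcr() above is the complementary debug check for LPCR values set by other paths, warning once if a stored value would not survive the filter.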
+
static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr,
bool preserve_top32)
{
@@ -1596,13 +2132,30 @@ static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr,
u64 mask;
spin_lock(&vc->lock);
+
+ /*
+ * Userspace can only modify
+ * DPFD (default prefetch depth), ILE (interrupt little-endian),
+ * TC (translation control), AIL (alternate interrupt location),
+ * LD (large decrementer).
+ * These are subject to restrictions from kvmppc_filter_lpcr_hv().
+ */
+ mask = LPCR_DPFD | LPCR_ILE | LPCR_TC | LPCR_AIL | LPCR_LD;
+
+ /* Broken 32-bit version of LPCR must not clear top bits */
+ if (preserve_top32)
+ mask &= 0xFFFFFFFF;
+
+ new_lpcr = kvmppc_filter_lpcr_hv(kvm,
+ (vc->lpcr & ~mask) | (new_lpcr & mask));
+
/*
* If ILE (interrupt little-endian) has changed, update the
* MSR_LE bit in the intr_msr for each vcpu in this vcore.
*/
if ((new_lpcr & LPCR_ILE) != (vc->lpcr & LPCR_ILE)) {
struct kvm_vcpu *vcpu;
- int i;
+ unsigned long i;
kvm_for_each_vcpu(i, vcpu, kvm) {
if (vcpu->arch.vcore != vc)
@@ -1614,25 +2167,8 @@ static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr,
}
}
- /*
- * Userspace can only modify DPFD (default prefetch depth),
- * ILE (interrupt little-endian) and TC (translation control).
- * On POWER8 and POWER9 userspace can also modify AIL (alt. interrupt loc.).
- */
- mask = LPCR_DPFD | LPCR_ILE | LPCR_TC;
- if (cpu_has_feature(CPU_FTR_ARCH_207S))
- mask |= LPCR_AIL;
- /*
- * On POWER9, allow userspace to enable large decrementer for the
- * guest, whether or not the host has it enabled.
- */
- if (cpu_has_feature(CPU_FTR_ARCH_300))
- mask |= LPCR_LD;
+ vc->lpcr = new_lpcr;
- /* Broken 32-bit version of LPCR must not clear top bits */
- if (preserve_top32)
- mask &= 0xFFFFFFFF;
- vc->lpcr = (vc->lpcr & ~mask) | (new_lpcr & mask);
spin_unlock(&vc->lock);
}
@@ -1670,10 +2206,22 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
case KVM_REG_PPC_UAMOR:
*val = get_reg_val(id, vcpu->arch.uamor);
break;
- case KVM_REG_PPC_MMCR0 ... KVM_REG_PPC_MMCRS:
+ case KVM_REG_PPC_MMCR0 ... KVM_REG_PPC_MMCR1:
i = id - KVM_REG_PPC_MMCR0;
*val = get_reg_val(id, vcpu->arch.mmcr[i]);
break;
+ case KVM_REG_PPC_MMCR2:
+ *val = get_reg_val(id, vcpu->arch.mmcr[2]);
+ break;
+ case KVM_REG_PPC_MMCRA:
+ *val = get_reg_val(id, vcpu->arch.mmcra);
+ break;
+ case KVM_REG_PPC_MMCRS:
+ *val = get_reg_val(id, vcpu->arch.mmcrs);
+ break;
+ case KVM_REG_PPC_MMCR3:
+ *val = get_reg_val(id, vcpu->arch.mmcr[3]);
+ break;
case KVM_REG_PPC_PMC1 ... KVM_REG_PPC_PMC8:
i = id - KVM_REG_PPC_PMC1;
*val = get_reg_val(id, vcpu->arch.pmc[i]);
@@ -1689,7 +2237,13 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
*val = get_reg_val(id, vcpu->arch.sdar);
break;
case KVM_REG_PPC_SIER:
- *val = get_reg_val(id, vcpu->arch.sier);
+ *val = get_reg_val(id, vcpu->arch.sier[0]);
+ break;
+ case KVM_REG_PPC_SIER2:
+ *val = get_reg_val(id, vcpu->arch.sier[1]);
+ break;
+ case KVM_REG_PPC_SIER3:
+ *val = get_reg_val(id, vcpu->arch.sier[2]);
break;
case KVM_REG_PPC_IAMR:
*val = get_reg_val(id, vcpu->arch.iamr);
@@ -1704,17 +2258,25 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
* either vcore->dpdes or doorbell_request.
* On POWER8, doorbell_request is 0.
*/
- *val = get_reg_val(id, vcpu->arch.vcore->dpdes |
- vcpu->arch.doorbell_request);
+ if (cpu_has_feature(CPU_FTR_ARCH_300))
+ *val = get_reg_val(id, vcpu->arch.doorbell_request);
+ else
+ *val = get_reg_val(id, vcpu->arch.vcore->dpdes);
break;
case KVM_REG_PPC_VTB:
*val = get_reg_val(id, vcpu->arch.vcore->vtb);
break;
case KVM_REG_PPC_DAWR:
- *val = get_reg_val(id, vcpu->arch.dawr);
+ *val = get_reg_val(id, vcpu->arch.dawr0);
break;
case KVM_REG_PPC_DAWRX:
- *val = get_reg_val(id, vcpu->arch.dawrx);
+ *val = get_reg_val(id, vcpu->arch.dawrx0);
+ break;
+ case KVM_REG_PPC_DAWR1:
+ *val = get_reg_val(id, vcpu->arch.dawr1);
+ break;
+ case KVM_REG_PPC_DAWRX1:
+ *val = get_reg_val(id, vcpu->arch.dawrx1);
break;
case KVM_REG_PPC_CIABR:
*val = get_reg_val(id, vcpu->arch.ciabr);
@@ -1840,8 +2402,7 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
*val = get_reg_val(id, vcpu->arch.vcore->arch_compat);
break;
case KVM_REG_PPC_DEC_EXPIRY:
- *val = get_reg_val(id, vcpu->arch.dec_expires +
- vcpu->arch.vcore->tb_offset);
+ *val = get_reg_val(id, vcpu->arch.dec_expires);
break;
case KVM_REG_PPC_ONLINE:
*val = get_reg_val(id, vcpu->arch.online);
@@ -1891,10 +2452,22 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
case KVM_REG_PPC_UAMOR:
vcpu->arch.uamor = set_reg_val(id, *val);
break;
- case KVM_REG_PPC_MMCR0 ... KVM_REG_PPC_MMCRS:
+ case KVM_REG_PPC_MMCR0 ... KVM_REG_PPC_MMCR1:
i = id - KVM_REG_PPC_MMCR0;
vcpu->arch.mmcr[i] = set_reg_val(id, *val);
break;
+ case KVM_REG_PPC_MMCR2:
+ vcpu->arch.mmcr[2] = set_reg_val(id, *val);
+ break;
+ case KVM_REG_PPC_MMCRA:
+ vcpu->arch.mmcra = set_reg_val(id, *val);
+ break;
+ case KVM_REG_PPC_MMCRS:
+ vcpu->arch.mmcrs = set_reg_val(id, *val);
+ break;
+ case KVM_REG_PPC_MMCR3:
+ vcpu->arch.mmcr[3] = set_reg_val(id, *val);
+ break;
case KVM_REG_PPC_PMC1 ... KVM_REG_PPC_PMC8:
i = id - KVM_REG_PPC_PMC1;
vcpu->arch.pmc[i] = set_reg_val(id, *val);
@@ -1910,7 +2483,13 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
vcpu->arch.sdar = set_reg_val(id, *val);
break;
case KVM_REG_PPC_SIER:
- vcpu->arch.sier = set_reg_val(id, *val);
+ vcpu->arch.sier[0] = set_reg_val(id, *val);
+ break;
+ case KVM_REG_PPC_SIER2:
+ vcpu->arch.sier[1] = set_reg_val(id, *val);
+ break;
+ case KVM_REG_PPC_SIER3:
+ vcpu->arch.sier[2] = set_reg_val(id, *val);
break;
case KVM_REG_PPC_IAMR:
vcpu->arch.iamr = set_reg_val(id, *val);
@@ -1919,16 +2498,25 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
vcpu->arch.pspb = set_reg_val(id, *val);
break;
case KVM_REG_PPC_DPDES:
- vcpu->arch.vcore->dpdes = set_reg_val(id, *val);
+ if (cpu_has_feature(CPU_FTR_ARCH_300))
+ vcpu->arch.doorbell_request = set_reg_val(id, *val) & 1;
+ else
+ vcpu->arch.vcore->dpdes = set_reg_val(id, *val);
break;
case KVM_REG_PPC_VTB:
vcpu->arch.vcore->vtb = set_reg_val(id, *val);
break;
case KVM_REG_PPC_DAWR:
- vcpu->arch.dawr = set_reg_val(id, *val);
+ vcpu->arch.dawr0 = set_reg_val(id, *val);
break;
case KVM_REG_PPC_DAWRX:
- vcpu->arch.dawrx = set_reg_val(id, *val) & ~DAWRX_HYP;
+ vcpu->arch.dawrx0 = set_reg_val(id, *val) & ~DAWRX_HYP;
+ break;
+ case KVM_REG_PPC_DAWR1:
+ vcpu->arch.dawr1 = set_reg_val(id, *val);
+ break;
+ case KVM_REG_PPC_DAWRX1:
+ vcpu->arch.dawrx1 = set_reg_val(id, *val) & ~DAWRX_HYP;
break;
case KVM_REG_PPC_CIABR:
vcpu->arch.ciabr = set_reg_val(id, *val);
@@ -1987,10 +2575,24 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
r = set_vpa(vcpu, &vcpu->arch.dtl, addr, len);
break;
case KVM_REG_PPC_TB_OFFSET:
+ {
/* round up to multiple of 2^24 */
- vcpu->arch.vcore->tb_offset =
- ALIGN(set_reg_val(id, *val), 1UL << 24);
+ u64 tb_offset = ALIGN(set_reg_val(id, *val), 1UL << 24);
+
+ /*
+ * Now that we know the timebase offset, update the
+ * decrementer expiry with a guest timebase value. If
+ * userspace does not set DEC_EXPIRY, this ensures
+ * a migrated vcpu at least starts with an expired
+ * decrementer, which is better than a large one that
+ * causes a hang.
+ */
+ if (!vcpu->arch.dec_expires && tb_offset)
+ vcpu->arch.dec_expires = get_tb() + tb_offset;
+
+ vcpu->arch.vcore->tb_offset = tb_offset;
break;
+ }
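ALIGN() rounds its first argument up to the next multiple of the (power-of-two) second argument, so the timebase offset is kept at a 2^24-tick granularity. A small worked sketch (example_round_tb_offset() is illustrative):

#include <linux/align.h>	/* older trees get ALIGN() via <linux/kernel.h> */
#include <linux/types.h>

static u64 example_round_tb_offset(u64 raw)
{
	/* e.g. 0x123456789 -> 0x124000000; already-aligned values are unchanged */
	return ALIGN(raw, 1UL << 24);
}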
case KVM_REG_PPC_LPCR:
kvmppc_set_lpcr(vcpu, set_reg_val(id, *val), true);
break;
@@ -2069,8 +2671,7 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
r = kvmppc_set_arch_compat(vcpu, set_reg_val(id, *val));
break;
case KVM_REG_PPC_DEC_EXPIRY:
- vcpu->arch.dec_expires = set_reg_val(id, *val) -
- vcpu->arch.vcore->tb_offset;
+ vcpu->arch.dec_expires = set_reg_val(id, *val);
break;
case KVM_REG_PPC_ONLINE:
i = set_reg_val(id, *val);
@@ -2100,7 +2701,7 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
*/
static int threads_per_vcore(struct kvm *kvm)
{
- if (kvm->arch.threads_indep)
+ if (cpu_has_feature(CPU_FTR_ARCH_300))
return 1;
return threads_per_subcore;
}
@@ -2116,7 +2717,7 @@ static struct kvmppc_vcore *kvmppc_vcore_create(struct kvm *kvm, int id)
spin_lock_init(&vcore->lock);
spin_lock_init(&vcore->stoltb_lock);
- init_swait_queue_head(&vcore->wq);
+ rcuwait_init(&vcore->wait);
vcore->preempt_tb = TB_NIL;
vcore->lpcr = kvm->arch.lpcr;
vcore->first_vcpuid = id;
@@ -2131,11 +2732,21 @@ static struct debugfs_timings_element {
const char *name;
size_t offset;
} timings[] = {
+#ifdef CONFIG_KVM_BOOK3S_HV_P9_TIMING
+ {"vcpu_entry", offsetof(struct kvm_vcpu, arch.vcpu_entry)},
+ {"guest_entry", offsetof(struct kvm_vcpu, arch.guest_entry)},
+ {"in_guest", offsetof(struct kvm_vcpu, arch.in_guest)},
+ {"guest_exit", offsetof(struct kvm_vcpu, arch.guest_exit)},
+ {"vcpu_exit", offsetof(struct kvm_vcpu, arch.vcpu_exit)},
+ {"hypercall", offsetof(struct kvm_vcpu, arch.hcall)},
+ {"page_fault", offsetof(struct kvm_vcpu, arch.pg_fault)},
+#else
{"rm_entry", offsetof(struct kvm_vcpu, arch.rm_entry)},
{"rm_intr", offsetof(struct kvm_vcpu, arch.rm_intr)},
{"rm_exit", offsetof(struct kvm_vcpu, arch.rm_exit)},
{"guest", offsetof(struct kvm_vcpu, arch.guest_time)},
{"cede", offsetof(struct kvm_vcpu, arch.cede_time)},
+#endif
};
#define N_TIMINGS (ARRAY_SIZE(timings))
@@ -2252,25 +2863,18 @@ static const struct file_operations debugfs_timings_ops = {
};
/* Create a debugfs directory for the vcpu */
-static void debugfs_vcpu_init(struct kvm_vcpu *vcpu, unsigned int id)
+static int kvmppc_arch_create_vcpu_debugfs_hv(struct kvm_vcpu *vcpu, struct dentry *debugfs_dentry)
{
- char buf[16];
- struct kvm *kvm = vcpu->kvm;
-
- snprintf(buf, sizeof(buf), "vcpu%u", id);
- if (IS_ERR_OR_NULL(kvm->arch.debugfs_dir))
- return;
- vcpu->arch.debugfs_dir = debugfs_create_dir(buf, kvm->arch.debugfs_dir);
- if (IS_ERR_OR_NULL(vcpu->arch.debugfs_dir))
- return;
- vcpu->arch.debugfs_timings =
- debugfs_create_file("timings", 0444, vcpu->arch.debugfs_dir,
- vcpu, &debugfs_timings_ops);
+ if (cpu_has_feature(CPU_FTR_ARCH_300) == IS_ENABLED(CONFIG_KVM_BOOK3S_HV_P9_TIMING))
+ debugfs_create_file("timings", 0444, debugfs_dentry, vcpu,
+ &debugfs_timings_ops);
+ return 0;
}
#else /* CONFIG_KVM_BOOK3S_HV_EXIT_TIMING */
-static void debugfs_vcpu_init(struct kvm_vcpu *vcpu, unsigned int id)
+static int kvmppc_arch_create_vcpu_debugfs_hv(struct kvm_vcpu *vcpu, struct dentry *debugfs_dentry)
{
+ return 0;
}
#endif /* CONFIG_KVM_BOOK3S_HV_EXIT_TIMING */
@@ -2298,12 +2902,18 @@ static int kvmppc_core_vcpu_create_hv(struct kvm_vcpu *vcpu)
#endif
#endif
vcpu->arch.mmcr[0] = MMCR0_FC;
+ if (cpu_has_feature(CPU_FTR_ARCH_31)) {
+ vcpu->arch.mmcr[0] |= MMCR0_PMCCEXT;
+ vcpu->arch.mmcra = MMCRA_BHRB_DISABLE;
+ }
+
vcpu->arch.ctrl = CTRL_RUNLATCH;
/* default to host PVR, since we can't spoof it */
kvmppc_set_pvr_hv(vcpu, mfspr(SPRN_PVR));
spin_lock_init(&vcpu->arch.vpa_update_lock);
spin_lock_init(&vcpu->arch.tbacct_lock);
vcpu->arch.busy_preempt = TB_NIL;
+ vcpu->arch.shregs.msr = MSR_ME;
vcpu->arch.intr_msr = MSR_SF | MSR_ME;
/*
@@ -2317,12 +2927,21 @@ static int kvmppc_core_vcpu_create_hv(struct kvm_vcpu *vcpu)
HFSCR_DSCR | HFSCR_VECVSX | HFSCR_FP;
if (cpu_has_feature(CPU_FTR_HVMODE)) {
vcpu->arch.hfscr &= mfspr(SPRN_HFSCR);
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
if (cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST))
vcpu->arch.hfscr |= HFSCR_TM;
+#endif
}
if (cpu_has_feature(CPU_FTR_TM_COMP))
vcpu->arch.hfscr |= HFSCR_TM;
+ vcpu->arch.hfscr_permitted = vcpu->arch.hfscr;
+
+ /*
+ * PM, EBB, TM are demand-faulted so start with them clear.
+ */
+ vcpu->arch.hfscr &= ~(HFSCR_PM | HFSCR_EBB | HFSCR_TM);
+
kvmppc_mmu_book3s_hv_init(vcpu);
vcpu->arch.state = KVMPPC_VCPU_NOTREADY;
@@ -2378,8 +2997,6 @@ static int kvmppc_core_vcpu_create_hv(struct kvm_vcpu *vcpu)
vcpu->arch.cpu_type = KVM_CPU_3S_64;
kvmppc_sanity_check(vcpu);
- debugfs_vcpu_init(vcpu, id);
-
return 0;
}
@@ -2447,13 +3064,13 @@ static void kvmppc_set_timer(struct kvm_vcpu *vcpu)
unsigned long dec_nsec, now;
now = get_tb();
- if (now > vcpu->arch.dec_expires) {
+ if (now > kvmppc_dec_expires_host_tb(vcpu)) {
/* decrementer has already gone negative */
kvmppc_core_queue_dec(vcpu);
kvmppc_core_prepare_to_enter(vcpu);
return;
}
- dec_nsec = tb_to_ns(vcpu->arch.dec_expires - now);
+ dec_nsec = tb_to_ns(kvmppc_dec_expires_host_tb(vcpu) - now);
hrtimer_start(&vcpu->arch.dec_timer, dec_nsec, HRTIMER_MODE_REL);
vcpu->arch.timer_running = 1;
}
@@ -2461,14 +3078,14 @@ static void kvmppc_set_timer(struct kvm_vcpu *vcpu)
extern int __kvmppc_vcore_entry(void);
static void kvmppc_remove_runnable(struct kvmppc_vcore *vc,
- struct kvm_vcpu *vcpu)
+ struct kvm_vcpu *vcpu, u64 tb)
{
u64 now;
if (vcpu->arch.state != KVMPPC_VCPU_RUNNABLE)
return;
spin_lock_irq(&vcpu->arch.tbacct_lock);
- now = mftb();
+ now = tb;
vcpu->arch.busy_stolen += vcore_stolen_time(vc, now) -
vcpu->arch.stolen_logged;
vcpu->arch.busy_preempt = now;
@@ -2523,29 +3140,59 @@ static void kvmppc_release_hwthread(int cpu)
tpaca->kvm_hstate.kvm_split_mode = NULL;
}
+static DEFINE_PER_CPU(struct kvm *, cpu_in_guest);
+
static void radix_flush_cpu(struct kvm *kvm, int cpu, struct kvm_vcpu *vcpu)
{
struct kvm_nested_guest *nested = vcpu->arch.nested;
- cpumask_t *cpu_in_guest;
+ cpumask_t *need_tlb_flush;
int i;
- cpu = cpu_first_thread_sibling(cpu);
- if (nested) {
- cpumask_set_cpu(cpu, &nested->need_tlb_flush);
- cpu_in_guest = &nested->cpu_in_guest;
- } else {
- cpumask_set_cpu(cpu, &kvm->arch.need_tlb_flush);
- cpu_in_guest = &kvm->arch.cpu_in_guest;
- }
+ if (nested)
+ need_tlb_flush = &nested->need_tlb_flush;
+ else
+ need_tlb_flush = &kvm->arch.need_tlb_flush;
+
+ cpu = cpu_first_tlb_thread_sibling(cpu);
+ for (i = cpu; i <= cpu_last_tlb_thread_sibling(cpu);
+ i += cpu_tlb_thread_sibling_step())
+ cpumask_set_cpu(i, need_tlb_flush);
+
/*
- * Make sure setting of bit in need_tlb_flush precedes
- * testing of cpu_in_guest bits. The matching barrier on
- * the other side is the first smp_mb() in kvmppc_run_core().
+ * Make sure setting of bit in need_tlb_flush precedes testing of
+ * cpu_in_guest. The matching barrier on the other side is hwsync
+ * when switching to guest MMU mode, which happens between
+ * cpu_in_guest being set to the guest kvm, and need_tlb_flush bit
+ * being tested.
*/
smp_mb();
- for (i = 0; i < threads_per_core; ++i)
- if (cpumask_test_cpu(cpu + i, cpu_in_guest))
- smp_call_function_single(cpu + i, do_nothing, NULL, 1);
+
+ for (i = cpu; i <= cpu_last_tlb_thread_sibling(cpu);
+ i += cpu_tlb_thread_sibling_step()) {
+ struct kvm *running = *per_cpu_ptr(&cpu_in_guest, i);
+
+ if (running == kvm)
+ smp_call_function_single(i, do_nothing, NULL, 1);
+ }
+}
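The cpu_in_guest introduced above is a plain per-CPU pointer rather than a cpumask in the kvm_arch. The publish/read pattern it is used with looks roughly like the following sketch (the example_* helpers are illustrative; the real ordering against need_tlb_flush is the smp_mb()/hwsync pair described in the comment):

#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/types.h>

static DEFINE_PER_CPU(struct kvm *, example_cpu_in_guest);

static void example_enter_guest(struct kvm *kvm)
{
	__this_cpu_write(example_cpu_in_guest, kvm);	/* published before entry */
}

static void example_exit_guest(void)
{
	__this_cpu_write(example_cpu_in_guest, NULL);
}

static bool example_cpu_runs_this_kvm(struct kvm *kvm, int cpu)
{
	/* Remote read from the CPU doing the TLB flush. */
	return *per_cpu_ptr(&example_cpu_in_guest, cpu) == kvm;
}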
+
+static void do_migrate_away_vcpu(void *arg)
+{
+ struct kvm_vcpu *vcpu = arg;
+ struct kvm *kvm = vcpu->kvm;
+
+ /*
+ * If the guest has GTSE, it may execute tlbie, so do an eieio; tlbsync;
+ * ptesync sequence on the old CPU before migrating to a new one, in
+ * case we interrupted the guest between a tlbie ; eieio ;
+ * tlbsync; ptesync sequence.
+ *
+ * Otherwise, ptesync is sufficient for ordering tlbiel sequences.
+ */
+ if (kvm->arch.lpcr & LPCR_GTSE)
+ asm volatile("eieio; tlbsync; ptesync");
+ else
+ asm volatile("ptesync");
}
static void kvmppc_prepare_radix_vcpu(struct kvm_vcpu *vcpu, int pcpu)
@@ -2571,14 +3218,17 @@ static void kvmppc_prepare_radix_vcpu(struct kvm_vcpu *vcpu, int pcpu)
* can move around between pcpus. To cope with this, when
* a vcpu moves from one pcpu to another, we need to tell
* any vcpus running on the same core as this vcpu previously
- * ran to flush the TLB. The TLB is shared between threads,
- * so we use a single bit in .need_tlb_flush for all 4 threads.
+ * ran to flush the TLB.
*/
if (prev_cpu != pcpu) {
- if (prev_cpu >= 0 &&
- cpu_first_thread_sibling(prev_cpu) !=
- cpu_first_thread_sibling(pcpu))
- radix_flush_cpu(kvm, prev_cpu, vcpu);
+ if (prev_cpu >= 0) {
+ if (cpu_first_tlb_thread_sibling(prev_cpu) !=
+ cpu_first_tlb_thread_sibling(pcpu))
+ radix_flush_cpu(kvm, prev_cpu, vcpu);
+
+ smp_call_function_single(prev_cpu,
+ do_migrate_away_vcpu, vcpu, 1);
+ }
if (nested)
nested->prev_cpu[vcpu->arch.nested_vcpu_id] = pcpu;
else
@@ -2590,7 +3240,6 @@ static void kvmppc_start_thread(struct kvm_vcpu *vcpu, struct kvmppc_vcore *vc)
{
int cpu;
struct paca_struct *tpaca;
- struct kvm *kvm = vc->kvm;
cpu = vc->pcpu;
if (vcpu) {
@@ -2601,7 +3250,6 @@ static void kvmppc_start_thread(struct kvm_vcpu *vcpu, struct kvmppc_vcore *vc)
cpu += vcpu->arch.ptid;
vcpu->cpu = vc->pcpu;
vcpu->arch.thread_cpu = cpu;
- cpumask_set_cpu(cpu, &kvm->arch.cpu_in_guest);
}
tpaca = paca_ptrs[cpu];
tpaca->kvm_hstate.kvm_vcpu = vcpu;
@@ -2702,6 +3350,8 @@ static void kvmppc_vcore_preempt(struct kvmppc_vcore *vc)
{
struct preempted_vcore_list *lp = this_cpu_ptr(&preempted_vcores);
+ WARN_ON_ONCE(cpu_has_feature(CPU_FTR_ARCH_300));
+
vc->vcore_state = VCORE_PREEMPT;
vc->pcpu = smp_processor_id();
if (vc->num_threads < threads_per_vcore(vc->kvm)) {
@@ -2711,14 +3361,16 @@ static void kvmppc_vcore_preempt(struct kvmppc_vcore *vc)
}
/* Start accumulating stolen time */
- kvmppc_core_start_stolen(vc);
+ kvmppc_core_start_stolen(vc, mftb());
}
static void kvmppc_vcore_end_preempt(struct kvmppc_vcore *vc)
{
struct preempted_vcore_list *lp;
- kvmppc_core_end_stolen(vc);
+ WARN_ON_ONCE(cpu_has_feature(CPU_FTR_ARCH_300));
+
+ kvmppc_core_end_stolen(vc, mftb());
if (!list_empty(&vc->preempt_list)) {
lp = &per_cpu(preempted_vcores, vc->pcpu);
spin_lock(&lp->lock);
@@ -2801,11 +3453,6 @@ static bool can_dynamic_split(struct kvmppc_vcore *vc, struct core_info *cip)
if (one_vm_per_core && vc->kvm != cip->vc[0]->kvm)
return false;
- /* Some POWER9 chips require all threads to be in the same MMU mode */
- if (no_mixing_hpt_and_radix &&
- kvm_is_radix(vc->kvm) != kvm_is_radix(cip->vc[0]->kvm))
- return false;
-
if (n_threads < cip->max_subcore_threads)
n_threads = cip->max_subcore_threads;
if (!subcore_config_ok(cip->n_subcores + 1, n_threads))
@@ -2850,7 +3497,7 @@ static void prepare_threads(struct kvmppc_vcore *vc)
vcpu->arch.ret = RESUME_GUEST;
else
continue;
- kvmppc_remove_runnable(vc, vcpu);
+ kvmppc_remove_runnable(vc, vcpu, mftb());
wake_up(&vcpu->arch.cpu_run);
}
}
@@ -2869,7 +3516,7 @@ static void collect_piggybacks(struct core_info *cip, int target_threads)
list_del_init(&pvc->preempt_list);
if (pvc->runner == NULL) {
pvc->vcore_state = VCORE_INACTIVE;
- kvmppc_core_end_stolen(pvc);
+ kvmppc_core_end_stolen(pvc, mftb());
}
spin_unlock(&pvc->lock);
continue;
@@ -2878,7 +3525,7 @@ static void collect_piggybacks(struct core_info *cip, int target_threads)
spin_unlock(&pvc->lock);
continue;
}
- kvmppc_core_end_stolen(pvc);
+ kvmppc_core_end_stolen(pvc, mftb());
pvc->vcore_state = VCORE_PIGGYBACK;
if (cip->total_threads >= target_threads)
break;
@@ -2922,7 +3569,7 @@ static void post_guest_process(struct kvmppc_vcore *vc, bool is_master)
*/
spin_unlock(&vc->lock);
/* cancel pending dec exception if dec is positive */
- if (now < vcpu->arch.dec_expires &&
+ if (now < kvmppc_dec_expires_host_tb(vcpu) &&
kvmppc_core_pending_dec(vcpu))
kvmppc_core_dequeue_dec(vcpu);
@@ -2930,7 +3577,7 @@ static void post_guest_process(struct kvmppc_vcore *vc, bool is_master)
ret = RESUME_GUEST;
if (vcpu->arch.trap)
- ret = kvmppc_handle_exit_hv(vcpu->arch.kvm_run, vcpu,
+ ret = kvmppc_handle_exit_hv(vcpu,
vcpu->arch.run_task);
vcpu->arch.ret = ret;
@@ -2945,7 +3592,7 @@ static void post_guest_process(struct kvmppc_vcore *vc, bool is_master)
else
++still_running;
} else {
- kvmppc_remove_runnable(vc, vcpu);
+ kvmppc_remove_runnable(vc, vcpu, mftb());
wake_up(&vcpu->arch.cpu_run);
}
}
@@ -2954,7 +3601,7 @@ static void post_guest_process(struct kvmppc_vcore *vc, bool is_master)
kvmppc_vcore_preempt(vc);
} else if (vc->runner) {
vc->vcore_state = VCORE_PREEMPT;
- kvmppc_core_start_stolen(vc);
+ kvmppc_core_start_stolen(vc, mftb());
} else {
vc->vcore_state = VCORE_INACTIVE;
}
@@ -3049,7 +3696,9 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
int controlled_threads;
int trap;
bool is_power8;
- bool hpt_on_radix;
+
+ if (WARN_ON_ONCE(cpu_has_feature(CPU_FTR_ARCH_300)))
+ return;
/*
* Remove from the list any threads that have a signal pending
@@ -3078,18 +3727,12 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
* Make sure we are running on primary threads, and that secondary
* threads are offline. Also check if the number of threads in this
* guest are greater than the current system threads per guest.
- * On POWER9, we need to be not in independent-threads mode if
- * this is a HPT guest on a radix host machine where the
- * CPU threads may not be in different MMU modes.
*/
- hpt_on_radix = no_mixing_hpt_and_radix && radix_enabled() &&
- !kvm_is_radix(vc->kvm);
- if (((controlled_threads > 1) &&
- ((vc->num_threads > threads_per_subcore) || !on_primary_thread())) ||
- (hpt_on_radix && vc->kvm->arch.threads_indep)) {
+ if ((controlled_threads > 1) &&
+ ((vc->num_threads > threads_per_subcore) || !on_primary_thread())) {
for_each_runnable_thread(i, vcpu, vc) {
vcpu->arch.ret = -EBUSY;
- kvmppc_remove_runnable(vc, vcpu);
+ kvmppc_remove_runnable(vc, vcpu, mftb());
wake_up(&vcpu->arch.cpu_run);
}
goto out;
@@ -3108,18 +3751,6 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
collect_piggybacks(&core_info, target_threads);
/*
- * On radix, arrange for TLB flushing if necessary.
- * This has to be done before disabling interrupts since
- * it uses smp_call_function().
- */
- pcpu = smp_processor_id();
- if (kvm_is_radix(vc->kvm)) {
- for (sub = 0; sub < core_info.n_subcores; ++sub)
- for_each_runnable_thread(i, vcpu, core_info.vc[sub])
- kvmppc_prepare_radix_vcpu(vcpu, pcpu);
- }
-
- /*
* Hard-disable interrupts, and check resched flag and signals.
* If we need to reschedule or deliver a signal, clean up
* and return without going into the guest(s).
@@ -3151,10 +3782,9 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
cmd_bit = stat_bit = 0;
split = core_info.n_subcores;
sip = NULL;
- is_power8 = cpu_has_feature(CPU_FTR_ARCH_207S)
- && !cpu_has_feature(CPU_FTR_ARCH_300);
+ is_power8 = cpu_has_feature(CPU_FTR_ARCH_207S);
- if (split > 1 || hpt_on_radix) {
+ if (split > 1) {
sip = &split_info;
memset(&split_info, 0, sizeof(split_info));
for (sub = 0; sub < core_info.n_subcores; ++sub)
@@ -3176,13 +3806,6 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
split_info.subcore_size = subcore_size;
} else {
split_info.subcore_size = 1;
- if (hpt_on_radix) {
- /* Use the split_info for LPCR/LPIDR changes */
- split_info.lpcr_req = vc->lpcr;
- split_info.lpidr_req = vc->kvm->arch.lpid;
- split_info.host_lpcr = vc->kvm->arch.host_lpcr;
- split_info.do_set = 1;
- }
}
/* order writes to split_info before kvm_split_mode pointer */
@@ -3192,7 +3815,6 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
for (thr = 0; thr < controlled_threads; ++thr) {
struct paca_struct *paca = paca_ptrs[pcpu + thr];
- paca->kvm_hstate.tid = thr;
paca->kvm_hstate.napping = 0;
paca->kvm_hstate.kvm_split_mode = sip;
}
@@ -3241,8 +3863,16 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
pvc = core_info.vc[sub];
pvc->pcpu = pcpu + thr;
for_each_runnable_thread(i, vcpu, pvc) {
+ /*
+ * XXX: is kvmppc_start_thread called too late here?
+ * It updates vcpu->cpu and vcpu->arch.thread_cpu
+ * which are used by kvmppc_fast_vcpu_kick_hv(), but
+ * kick is called after new exceptions become available
+ * and exceptions are checked earlier than here, by
+ * kvmppc_core_prepare_to_enter.
+ */
kvmppc_start_thread(vcpu, pvc);
- kvmppc_create_dtl_entry(vcpu, pvc);
+ kvmppc_update_vpa_dispatch(vcpu, pvc);
trace_kvm_guest_enter(vcpu);
if (!vcpu->arch.ptid)
thr0_done = true;
@@ -3266,10 +3896,8 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
* When doing micro-threading, poke the inactive threads as well.
* This gets them to the nap instruction after kvm_do_nap,
* which reduces the time taken to unsplit later.
- * For POWER9 HPT guest on radix host, we need all the secondary
- * threads woken up so they can do the LPCR/LPIDR change.
*/
- if (cmd_bit || hpt_on_radix) {
+ if (cmd_bit) {
split_info.do_nap = 1; /* ask secondaries to nap when done */
for (thr = 1; thr < threads_per_subcore; ++thr)
if (!(active & (1 << thr)))
@@ -3284,23 +3912,17 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
for (sub = 0; sub < core_info.n_subcores; ++sub)
spin_unlock(&core_info.vc[sub]->lock);
- guest_enter_irqoff();
+ guest_timing_enter_irqoff();
srcu_idx = srcu_read_lock(&vc->kvm->srcu);
+ guest_state_enter_irqoff();
this_cpu_disable_ftrace();
- /*
- * Interrupts will be enabled once we get into the guest,
- * so tell lockdep that we're about to enable interrupts.
- */
- trace_hardirqs_on();
-
trap = __kvmppc_vcore_entry();
- trace_hardirqs_off();
-
this_cpu_enable_ftrace();
+ guest_state_exit_irqoff();
srcu_read_unlock(&vc->kvm->srcu, srcu_idx);
@@ -3330,31 +3952,32 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
cpu_relax();
++loops;
}
- } else if (hpt_on_radix) {
- /* Wait for all threads to have seen final sync */
- for (thr = 1; thr < controlled_threads; ++thr) {
- struct paca_struct *paca = paca_ptrs[pcpu + thr];
-
- while (paca->kvm_hstate.kvm_split_mode) {
- HMT_low();
- barrier();
- }
- HMT_medium();
- }
+ split_info.do_nap = 0;
}
- split_info.do_nap = 0;
kvmppc_set_host_core(pcpu);
+ if (!vtime_accounting_enabled_this_cpu()) {
+ local_irq_enable();
+ /*
+ * Service IRQs here before guest_timing_exit_irqoff() so any
+ * ticks that occurred while running the guest are accounted to
+ * the guest. If vtime accounting is enabled, accounting uses
+ * TB rather than ticks, so it can be done without enabling
+ * interrupts here, which has the problem that it accounts
+ * interrupt processing overhead to the host.
+ */
+ local_irq_disable();
+ }
+ guest_timing_exit_irqoff();
+
local_irq_enable();
- guest_exit();
/* Let secondaries go back to the offline loop */
for (i = 0; i < controlled_threads; ++i) {
kvmppc_release_hwthread(pcpu + i);
if (sip && sip->napped[i])
kvmppc_ipi_thread(pcpu + i);
- cpumask_clear_cpu(pcpu + i, &vc->kvm->arch.cpu_in_guest);
}
spin_unlock(&vc->lock);
@@ -3376,319 +3999,215 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
trace_kvmppc_run_core(vc, 1);
}
-/*
- * Load up hypervisor-mode registers on P9.
- */
-static int kvmhv_load_hv_regs_and_go(struct kvm_vcpu *vcpu, u64 time_limit,
- unsigned long lpcr)
+static inline bool hcall_is_xics(unsigned long req)
{
- struct kvmppc_vcore *vc = vcpu->arch.vcore;
- s64 hdec;
- u64 tb, purr, spurr;
- int trap;
- unsigned long host_hfscr = mfspr(SPRN_HFSCR);
- unsigned long host_ciabr = mfspr(SPRN_CIABR);
- unsigned long host_dawr = mfspr(SPRN_DAWR);
- unsigned long host_dawrx = mfspr(SPRN_DAWRX);
- unsigned long host_psscr = mfspr(SPRN_PSSCR);
- unsigned long host_pidr = mfspr(SPRN_PID);
-
- hdec = time_limit - mftb();
- if (hdec < 0)
- return BOOK3S_INTERRUPT_HV_DECREMENTER;
- mtspr(SPRN_HDEC, hdec);
-
- if (vc->tb_offset) {
- u64 new_tb = mftb() + vc->tb_offset;
- mtspr(SPRN_TBU40, new_tb);
- tb = mftb();
- if ((tb & 0xffffff) < (new_tb & 0xffffff))
- mtspr(SPRN_TBU40, new_tb + 0x1000000);
- vc->tb_offset_applied = vc->tb_offset;
- }
-
- if (vc->pcr)
- mtspr(SPRN_PCR, vc->pcr | PCR_MASK);
- mtspr(SPRN_DPDES, vc->dpdes);
- mtspr(SPRN_VTB, vc->vtb);
-
- local_paca->kvm_hstate.host_purr = mfspr(SPRN_PURR);
- local_paca->kvm_hstate.host_spurr = mfspr(SPRN_SPURR);
- mtspr(SPRN_PURR, vcpu->arch.purr);
- mtspr(SPRN_SPURR, vcpu->arch.spurr);
+ return req == H_EOI || req == H_CPPR || req == H_IPI ||
+ req == H_IPOLL || req == H_XIRR || req == H_XIRR_X;
+}
- if (dawr_enabled()) {
- mtspr(SPRN_DAWR, vcpu->arch.dawr);
- mtspr(SPRN_DAWRX, vcpu->arch.dawrx);
+static void vcpu_vpa_increment_dispatch(struct kvm_vcpu *vcpu)
+{
+ struct lppaca *lp = vcpu->arch.vpa.pinned_addr;
+ if (lp) {
+ u32 yield_count = be32_to_cpu(lp->yield_count) + 1;
+ lp->yield_count = cpu_to_be32(yield_count);
+ vcpu->arch.vpa.dirty = 1;
}
- mtspr(SPRN_CIABR, vcpu->arch.ciabr);
- mtspr(SPRN_IC, vcpu->arch.ic);
- mtspr(SPRN_PID, vcpu->arch.pid);
-
- mtspr(SPRN_PSSCR, vcpu->arch.psscr | PSSCR_EC |
- (local_paca->kvm_hstate.fake_suspend << PSSCR_FAKE_SUSPEND_LG));
-
- mtspr(SPRN_HFSCR, vcpu->arch.hfscr);
-
- mtspr(SPRN_SPRG0, vcpu->arch.shregs.sprg0);
- mtspr(SPRN_SPRG1, vcpu->arch.shregs.sprg1);
- mtspr(SPRN_SPRG2, vcpu->arch.shregs.sprg2);
- mtspr(SPRN_SPRG3, vcpu->arch.shregs.sprg3);
-
- mtspr(SPRN_AMOR, ~0UL);
+}
- mtspr(SPRN_LPCR, lpcr);
- isync();
+/* call our hypervisor to load up HV regs and go */
+static int kvmhv_vcpu_entry_p9_nested(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpcr, u64 *tb)
+{
+ struct kvmppc_vcore *vc = vcpu->arch.vcore;
+ unsigned long host_psscr;
+ unsigned long msr;
+ struct hv_guest_state hvregs;
+ struct p9_host_os_sprs host_os_sprs;
+ s64 dec;
+ int trap;
- kvmppc_xive_push_vcpu(vcpu);
+ msr = mfmsr();
- mtspr(SPRN_SRR0, vcpu->arch.shregs.srr0);
- mtspr(SPRN_SRR1, vcpu->arch.shregs.srr1);
+ save_p9_host_os_sprs(&host_os_sprs);
- trap = __kvmhv_vcpu_entry_p9(vcpu);
+ /*
+ * We need to save and restore the guest visible part of the
+ * psscr (i.e. using SPRN_PSSCR_PR) since the hypervisor
+ * doesn't do this for us. Note this is only required on pseries,
+ * since otherwise it is done in kvmhv_vcpu_entry_p9() below.
+ */
+ host_psscr = mfspr(SPRN_PSSCR_PR);
- /* Advance host PURR/SPURR by the amount used by guest */
- purr = mfspr(SPRN_PURR);
- spurr = mfspr(SPRN_SPURR);
- mtspr(SPRN_PURR, local_paca->kvm_hstate.host_purr +
- purr - vcpu->arch.purr);
- mtspr(SPRN_SPURR, local_paca->kvm_hstate.host_spurr +
- spurr - vcpu->arch.spurr);
- vcpu->arch.purr = purr;
- vcpu->arch.spurr = spurr;
+ kvmppc_msr_hard_disable_set_facilities(vcpu, msr);
+ if (lazy_irq_pending())
+ return 0;
- vcpu->arch.ic = mfspr(SPRN_IC);
- vcpu->arch.pid = mfspr(SPRN_PID);
- vcpu->arch.psscr = mfspr(SPRN_PSSCR) & PSSCR_GUEST_VIS;
+ if (unlikely(load_vcpu_state(vcpu, &host_os_sprs)))
+ msr = mfmsr(); /* TM restore can update msr */
- vcpu->arch.shregs.sprg0 = mfspr(SPRN_SPRG0);
- vcpu->arch.shregs.sprg1 = mfspr(SPRN_SPRG1);
- vcpu->arch.shregs.sprg2 = mfspr(SPRN_SPRG2);
- vcpu->arch.shregs.sprg3 = mfspr(SPRN_SPRG3);
+ if (vcpu->arch.psscr != host_psscr)
+ mtspr(SPRN_PSSCR_PR, vcpu->arch.psscr);
- /* Preserve PSSCR[FAKE_SUSPEND] until we've called kvmppc_save_tm_hv */
- mtspr(SPRN_PSSCR, host_psscr |
- (local_paca->kvm_hstate.fake_suspend << PSSCR_FAKE_SUSPEND_LG));
- mtspr(SPRN_HFSCR, host_hfscr);
- mtspr(SPRN_CIABR, host_ciabr);
- mtspr(SPRN_DAWR, host_dawr);
- mtspr(SPRN_DAWRX, host_dawrx);
- mtspr(SPRN_PID, host_pidr);
+ kvmhv_save_hv_regs(vcpu, &hvregs);
+ hvregs.lpcr = lpcr;
+ hvregs.amor = ~0;
+ vcpu->arch.regs.msr = vcpu->arch.shregs.msr;
+ hvregs.version = HV_GUEST_STATE_VERSION;
+ if (vcpu->arch.nested) {
+ hvregs.lpid = vcpu->arch.nested->shadow_lpid;
+ hvregs.vcpu_token = vcpu->arch.nested_vcpu_id;
+ } else {
+ hvregs.lpid = vcpu->kvm->arch.lpid;
+ hvregs.vcpu_token = vcpu->vcpu_id;
+ }
+ hvregs.hdec_expiry = time_limit;
/*
- * Since this is radix, do a eieio; tlbsync; ptesync sequence in
- * case we interrupted the guest between a tlbie and a ptesync.
+ * When setting DEC, we must always deal with irq_work_raise
+ * via NMI vs setting DEC. The problem occurs right as we
+ * switch into guest mode if a NMI hits and sets pending work
+ * and sets DEC, then that will apply to the guest and not
+ * bring us back to the host.
+ *
+ * irq_work_raise could check a flag (or possibly LPCR[HDICE]
+ * for example) and set HDEC to 1? That wouldn't solve the
+ * nested hv case which needs to abort the hcall or zero the
+ * time limit.
+ *
+ * XXX: Another day's problem.
*/
- asm volatile("eieio; tlbsync; ptesync");
+ mtspr(SPRN_DEC, kvmppc_dec_expires_host_tb(vcpu) - *tb);
- mtspr(SPRN_LPID, vcpu->kvm->arch.host_lpid); /* restore host LPID */
- isync();
+ mtspr(SPRN_DAR, vcpu->arch.shregs.dar);
+ mtspr(SPRN_DSISR, vcpu->arch.shregs.dsisr);
+ switch_pmu_to_guest(vcpu, &host_os_sprs);
+ accumulate_time(vcpu, &vcpu->arch.in_guest);
+ trap = plpar_hcall_norets(H_ENTER_NESTED, __pa(&hvregs),
+ __pa(&vcpu->arch.regs));
+ accumulate_time(vcpu, &vcpu->arch.guest_exit);
+ kvmhv_restore_hv_return_state(vcpu, &hvregs);
+ switch_pmu_to_host(vcpu, &host_os_sprs);
+ vcpu->arch.shregs.msr = vcpu->arch.regs.msr;
+ vcpu->arch.shregs.dar = mfspr(SPRN_DAR);
+ vcpu->arch.shregs.dsisr = mfspr(SPRN_DSISR);
+ vcpu->arch.psscr = mfspr(SPRN_PSSCR_PR);
+
+ store_vcpu_state(vcpu);
- vc->dpdes = mfspr(SPRN_DPDES);
- vc->vtb = mfspr(SPRN_VTB);
- mtspr(SPRN_DPDES, 0);
- if (vc->pcr)
- mtspr(SPRN_PCR, PCR_MASK);
+ dec = mfspr(SPRN_DEC);
+ if (!(lpcr & LPCR_LD)) /* Sign extend if not using large decrementer */
+ dec = (s32) dec;
+ *tb = mftb();
+ vcpu->arch.dec_expires = dec + (*tb + vc->tb_offset);
- if (vc->tb_offset_applied) {
- u64 new_tb = mftb() - vc->tb_offset_applied;
- mtspr(SPRN_TBU40, new_tb);
- tb = mftb();
- if ((tb & 0xffffff) < (new_tb & 0xffffff))
- mtspr(SPRN_TBU40, new_tb + 0x1000000);
- vc->tb_offset_applied = 0;
- }
+ timer_rearm_host_dec(*tb);
- mtspr(SPRN_HDEC, 0x7fffffff);
- mtspr(SPRN_LPCR, vcpu->kvm->arch.host_lpcr);
+ restore_p9_host_os_sprs(vcpu, &host_os_sprs);
+ if (vcpu->arch.psscr != host_psscr)
+ mtspr(SPRN_PSSCR_PR, host_psscr);
return trap;
}
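The decrementer read-back above is sign-extended when LPCR[LD] is clear because, without the large decrementer, DEC is a 32-bit signed quantity; a minimal sketch of the conversion (example_read_dec() is illustrative):

#include <linux/types.h>

static s64 example_read_dec(u64 raw_dec, bool large_decrementer)
{
	s64 dec = (s64)raw_dec;

	if (!large_decrementer)
		dec = (s32)dec;	/* e.g. 0xffffffff becomes -1, not ~4 billion */

	return dec;
}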
/*
- * Virtual-mode guest entry for POWER9 and later when the host and
- * guest are both using the radix MMU. The LPIDR has already been set.
+ * Guest entry for POWER9 and later CPUs.
*/
-int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
- unsigned long lpcr)
+static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
+ unsigned long lpcr, u64 *tb)
{
- struct kvmppc_vcore *vc = vcpu->arch.vcore;
- unsigned long host_dscr = mfspr(SPRN_DSCR);
- unsigned long host_tidr = mfspr(SPRN_TIDR);
- unsigned long host_iamr = mfspr(SPRN_IAMR);
- unsigned long host_amr = mfspr(SPRN_AMR);
- s64 dec;
- u64 tb;
- int trap, save_pmu;
+ struct kvm *kvm = vcpu->kvm;
+ struct kvm_nested_guest *nested = vcpu->arch.nested;
+ u64 next_timer;
+ int trap;
- dec = mfspr(SPRN_DEC);
- tb = mftb();
- if (dec < 512)
+ next_timer = timer_get_next_tb();
+ if (*tb >= next_timer)
return BOOK3S_INTERRUPT_HV_DECREMENTER;
- local_paca->kvm_hstate.dec_expires = dec + tb;
- if (local_paca->kvm_hstate.dec_expires < time_limit)
- time_limit = local_paca->kvm_hstate.dec_expires;
+ if (next_timer < time_limit)
+ time_limit = next_timer;
+ else if (*tb >= time_limit) /* nested time limit */
+ return BOOK3S_INTERRUPT_NESTED_HV_DECREMENTER;
vcpu->arch.ceded = 0;
- kvmhv_save_host_pmu(); /* saves it to PACA kvm_hstate */
-
- kvmppc_subcore_enter_guest();
-
- vc->entry_exit_map = 1;
- vc->in_guest = 1;
-
- if (vcpu->arch.vpa.pinned_addr) {
- struct lppaca *lp = vcpu->arch.vpa.pinned_addr;
- u32 yield_count = be32_to_cpu(lp->yield_count) + 1;
- lp->yield_count = cpu_to_be32(yield_count);
- vcpu->arch.vpa.dirty = 1;
- }
-
- if (cpu_has_feature(CPU_FTR_TM) ||
- cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST))
- kvmppc_restore_tm_hv(vcpu, vcpu->arch.shregs.msr, true);
-
- kvmhv_load_guest_pmu(vcpu);
-
- msr_check_and_set(MSR_FP | MSR_VEC | MSR_VSX);
- load_fp_state(&vcpu->arch.fp);
-#ifdef CONFIG_ALTIVEC
- load_vr_state(&vcpu->arch.vr);
-#endif
- mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
-
- mtspr(SPRN_DSCR, vcpu->arch.dscr);
- mtspr(SPRN_IAMR, vcpu->arch.iamr);
- mtspr(SPRN_PSPB, vcpu->arch.pspb);
- mtspr(SPRN_FSCR, vcpu->arch.fscr);
- mtspr(SPRN_TAR, vcpu->arch.tar);
- mtspr(SPRN_EBBHR, vcpu->arch.ebbhr);
- mtspr(SPRN_EBBRR, vcpu->arch.ebbrr);
- mtspr(SPRN_BESCR, vcpu->arch.bescr);
- mtspr(SPRN_WORT, vcpu->arch.wort);
- mtspr(SPRN_TIDR, vcpu->arch.tid);
- mtspr(SPRN_DAR, vcpu->arch.shregs.dar);
- mtspr(SPRN_DSISR, vcpu->arch.shregs.dsisr);
- mtspr(SPRN_AMR, vcpu->arch.amr);
- mtspr(SPRN_UAMOR, vcpu->arch.uamor);
-
- if (!(vcpu->arch.ctrl & 1))
- mtspr(SPRN_CTRLT, mfspr(SPRN_CTRLF) & ~1);
-
- mtspr(SPRN_DEC, vcpu->arch.dec_expires - mftb());
+ vcpu_vpa_increment_dispatch(vcpu);
if (kvmhv_on_pseries()) {
- /*
- * We need to save and restore the guest visible part of the
- * psscr (i.e. using SPRN_PSSCR_PR) since the hypervisor
- * doesn't do this for us. Note only required if pseries since
- * this is done in kvmhv_load_hv_regs_and_go() below otherwise.
- */
- unsigned long host_psscr;
- /* call our hypervisor to load up HV regs and go */
- struct hv_guest_state hvregs;
-
- host_psscr = mfspr(SPRN_PSSCR_PR);
- mtspr(SPRN_PSSCR_PR, vcpu->arch.psscr);
- kvmhv_save_hv_regs(vcpu, &hvregs);
- hvregs.lpcr = lpcr;
- vcpu->arch.regs.msr = vcpu->arch.shregs.msr;
- hvregs.version = HV_GUEST_STATE_VERSION;
- if (vcpu->arch.nested) {
- hvregs.lpid = vcpu->arch.nested->shadow_lpid;
- hvregs.vcpu_token = vcpu->arch.nested_vcpu_id;
- } else {
- hvregs.lpid = vcpu->kvm->arch.lpid;
- hvregs.vcpu_token = vcpu->vcpu_id;
- }
- hvregs.hdec_expiry = time_limit;
- trap = plpar_hcall_norets(H_ENTER_NESTED, __pa(&hvregs),
- __pa(&vcpu->arch.regs));
- kvmhv_restore_hv_return_state(vcpu, &hvregs);
- vcpu->arch.shregs.msr = vcpu->arch.regs.msr;
- vcpu->arch.shregs.dar = mfspr(SPRN_DAR);
- vcpu->arch.shregs.dsisr = mfspr(SPRN_DSISR);
- vcpu->arch.psscr = mfspr(SPRN_PSSCR_PR);
- mtspr(SPRN_PSSCR_PR, host_psscr);
+ trap = kvmhv_vcpu_entry_p9_nested(vcpu, time_limit, lpcr, tb);
/* H_CEDE has to be handled now, not later */
- if (trap == BOOK3S_INTERRUPT_SYSCALL && !vcpu->arch.nested &&
+ if (trap == BOOK3S_INTERRUPT_SYSCALL && !nested &&
kvmppc_get_gpr(vcpu, 3) == H_CEDE) {
- kvmppc_nested_cede(vcpu);
+ kvmppc_cede(vcpu);
+ kvmppc_set_gpr(vcpu, 3, 0);
trap = 0;
}
- } else {
- trap = kvmhv_load_hv_regs_and_go(vcpu, time_limit, lpcr);
- }
- vcpu->arch.slb_max = 0;
- dec = mfspr(SPRN_DEC);
- if (!(lpcr & LPCR_LD)) /* Sign extend if not using large decrementer */
- dec = (s32) dec;
- tb = mftb();
- vcpu->arch.dec_expires = dec + tb;
- vcpu->cpu = -1;
- vcpu->arch.thread_cpu = -1;
- vcpu->arch.ctrl = mfspr(SPRN_CTRLF);
-
- vcpu->arch.iamr = mfspr(SPRN_IAMR);
- vcpu->arch.pspb = mfspr(SPRN_PSPB);
- vcpu->arch.fscr = mfspr(SPRN_FSCR);
- vcpu->arch.tar = mfspr(SPRN_TAR);
- vcpu->arch.ebbhr = mfspr(SPRN_EBBHR);
- vcpu->arch.ebbrr = mfspr(SPRN_EBBRR);
- vcpu->arch.bescr = mfspr(SPRN_BESCR);
- vcpu->arch.wort = mfspr(SPRN_WORT);
- vcpu->arch.tid = mfspr(SPRN_TIDR);
- vcpu->arch.amr = mfspr(SPRN_AMR);
- vcpu->arch.uamor = mfspr(SPRN_UAMOR);
- vcpu->arch.dscr = mfspr(SPRN_DSCR);
-
- mtspr(SPRN_PSPB, 0);
- mtspr(SPRN_WORT, 0);
- mtspr(SPRN_UAMOR, 0);
- mtspr(SPRN_DSCR, host_dscr);
- mtspr(SPRN_TIDR, host_tidr);
- mtspr(SPRN_IAMR, host_iamr);
- mtspr(SPRN_PSPB, 0);
-
- if (host_amr != vcpu->arch.amr)
- mtspr(SPRN_AMR, host_amr);
-
- msr_check_and_set(MSR_FP | MSR_VEC | MSR_VSX);
- store_fp_state(&vcpu->arch.fp);
-#ifdef CONFIG_ALTIVEC
- store_vr_state(&vcpu->arch.vr);
-#endif
- vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
+ } else if (nested) {
+ __this_cpu_write(cpu_in_guest, kvm);
+ trap = kvmhv_vcpu_entry_p9(vcpu, time_limit, lpcr, tb);
+ __this_cpu_write(cpu_in_guest, NULL);
- if (cpu_has_feature(CPU_FTR_TM) ||
- cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST))
- kvmppc_save_tm_hv(vcpu, vcpu->arch.shregs.msr, true);
+ } else {
+ kvmppc_xive_push_vcpu(vcpu);
- save_pmu = 1;
- if (vcpu->arch.vpa.pinned_addr) {
- struct lppaca *lp = vcpu->arch.vpa.pinned_addr;
- u32 yield_count = be32_to_cpu(lp->yield_count) + 1;
- lp->yield_count = cpu_to_be32(yield_count);
- vcpu->arch.vpa.dirty = 1;
- save_pmu = lp->pmcregs_in_use;
- }
- /* Must save pmu if this guest is capable of running nested guests */
- save_pmu |= nesting_enabled(vcpu->kvm);
+ __this_cpu_write(cpu_in_guest, kvm);
+ trap = kvmhv_vcpu_entry_p9(vcpu, time_limit, lpcr, tb);
+ __this_cpu_write(cpu_in_guest, NULL);
- kvmhv_save_guest_pmu(vcpu, save_pmu);
+ if (trap == BOOK3S_INTERRUPT_SYSCALL &&
+ !(vcpu->arch.shregs.msr & MSR_PR)) {
+ unsigned long req = kvmppc_get_gpr(vcpu, 3);
- vc->entry_exit_map = 0x101;
- vc->in_guest = 0;
+ /*
+ * XIVE rearm and XICS hcalls must be handled
+ * before xive context is pulled (is this
+ * true?)
+ */
+ if (req == H_CEDE) {
+ /* H_CEDE has to be handled now */
+ kvmppc_cede(vcpu);
+ if (!kvmppc_xive_rearm_escalation(vcpu)) {
+ /*
+ * Pending escalation so abort
+ * the cede.
+ */
+ vcpu->arch.ceded = 0;
+ }
+ kvmppc_set_gpr(vcpu, 3, 0);
+ trap = 0;
+
+ } else if (req == H_ENTER_NESTED) {
+ /*
+ * L2 should not run with the L1
+ * context so rearm and pull it.
+ */
+ if (!kvmppc_xive_rearm_escalation(vcpu)) {
+ /*
+ * Pending escalation so abort
+ * H_ENTER_NESTED.
+ */
+ kvmppc_set_gpr(vcpu, 3, 0);
+ trap = 0;
+ }
- mtspr(SPRN_DEC, local_paca->kvm_hstate.dec_expires - mftb());
- mtspr(SPRN_SPRG_VDSO_WRITE, local_paca->sprg_vdso);
+ } else if (hcall_is_xics(req)) {
+ int ret;
- kvmhv_load_host_pmu();
+ ret = kvmppc_xive_xics_hcall(vcpu, req);
+ if (ret != H_TOO_HARD) {
+ kvmppc_set_gpr(vcpu, 3, ret);
+ trap = 0;
+ }
+ }
+ }
+ kvmppc_xive_pull_vcpu(vcpu);
- kvmppc_subcore_exit_guest();
+ if (kvm_is_radix(kvm))
+ vcpu->arch.slb_max = 0;
+ }
+
+ vcpu_vpa_increment_dispatch(vcpu);
return trap;
}
@@ -3753,6 +4272,13 @@ static bool kvmppc_vcpu_woken(struct kvm_vcpu *vcpu)
return false;
}
+static bool kvmppc_vcpu_check_block(struct kvm_vcpu *vcpu)
+{
+ if (!vcpu->arch.ceded || kvmppc_vcpu_woken(vcpu))
+ return true;
+ return false;
+}
+
/*
* Check to see if any of the runnable vcpus on the vcore have pending
* exceptions or are no longer ceded
@@ -3763,7 +4289,7 @@ static int kvmppc_vcore_check_block(struct kvmppc_vcore *vc)
int i;
for_each_runnable_thread(i, vcpu, vc) {
- if (!vcpu->arch.ceded || kvmppc_vcpu_woken(vcpu))
+ if (kvmppc_vcpu_check_block(vcpu))
return 1;
}
@@ -3779,13 +4305,14 @@ static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
ktime_t cur, start_poll, start_wait;
int do_sleep = 1;
u64 block_ns;
- DECLARE_SWAITQUEUE(wait);
+
+ WARN_ON_ONCE(cpu_has_feature(CPU_FTR_ARCH_300));
/* Poll for pending exceptions and ceded state */
cur = start_poll = ktime_get();
if (vc->halt_poll_ns) {
ktime_t stop = ktime_add_ns(start_poll, vc->halt_poll_ns);
- ++vc->runner->stat.halt_attempted_poll;
+ ++vc->runner->stat.generic.halt_attempted_poll;
vc->vcore_state = VCORE_POLLING;
spin_unlock(&vc->lock);
@@ -3796,38 +4323,38 @@ static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
break;
}
cur = ktime_get();
- } while (single_task_running() && ktime_before(cur, stop));
+ } while (kvm_vcpu_can_poll(cur, stop));
spin_lock(&vc->lock);
vc->vcore_state = VCORE_INACTIVE;
if (!do_sleep) {
- ++vc->runner->stat.halt_successful_poll;
+ ++vc->runner->stat.generic.halt_successful_poll;
goto out;
}
}
- prepare_to_swait_exclusive(&vc->wq, &wait, TASK_INTERRUPTIBLE);
-
+ prepare_to_rcuwait(&vc->wait);
+ set_current_state(TASK_INTERRUPTIBLE);
if (kvmppc_vcore_check_block(vc)) {
- finish_swait(&vc->wq, &wait);
+ finish_rcuwait(&vc->wait);
do_sleep = 0;
/* If we polled, count this as a successful poll */
if (vc->halt_poll_ns)
- ++vc->runner->stat.halt_successful_poll;
+ ++vc->runner->stat.generic.halt_successful_poll;
goto out;
}
start_wait = ktime_get();
vc->vcore_state = VCORE_SLEEPING;
- trace_kvmppc_vcore_blocked(vc, 0);
+ trace_kvmppc_vcore_blocked(vc->runner, 0);
spin_unlock(&vc->lock);
schedule();
- finish_swait(&vc->wq, &wait);
+ finish_rcuwait(&vc->wait);
spin_lock(&vc->lock);
vc->vcore_state = VCORE_INACTIVE;
- trace_kvmppc_vcore_blocked(vc, 1);
+ trace_kvmppc_vcore_blocked(vc->runner, 1);
++vc->runner->stat.halt_successful_wait;
cur = ktime_get();
@@ -3837,19 +4364,31 @@ out:
/* Attribute wait time */
if (do_sleep) {
- vc->runner->stat.halt_wait_ns +=
+ vc->runner->stat.generic.halt_wait_ns +=
ktime_to_ns(cur) - ktime_to_ns(start_wait);
+ KVM_STATS_LOG_HIST_UPDATE(
+ vc->runner->stat.generic.halt_wait_hist,
+ ktime_to_ns(cur) - ktime_to_ns(start_wait));
/* Attribute failed poll time */
- if (vc->halt_poll_ns)
- vc->runner->stat.halt_poll_fail_ns +=
+ if (vc->halt_poll_ns) {
+ vc->runner->stat.generic.halt_poll_fail_ns +=
ktime_to_ns(start_wait) -
ktime_to_ns(start_poll);
+ KVM_STATS_LOG_HIST_UPDATE(
+ vc->runner->stat.generic.halt_poll_fail_hist,
+ ktime_to_ns(start_wait) -
+ ktime_to_ns(start_poll));
+ }
} else {
/* Attribute successful poll time */
- if (vc->halt_poll_ns)
- vc->runner->stat.halt_poll_success_ns +=
+ if (vc->halt_poll_ns) {
+ vc->runner->stat.generic.halt_poll_success_ns +=
ktime_to_ns(cur) -
ktime_to_ns(start_poll);
+ KVM_STATS_LOG_HIST_UPDATE(
+ vc->runner->stat.generic.halt_poll_success_hist,
+ ktime_to_ns(cur) - ktime_to_ns(start_poll));
+ }
}
/* Adjust poll time */
@@ -3874,7 +4413,6 @@ out:
/*
* This never fails for a radix guest, as none of the operations it does
* for a radix guest can fail or have a way to report failure.
- * kvmhv_run_single_vcpu() relies on this fact.
*/
static int kvmhv_setup_mmu(struct kvm_vcpu *vcpu)
{
@@ -3895,15 +4433,16 @@ static int kvmhv_setup_mmu(struct kvm_vcpu *vcpu)
return r;
}
-static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
+static int kvmppc_run_vcpu(struct kvm_vcpu *vcpu)
{
+ struct kvm_run *run = vcpu->run;
int n_ceded, i, r;
struct kvmppc_vcore *vc;
struct kvm_vcpu *v;
trace_kvmppc_run_vcpu_enter(vcpu);
- kvm_run->exit_reason = 0;
+ run->exit_reason = 0;
vcpu->arch.ret = RESUME_GUEST;
vcpu->arch.trap = 0;
kvmppc_update_vpas(vcpu);
@@ -3915,7 +4454,6 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
spin_lock(&vc->lock);
vcpu->arch.ceded = 0;
vcpu->arch.run_task = current;
- vcpu->arch.kvm_run = kvm_run;
vcpu->arch.stolen_logged = vcore_stolen_time(vc, mftb());
vcpu->arch.state = KVMPPC_VCPU_RUNNABLE;
vcpu->arch.busy_preempt = TB_NIL;
@@ -3931,11 +4469,11 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
if ((vc->vcore_state == VCORE_PIGGYBACK ||
vc->vcore_state == VCORE_RUNNING) &&
!VCORE_IS_EXITING(vc)) {
- kvmppc_create_dtl_entry(vcpu, vc);
+ kvmppc_update_vpa_dispatch(vcpu, vc);
kvmppc_start_thread(vcpu, vc);
trace_kvm_guest_enter(vcpu);
} else if (vc->vcore_state == VCORE_SLEEPING) {
- swake_up_one(&vc->wq);
+ rcuwait_wake_up(&vc->wait);
}
}
@@ -3948,8 +4486,8 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
r = kvmhv_setup_mmu(vcpu);
spin_lock(&vc->lock);
if (r) {
- kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
- kvm_run->fail_entry.
+ run->exit_reason = KVM_EXIT_FAIL_ENTRY;
+ run->fail_entry.
hardware_entry_failure_reason = 0;
vcpu->arch.ret = r;
break;
@@ -3966,9 +4504,9 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
for_each_runnable_thread(i, v, vc) {
kvmppc_core_prepare_to_enter(v);
if (signal_pending(v->arch.run_task)) {
- kvmppc_remove_runnable(vc, v);
+ kvmppc_remove_runnable(vc, v, mftb());
v->stat.signal_exits++;
- v->arch.kvm_run->exit_reason = KVM_EXIT_INTR;
+ v->run->exit_reason = KVM_EXIT_INTR;
v->arch.ret = -EINTR;
wake_up(&v->arch.cpu_run);
}
@@ -4007,9 +4545,9 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
kvmppc_vcore_end_preempt(vc);
if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) {
- kvmppc_remove_runnable(vc, vcpu);
+ kvmppc_remove_runnable(vc, vcpu, mftb());
vcpu->stat.signal_exits++;
- kvm_run->exit_reason = KVM_EXIT_INTR;
+ run->exit_reason = KVM_EXIT_INTR;
vcpu->arch.ret = -EINTR;
}
@@ -4020,73 +4558,91 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
wake_up(&v->arch.cpu_run);
}
- trace_kvmppc_run_vcpu_exit(vcpu, kvm_run);
+ trace_kvmppc_run_vcpu_exit(vcpu);
spin_unlock(&vc->lock);
return vcpu->arch.ret;
}
-int kvmhv_run_single_vcpu(struct kvm_run *kvm_run,
- struct kvm_vcpu *vcpu, u64 time_limit,
+int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
unsigned long lpcr)
{
+ struct rcuwait *wait = kvm_arch_vcpu_get_wait(vcpu);
+ struct kvm_run *run = vcpu->run;
int trap, r, pcpu;
- int srcu_idx, lpid;
+ int srcu_idx;
struct kvmppc_vcore *vc;
struct kvm *kvm = vcpu->kvm;
struct kvm_nested_guest *nested = vcpu->arch.nested;
+ unsigned long flags;
+ u64 tb;
trace_kvmppc_run_vcpu_enter(vcpu);
- kvm_run->exit_reason = 0;
+ run->exit_reason = 0;
vcpu->arch.ret = RESUME_GUEST;
vcpu->arch.trap = 0;
vc = vcpu->arch.vcore;
vcpu->arch.ceded = 0;
vcpu->arch.run_task = current;
- vcpu->arch.kvm_run = kvm_run;
- vcpu->arch.stolen_logged = vcore_stolen_time(vc, mftb());
- vcpu->arch.state = KVMPPC_VCPU_RUNNABLE;
- vcpu->arch.busy_preempt = TB_NIL;
vcpu->arch.last_inst = KVM_INST_FETCH_FAILED;
- vc->runnable_threads[0] = vcpu;
- vc->n_runnable = 1;
- vc->runner = vcpu;
/* See if the MMU is ready to go */
- if (!kvm->arch.mmu_ready)
- kvmhv_setup_mmu(vcpu);
+ if (unlikely(!kvm->arch.mmu_ready)) {
+ r = kvmhv_setup_mmu(vcpu);
+ if (r) {
+ run->exit_reason = KVM_EXIT_FAIL_ENTRY;
+ run->fail_entry.hardware_entry_failure_reason = 0;
+ vcpu->arch.ret = r;
+ return r;
+ }
+ }
if (need_resched())
cond_resched();
kvmppc_update_vpas(vcpu);
- init_vcore_to_run(vc);
- vc->preempt_tb = TB_NIL;
-
preempt_disable();
pcpu = smp_processor_id();
- vc->pcpu = pcpu;
- kvmppc_prepare_radix_vcpu(vcpu, pcpu);
+ if (kvm_is_radix(kvm))
+ kvmppc_prepare_radix_vcpu(vcpu, pcpu);
+
+ /* flags save not required, but irq_pmu has no disable/enable API */
+ powerpc_local_irq_pmu_save(flags);
+
+ vcpu->arch.state = KVMPPC_VCPU_RUNNABLE;
- local_irq_disable();
- hard_irq_disable();
if (signal_pending(current))
goto sigpend;
- if (lazy_irq_pending() || need_resched() || !kvm->arch.mmu_ready)
+ if (need_resched() || !kvm->arch.mmu_ready)
goto out;
+ vcpu->cpu = pcpu;
+ vcpu->arch.thread_cpu = pcpu;
+ vc->pcpu = pcpu;
+ local_paca->kvm_hstate.kvm_vcpu = vcpu;
+ local_paca->kvm_hstate.ptid = 0;
+ local_paca->kvm_hstate.fake_suspend = 0;
+
+ /*
+ * Orders set cpu/thread_cpu vs testing for pending interrupts and
+ * doorbells below. The other side is when these fields are set vs
+ * kvmppc_fast_vcpu_kick_hv reading the cpu/thread_cpu fields to
+ * kick a vCPU to notice the pending interrupt.
+ */
+ smp_mb();
+
if (!nested) {
kvmppc_core_prepare_to_enter(vcpu);
- if (vcpu->arch.doorbell_request) {
- vc->dpdes = 1;
- smp_wmb();
- vcpu->arch.doorbell_request = 0;
- }
- if (test_bit(BOOK3S_IRQPRIO_EXTERNAL,
- &vcpu->arch.pending_exceptions))
+ if (vcpu->arch.shregs.msr & MSR_EE) {
+ if (xive_interrupt_pending(vcpu))
+ kvmppc_inject_interrupt_hv(vcpu,
+ BOOK3S_INTERRUPT_EXTERNAL, 0);
+ } else if (test_bit(BOOK3S_IRQPRIO_EXTERNAL,
+ &vcpu->arch.pending_exceptions)) {
lpcr |= LPCR_MER;
+ }
} else if (vcpu->arch.pending_exceptions ||
vcpu->arch.doorbell_request ||
xive_interrupt_pending(vcpu)) {
@@ -4094,56 +4650,53 @@ int kvmhv_run_single_vcpu(struct kvm_run *kvm_run,
goto out;
}
- kvmppc_clear_host_core(pcpu);
+ if (vcpu->arch.timer_running) {
+ hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
+ vcpu->arch.timer_running = 0;
+ }
- local_paca->kvm_hstate.tid = 0;
- local_paca->kvm_hstate.napping = 0;
- local_paca->kvm_hstate.kvm_split_mode = NULL;
- kvmppc_start_thread(vcpu, vc);
- kvmppc_create_dtl_entry(vcpu, vc);
- trace_kvm_guest_enter(vcpu);
+ tb = mftb();
- vc->vcore_state = VCORE_RUNNING;
- trace_kvmppc_run_core(vc, 0);
+ kvmppc_update_vpa_dispatch_p9(vcpu, vc, tb + vc->tb_offset);
- if (cpu_has_feature(CPU_FTR_HVMODE)) {
- lpid = nested ? nested->shadow_lpid : kvm->arch.lpid;
- mtspr(SPRN_LPID, lpid);
- isync();
- kvmppc_check_need_tlb_flush(kvm, pcpu, nested);
- }
+ trace_kvm_guest_enter(vcpu);
- guest_enter_irqoff();
+ guest_timing_enter_irqoff();
srcu_idx = srcu_read_lock(&kvm->srcu);
+ guest_state_enter_irqoff();
this_cpu_disable_ftrace();
- /* Tell lockdep that we're about to enable interrupts */
- trace_hardirqs_on();
-
- trap = kvmhv_p9_guest_entry(vcpu, time_limit, lpcr);
+ trap = kvmhv_p9_guest_entry(vcpu, time_limit, lpcr, &tb);
vcpu->arch.trap = trap;
- trace_hardirqs_off();
-
this_cpu_enable_ftrace();
+ guest_state_exit_irqoff();
srcu_read_unlock(&kvm->srcu, srcu_idx);
- if (cpu_has_feature(CPU_FTR_HVMODE)) {
- mtspr(SPRN_LPID, kvm->arch.host_lpid);
- isync();
- }
-
set_irq_happened(trap);
- kvmppc_set_host_core(pcpu);
+ vcpu->cpu = -1;
+ vcpu->arch.thread_cpu = -1;
+ vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
- local_irq_enable();
- guest_exit();
+ if (!vtime_accounting_enabled_this_cpu()) {
+ powerpc_local_irq_pmu_restore(flags);
+ /*
+ * Service IRQs here before guest_timing_exit_irqoff() so any
+ * ticks that occurred while running the guest are accounted to
+ * the guest. If vtime accounting is enabled, accounting uses
+ * TB rather than ticks, so it can be done without enabling
+ * interrupts here, which has the problem that it accounts
+ * interrupt processing overhead to the host.
+ */
+ powerpc_local_irq_pmu_save(flags);
+ }
+ guest_timing_exit_irqoff();
- cpumask_clear_cpu(pcpu, &kvm->arch.cpu_in_guest);
+ powerpc_local_irq_pmu_restore(flags);
preempt_enable();
@@ -4153,7 +4706,7 @@ int kvmhv_run_single_vcpu(struct kvm_run *kvm_run,
* by L2 and the L1 decrementer is provided in hdec_expires
*/
if (kvmppc_core_pending_dec(vcpu) &&
- ((get_tb() < vcpu->arch.dec_expires) ||
+ ((tb < kvmppc_dec_expires_host_tb(vcpu)) ||
(trap == BOOK3S_INTERRUPT_SYSCALL &&
kvmppc_get_gpr(vcpu, 3) == H_ENTER_NESTED)))
kvmppc_core_dequeue_dec(vcpu);
@@ -4162,69 +4715,80 @@ int kvmhv_run_single_vcpu(struct kvm_run *kvm_run,
r = RESUME_GUEST;
if (trap) {
if (!nested)
- r = kvmppc_handle_exit_hv(kvm_run, vcpu, current);
+ r = kvmppc_handle_exit_hv(vcpu, current);
else
- r = kvmppc_handle_nested_exit(kvm_run, vcpu);
+ r = kvmppc_handle_nested_exit(vcpu);
}
vcpu->arch.ret = r;
- if (is_kvmppc_resume_guest(r) && vcpu->arch.ceded &&
- !kvmppc_vcpu_woken(vcpu)) {
+ if (is_kvmppc_resume_guest(r) && !kvmppc_vcpu_check_block(vcpu)) {
kvmppc_set_timer(vcpu);
- while (vcpu->arch.ceded && !kvmppc_vcpu_woken(vcpu)) {
+
+ prepare_to_rcuwait(wait);
+ for (;;) {
+ set_current_state(TASK_INTERRUPTIBLE);
if (signal_pending(current)) {
vcpu->stat.signal_exits++;
- kvm_run->exit_reason = KVM_EXIT_INTR;
+ run->exit_reason = KVM_EXIT_INTR;
vcpu->arch.ret = -EINTR;
break;
}
- spin_lock(&vc->lock);
- kvmppc_vcore_blocked(vc);
- spin_unlock(&vc->lock);
+
+ if (kvmppc_vcpu_check_block(vcpu))
+ break;
+
+ trace_kvmppc_vcore_blocked(vcpu, 0);
+ schedule();
+ trace_kvmppc_vcore_blocked(vcpu, 1);
}
+ finish_rcuwait(wait);
}
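The wait loop above is the standard rcuwait sleep pattern (the wake-up side uses rcuwait_wake_up(), as in kvmppc_run_vcpu() earlier in this file); a stripped-down sketch, with example_should_wake() standing in for kvmppc_vcpu_check_block():

#include <linux/rcuwait.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>

static void example_rcuwait_sleep(struct rcuwait *wait, bool (*example_should_wake)(void))
{
	prepare_to_rcuwait(wait);
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (signal_pending(current) || example_should_wake())
			break;
		schedule();
	}
	finish_rcuwait(wait);
}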
vcpu->arch.ceded = 0;
- vc->vcore_state = VCORE_INACTIVE;
- trace_kvmppc_run_core(vc, 1);
-
done:
- kvmppc_remove_runnable(vc, vcpu);
- trace_kvmppc_run_vcpu_exit(vcpu, kvm_run);
+ trace_kvmppc_run_vcpu_exit(vcpu);
return vcpu->arch.ret;
sigpend:
vcpu->stat.signal_exits++;
- kvm_run->exit_reason = KVM_EXIT_INTR;
+ run->exit_reason = KVM_EXIT_INTR;
vcpu->arch.ret = -EINTR;
out:
- local_irq_enable();
+ vcpu->cpu = -1;
+ vcpu->arch.thread_cpu = -1;
+ vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
+ powerpc_local_irq_pmu_restore(flags);
preempt_enable();
goto done;
}
-static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
+static int kvmppc_vcpu_run_hv(struct kvm_vcpu *vcpu)
{
+ struct kvm_run *run = vcpu->run;
int r;
int srcu_idx;
- unsigned long ebb_regs[3] = {}; /* shut up GCC */
- unsigned long user_tar = 0;
- unsigned int user_vrsave;
struct kvm *kvm;
+ unsigned long msr;
+
+ start_timing(vcpu, &vcpu->arch.vcpu_entry);
if (!vcpu->arch.sane) {
run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
return -EINVAL;
}
+ /* No need to go into the guest when all we'll do is come back out */
+ if (signal_pending(current)) {
+ run->exit_reason = KVM_EXIT_INTR;
+ return -EINTR;
+ }
+
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
* Don't allow entry with a suspended transaction, because
* the guest entry/exit code will lose it.
- * If the guest has TM enabled, save away their TM-related SPRs
- * (they will get restored by the TM unavailable interrupt).
*/
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
if (cpu_has_feature(CPU_FTR_TM) && current->thread.regs &&
(current->thread.regs->msr & MSR_TM)) {
if (MSR_TM_ACTIVE(current->thread.regs->msr)) {
@@ -4232,12 +4796,6 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
run->fail_entry.hardware_entry_failure_reason = 0;
return -EINVAL;
}
- /* Enable TM so we can read the TM SPRs */
- mtmsr(mfmsr() | MSR_TM);
- current->thread.tm_tfhar = mfspr(SPRN_TFHAR);
- current->thread.tm_tfiar = mfspr(SPRN_TFIAR);
- current->thread.tm_texasr = mfspr(SPRN_TEXASR);
- current->thread.regs->msr &= ~MSR_TM;
}
#endif
@@ -4252,57 +4810,62 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
kvmppc_core_prepare_to_enter(vcpu);
- /* No need to go into the guest when all we'll do is come back out */
- if (signal_pending(current)) {
- run->exit_reason = KVM_EXIT_INTR;
- return -EINTR;
- }
-
kvm = vcpu->kvm;
atomic_inc(&kvm->arch.vcpus_running);
/* Order vcpus_running vs. mmu_ready, see kvmppc_alloc_reset_hpt */
smp_mb();
- flush_all_to_thread(current);
+ msr = 0;
+ if (IS_ENABLED(CONFIG_PPC_FPU))
+ msr |= MSR_FP;
+ if (cpu_has_feature(CPU_FTR_ALTIVEC))
+ msr |= MSR_VEC;
+ if (cpu_has_feature(CPU_FTR_VSX))
+ msr |= MSR_VSX;
+ if ((cpu_has_feature(CPU_FTR_TM) ||
+ cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST)) &&
+ (vcpu->arch.hfscr & HFSCR_TM))
+ msr |= MSR_TM;
+ msr = msr_check_and_set(msr);
- /* Save userspace EBB and other register values */
- if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
- ebb_regs[0] = mfspr(SPRN_EBBHR);
- ebb_regs[1] = mfspr(SPRN_EBBRR);
- ebb_regs[2] = mfspr(SPRN_BESCR);
- user_tar = mfspr(SPRN_TAR);
- }
- user_vrsave = mfspr(SPRN_VRSAVE);
+ kvmppc_save_user_regs();
+
+ kvmppc_save_current_sprs();
- vcpu->arch.wqp = &vcpu->arch.vcore->wq;
+ if (!cpu_has_feature(CPU_FTR_ARCH_300))
+ vcpu->arch.waitp = &vcpu->arch.vcore->wait;
vcpu->arch.pgdir = kvm->mm->pgd;
vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
do {
- /*
- * The early POWER9 chips that can't mix radix and HPT threads
- * on the same core also need the workaround for the problem
- * where the TLB would prefetch entries in the guest exit path
- * for radix guests using the guest PIDR value and LPID 0.
- * The workaround is in the old path (kvmppc_run_vcpu())
- * but not the new path (kvmhv_run_single_vcpu()).
- */
- if (kvm->arch.threads_indep && kvm_is_radix(kvm) &&
- !no_mixing_hpt_and_radix)
- r = kvmhv_run_single_vcpu(run, vcpu, ~(u64)0,
+ accumulate_time(vcpu, &vcpu->arch.guest_entry);
+ if (cpu_has_feature(CPU_FTR_ARCH_300))
+ r = kvmhv_run_single_vcpu(vcpu, ~(u64)0,
vcpu->arch.vcore->lpcr);
else
- r = kvmppc_run_vcpu(run, vcpu);
-
- if (run->exit_reason == KVM_EXIT_PAPR_HCALL &&
- !(vcpu->arch.shregs.msr & MSR_PR)) {
+ r = kvmppc_run_vcpu(vcpu);
+
+ if (run->exit_reason == KVM_EXIT_PAPR_HCALL) {
+ accumulate_time(vcpu, &vcpu->arch.hcall);
+
+ if (WARN_ON_ONCE(vcpu->arch.shregs.msr & MSR_PR)) {
+ /*
+ * These should have been caught and reflected
+ * into the guest by now. Final sanity check:
+ * don't allow userspace to execute hcalls in
+ * the hypervisor.
+ */
+ r = RESUME_GUEST;
+ continue;
+ }
trace_kvm_hcall_enter(vcpu);
r = kvmppc_pseries_do_hcall(vcpu);
trace_kvm_hcall_exit(vcpu, r);
kvmppc_core_prepare_to_enter(vcpu);
} else if (r == RESUME_PAGE_FAULT) {
+ accumulate_time(vcpu, &vcpu->arch.pg_fault);
srcu_idx = srcu_read_lock(&kvm->srcu);
- r = kvmppc_book3s_hv_page_fault(run, vcpu,
+ r = kvmppc_book3s_hv_page_fault(vcpu,
vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
srcu_read_unlock(&kvm->srcu, srcu_idx);
} else if (r == RESUME_PASSTHROUGH) {
@@ -4312,19 +4875,15 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
r = kvmppc_xics_rm_complete(vcpu, 0);
}
} while (is_kvmppc_resume_guest(r));
-
- /* Restore userspace EBB and other register values */
- if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
- mtspr(SPRN_EBBHR, ebb_regs[0]);
- mtspr(SPRN_EBBRR, ebb_regs[1]);
- mtspr(SPRN_BESCR, ebb_regs[2]);
- mtspr(SPRN_TAR, user_tar);
- mtspr(SPRN_FSCR, current->thread.fscr);
- }
- mtspr(SPRN_VRSAVE, user_vrsave);
+ accumulate_time(vcpu, &vcpu->arch.vcpu_exit);
vcpu->arch.state = KVMPPC_VCPU_NOTREADY;
atomic_dec(&kvm->arch.vcpus_running);
+
+ srr_regs_clobbered();
+
+ end_timing(vcpu);
+
return r;
}
@@ -4386,8 +4945,8 @@ static int kvm_vm_ioctl_get_dirty_log_hv(struct kvm *kvm,
{
struct kvm_memslots *slots;
struct kvm_memory_slot *memslot;
- int i, r;
- unsigned long n;
+ int r;
+ unsigned long n, i;
unsigned long *buf, *p;
struct kvm_vcpu *vcpu;
@@ -4400,7 +4959,7 @@ static int kvm_vm_ioctl_get_dirty_log_hv(struct kvm *kvm,
slots = kvm_memslots(kvm);
memslot = id_to_memslot(slots, log->slot);
r = -ENOENT;
- if (!memslot->dirty_bitmap)
+ if (!memslot || !memslot->dirty_bitmap)
goto out;
/*
@@ -4447,47 +5006,45 @@ out:
return r;
}
-static void kvmppc_core_free_memslot_hv(struct kvm_memory_slot *free,
- struct kvm_memory_slot *dont)
+static void kvmppc_core_free_memslot_hv(struct kvm_memory_slot *slot)
{
- if (!dont || free->arch.rmap != dont->arch.rmap) {
- vfree(free->arch.rmap);
- free->arch.rmap = NULL;
- }
+ vfree(slot->arch.rmap);
+ slot->arch.rmap = NULL;
}
-static int kvmppc_core_create_memslot_hv(struct kvm_memory_slot *slot,
- unsigned long npages)
+static int kvmppc_core_prepare_memory_region_hv(struct kvm *kvm,
+ const struct kvm_memory_slot *old,
+ struct kvm_memory_slot *new,
+ enum kvm_mr_change change)
{
- slot->arch.rmap = vzalloc(array_size(npages, sizeof(*slot->arch.rmap)));
- if (!slot->arch.rmap)
- return -ENOMEM;
+ if (change == KVM_MR_CREATE) {
+ unsigned long size = array_size(new->npages, sizeof(*new->arch.rmap));
- return 0;
-}
+ if ((size >> PAGE_SHIFT) > totalram_pages())
+ return -ENOMEM;
+
+ new->arch.rmap = vzalloc(size);
+ if (!new->arch.rmap)
+ return -ENOMEM;
+ } else if (change != KVM_MR_DELETE) {
+ new->arch.rmap = old->arch.rmap;
+ }
-static int kvmppc_core_prepare_memory_region_hv(struct kvm *kvm,
- struct kvm_memory_slot *memslot,
- const struct kvm_userspace_memory_region *mem)
-{
return 0;
}
static void kvmppc_core_commit_memory_region_hv(struct kvm *kvm,
- const struct kvm_userspace_memory_region *mem,
- const struct kvm_memory_slot *old,
+ struct kvm_memory_slot *old,
const struct kvm_memory_slot *new,
enum kvm_mr_change change)
{
- unsigned long npages = mem->memory_size >> PAGE_SHIFT;
-
/*
- * If we are making a new memslot, it might make
+ * If we are creating or modifying a memslot, it might make
* some address that was previously cached as emulated
* MMIO be no longer emulated MMIO, so invalidate
* all the caches of emulated MMIO translations.
*/
- if (npages)
+ if (change != KVM_MR_DELETE)
atomic64_inc(&kvm->arch.mmio_update);
/*
@@ -4514,16 +5071,14 @@ static void kvmppc_core_commit_memory_region_hv(struct kvm *kvm,
switch (change) {
case KVM_MR_CREATE:
- if (kvmppc_uvmem_slot_init(kvm, new))
- return;
- uv_register_mem_slot(kvm->arch.lpid,
- new->base_gfn << PAGE_SHIFT,
- new->npages * PAGE_SIZE,
- 0, new->id);
+ /*
+ * @TODO kvmppc_uvmem_memslot_create() can fail and
+ * return error. Fix this.
+ */
+ kvmppc_uvmem_memslot_create(kvm, new);
break;
case KVM_MR_DELETE:
- uv_unregister_mem_slot(kvm->arch.lpid, old->id);
- kvmppc_uvmem_slot_free(kvm, old);
+ kvmppc_uvmem_memslot_delete(kvm, old);
break;
default:
/* TODO: Handle KVM_MR_MOVE */
@@ -4550,19 +5105,16 @@ void kvmppc_update_lpcr(struct kvm *kvm, unsigned long lpcr, unsigned long mask)
struct kvmppc_vcore *vc = kvm->arch.vcores[i];
if (!vc)
continue;
+
spin_lock(&vc->lock);
vc->lpcr = (vc->lpcr & ~mask) | lpcr;
+ verify_lpcr(kvm, vc->lpcr);
spin_unlock(&vc->lock);
if (++cores_done >= kvm->arch.online_vcores)
break;
}
}
-static void kvmppc_mmu_destroy_hv(struct kvm_vcpu *vcpu)
-{
- return;
-}
-
void kvmppc_setup_partition_table(struct kvm *kvm)
{
unsigned long dw0, dw1;
@@ -4630,14 +5182,14 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
/* Look up the VMA for the start of this memory slot */
hva = memslot->userspace_addr;
- down_read(&kvm->mm->mmap_sem);
- vma = find_vma(kvm->mm, hva);
- if (!vma || vma->vm_start > hva || (vma->vm_flags & VM_IO))
+ mmap_read_lock(kvm->mm);
+ vma = vma_lookup(kvm->mm, hva);
+ if (!vma || (vma->vm_flags & VM_IO))
goto up_out;
psize = vma_kernel_pagesize(vma);
- up_read(&kvm->mm->mmap_sem);
+ mmap_read_unlock(kvm->mm);
/* We can handle 4k, 64k or 16M pages in the VRMA */
if (psize >= 0x1000000)
@@ -4670,7 +5222,7 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
return err;
up_out:
- up_read(&kvm->mm->mmap_sem);
+ mmap_read_unlock(kvm->mm);
goto out_srcu;
}
@@ -4680,17 +5232,24 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
*/
int kvmppc_switch_mmu_to_hpt(struct kvm *kvm)
{
+ unsigned long lpcr, lpcr_mask;
+
if (nesting_enabled(kvm))
kvmhv_release_all_nested(kvm);
kvmppc_rmap_reset(kvm);
kvm->arch.process_table = 0;
- /* Mutual exclusion with kvm_unmap_hva_range etc. */
+ /* Mutual exclusion with kvm_unmap_gfn_range etc. */
spin_lock(&kvm->mmu_lock);
kvm->arch.radix = 0;
spin_unlock(&kvm->mmu_lock);
kvmppc_free_radix(kvm);
- kvmppc_update_lpcr(kvm, LPCR_VPM1,
- LPCR_VPM1 | LPCR_UPRT | LPCR_GTSE | LPCR_HR);
+
+ lpcr = LPCR_VPM1;
+ lpcr_mask = LPCR_VPM1 | LPCR_UPRT | LPCR_GTSE | LPCR_HR;
+ if (cpu_has_feature(CPU_FTR_ARCH_31))
+ lpcr_mask |= LPCR_HAIL;
+ kvmppc_update_lpcr(kvm, lpcr, lpcr_mask);
+
return 0;
}
@@ -4700,19 +5259,29 @@ int kvmppc_switch_mmu_to_hpt(struct kvm *kvm)
*/
int kvmppc_switch_mmu_to_radix(struct kvm *kvm)
{
+ unsigned long lpcr, lpcr_mask;
int err;
err = kvmppc_init_vm_radix(kvm);
if (err)
return err;
kvmppc_rmap_reset(kvm);
- /* Mutual exclusion with kvm_unmap_hva_range etc. */
+ /* Mutual exclusion with kvm_unmap_gfn_range etc. */
spin_lock(&kvm->mmu_lock);
kvm->arch.radix = 1;
spin_unlock(&kvm->mmu_lock);
kvmppc_free_hpt(&kvm->arch.hpt);
- kvmppc_update_lpcr(kvm, LPCR_UPRT | LPCR_GTSE | LPCR_HR,
- LPCR_VPM1 | LPCR_UPRT | LPCR_GTSE | LPCR_HR);
+
+ lpcr = LPCR_UPRT | LPCR_GTSE | LPCR_HR;
+ lpcr_mask = LPCR_VPM1 | LPCR_UPRT | LPCR_GTSE | LPCR_HR;
+ if (cpu_has_feature(CPU_FTR_ARCH_31)) {
+ lpcr_mask |= LPCR_HAIL;
+ if (cpu_has_feature(CPU_FTR_HVMODE) &&
+ (kvm->arch.host_lpcr & LPCR_HAIL))
+ lpcr |= LPCR_HAIL;
+ }
+ kvmppc_update_lpcr(kvm, lpcr, lpcr_mask);
+
return 0;
}
@@ -4734,6 +5303,9 @@ void kvmppc_alloc_host_rm_ops(void)
int cpu, core;
int size;
+ if (cpu_has_feature(CPU_FTR_ARCH_300))
+ return;
+
/* Not the first time here ? */
if (kvmppc_host_rm_ops_hv != NULL)
return;
@@ -4799,7 +5371,6 @@ void kvmppc_free_host_rm_ops(void)
static int kvmppc_core_init_vm_hv(struct kvm *kvm)
{
unsigned long lpcr, lpid;
- char buf[32];
int ret;
mutex_init(&kvm->arch.uvmem_lock);
@@ -4840,6 +5411,10 @@ static int kvmppc_core_init_vm_hv(struct kvm *kvm)
kvm->arch.host_lpcr = lpcr = mfspr(SPRN_LPCR);
lpcr &= LPCR_PECE | LPCR_LPES;
} else {
+ /*
+ * The L2 LPES mode will be set by the L0 according to whether
+ * or not it needs to take external interrupts in HV mode.
+ */
lpcr = 0;
}
lpcr |= (4UL << LPCR_DPFD_SH) | LPCR_HDICE |
@@ -4876,6 +5451,10 @@ static int kvmppc_core_init_vm_hv(struct kvm *kvm)
kvm->arch.mmu_ready = 1;
lpcr &= ~LPCR_VPM1;
lpcr |= LPCR_UPRT | LPCR_GTSE | LPCR_HR;
+ if (cpu_has_feature(CPU_FTR_HVMODE) &&
+ cpu_has_feature(CPU_FTR_ARCH_31) &&
+ (kvm->arch.host_lpcr & LPCR_HAIL))
+ lpcr |= LPCR_HAIL;
ret = kvmppc_init_vm_radix(kvm);
if (ret) {
kvmppc_free_lpid(kvm->arch.lpid);
@@ -4884,6 +5463,7 @@ static int kvmppc_core_init_vm_hv(struct kvm *kvm)
kvmppc_setup_partition_table(kvm);
}
+ verify_lpcr(kvm, lpcr);
kvm->arch.lpcr = lpcr;
/* Initialization for future HPT resizes */
@@ -4893,7 +5473,12 @@ static int kvmppc_core_init_vm_hv(struct kvm *kvm)
* Work out how many sets the TLB has, for the use of
* the TLB invalidation loop in book3s_hv_rmhandlers.S.
*/
- if (radix_enabled())
+ if (cpu_has_feature(CPU_FTR_ARCH_31)) {
+ /*
+ * P10 will flush all the congruence classes with a single tlbiel
+ */
+ kvm->arch.tlb_sets = 1;
+ } else if (radix_enabled())
kvm->arch.tlb_sets = POWER9_TLB_SETS_RADIX; /* 128 */
else if (cpu_has_feature(CPU_FTR_ARCH_300))
kvm->arch.tlb_sets = POWER9_TLB_SETS_HASH; /* 256 */
@@ -4905,18 +5490,8 @@ static int kvmppc_core_init_vm_hv(struct kvm *kvm)
/*
* Track that we now have a HV mode VM active. This blocks secondary
* CPU threads from coming online.
- * On POWER9, we only need to do this if the "indep_threads_mode"
- * module parameter has been set to N.
*/
- if (cpu_has_feature(CPU_FTR_ARCH_300)) {
- if (!indep_threads_mode && !cpu_has_feature(CPU_FTR_HVMODE)) {
- pr_warn("KVM: Ignoring indep_threads_mode=N in nested hypervisor\n");
- kvm->arch.threads_indep = true;
- } else {
- kvm->arch.threads_indep = indep_threads_mode;
- }
- }
- if (!kvm->arch.threads_indep)
+ if (!cpu_has_feature(CPU_FTR_ARCH_300))
kvm_hv_vm_activated();
/*
@@ -4932,15 +5507,14 @@ static int kvmppc_core_init_vm_hv(struct kvm *kvm)
kvm->arch.smt_mode = 1;
kvm->arch.emul_smt_mode = 1;
- /*
- * Create a debugfs directory for the VM
- */
- snprintf(buf, sizeof(buf), "vm%d", current->pid);
- kvm->arch.debugfs_dir = debugfs_create_dir(buf, kvm_debugfs_dir);
+ return 0;
+}
+
+static int kvmppc_arch_create_vm_debugfs_hv(struct kvm *kvm)
+{
kvmppc_mmu_debugfs_init(kvm);
if (radix_enabled())
kvmhv_radix_debugfs_init(kvm);
-
return 0;
}
@@ -4955,9 +5529,7 @@ static void kvmppc_free_vcores(struct kvm *kvm)
static void kvmppc_core_destroy_vm_hv(struct kvm *kvm)
{
- debugfs_remove_recursive(kvm->arch.debugfs_dir);
-
- if (!kvm->arch.threads_indep)
+ if (!cpu_has_feature(CPU_FTR_ARCH_300))
kvm_hv_vm_deactivated();
kvmppc_free_vcores(kvm);
@@ -4984,7 +5556,7 @@ static void kvmppc_core_destroy_vm_hv(struct kvm *kvm)
}
/* We don't need to emulate any privileged instructions or dcbz */
-static int kvmppc_core_emulate_op_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
+static int kvmppc_core_emulate_op_hv(struct kvm_vcpu *vcpu,
unsigned int inst, int *advance)
{
return EMULATE_FAIL;
@@ -5034,6 +5606,7 @@ static int kvmppc_set_passthru_irq(struct kvm *kvm, int host_irq, int guest_gsi)
struct kvmppc_passthru_irqmap *pimap;
struct irq_chip *chip;
int i, rc = 0;
+ struct irq_data *host_data;
if (!kvm_irq_bypass)
return 1;
@@ -5061,7 +5634,7 @@ static int kvmppc_set_passthru_irq(struct kvm *kvm, int host_irq, int guest_gsi)
* what our real-mode EOI code does, or a XIVE interrupt
*/
chip = irq_data_get_irq_chip(&desc->irq_data);
- if (!chip || !(is_pnv_opal_msi(chip) || is_xive_irq(chip))) {
+ if (!chip || !is_pnv_opal_msi(chip)) {
pr_warn("kvmppc_set_passthru_irq_hv: Could not assign IRQ map for (%d,%d)\n",
host_irq, guest_gsi);
mutex_unlock(&kvm->lock);
@@ -5098,15 +5671,22 @@ static int kvmppc_set_passthru_irq(struct kvm *kvm, int host_irq, int guest_gsi)
* the KVM real mode handler.
*/
smp_wmb();
- irq_map->r_hwirq = desc->irq_data.hwirq;
+
+ /*
+ * The 'host_irq' number is mapped in the PCI-MSI domain but
+ * the underlying calls, which will EOI the interrupt in real
+ * mode, need an HW IRQ number mapped in the XICS IRQ domain.
+ */
+ host_data = irq_domain_get_irq_data(irq_get_default_host(), host_irq);
+ irq_map->r_hwirq = (unsigned int)irqd_to_hwirq(host_data);
if (i == pimap->n_mapped)
pimap->n_mapped++;
if (xics_on_xive())
- rc = kvmppc_xive_set_mapped(kvm, guest_gsi, desc);
+ rc = kvmppc_xive_set_mapped(kvm, guest_gsi, host_irq);
else
- kvmppc_xics_set_mapped(kvm, guest_gsi, desc->irq_data.hwirq);
+ kvmppc_xics_set_mapped(kvm, guest_gsi, irq_map->r_hwirq);
if (rc)
irq_map->r_hwirq = 0;
@@ -5145,11 +5725,11 @@ static int kvmppc_clr_passthru_irq(struct kvm *kvm, int host_irq, int guest_gsi)
}
if (xics_on_xive())
- rc = kvmppc_xive_clr_mapped(kvm, guest_gsi, pimap->mapped[i].desc);
+ rc = kvmppc_xive_clr_mapped(kvm, guest_gsi, host_irq);
else
kvmppc_xics_clr_mapped(kvm, guest_gsi, pimap->mapped[i].r_hwirq);
- /* invalidate the entry (what do do on error from the above ?) */
+ /* invalidate the entry (what to do on error from the above ?) */
pimap->mapped[i].r_hwirq = 0;
/*
@@ -5211,6 +5791,12 @@ static long kvm_arch_vm_ioctl_hv(struct file *filp,
case KVM_PPC_ALLOCATE_HTAB: {
u32 htab_order;
+ /* If we're a nested hypervisor, we currently only support radix */
+ if (kvmhv_on_pseries()) {
+ r = -EOPNOTSUPP;
+ break;
+ }
+
r = -EFAULT;
if (get_user(htab_order, (u32 __user *)argp))
break;
@@ -5272,8 +5858,10 @@ static unsigned int default_hcall_list[] = {
H_READ,
H_PROTECT,
H_BULK_REMOVE,
+#ifdef CONFIG_SPAPR_TCE_IOMMU
H_GET_TCE,
H_PUT_TCE,
+#endif
H_SET_DABR,
H_SET_XDABR,
H_CEDE,
@@ -5370,7 +5958,9 @@ static int kvmhv_enable_nested(struct kvm *kvm)
{
if (!nested)
return -EPERM;
- if (!cpu_has_feature(CPU_FTR_ARCH_300) || no_mixing_hpt_and_radix)
+ if (!cpu_has_feature(CPU_FTR_ARCH_300))
+ return -ENODEV;
+ if (!radix_enabled())
return -ENODEV;
/* kvm == NULL means the caller is testing if the capability exists */
@@ -5427,6 +6017,21 @@ static void unpin_vpa_reset(struct kvm *kvm, struct kvmppc_vpa *vpa)
}
/*
+ * Enable a guest to become a secure VM, or test whether
+ * that could be enabled.
+ * Called when the KVM_CAP_PPC_SECURE_GUEST capability is
+ * tested (kvm == NULL) or enabled (kvm != NULL).
+ */
+static int kvmhv_enable_svm(struct kvm *kvm)
+{
+ if (!kvmppc_uvmem_available())
+ return -EINVAL;
+ if (kvm)
+ kvm->arch.svm_enabled = 1;
+ return 0;
+}
+
+/*
* IOCTL handler to turn off secure mode of guest
*
* - Release all device pages
@@ -5440,7 +6045,7 @@ static int kvmhv_svm_off(struct kvm *kvm)
int mmu_was_ready;
int srcu_idx;
int ret = 0;
- int i;
+ unsigned long i;
if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START))
return ret;
@@ -5462,11 +6067,12 @@ static int kvmhv_svm_off(struct kvm *kvm)
for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
struct kvm_memory_slot *memslot;
struct kvm_memslots *slots = __kvm_memslots(kvm, i);
+ int bkt;
if (!slots)
continue;
- kvm_for_each_memslot(memslot, slots) {
+ kvm_for_each_memslot(memslot, bkt, slots) {
kvmppc_uvmem_drop_pages(memslot, kvm, true);
uv_unregister_mem_slot(kvm->arch.lpid, memslot->id);
}
@@ -5505,6 +6111,40 @@ out:
return ret;
}
+static int kvmhv_enable_dawr1(struct kvm *kvm)
+{
+ if (!cpu_has_feature(CPU_FTR_DAWR1))
+ return -ENODEV;
+
+ /* kvm == NULL means the caller is testing if the capability exists */
+ if (kvm)
+ kvm->arch.dawr1_enabled = true;
+ return 0;
+}
+
+static bool kvmppc_hash_v3_possible(void)
+{
+ if (!cpu_has_feature(CPU_FTR_ARCH_300))
+ return false;
+
+ if (!cpu_has_feature(CPU_FTR_HVMODE))
+ return false;
+
+ /*
+ * POWER9 chips before version 2.02 can't have some threads in
+ * HPT mode and some in radix mode on the same core.
+ */
+ if (radix_enabled()) {
+ unsigned int pvr = mfspr(SPRN_PVR);
+ if ((pvr >> 16) == PVR_POWER9 &&
+ (((pvr & 0xe000) == 0 && (pvr & 0xfff) < 0x202) ||
+ ((pvr & 0xe000) == 0x2000 && (pvr & 0xfff) < 0x101)))
+ return false;
+ }
+
+ return true;
+}
+
static struct kvmppc_ops kvm_ops_hv = {
.get_sregs = kvm_arch_vcpu_ioctl_get_sregs_hv,
.set_sregs = kvm_arch_vcpu_ioctl_set_sregs_hv,
@@ -5522,13 +6162,11 @@ static struct kvmppc_ops kvm_ops_hv = {
.flush_memslot = kvmppc_core_flush_memslot_hv,
.prepare_memory_region = kvmppc_core_prepare_memory_region_hv,
.commit_memory_region = kvmppc_core_commit_memory_region_hv,
- .unmap_hva_range = kvm_unmap_hva_range_hv,
- .age_hva = kvm_age_hva_hv,
- .test_age_hva = kvm_test_age_hva_hv,
- .set_spte_hva = kvm_set_spte_hva_hv,
- .mmu_destroy = kvmppc_mmu_destroy_hv,
+ .unmap_gfn_range = kvm_unmap_gfn_range_hv,
+ .age_gfn = kvm_age_gfn_hv,
+ .test_age_gfn = kvm_test_age_gfn_hv,
+ .set_spte_gfn = kvm_set_spte_gfn_hv,
.free_memslot = kvmppc_core_free_memslot_hv,
- .create_memslot = kvmppc_core_create_memslot_hv,
.init_vm = kvmppc_core_init_vm_hv,
.destroy_vm = kvmppc_core_destroy_vm_hv,
.get_smmu_info = kvm_vm_ioctl_get_smmu_info_hv,
@@ -5548,7 +6186,12 @@ static struct kvmppc_ops kvm_ops_hv = {
.enable_nested = kvmhv_enable_nested,
.load_from_eaddr = kvmhv_load_from_eaddr,
.store_to_eaddr = kvmhv_store_to_eaddr,
+ .enable_svm = kvmhv_enable_svm,
.svm_off = kvmhv_svm_off,
+ .enable_dawr1 = kvmhv_enable_dawr1,
+ .hash_v3_possible = kvmppc_hash_v3_possible,
+ .create_vcpu_debugfs = kvmppc_arch_create_vcpu_debugfs_hv,
+ .create_vm_debugfs = kvmppc_arch_create_vm_debugfs_hv,
};
static int kvm_init_subcore_bitmap(void)
@@ -5607,9 +6250,11 @@ static int kvmppc_book3s_init_hv(void)
if (r)
return r;
- r = kvm_init_subcore_bitmap();
- if (r)
- return r;
+ if (!cpu_has_feature(CPU_FTR_ARCH_300)) {
+ r = kvm_init_subcore_bitmap();
+ if (r)
+ goto err;
+ }
/*
* We need a way of accessing the XICS interrupt controller,
@@ -5624,42 +6269,42 @@ static int kvmppc_book3s_init_hv(void)
np = of_find_compatible_node(NULL, NULL, "ibm,opal-intc");
if (!np) {
pr_err("KVM-HV: Cannot determine method for accessing XICS\n");
- return -ENODEV;
+ r = -ENODEV;
+ goto err;
}
/* presence of intc confirmed - node can be dropped again */
of_node_put(np);
}
#endif
- kvm_ops_hv.owner = THIS_MODULE;
- kvmppc_hv_ops = &kvm_ops_hv;
-
init_default_hcalls();
init_vcore_lists();
r = kvmppc_mmu_hv_init();
if (r)
- return r;
+ goto err;
- if (kvmppc_radix_possible())
+ if (kvmppc_radix_possible()) {
r = kvmppc_radix_init();
-
- /*
- * POWER9 chips before version 2.02 can't have some threads in
- * HPT mode and some in radix mode on the same core.
- */
- if (cpu_has_feature(CPU_FTR_ARCH_300)) {
- unsigned int pvr = mfspr(SPRN_PVR);
- if ((pvr >> 16) == PVR_POWER9 &&
- (((pvr & 0xe000) == 0 && (pvr & 0xfff) < 0x202) ||
- ((pvr & 0xe000) == 0x2000 && (pvr & 0xfff) < 0x101)))
- no_mixing_hpt_and_radix = true;
+ if (r)
+ goto err;
}
r = kvmppc_uvmem_init();
- if (r < 0)
+ if (r < 0) {
pr_err("KVM-HV: kvmppc_uvmem_init failed %d\n", r);
+ return r;
+ }
+
+ kvm_ops_hv.owner = THIS_MODULE;
+ kvmppc_hv_ops = &kvm_ops_hv;
+
+ return 0;
+
+err:
+ kvmhv_nested_exit();
+ kvmppc_radix_exit();
return r;
}
diff --git a/arch/powerpc/kvm/book3s_hv.h b/arch/powerpc/kvm/book3s_hv.h
new file mode 100644
index 000000000000..2f2e59d7d433
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_hv.h
@@ -0,0 +1,52 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+/*
+ * Privileged (non-hypervisor) host registers to save.
+ */
+struct p9_host_os_sprs {
+ unsigned long iamr;
+ unsigned long amr;
+
+ unsigned int pmc1;
+ unsigned int pmc2;
+ unsigned int pmc3;
+ unsigned int pmc4;
+ unsigned int pmc5;
+ unsigned int pmc6;
+ unsigned long mmcr0;
+ unsigned long mmcr1;
+ unsigned long mmcr2;
+ unsigned long mmcr3;
+ unsigned long mmcra;
+ unsigned long siar;
+ unsigned long sier1;
+ unsigned long sier2;
+ unsigned long sier3;
+ unsigned long sdar;
+};
+
+static inline bool nesting_enabled(struct kvm *kvm)
+{
+ return kvm->arch.nested_enable && kvm_is_radix(kvm);
+}
+
+bool load_vcpu_state(struct kvm_vcpu *vcpu,
+ struct p9_host_os_sprs *host_os_sprs);
+void store_vcpu_state(struct kvm_vcpu *vcpu);
+void save_p9_host_os_sprs(struct p9_host_os_sprs *host_os_sprs);
+void restore_p9_host_os_sprs(struct kvm_vcpu *vcpu,
+ struct p9_host_os_sprs *host_os_sprs);
+void switch_pmu_to_guest(struct kvm_vcpu *vcpu,
+ struct p9_host_os_sprs *host_os_sprs);
+void switch_pmu_to_host(struct kvm_vcpu *vcpu,
+ struct p9_host_os_sprs *host_os_sprs);
+
+#ifdef CONFIG_KVM_BOOK3S_HV_P9_TIMING
+void accumulate_time(struct kvm_vcpu *vcpu, struct kvmhv_tb_accumulator *next);
+#define start_timing(vcpu, next) accumulate_time(vcpu, next)
+#define end_timing(vcpu) accumulate_time(vcpu, NULL)
+#else
+#define accumulate_time(vcpu, next) do {} while (0)
+#define start_timing(vcpu, next) do {} while (0)
+#define end_timing(vcpu) do {} while (0)
+#endif
diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c
index 7cd3cf3d366b..da85f046377a 100644
--- a/arch/powerpc/kvm/book3s_hv_builtin.c
+++ b/arch/powerpc/kvm/book3s_hv_builtin.c
@@ -15,11 +15,11 @@
#include <linux/cma.h>
#include <linux/bitops.h>
-#include <asm/asm-prototypes.h>
#include <asm/cputable.h>
+#include <asm/interrupt.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
-#include <asm/archrandom.h>
+#include <asm/machdep.h>
#include <asm/xics.h>
#include <asm/xive.h>
#include <asm/dbell.h>
@@ -34,21 +34,6 @@
#include "book3s_xive.h"
/*
- * The XIVE module will populate these when it loads
- */
-unsigned long (*__xive_vm_h_xirr)(struct kvm_vcpu *vcpu);
-unsigned long (*__xive_vm_h_ipoll)(struct kvm_vcpu *vcpu, unsigned long server);
-int (*__xive_vm_h_ipi)(struct kvm_vcpu *vcpu, unsigned long server,
- unsigned long mfrr);
-int (*__xive_vm_h_cppr)(struct kvm_vcpu *vcpu, unsigned long cppr);
-int (*__xive_vm_h_eoi)(struct kvm_vcpu *vcpu, unsigned long xirr);
-EXPORT_SYMBOL_GPL(__xive_vm_h_xirr);
-EXPORT_SYMBOL_GPL(__xive_vm_h_ipoll);
-EXPORT_SYMBOL_GPL(__xive_vm_h_ipi);
-EXPORT_SYMBOL_GPL(__xive_vm_h_cppr);
-EXPORT_SYMBOL_GPL(__xive_vm_h_eoi);
-
-/*
* Hash page table alignment on newer cpus(CPU_FTR_ARCH_206)
* should be power of 2.
*/
@@ -95,25 +80,17 @@ EXPORT_SYMBOL_GPL(kvm_free_hpt_cma);
void __init kvm_cma_reserve(void)
{
unsigned long align_size;
- struct memblock_region *reg;
- phys_addr_t selected_size = 0;
+ phys_addr_t selected_size;
/*
* We need CMA reservation only when we are in HV mode
*/
if (!cpu_has_feature(CPU_FTR_HVMODE))
return;
- /*
- * We cannot use memblock_phys_mem_size() here, because
- * memblock_analyze() has not been called yet.
- */
- for_each_memblock(memory, reg)
- selected_size += memblock_region_memory_end_pfn(reg) -
- memblock_region_memory_base_pfn(reg);
- selected_size = (selected_size * kvm_cma_resv_ratio / 100) << PAGE_SHIFT;
+ selected_size = PAGE_ALIGN(memblock_phys_mem_size() * kvm_cma_resv_ratio / 100);
if (selected_size) {
- pr_debug("%s: reserving %ld MiB for global area\n", __func__,
+ pr_info("%s: reserving %ld MiB for global area\n", __func__,
(unsigned long)selected_size / SZ_1M);
align_size = HPT_ALIGN_PAGES << PAGE_SHIFT;
cma_declare_contiguous(0, selected_size, 0, align_size,
@@ -159,23 +136,23 @@ long int kvmppc_rm_h_confer(struct kvm_vcpu *vcpu, int target,
* exist in the system. We use a counter of VMs to track this.
*
* One of the operations we need to block is onlining of secondaries, so we
- * protect hv_vm_count with get/put_online_cpus().
+ * protect hv_vm_count with cpus_read_lock/unlock().
*/
static atomic_t hv_vm_count;
void kvm_hv_vm_activated(void)
{
- get_online_cpus();
+ cpus_read_lock();
atomic_inc(&hv_vm_count);
- put_online_cpus();
+ cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(kvm_hv_vm_activated);
void kvm_hv_vm_deactivated(void)
{
- get_online_cpus();
+ cpus_read_lock();
atomic_dec(&hv_vm_count);
- put_online_cpus();
+ cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(kvm_hv_vm_deactivated);
@@ -199,20 +176,14 @@ EXPORT_SYMBOL_GPL(kvmppc_hcall_impl_hv_realmode);
int kvmppc_hwrng_present(void)
{
- return powernv_hwrng_present();
+ return ppc_md.get_random_seed != NULL;
}
EXPORT_SYMBOL_GPL(kvmppc_hwrng_present);
-long kvmppc_h_random(struct kvm_vcpu *vcpu)
+long kvmppc_rm_h_random(struct kvm_vcpu *vcpu)
{
- int r;
-
- /* Only need to do the expensive mfmsr() on radix */
- if (kvm_is_radix(vcpu->kvm) && (mfmsr() & MSR_IR))
- r = powernv_get_random_long(&vcpu->arch.regs.gpr[4]);
- else
- r = powernv_get_random_real_mode(&vcpu->arch.regs.gpr[4]);
- if (r)
+ if (ppc_md.get_random_seed &&
+ ppc_md.get_random_seed(&vcpu->arch.regs.gpr[4]))
return H_SUCCESS;
return H_HARDWARE;
@@ -228,15 +199,6 @@ void kvmhv_rm_send_ipi(int cpu)
void __iomem *xics_phys;
unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);
- /* For a nested hypervisor, use the XICS via hcall */
- if (kvmhv_on_pseries()) {
- unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
-
- plpar_hcall_raw(H_IPI, retbuf, get_hard_smp_processor_id(cpu),
- IPI_PRIORITY);
- return;
- }
-
/* On POWER9 we can use msgsnd for any destination cpu. */
if (cpu_has_feature(CPU_FTR_ARCH_300)) {
msg |= get_hard_smp_processor_id(cpu);
@@ -285,8 +247,7 @@ void kvmhv_commence_exit(int trap)
struct kvmppc_vcore *vc = local_paca->kvm_hstate.kvm_vcore;
int ptid = local_paca->kvm_hstate.ptid;
struct kvm_split_mode *sip = local_paca->kvm_hstate.kvm_split_mode;
- int me, ee, i, t;
- int cpu0;
+ int me, ee, i;
/* Set our bit in the threads-exiting-guest map in the 0xff00
bits of vcore->entry_exit_map */
@@ -328,22 +289,6 @@ void kvmhv_commence_exit(int trap)
if ((ee >> 8) == 0)
kvmhv_interrupt_vcore(vc, ee);
}
-
- /*
- * On POWER9 when running a HPT guest on a radix host (sip != NULL),
- * we have to interrupt inactive CPU threads to get them to
- * restore the host LPCR value.
- */
- if (sip->lpcr_req) {
- if (cmpxchg(&sip->do_restore, 0, 1) == 0) {
- vc = local_paca->kvm_hstate.kvm_vcore;
- cpu0 = vc->pcpu + ptid - local_paca->kvm_hstate.tid;
- for (t = 1; t < threads_per_core; ++t) {
- if (sip->napped[t])
- kvmhv_rm_send_ipi(cpu0 + t);
- }
- }
- }
}
struct kvmppc_host_rm_ops *kvmppc_host_rm_ops_hv;
@@ -466,19 +411,12 @@ static long kvmppc_read_one_intr(bool *again)
return 1;
/* Now read the interrupt from the ICP */
- if (kvmhv_on_pseries()) {
- unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
-
- rc = plpar_hcall_raw(H_XIRR, retbuf, 0xFF);
- xirr = cpu_to_be32(retbuf[0]);
- } else {
- xics_phys = local_paca->kvm_hstate.xics_phys;
- rc = 0;
- if (!xics_phys)
- rc = opal_int_get_xirr(&xirr, false);
- else
- xirr = __raw_rm_readl(xics_phys + XICS_XIRR);
- }
+ xics_phys = local_paca->kvm_hstate.xics_phys;
+ rc = 0;
+ if (!xics_phys)
+ rc = opal_int_get_xirr(&xirr, false);
+ else
+ xirr = __raw_rm_readl(xics_phys + XICS_XIRR);
if (rc < 0)
return 1;
@@ -507,13 +445,7 @@ static long kvmppc_read_one_intr(bool *again)
*/
if (xisr == XICS_IPI) {
rc = 0;
- if (kvmhv_on_pseries()) {
- unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
-
- plpar_hcall_raw(H_IPI, retbuf,
- hard_smp_processor_id(), 0xff);
- plpar_hcall_raw(H_EOI, retbuf, h_xirr);
- } else if (xics_phys) {
+ if (xics_phys) {
__raw_rm_writeb(0xff, xics_phys + XICS_MFRR);
__raw_rm_writel(xirr, xics_phys + XICS_XIRR);
} else {
@@ -539,13 +471,7 @@ static long kvmppc_read_one_intr(bool *again)
/* We raced with the host,
* we need to resend that IPI, bummer
*/
- if (kvmhv_on_pseries()) {
- unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
-
- plpar_hcall_raw(H_IPI, retbuf,
- hard_smp_processor_id(),
- IPI_PRIORITY);
- } else if (xics_phys)
+ if (xics_phys)
__raw_rm_writeb(IPI_PRIORITY,
xics_phys + XICS_MFRR);
else
@@ -564,197 +490,6 @@ static long kvmppc_read_one_intr(bool *again)
return kvmppc_check_passthru(xisr, xirr, again);
}
-#ifdef CONFIG_KVM_XICS
-static inline bool is_rm(void)
-{
- return !(mfmsr() & MSR_DR);
-}
-
-unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu)
-{
- if (!kvmppc_xics_enabled(vcpu))
- return H_TOO_HARD;
- if (xics_on_xive()) {
- if (is_rm())
- return xive_rm_h_xirr(vcpu);
- if (unlikely(!__xive_vm_h_xirr))
- return H_NOT_AVAILABLE;
- return __xive_vm_h_xirr(vcpu);
- } else
- return xics_rm_h_xirr(vcpu);
-}
-
-unsigned long kvmppc_rm_h_xirr_x(struct kvm_vcpu *vcpu)
-{
- if (!kvmppc_xics_enabled(vcpu))
- return H_TOO_HARD;
- vcpu->arch.regs.gpr[5] = get_tb();
- if (xics_on_xive()) {
- if (is_rm())
- return xive_rm_h_xirr(vcpu);
- if (unlikely(!__xive_vm_h_xirr))
- return H_NOT_AVAILABLE;
- return __xive_vm_h_xirr(vcpu);
- } else
- return xics_rm_h_xirr(vcpu);
-}
-
-unsigned long kvmppc_rm_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server)
-{
- if (!kvmppc_xics_enabled(vcpu))
- return H_TOO_HARD;
- if (xics_on_xive()) {
- if (is_rm())
- return xive_rm_h_ipoll(vcpu, server);
- if (unlikely(!__xive_vm_h_ipoll))
- return H_NOT_AVAILABLE;
- return __xive_vm_h_ipoll(vcpu, server);
- } else
- return H_TOO_HARD;
-}
-
-int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
- unsigned long mfrr)
-{
- if (!kvmppc_xics_enabled(vcpu))
- return H_TOO_HARD;
- if (xics_on_xive()) {
- if (is_rm())
- return xive_rm_h_ipi(vcpu, server, mfrr);
- if (unlikely(!__xive_vm_h_ipi))
- return H_NOT_AVAILABLE;
- return __xive_vm_h_ipi(vcpu, server, mfrr);
- } else
- return xics_rm_h_ipi(vcpu, server, mfrr);
-}
-
-int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
-{
- if (!kvmppc_xics_enabled(vcpu))
- return H_TOO_HARD;
- if (xics_on_xive()) {
- if (is_rm())
- return xive_rm_h_cppr(vcpu, cppr);
- if (unlikely(!__xive_vm_h_cppr))
- return H_NOT_AVAILABLE;
- return __xive_vm_h_cppr(vcpu, cppr);
- } else
- return xics_rm_h_cppr(vcpu, cppr);
-}
-
-int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
-{
- if (!kvmppc_xics_enabled(vcpu))
- return H_TOO_HARD;
- if (xics_on_xive()) {
- if (is_rm())
- return xive_rm_h_eoi(vcpu, xirr);
- if (unlikely(!__xive_vm_h_eoi))
- return H_NOT_AVAILABLE;
- return __xive_vm_h_eoi(vcpu, xirr);
- } else
- return xics_rm_h_eoi(vcpu, xirr);
-}
-#endif /* CONFIG_KVM_XICS */
-
-void kvmppc_bad_interrupt(struct pt_regs *regs)
-{
- /*
- * 100 could happen at any time, 200 can happen due to invalid real
- * address access for example (or any time due to a hardware problem).
- */
- if (TRAP(regs) == 0x100) {
- get_paca()->in_nmi++;
- system_reset_exception(regs);
- get_paca()->in_nmi--;
- } else if (TRAP(regs) == 0x200) {
- machine_check_exception(regs);
- } else {
- die("Bad interrupt in KVM entry/exit code", regs, SIGABRT);
- }
- panic("Bad KVM trap");
-}
-
-/*
- * Functions used to switch LPCR HR and UPRT bits on all threads
- * when entering and exiting HPT guests on a radix host.
- */
-
-#define PHASE_REALMODE 1 /* in real mode */
-#define PHASE_SET_LPCR 2 /* have set LPCR */
-#define PHASE_OUT_OF_GUEST 4 /* have finished executing in guest */
-#define PHASE_RESET_LPCR 8 /* have reset LPCR to host value */
-
-#define ALL(p) (((p) << 24) | ((p) << 16) | ((p) << 8) | (p))
-
-static void wait_for_sync(struct kvm_split_mode *sip, int phase)
-{
- int thr = local_paca->kvm_hstate.tid;
-
- sip->lpcr_sync.phase[thr] |= phase;
- phase = ALL(phase);
- while ((sip->lpcr_sync.allphases & phase) != phase) {
- HMT_low();
- barrier();
- }
- HMT_medium();
-}
-
-void kvmhv_p9_set_lpcr(struct kvm_split_mode *sip)
-{
- unsigned long rb, set;
-
- /* wait for every other thread to get to real mode */
- wait_for_sync(sip, PHASE_REALMODE);
-
- /* Set LPCR and LPIDR */
- mtspr(SPRN_LPCR, sip->lpcr_req);
- mtspr(SPRN_LPID, sip->lpidr_req);
- isync();
-
- /* Invalidate the TLB on thread 0 */
- if (local_paca->kvm_hstate.tid == 0) {
- sip->do_set = 0;
- asm volatile("ptesync" : : : "memory");
- for (set = 0; set < POWER9_TLB_SETS_RADIX; ++set) {
- rb = TLBIEL_INVAL_SET_LPID +
- (set << TLBIEL_INVAL_SET_SHIFT);
- asm volatile(PPC_TLBIEL(%0, %1, 0, 0, 0) : :
- "r" (rb), "r" (0));
- }
- asm volatile("ptesync" : : : "memory");
- }
-
- /* indicate that we have done so and wait for others */
- wait_for_sync(sip, PHASE_SET_LPCR);
- /* order read of sip->lpcr_sync.allphases vs. sip->do_set */
- smp_rmb();
-}
-
-/*
- * Called when a thread that has been in the guest needs
- * to reload the host LPCR value - but only on POWER9 when
- * running a HPT guest on a radix host.
- */
-void kvmhv_p9_restore_lpcr(struct kvm_split_mode *sip)
-{
- /* we're out of the guest... */
- wait_for_sync(sip, PHASE_OUT_OF_GUEST);
-
- mtspr(SPRN_LPID, 0);
- mtspr(SPRN_LPCR, sip->host_lpcr);
- isync();
-
- if (local_paca->kvm_hstate.tid == 0) {
- sip->do_restore = 0;
- smp_wmb(); /* order store of do_restore vs. phase */
- }
-
- wait_for_sync(sip, PHASE_RESET_LPCR);
- smp_mb();
- local_paca->kvm_hstate.kvm_split_mode = NULL;
-}
-
static void kvmppc_end_cede(struct kvm_vcpu *vcpu)
{
vcpu->arch.ceded = 0;
@@ -766,6 +501,9 @@ static void kvmppc_end_cede(struct kvm_vcpu *vcpu)
void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr)
{
+ /* Guest must always run with ME enabled, HV disabled. */
+ msr = (msr | MSR_ME) & ~MSR_HV;
+
/*
* Check for illegal transactional state bit combination
* and if we find it, force the TS field to a safe state.
@@ -829,6 +567,8 @@ void kvmppc_guest_entry_inject_int(struct kvm_vcpu *vcpu)
int ext;
unsigned long lpcr;
+ WARN_ON_ONCE(cpu_has_feature(CPU_FTR_ARCH_300));
+
/* Insert EXTERNAL bit into LPCR at the MER bit position */
ext = (vcpu->arch.pending_exceptions >> BOOK3S_IRQPRIO_EXTERNAL) & 1;
lpcr = mfspr(SPRN_LPCR);
@@ -862,57 +602,23 @@ static void flush_guest_tlb(struct kvm *kvm)
unsigned long rb, set;
rb = PPC_BIT(52); /* IS = 2 */
- if (kvm_is_radix(kvm)) {
- /* R=1 PRS=1 RIC=2 */
+ for (set = 0; set < kvm->arch.tlb_sets; ++set) {
+ /* R=0 PRS=0 RIC=0 */
asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
- : : "r" (rb), "i" (1), "i" (1), "i" (2),
+ : : "r" (rb), "i" (0), "i" (0), "i" (0),
"r" (0) : "memory");
- for (set = 1; set < kvm->arch.tlb_sets; ++set) {
- rb += PPC_BIT(51); /* increment set number */
- /* R=1 PRS=1 RIC=0 */
- asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
- : : "r" (rb), "i" (1), "i" (1), "i" (0),
- "r" (0) : "memory");
- }
- asm volatile("ptesync": : :"memory");
- asm volatile(PPC_RADIX_INVALIDATE_ERAT_GUEST : : :"memory");
- } else {
- for (set = 0; set < kvm->arch.tlb_sets; ++set) {
- /* R=0 PRS=0 RIC=0 */
- asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
- : : "r" (rb), "i" (0), "i" (0), "i" (0),
- "r" (0) : "memory");
- rb += PPC_BIT(51); /* increment set number */
- }
- asm volatile("ptesync": : :"memory");
- asm volatile(PPC_ISA_3_0_INVALIDATE_ERAT : : :"memory");
+ rb += PPC_BIT(51); /* increment set number */
}
+ asm volatile("ptesync": : :"memory");
}
-void kvmppc_check_need_tlb_flush(struct kvm *kvm, int pcpu,
- struct kvm_nested_guest *nested)
+void kvmppc_check_need_tlb_flush(struct kvm *kvm, int pcpu)
{
- cpumask_t *need_tlb_flush;
-
- /*
- * On POWER9, individual threads can come in here, but the
- * TLB is shared between the 4 threads in a core, hence
- * invalidating on one thread invalidates for all.
- * Thus we make all 4 threads use the same bit.
- */
- if (cpu_has_feature(CPU_FTR_ARCH_300))
- pcpu = cpu_first_thread_sibling(pcpu);
-
- if (nested)
- need_tlb_flush = &nested->need_tlb_flush;
- else
- need_tlb_flush = &kvm->arch.need_tlb_flush;
-
- if (cpumask_test_cpu(pcpu, need_tlb_flush)) {
+ if (cpumask_test_cpu(pcpu, &kvm->arch.need_tlb_flush)) {
flush_guest_tlb(kvm);
/* Clear the bit after the TLB flush */
- cpumask_clear_cpu(pcpu, need_tlb_flush);
+ cpumask_clear_cpu(pcpu, &kvm->arch.need_tlb_flush);
}
}
EXPORT_SYMBOL_GPL(kvmppc_check_need_tlb_flush);
diff --git a/arch/powerpc/kvm/book3s_hv_hmi.c b/arch/powerpc/kvm/book3s_hv_hmi.c
index 9af660476314..1ec50c69678b 100644
--- a/arch/powerpc/kvm/book3s_hv_hmi.c
+++ b/arch/powerpc/kvm/book3s_hv_hmi.c
@@ -20,10 +20,15 @@ void wait_for_subcore_guest_exit(void)
/*
* NULL bitmap pointer indicates that KVM module hasn't
- * been loaded yet and hence no guests are running.
+ * been loaded yet and hence no guests are running, or running
+ * on a POWER9 or newer CPU.
+ *
* If no KVM is in use, no need to co-ordinate among threads
* as all of them will always be in host and no one is going
* to modify TB other than the opal hmi handler.
+ *
+ * POWER9 and newer don't need this synchronisation.
+ *
* Hence, just return from here.
*/
if (!local_paca->sibling_subcore_state)
diff --git a/arch/powerpc/kvm/book3s_hv_interrupts.S b/arch/powerpc/kvm/book3s_hv_interrupts.S
index 63fd81f3039d..59d89e4b154a 100644
--- a/arch/powerpc/kvm/book3s_hv_interrupts.S
+++ b/arch/powerpc/kvm/book3s_hv_interrupts.S
@@ -58,21 +58,19 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
/*
* Put whatever is in the decrementer into the
* hypervisor decrementer.
+ * Because of a hardware deviation in P8,
+ * we need to set LPCR[HDICE] before writing HDEC.
*/
-BEGIN_FTR_SECTION
ld r5, HSTATE_KVM_VCORE(r13)
ld r6, VCORE_KVM(r5)
ld r9, KVM_HOST_LPCR(r6)
- andis. r9, r9, LPCR_LD@h
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
+ ori r8, r9, LPCR_HDICE
+ mtspr SPRN_LPCR, r8
+ isync
mfspr r8,SPRN_DEC
mftb r7
-BEGIN_FTR_SECTION
- /* On POWER9, don't sign-extend if host LPCR[LD] bit is set */
- bne 32f
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
extsw r8,r8
-32: mtspr SPRN_HDEC,r8
+ mtspr SPRN_HDEC,r8
add r8,r8,r7
std r8,HSTATE_DECEXP(r13)
@@ -106,7 +104,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
mtlr r0
blr
-_GLOBAL(kvmhv_save_host_pmu)
+/*
+ * void kvmhv_save_host_pmu(void)
+ */
+kvmhv_save_host_pmu:
BEGIN_FTR_SECTION
/* Work around P8 PMAE bug */
li r3, -1
diff --git a/arch/powerpc/kvm/book3s_hv_nested.c b/arch/powerpc/kvm/book3s_hv_nested.c
index dc97e5be76f6..5a64a1341e6f 100644
--- a/arch/powerpc/kvm/book3s_hv_nested.c
+++ b/arch/powerpc/kvm/book3s_hv_nested.c
@@ -11,14 +11,16 @@
#include <linux/kernel.h>
#include <linux/kvm_host.h>
#include <linux/llist.h>
+#include <linux/pgtable.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu.h>
-#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/pte-walk.h>
#include <asm/reg.h>
+#include <asm/plpar_wrappers.h>
+#include <asm/firmware.h>
static struct patb_entry *pseries_partition_tb;
@@ -33,8 +35,8 @@ void kvmhv_save_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr)
hr->dpdes = vc->dpdes;
hr->hfscr = vcpu->arch.hfscr;
hr->tb_offset = vc->tb_offset;
- hr->dawr0 = vcpu->arch.dawr;
- hr->dawrx0 = vcpu->arch.dawrx;
+ hr->dawr0 = vcpu->arch.dawr0;
+ hr->dawrx0 = vcpu->arch.dawrx0;
hr->ciabr = vcpu->arch.ciabr;
hr->purr = vcpu->arch.purr;
hr->spurr = vcpu->arch.spurr;
@@ -49,9 +51,12 @@ void kvmhv_save_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr)
hr->pidr = vcpu->arch.pid;
hr->cfar = vcpu->arch.cfar;
hr->ppr = vcpu->arch.ppr;
+ hr->dawr1 = vcpu->arch.dawr1;
+ hr->dawrx1 = vcpu->arch.dawrx1;
}
-static void byteswap_pt_regs(struct pt_regs *regs)
+/* Use noinline_for_stack due to https://bugs.llvm.org/show_bug.cgi?id=49610 */
+static noinline_for_stack void byteswap_pt_regs(struct pt_regs *regs)
{
unsigned long *addr = (unsigned long *) regs;
@@ -91,15 +96,16 @@ static void byteswap_hv_regs(struct hv_guest_state *hr)
hr->pidr = swab64(hr->pidr);
hr->cfar = swab64(hr->cfar);
hr->ppr = swab64(hr->ppr);
+ hr->dawr1 = swab64(hr->dawr1);
+ hr->dawrx1 = swab64(hr->dawrx1);
}
-static void save_hv_return_state(struct kvm_vcpu *vcpu, int trap,
+static void save_hv_return_state(struct kvm_vcpu *vcpu,
struct hv_guest_state *hr)
{
struct kvmppc_vcore *vc = vcpu->arch.vcore;
hr->dpdes = vc->dpdes;
- hr->hfscr = vcpu->arch.hfscr;
hr->purr = vcpu->arch.purr;
hr->spurr = vcpu->arch.spurr;
hr->ic = vcpu->arch.ic;
@@ -113,7 +119,7 @@ static void save_hv_return_state(struct kvm_vcpu *vcpu, int trap,
hr->pidr = vcpu->arch.pid;
hr->cfar = vcpu->arch.cfar;
hr->ppr = vcpu->arch.ppr;
- switch (trap) {
+ switch (vcpu->arch.trap) {
case BOOK3S_INTERRUPT_H_DATA_STORAGE:
hr->hdar = vcpu->arch.fault_dar;
hr->hdsisr = vcpu->arch.fault_dsisr;
@@ -122,37 +128,25 @@ static void save_hv_return_state(struct kvm_vcpu *vcpu, int trap,
case BOOK3S_INTERRUPT_H_INST_STORAGE:
hr->asdr = vcpu->arch.fault_gpa;
break;
+ case BOOK3S_INTERRUPT_H_FAC_UNAVAIL:
+ hr->hfscr = ((~HFSCR_INTR_CAUSE & hr->hfscr) |
+ (HFSCR_INTR_CAUSE & vcpu->arch.hfscr));
+ break;
case BOOK3S_INTERRUPT_H_EMUL_ASSIST:
hr->heir = vcpu->arch.emul_inst;
break;
}
}
-static void sanitise_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr)
-{
- /*
- * Don't let L1 enable features for L2 which we've disabled for L1,
- * but preserve the interrupt cause field.
- */
- hr->hfscr &= (HFSCR_INTR_CAUSE | vcpu->arch.hfscr);
-
- /* Don't let data address watchpoint match in hypervisor state */
- hr->dawrx0 &= ~DAWRX_HYP;
-
- /* Don't let completed instruction address breakpt match in HV state */
- if ((hr->ciabr & CIABR_PRIV) == CIABR_PRIV_HYPER)
- hr->ciabr &= ~CIABR_PRIV;
-}
-
-static void restore_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr)
+static void restore_hv_regs(struct kvm_vcpu *vcpu, const struct hv_guest_state *hr)
{
struct kvmppc_vcore *vc = vcpu->arch.vcore;
vc->pcr = hr->pcr | PCR_MASK;
vc->dpdes = hr->dpdes;
vcpu->arch.hfscr = hr->hfscr;
- vcpu->arch.dawr = hr->dawr0;
- vcpu->arch.dawrx = hr->dawrx0;
+ vcpu->arch.dawr0 = hr->dawr0;
+ vcpu->arch.dawrx0 = hr->dawrx0;
vcpu->arch.ciabr = hr->ciabr;
vcpu->arch.purr = hr->purr;
vcpu->arch.spurr = hr->spurr;
@@ -167,6 +161,8 @@ static void restore_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr)
vcpu->arch.pid = hr->pidr;
vcpu->arch.cfar = hr->cfar;
vcpu->arch.ppr = hr->ppr;
+ vcpu->arch.dawr1 = hr->dawr1;
+ vcpu->arch.dawrx1 = hr->dawrx1;
}
void kvmhv_restore_hv_return_state(struct kvm_vcpu *vcpu,
@@ -215,43 +211,135 @@ static void kvmhv_nested_mmio_needed(struct kvm_vcpu *vcpu, u64 regs_ptr)
}
}
+static int kvmhv_read_guest_state_and_regs(struct kvm_vcpu *vcpu,
+ struct hv_guest_state *l2_hv,
+ struct pt_regs *l2_regs,
+ u64 hv_ptr, u64 regs_ptr)
+{
+ int size;
+
+ if (kvm_vcpu_read_guest(vcpu, hv_ptr, &l2_hv->version,
+ sizeof(l2_hv->version)))
+ return -1;
+
+ if (kvmppc_need_byteswap(vcpu))
+ l2_hv->version = swab64(l2_hv->version);
+
+ size = hv_guest_state_size(l2_hv->version);
+ if (size < 0)
+ return -1;
+
+ return kvm_vcpu_read_guest(vcpu, hv_ptr, l2_hv, size) ||
+ kvm_vcpu_read_guest(vcpu, regs_ptr, l2_regs,
+ sizeof(struct pt_regs));
+}
+
+static int kvmhv_write_guest_state_and_regs(struct kvm_vcpu *vcpu,
+ struct hv_guest_state *l2_hv,
+ struct pt_regs *l2_regs,
+ u64 hv_ptr, u64 regs_ptr)
+{
+ int size;
+
+ size = hv_guest_state_size(l2_hv->version);
+ if (size < 0)
+ return -1;
+
+ return kvm_vcpu_write_guest(vcpu, hv_ptr, l2_hv, size) ||
+ kvm_vcpu_write_guest(vcpu, regs_ptr, l2_regs,
+ sizeof(struct pt_regs));
+}
+
+static void load_l2_hv_regs(struct kvm_vcpu *vcpu,
+ const struct hv_guest_state *l2_hv,
+ const struct hv_guest_state *l1_hv, u64 *lpcr)
+{
+ struct kvmppc_vcore *vc = vcpu->arch.vcore;
+ u64 mask;
+
+ restore_hv_regs(vcpu, l2_hv);
+
+ /*
+ * Don't let L1 change LPCR bits for the L2 except these:
+ */
+ mask = LPCR_DPFD | LPCR_ILE | LPCR_TC | LPCR_AIL | LPCR_LD | LPCR_MER;
+
+ /*
+ * Additional filtering is required depending on hardware
+ * and configuration.
+ */
+ *lpcr = kvmppc_filter_lpcr_hv(vcpu->kvm,
+ (vc->lpcr & ~mask) | (*lpcr & mask));
+
+ /*
+ * Don't let L1 enable features for L2 which we don't allow for L1,
+ * but preserve the interrupt cause field.
+ */
+ vcpu->arch.hfscr = l2_hv->hfscr & (HFSCR_INTR_CAUSE | vcpu->arch.hfscr_permitted);
+
+ /* Don't let data address watchpoint match in hypervisor state */
+ vcpu->arch.dawrx0 = l2_hv->dawrx0 & ~DAWRX_HYP;
+ vcpu->arch.dawrx1 = l2_hv->dawrx1 & ~DAWRX_HYP;
+
+ /* Don't let completed instruction address breakpt match in HV state */
+ if ((l2_hv->ciabr & CIABR_PRIV) == CIABR_PRIV_HYPER)
+ vcpu->arch.ciabr = l2_hv->ciabr & ~CIABR_PRIV;
+}
+
long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu)
{
long int err, r;
struct kvm_nested_guest *l2;
struct pt_regs l2_regs, saved_l1_regs;
- struct hv_guest_state l2_hv, saved_l1_hv;
+ struct hv_guest_state l2_hv = {0}, saved_l1_hv;
struct kvmppc_vcore *vc = vcpu->arch.vcore;
u64 hv_ptr, regs_ptr;
- u64 hdec_exp;
+ u64 hdec_exp, lpcr;
s64 delta_purr, delta_spurr, delta_ic, delta_vtb;
- u64 mask;
- unsigned long lpcr;
if (vcpu->kvm->arch.l1_ptcr == 0)
return H_NOT_AVAILABLE;
+ if (MSR_TM_TRANSACTIONAL(vcpu->arch.shregs.msr))
+ return H_BAD_MODE;
+
/* copy parameters in */
hv_ptr = kvmppc_get_gpr(vcpu, 4);
- err = kvm_vcpu_read_guest(vcpu, hv_ptr, &l2_hv,
- sizeof(struct hv_guest_state));
+ regs_ptr = kvmppc_get_gpr(vcpu, 5);
+ kvm_vcpu_srcu_read_lock(vcpu);
+ err = kvmhv_read_guest_state_and_regs(vcpu, &l2_hv, &l2_regs,
+ hv_ptr, regs_ptr);
+ kvm_vcpu_srcu_read_unlock(vcpu);
if (err)
return H_PARAMETER;
+
if (kvmppc_need_byteswap(vcpu))
byteswap_hv_regs(&l2_hv);
- if (l2_hv.version != HV_GUEST_STATE_VERSION)
+ if (l2_hv.version > HV_GUEST_STATE_VERSION)
return H_P2;
- regs_ptr = kvmppc_get_gpr(vcpu, 5);
- err = kvm_vcpu_read_guest(vcpu, regs_ptr, &l2_regs,
- sizeof(struct pt_regs));
- if (err)
- return H_PARAMETER;
if (kvmppc_need_byteswap(vcpu))
byteswap_pt_regs(&l2_regs);
if (l2_hv.vcpu_token >= NR_CPUS)
return H_PARAMETER;
+ /*
+ * L1 must have set up a suspended state to enter the L2 in a
+ * transactional state, and only in that case. These have to be
+ * filtered out here to prevent causing a TM Bad Thing in the
+ * host HRFID. We could synthesize a TM Bad Thing back to the L1
+ * here but there doesn't seem like much point.
+ */
+ if (MSR_TM_SUSPENDED(vcpu->arch.shregs.msr)) {
+ if (!MSR_TM_ACTIVE(l2_regs.msr))
+ return H_BAD_MODE;
+ } else {
+ if (l2_regs.msr & MSR_TS_MASK)
+ return H_BAD_MODE;
+ if (WARN_ON_ONCE(vcpu->arch.shregs.msr & MSR_TS_MASK))
+ return H_BAD_MODE;
+ }
+
/* translate lpid */
l2 = kvmhv_get_nested(vcpu->kvm, l2_hv.lpid, true);
if (!l2)
@@ -270,28 +358,24 @@ long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu)
/* convert TB values/offsets to host (L0) values */
hdec_exp = l2_hv.hdec_expiry - vc->tb_offset;
vc->tb_offset += l2_hv.tb_offset;
+ vcpu->arch.dec_expires += l2_hv.tb_offset;
/* set L1 state to L2 state */
vcpu->arch.nested = l2;
vcpu->arch.nested_vcpu_id = l2_hv.vcpu_token;
+ vcpu->arch.nested_hfscr = l2_hv.hfscr;
vcpu->arch.regs = l2_regs;
- vcpu->arch.shregs.msr = vcpu->arch.regs.msr;
- mask = LPCR_DPFD | LPCR_ILE | LPCR_TC | LPCR_AIL | LPCR_LD |
- LPCR_LPES | LPCR_MER;
- lpcr = (vc->lpcr & ~mask) | (l2_hv.lpcr & mask);
- sanitise_hv_regs(vcpu, &l2_hv);
- restore_hv_regs(vcpu, &l2_hv);
+
+ /* Guest must always run with ME enabled, HV disabled. */
+ vcpu->arch.shregs.msr = (vcpu->arch.regs.msr | MSR_ME) & ~MSR_HV;
+
+ lpcr = l2_hv.lpcr;
+ load_l2_hv_regs(vcpu, &l2_hv, &saved_l1_hv, &lpcr);
vcpu->arch.ret = RESUME_GUEST;
vcpu->arch.trap = 0;
do {
- if (mftb() >= hdec_exp) {
- vcpu->arch.trap = BOOK3S_INTERRUPT_HV_DECREMENTER;
- r = RESUME_HOST;
- break;
- }
- r = kvmhv_run_single_vcpu(vcpu->arch.kvm_run, vcpu, hdec_exp,
- lpcr);
+ r = kvmhv_run_single_vcpu(vcpu, hdec_exp, lpcr);
} while (is_kvmppc_resume_guest(r));
/* save L2 state for return */
@@ -301,7 +385,7 @@ long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu)
delta_spurr = vcpu->arch.spurr - l2_hv.spurr;
delta_ic = vcpu->arch.ic - l2_hv.ic;
delta_vtb = vc->vtb - l2_hv.vtb;
- save_hv_return_state(vcpu, vcpu->arch.trap, &l2_hv);
+ save_hv_return_state(vcpu, &l2_hv);
/* restore L1 state */
vcpu->arch.nested = NULL;
@@ -311,6 +395,8 @@ long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu)
if (l2_regs.msr & MSR_TS_MASK)
vcpu->arch.shregs.msr |= MSR_TS_S;
vc->tb_offset = saved_l1_hv.tb_offset;
+ /* XXX: is this always the same delta as saved_l1_hv.tb_offset? */
+ vcpu->arch.dec_expires -= l2_hv.tb_offset;
restore_hv_regs(vcpu, &saved_l1_hv);
vcpu->arch.purr += delta_purr;
vcpu->arch.spurr += delta_spurr;
@@ -324,12 +410,10 @@ long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu)
byteswap_hv_regs(&l2_hv);
byteswap_pt_regs(&l2_regs);
}
- err = kvm_vcpu_write_guest(vcpu, hv_ptr, &l2_hv,
- sizeof(struct hv_guest_state));
- if (err)
- return H_AUTHORITY;
- err = kvm_vcpu_write_guest(vcpu, regs_ptr, &l2_regs,
- sizeof(struct pt_regs));
+ kvm_vcpu_srcu_read_lock(vcpu);
+ err = kvmhv_write_guest_state_and_regs(vcpu, &l2_hv, &l2_regs,
+ hv_ptr, regs_ptr);
+ kvm_vcpu_srcu_read_unlock(vcpu);
if (err)
return H_AUTHORITY;
@@ -355,10 +439,11 @@ long kvmhv_nested_init(void)
if (!radix_enabled())
return -ENODEV;
- /* find log base 2 of KVMPPC_NR_LPIDS, rounding up */
- ptb_order = __ilog2(KVMPPC_NR_LPIDS - 1) + 1;
- if (ptb_order < 8)
- ptb_order = 8;
+ /* Partition table entry is 1<<4 bytes in size, hence the 4. */
+ ptb_order = KVM_MAX_NESTED_GUESTS_SHIFT + 4;
+ /* Minimum partition table size is 1<<12 bytes */
+ if (ptb_order < 12)
+ ptb_order = 12;
pseries_partition_tb = kmalloc(sizeof(struct patb_entry) << ptb_order,
GFP_KERNEL);
if (!pseries_partition_tb) {
@@ -366,7 +451,7 @@ long kvmhv_nested_init(void)
return -ENOMEM;
}
- ptcr = __pa(pseries_partition_tb) | (ptb_order - 8);
+ ptcr = __pa(pseries_partition_tb) | (ptb_order - 12);
rc = plpar_hcall_norets(H_SET_PARTITION_TABLE, ptcr);
if (rc != H_SUCCESS) {
pr_err("kvm-hv: Parent hypervisor does not support nesting (rc=%ld)\n",
@@ -402,8 +487,15 @@ static void kvmhv_flush_lpid(unsigned int lpid)
return;
}
- rc = plpar_hcall_norets(H_TLB_INVALIDATE, H_TLBIE_P1_ENC(2, 0, 1),
- lpid, TLBIEL_INVAL_SET_LPID);
+ if (!firmware_has_feature(FW_FEATURE_RPT_INVALIDATE))
+ rc = plpar_hcall_norets(H_TLB_INVALIDATE, H_TLBIE_P1_ENC(2, 0, 1),
+ lpid, TLBIEL_INVAL_SET_LPID);
+ else
+ rc = pseries_rpt_invalidate(lpid, H_RPTI_TARGET_CMMU,
+ H_RPTI_TYPE_NESTED |
+ H_RPTI_TYPE_TLB | H_RPTI_TYPE_PWC |
+ H_RPTI_TYPE_PAT,
+ H_RPTI_PAGE_ALL, 0, -1UL);
if (rc)
pr_err("KVM: TLB LPID invalidation hcall failed, rc=%ld\n", rc);
}
@@ -430,11 +522,6 @@ static void kvmhv_set_nested_ptbl(struct kvm_nested_guest *gp)
kvmhv_set_ptbl_entry(gp->shadow_lpid, dw0, gp->process_table);
}
-void kvmhv_vm_nested_init(struct kvm *kvm)
-{
- kvm->arch.max_nested_lpid = -1;
-}
-
/*
* Handle the H_SET_PARTITION_TABLE hcall.
* r4 = guest real address of partition table + log_2(size) - 12
@@ -448,16 +535,14 @@ long kvmhv_set_partition_table(struct kvm_vcpu *vcpu)
long ret = H_SUCCESS;
srcu_idx = srcu_read_lock(&kvm->srcu);
- /*
- * Limit the partition table to 4096 entries (because that's what
- * hardware supports), and check the base address.
- */
- if ((ptcr & PRTS_MASK) > 12 - 8 ||
+ /* Check partition size and base address. */
+ if ((ptcr & PRTS_MASK) + 12 - 4 > KVM_MAX_NESTED_GUESTS_SHIFT ||
!kvm_is_visible_gfn(vcpu->kvm, (ptcr & PRTB_MASK) >> PAGE_SHIFT))
ret = H_PARAMETER;
srcu_read_unlock(&kvm->srcu, srcu_idx);
if (ret == H_SUCCESS)
kvm->arch.l1_ptcr = ptcr;
+
return ret;
}
@@ -489,7 +574,7 @@ long kvmhv_copy_tofrom_guest_nested(struct kvm_vcpu *vcpu)
if (eaddr & (0xFFFUL << 52))
return H_PARAMETER;
- buf = kzalloc(n, GFP_KERNEL);
+ buf = kzalloc(n, GFP_KERNEL | __GFP_NOWARN);
if (!buf)
return H_NO_MEM;
@@ -509,12 +594,16 @@ long kvmhv_copy_tofrom_guest_nested(struct kvm_vcpu *vcpu)
goto not_found;
/* Write what was loaded into our buffer back to the L1 guest */
+ kvm_vcpu_srcu_read_lock(vcpu);
rc = kvm_vcpu_write_guest(vcpu, gp_to, buf, n);
+ kvm_vcpu_srcu_read_unlock(vcpu);
if (rc)
goto not_found;
} else {
/* Load the data to be stored from the L1 guest into our buf */
+ kvm_vcpu_srcu_read_lock(vcpu);
rc = kvm_vcpu_read_guest(vcpu, gp_from, buf, n);
+ kvm_vcpu_srcu_read_unlock(vcpu);
if (rc)
goto not_found;
@@ -549,9 +638,12 @@ static void kvmhv_update_ptbl_cache(struct kvm_nested_guest *gp)
ret = -EFAULT;
ptbl_addr = (kvm->arch.l1_ptcr & PRTB_MASK) + (gp->l1_lpid << 4);
- if (gp->l1_lpid < (1ul << ((kvm->arch.l1_ptcr & PRTS_MASK) + 8)))
+ if (gp->l1_lpid < (1ul << ((kvm->arch.l1_ptcr & PRTS_MASK) + 12 - 4))) {
+ int srcu_idx = srcu_read_lock(&kvm->srcu);
ret = kvm_read_guest(kvm, ptbl_addr,
&ptbl_entry, sizeof(ptbl_entry));
+ srcu_read_unlock(&kvm->srcu, srcu_idx);
+ }
if (ret) {
gp->l1_gr_to_hr = 0;
gp->process_table = 0;
@@ -562,7 +654,36 @@ static void kvmhv_update_ptbl_cache(struct kvm_nested_guest *gp)
kvmhv_set_nested_ptbl(gp);
}
-struct kvm_nested_guest *kvmhv_alloc_nested(struct kvm *kvm, unsigned int lpid)
+void kvmhv_vm_nested_init(struct kvm *kvm)
+{
+ idr_init(&kvm->arch.kvm_nested_guest_idr);
+}
+
+static struct kvm_nested_guest *__find_nested(struct kvm *kvm, int lpid)
+{
+ return idr_find(&kvm->arch.kvm_nested_guest_idr, lpid);
+}
+
+static bool __prealloc_nested(struct kvm *kvm, int lpid)
+{
+ if (idr_alloc(&kvm->arch.kvm_nested_guest_idr,
+ NULL, lpid, lpid + 1, GFP_KERNEL) != lpid)
+ return false;
+ return true;
+}
+
+static void __add_nested(struct kvm *kvm, int lpid, struct kvm_nested_guest *gp)
+{
+ if (idr_replace(&kvm->arch.kvm_nested_guest_idr, gp, lpid))
+ WARN_ON(1);
+}
+
+static void __remove_nested(struct kvm *kvm, int lpid)
+{
+ idr_remove(&kvm->arch.kvm_nested_guest_idr, lpid);
+}
+
+static struct kvm_nested_guest *kvmhv_alloc_nested(struct kvm *kvm, unsigned int lpid)
{
struct kvm_nested_guest *gp;
long shadow_lpid;
@@ -622,13 +743,8 @@ static void kvmhv_remove_nested(struct kvm_nested_guest *gp)
long ref;
spin_lock(&kvm->mmu_lock);
- if (gp == kvm->arch.nested_guests[lpid]) {
- kvm->arch.nested_guests[lpid] = NULL;
- if (lpid == kvm->arch.max_nested_lpid) {
- while (--lpid >= 0 && !kvm->arch.nested_guests[lpid])
- ;
- kvm->arch.max_nested_lpid = lpid;
- }
+ if (gp == __find_nested(kvm, lpid)) {
+ __remove_nested(kvm, lpid);
--gp->refcnt;
}
ref = gp->refcnt;
@@ -645,24 +761,22 @@ static void kvmhv_remove_nested(struct kvm_nested_guest *gp)
*/
void kvmhv_release_all_nested(struct kvm *kvm)
{
- int i;
+ int lpid;
struct kvm_nested_guest *gp;
struct kvm_nested_guest *freelist = NULL;
struct kvm_memory_slot *memslot;
- int srcu_idx;
+ int srcu_idx, bkt;
spin_lock(&kvm->mmu_lock);
- for (i = 0; i <= kvm->arch.max_nested_lpid; i++) {
- gp = kvm->arch.nested_guests[i];
- if (!gp)
- continue;
- kvm->arch.nested_guests[i] = NULL;
+ idr_for_each_entry(&kvm->arch.kvm_nested_guest_idr, gp, lpid) {
+ __remove_nested(kvm, lpid);
if (--gp->refcnt == 0) {
gp->next = freelist;
freelist = gp;
}
}
- kvm->arch.max_nested_lpid = -1;
+ idr_destroy(&kvm->arch.kvm_nested_guest_idr);
+ /* idr is empty and may be reused at this point */
spin_unlock(&kvm->mmu_lock);
while ((gp = freelist) != NULL) {
freelist = gp->next;
@@ -670,7 +784,7 @@ void kvmhv_release_all_nested(struct kvm *kvm)
}
srcu_idx = srcu_read_lock(&kvm->srcu);
- kvm_for_each_memslot(memslot, kvm_memslots(kvm))
+ kvm_for_each_memslot(memslot, bkt, kvm_memslots(kvm))
kvmhv_free_memslot_nest_rmap(memslot);
srcu_read_unlock(&kvm->srcu, srcu_idx);
}
@@ -694,12 +808,11 @@ struct kvm_nested_guest *kvmhv_get_nested(struct kvm *kvm, int l1_lpid,
{
struct kvm_nested_guest *gp, *newgp;
- if (l1_lpid >= KVM_MAX_NESTED_GUESTS ||
- l1_lpid >= (1ul << ((kvm->arch.l1_ptcr & PRTS_MASK) + 12 - 4)))
+ if (l1_lpid >= (1ul << ((kvm->arch.l1_ptcr & PRTS_MASK) + 12 - 4)))
return NULL;
spin_lock(&kvm->mmu_lock);
- gp = kvm->arch.nested_guests[l1_lpid];
+ gp = __find_nested(kvm, l1_lpid);
if (gp)
++gp->refcnt;
spin_unlock(&kvm->mmu_lock);
@@ -710,17 +823,19 @@ struct kvm_nested_guest *kvmhv_get_nested(struct kvm *kvm, int l1_lpid,
newgp = kvmhv_alloc_nested(kvm, l1_lpid);
if (!newgp)
return NULL;
+
+ if (!__prealloc_nested(kvm, l1_lpid)) {
+ kvmhv_release_nested(newgp);
+ return NULL;
+ }
+
spin_lock(&kvm->mmu_lock);
- if (kvm->arch.nested_guests[l1_lpid]) {
- /* someone else beat us to it */
- gp = kvm->arch.nested_guests[l1_lpid];
- } else {
- kvm->arch.nested_guests[l1_lpid] = newgp;
+ gp = __find_nested(kvm, l1_lpid);
+ if (!gp) {
+ __add_nested(kvm, l1_lpid, newgp);
++newgp->refcnt;
gp = newgp;
newgp = NULL;
- if (l1_lpid > kvm->arch.max_nested_lpid)
- kvm->arch.max_nested_lpid = l1_lpid;
}
++gp->refcnt;
spin_unlock(&kvm->mmu_lock);
@@ -743,11 +858,21 @@ void kvmhv_put_nested(struct kvm_nested_guest *gp)
kvmhv_release_nested(gp);
}
-static struct kvm_nested_guest *kvmhv_find_nested(struct kvm *kvm, int lpid)
+pte_t *find_kvm_nested_guest_pte(struct kvm *kvm, unsigned long lpid,
+ unsigned long ea, unsigned *hshift)
{
- if (lpid > kvm->arch.max_nested_lpid)
+ struct kvm_nested_guest *gp;
+ pte_t *pte;
+
+ gp = __find_nested(kvm, lpid);
+ if (!gp)
return NULL;
- return kvm->arch.nested_guests[lpid];
+
+ VM_WARN(!spin_is_locked(&kvm->mmu_lock),
+ "%s called with kvm mmu_lock not held \n", __func__);
+ pte = __find_linux_pte(gp->shadow_pgtable, ea, NULL, hshift);
+
+ return pte;
}
static inline bool kvmhv_n_rmap_is_equal(u64 rmap_1, u64 rmap_2)
@@ -792,19 +917,15 @@ static void kvmhv_update_nest_rmap_rc(struct kvm *kvm, u64 n_rmap,
unsigned long clr, unsigned long set,
unsigned long hpa, unsigned long mask)
{
- struct kvm_nested_guest *gp;
unsigned long gpa;
unsigned int shift, lpid;
pte_t *ptep;
gpa = n_rmap & RMAP_NESTED_GPA_MASK;
lpid = (n_rmap & RMAP_NESTED_LPID_MASK) >> RMAP_NESTED_LPID_SHIFT;
- gp = kvmhv_find_nested(kvm, lpid);
- if (!gp)
- return;
/* Find the pte */
- ptep = __find_linux_pte(gp->shadow_pgtable, gpa, NULL, &shift);
+ ptep = find_kvm_nested_guest_pte(kvm, lpid, gpa, &shift);
/*
* If the pte is present and the pfn is still the same, update the pte.
* If the pfn has changed then this is a stale rmap entry, the nested
@@ -849,12 +970,12 @@ static void kvmhv_remove_nest_rmap(struct kvm *kvm, u64 n_rmap,
gpa = n_rmap & RMAP_NESTED_GPA_MASK;
lpid = (n_rmap & RMAP_NESTED_LPID_MASK) >> RMAP_NESTED_LPID_SHIFT;
- gp = kvmhv_find_nested(kvm, lpid);
+ gp = __find_nested(kvm, lpid);
if (!gp)
return;
/* Find and invalidate the pte */
- ptep = __find_linux_pte(gp->shadow_pgtable, gpa, NULL, &shift);
+ ptep = find_kvm_nested_guest_pte(kvm, lpid, gpa, &shift);
/* Don't spuriously invalidate ptes if the pfn has changed */
if (ptep && pte_present(*ptep) && ((pte_val(*ptep) & mask) == hpa))
kvmppc_unmap_pte(kvm, ptep, gpa, shift, NULL, gp->shadow_lpid);
@@ -921,7 +1042,7 @@ static bool kvmhv_invalidate_shadow_pte(struct kvm_vcpu *vcpu,
int shift;
spin_lock(&kvm->mmu_lock);
- ptep = __find_linux_pte(gp->shadow_pgtable, gpa, NULL, &shift);
+ ptep = find_kvm_nested_guest_pte(kvm, gp->l1_lpid, gpa, &shift);
if (!shift)
shift = PAGE_SHIFT;
if (ptep && pte_present(*ptep)) {
@@ -1041,16 +1162,13 @@ static void kvmhv_emulate_tlbie_all_lpid(struct kvm_vcpu *vcpu, int ric)
{
struct kvm *kvm = vcpu->kvm;
struct kvm_nested_guest *gp;
- int i;
+ int lpid;
spin_lock(&kvm->mmu_lock);
- for (i = 0; i <= kvm->arch.max_nested_lpid; i++) {
- gp = kvm->arch.nested_guests[i];
- if (gp) {
- spin_unlock(&kvm->mmu_lock);
- kvmhv_emulate_tlbie_lpid(vcpu, gp, ric);
- spin_lock(&kvm->mmu_lock);
- }
+ idr_for_each_entry(&kvm->arch.kvm_nested_guest_idr, gp, lpid) {
+ spin_unlock(&kvm->mmu_lock);
+ kvmhv_emulate_tlbie_lpid(vcpu, gp, ric);
+ spin_lock(&kvm->mmu_lock);
}
spin_unlock(&kvm->mmu_lock);
}
@@ -1129,6 +1247,113 @@ long kvmhv_do_nested_tlbie(struct kvm_vcpu *vcpu)
return H_SUCCESS;
}
+static long do_tlb_invalidate_nested_all(struct kvm_vcpu *vcpu,
+ unsigned long lpid, unsigned long ric)
+{
+ struct kvm *kvm = vcpu->kvm;
+ struct kvm_nested_guest *gp;
+
+ gp = kvmhv_get_nested(kvm, lpid, false);
+ if (gp) {
+ kvmhv_emulate_tlbie_lpid(vcpu, gp, ric);
+ kvmhv_put_nested(gp);
+ }
+ return H_SUCCESS;
+}
+
+/*
+ * Number of pages above which we invalidate the entire LPID rather than
+ * flush individual pages.
+ */
+static unsigned long tlb_range_flush_page_ceiling __read_mostly = 33;
+
+static long do_tlb_invalidate_nested_tlb(struct kvm_vcpu *vcpu,
+ unsigned long lpid,
+ unsigned long pg_sizes,
+ unsigned long start,
+ unsigned long end)
+{
+ int ret = H_P4;
+ unsigned long addr, nr_pages;
+ struct mmu_psize_def *def;
+ unsigned long psize, ap, page_size;
+ bool flush_lpid;
+
+ for (psize = 0; psize < MMU_PAGE_COUNT; psize++) {
+ def = &mmu_psize_defs[psize];
+ if (!(pg_sizes & def->h_rpt_pgsize))
+ continue;
+
+ nr_pages = (end - start) >> def->shift;
+ flush_lpid = nr_pages > tlb_range_flush_page_ceiling;
+ if (flush_lpid)
+ return do_tlb_invalidate_nested_all(vcpu, lpid,
+ RIC_FLUSH_TLB);
+ addr = start;
+ ap = mmu_get_ap(psize);
+ page_size = 1UL << def->shift;
+ do {
+ ret = kvmhv_emulate_tlbie_tlb_addr(vcpu, lpid, ap,
+ get_epn(addr));
+ if (ret)
+ return H_P4;
+ addr += page_size;
+ } while (addr < end);
+ }
+ return ret;
+}
+
+/*
+ * Performs partition-scoped invalidations for nested guests
+ * as part of H_RPT_INVALIDATE hcall.
+ */
+long do_h_rpt_invalidate_pat(struct kvm_vcpu *vcpu, unsigned long lpid,
+ unsigned long type, unsigned long pg_sizes,
+ unsigned long start, unsigned long end)
+{
+ /*
+ * If L2 lpid isn't valid, we need to return H_PARAMETER.
+ *
+ * However, nested KVM issues an L2 lpid flush call when creating
+ * partition table entries for L2. This happens even before the
+ * corresponding shadow lpid is created in the HV, which happens in the
+ * H_ENTER_NESTED call. Since we can't differentiate this case from
+ * the invalid case, we ignore such flush requests and return success.
+ */
+ if (!__find_nested(vcpu->kvm, lpid))
+ return H_SUCCESS;
+
+ /*
+ * A flush all request can be handled by a full lpid flush only.
+ */
+ if ((type & H_RPTI_TYPE_NESTED_ALL) == H_RPTI_TYPE_NESTED_ALL)
+ return do_tlb_invalidate_nested_all(vcpu, lpid, RIC_FLUSH_ALL);
+
+ /*
+ * We don't need to handle a PWC flush like we do for the process table
+ * here, because the intermediate partition scoped table in the nested
+ * guest doesn't really have a PWC. The only level with a PWC is L0, and
+ * for a nested invalidate at L0 we always do kvm_flush_lpid() which does
+ * radix__flush_all_lpid(). For a range invalidate at any level, we
+ * are not removing the higher level page tables and hence there is
+ * no PWC invalidate needed.
+ *
+ * if (type & H_RPTI_TYPE_PWC) {
+ * ret = do_tlb_invalidate_nested_all(vcpu, lpid, RIC_FLUSH_PWC);
+ * if (ret)
+ * return H_P4;
+ * }
+ */
+
+ if (start == 0 && end == -1)
+ return do_tlb_invalidate_nested_all(vcpu, lpid, RIC_FLUSH_TLB);
+
+ if (type & H_RPTI_TYPE_TLB)
+ return do_tlb_invalidate_nested_tlb(vcpu, lpid, pg_sizes,
+ start, end);
+ return H_SUCCESS;
+}
+
/* Used to convert a nested guest real address to a L1 guest real address */
static int kvmhv_translate_addr_nested(struct kvm_vcpu *vcpu,
struct kvm_nested_guest *gp,
@@ -1169,7 +1394,7 @@ static int kvmhv_translate_addr_nested(struct kvm_vcpu *vcpu,
} else if (vcpu->arch.trap == BOOK3S_INTERRUPT_H_INST_STORAGE) {
/* Can we execute? */
if (!gpte_p->may_execute) {
- flags |= SRR1_ISI_N_OR_G;
+ flags |= SRR1_ISI_N_G_OR_CIP;
goto forward_to_l1;
}
} else {
@@ -1212,16 +1437,16 @@ static long kvmhv_handle_nested_set_rc(struct kvm_vcpu *vcpu,
spin_lock(&kvm->mmu_lock);
/* Set the rc bit in the pte of our (L0) pgtable for the L1 guest */
- ret = kvmppc_hv_handle_set_rc(kvm, kvm->arch.pgtable, writing,
- gpte.raddr, kvm->arch.lpid);
+ ret = kvmppc_hv_handle_set_rc(kvm, false, writing,
+ gpte.raddr, kvm->arch.lpid);
if (!ret) {
ret = -EINVAL;
goto out_unlock;
}
/* Set the rc bit in the pte of the shadow_pgtable for the nest guest */
- ret = kvmppc_hv_handle_set_rc(kvm, gp->shadow_pgtable, writing, n_gpa,
- gp->shadow_lpid);
+ ret = kvmppc_hv_handle_set_rc(kvm, true, writing,
+ n_gpa, gp->l1_lpid);
if (!ret)
ret = -EINVAL;
else
@@ -1257,8 +1482,7 @@ static inline int kvmppc_radix_shift_to_level(int shift)
}
/* called with gp->tlb_lock held */
-static long int __kvmhv_nested_page_fault(struct kvm_run *run,
- struct kvm_vcpu *vcpu,
+static long int __kvmhv_nested_page_fault(struct kvm_vcpu *vcpu,
struct kvm_nested_guest *gp)
{
struct kvm *kvm = vcpu->kvm;
@@ -1341,7 +1565,7 @@ static long int __kvmhv_nested_page_fault(struct kvm_run *run,
}
/* passthrough of emulated MMIO case */
- return kvmppc_hv_emulate_mmio(run, vcpu, gpa, ea, writing);
+ return kvmppc_hv_emulate_mmio(vcpu, gpa, ea, writing);
}
if (memslot->flags & KVM_MEM_READONLY) {
if (writing) {
@@ -1356,13 +1580,13 @@ static long int __kvmhv_nested_page_fault(struct kvm_run *run,
/* 2. Find the host pte for this L1 guest real address */
/* Used to check for invalidations in progress */
- mmu_seq = kvm->mmu_notifier_seq;
+ mmu_seq = kvm->mmu_invalidate_seq;
smp_rmb();
/* See if can find translation in our partition scoped tables for L1 */
pte = __pte(0);
spin_lock(&kvm->mmu_lock);
- pte_p = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
+ pte_p = find_kvm_secondary_pte(kvm, gpa, &shift);
if (!shift)
shift = PAGE_SHIFT;
if (pte_p)
@@ -1416,8 +1640,7 @@ static long int __kvmhv_nested_page_fault(struct kvm_run *run,
rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];
ret = kvmppc_create_pte(kvm, gp->shadow_pgtable, pte, n_gpa, level,
mmu_seq, gp->shadow_lpid, rmapp, &n_rmap);
- if (n_rmap)
- kfree(n_rmap);
+ kfree(n_rmap);
if (ret == -EAGAIN)
ret = RESUME_GUEST; /* Let the guest try again */
@@ -1428,28 +1651,25 @@ static long int __kvmhv_nested_page_fault(struct kvm_run *run,
return RESUME_GUEST;
}
-long int kvmhv_nested_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu)
+long int kvmhv_nested_page_fault(struct kvm_vcpu *vcpu)
{
struct kvm_nested_guest *gp = vcpu->arch.nested;
long int ret;
mutex_lock(&gp->tlb_lock);
- ret = __kvmhv_nested_page_fault(run, vcpu, gp);
+ ret = __kvmhv_nested_page_fault(vcpu, gp);
mutex_unlock(&gp->tlb_lock);
return ret;
}
int kvmhv_nested_next_lpid(struct kvm *kvm, int lpid)
{
- int ret = -1;
+ int ret = lpid + 1;
spin_lock(&kvm->mmu_lock);
- while (++lpid <= kvm->arch.max_nested_lpid) {
- if (kvm->arch.nested_guests[lpid]) {
- ret = lpid;
- break;
- }
- }
+ if (!idr_get_next(&kvm->arch.kvm_nested_guest_idr, &ret))
+ ret = -1;
spin_unlock(&kvm->mmu_lock);
+
return ret;
}
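
The nested-guest bookkeeping above moves from a fixed nested_guests[] array to an IDR keyed by the L1 lpid. The scheme is: reserve the id with a NULL entry outside mmu_lock (idr_alloc() with GFP_KERNEL may sleep), then publish the pointer under the lock with idr_replace(), which never allocates. A minimal sketch of that reserve-then-publish pattern follows; it assumes <linux/idr.h> and <linux/slab.h>, and the my_* names are hypothetical, not part of this patch.

static struct my_guest *my_create(struct idr *idr, spinlock_t *lock, int id)
{
        struct my_guest *newg = kzalloc(sizeof(*newg), GFP_KERNEL);

        if (!newg)
                return NULL;

        /* Reserve the slot with a NULL entry; may sleep, so no lock held. */
        if (idr_alloc(idr, NULL, id, id + 1, GFP_KERNEL) != id) {
                kfree(newg);    /* id taken by a racing creator, or -ENOMEM */
                return NULL;
        }

        spin_lock(lock);
        idr_replace(idr, newg, id);     /* publish; lookups now see newg */
        spin_unlock(lock);

        return newg;
}

This mirrors the kvmhv_get_nested() flow: __prealloc_nested() does the sleeping reservation, and __add_nested() only ever replaces the already reserved NULL entry while mmu_lock is held.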
diff --git a/arch/powerpc/kvm/book3s_hv_p9_entry.c b/arch/powerpc/kvm/book3s_hv_p9_entry.c
new file mode 100644
index 000000000000..34f1db212824
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_hv_p9_entry.c
@@ -0,0 +1,930 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/kernel.h>
+#include <linux/kvm_host.h>
+#include <asm/asm-prototypes.h>
+#include <asm/dbell.h>
+#include <asm/ppc-opcode.h>
+
+#include "book3s_hv.h"
+
+static void load_spr_state(struct kvm_vcpu *vcpu,
+ struct p9_host_os_sprs *host_os_sprs)
+{
+ /* TAR is very fast */
+ mtspr(SPRN_TAR, vcpu->arch.tar);
+
+#ifdef CONFIG_ALTIVEC
+ if (cpu_has_feature(CPU_FTR_ALTIVEC) &&
+ current->thread.vrsave != vcpu->arch.vrsave)
+ mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
+#endif
+
+ if (vcpu->arch.hfscr & HFSCR_EBB) {
+ if (current->thread.ebbhr != vcpu->arch.ebbhr)
+ mtspr(SPRN_EBBHR, vcpu->arch.ebbhr);
+ if (current->thread.ebbrr != vcpu->arch.ebbrr)
+ mtspr(SPRN_EBBRR, vcpu->arch.ebbrr);
+ if (current->thread.bescr != vcpu->arch.bescr)
+ mtspr(SPRN_BESCR, vcpu->arch.bescr);
+ }
+
+ if (cpu_has_feature(CPU_FTR_P9_TIDR) &&
+ current->thread.tidr != vcpu->arch.tid)
+ mtspr(SPRN_TIDR, vcpu->arch.tid);
+ if (host_os_sprs->iamr != vcpu->arch.iamr)
+ mtspr(SPRN_IAMR, vcpu->arch.iamr);
+ if (host_os_sprs->amr != vcpu->arch.amr)
+ mtspr(SPRN_AMR, vcpu->arch.amr);
+ if (vcpu->arch.uamor != 0)
+ mtspr(SPRN_UAMOR, vcpu->arch.uamor);
+ if (current->thread.fscr != vcpu->arch.fscr)
+ mtspr(SPRN_FSCR, vcpu->arch.fscr);
+ if (current->thread.dscr != vcpu->arch.dscr)
+ mtspr(SPRN_DSCR, vcpu->arch.dscr);
+ if (vcpu->arch.pspb != 0)
+ mtspr(SPRN_PSPB, vcpu->arch.pspb);
+
+ /*
+ * DAR, DSISR, and for nested HV, SPRGs must be set with MSR[RI]
+ * clear (or hstate set appropriately to catch those registers
+ * being clobbered if we take a MCE or SRESET), so those are done
+ * later.
+ */
+
+ if (!(vcpu->arch.ctrl & 1))
+ mtspr(SPRN_CTRLT, 0);
+}
+
+static void store_spr_state(struct kvm_vcpu *vcpu)
+{
+ vcpu->arch.tar = mfspr(SPRN_TAR);
+
+#ifdef CONFIG_ALTIVEC
+ if (cpu_has_feature(CPU_FTR_ALTIVEC))
+ vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
+#endif
+
+ if (vcpu->arch.hfscr & HFSCR_EBB) {
+ vcpu->arch.ebbhr = mfspr(SPRN_EBBHR);
+ vcpu->arch.ebbrr = mfspr(SPRN_EBBRR);
+ vcpu->arch.bescr = mfspr(SPRN_BESCR);
+ }
+
+ if (cpu_has_feature(CPU_FTR_P9_TIDR))
+ vcpu->arch.tid = mfspr(SPRN_TIDR);
+ vcpu->arch.iamr = mfspr(SPRN_IAMR);
+ vcpu->arch.amr = mfspr(SPRN_AMR);
+ vcpu->arch.uamor = mfspr(SPRN_UAMOR);
+ vcpu->arch.fscr = mfspr(SPRN_FSCR);
+ vcpu->arch.dscr = mfspr(SPRN_DSCR);
+ vcpu->arch.pspb = mfspr(SPRN_PSPB);
+
+ vcpu->arch.ctrl = mfspr(SPRN_CTRLF);
+}
+
+/* Returns true if current MSR and/or guest MSR may have changed */
+bool load_vcpu_state(struct kvm_vcpu *vcpu,
+ struct p9_host_os_sprs *host_os_sprs)
+{
+ bool ret = false;
+
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ if (cpu_has_feature(CPU_FTR_TM) ||
+ cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST)) {
+ unsigned long guest_msr = vcpu->arch.shregs.msr;
+ if (MSR_TM_ACTIVE(guest_msr)) {
+ kvmppc_restore_tm_hv(vcpu, guest_msr, true);
+ ret = true;
+ } else if (vcpu->arch.hfscr & HFSCR_TM) {
+ mtspr(SPRN_TEXASR, vcpu->arch.texasr);
+ mtspr(SPRN_TFHAR, vcpu->arch.tfhar);
+ mtspr(SPRN_TFIAR, vcpu->arch.tfiar);
+ }
+ }
+#endif
+
+ load_spr_state(vcpu, host_os_sprs);
+
+ load_fp_state(&vcpu->arch.fp);
+#ifdef CONFIG_ALTIVEC
+ load_vr_state(&vcpu->arch.vr);
+#endif
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(load_vcpu_state);
+
+void store_vcpu_state(struct kvm_vcpu *vcpu)
+{
+ store_spr_state(vcpu);
+
+ store_fp_state(&vcpu->arch.fp);
+#ifdef CONFIG_ALTIVEC
+ store_vr_state(&vcpu->arch.vr);
+#endif
+
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ if (cpu_has_feature(CPU_FTR_TM) ||
+ cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST)) {
+ unsigned long guest_msr = vcpu->arch.shregs.msr;
+ if (MSR_TM_ACTIVE(guest_msr)) {
+ kvmppc_save_tm_hv(vcpu, guest_msr, true);
+ } else if (vcpu->arch.hfscr & HFSCR_TM) {
+ vcpu->arch.texasr = mfspr(SPRN_TEXASR);
+ vcpu->arch.tfhar = mfspr(SPRN_TFHAR);
+ vcpu->arch.tfiar = mfspr(SPRN_TFIAR);
+
+ if (!vcpu->arch.nested) {
+ vcpu->arch.load_tm++; /* see load_ebb comment */
+ if (!vcpu->arch.load_tm)
+ vcpu->arch.hfscr &= ~HFSCR_TM;
+ }
+ }
+ }
+#endif
+}
+EXPORT_SYMBOL_GPL(store_vcpu_state);
+
+void save_p9_host_os_sprs(struct p9_host_os_sprs *host_os_sprs)
+{
+ host_os_sprs->iamr = mfspr(SPRN_IAMR);
+ host_os_sprs->amr = mfspr(SPRN_AMR);
+}
+EXPORT_SYMBOL_GPL(save_p9_host_os_sprs);
+
+/* vcpu guest regs must already be saved */
+void restore_p9_host_os_sprs(struct kvm_vcpu *vcpu,
+ struct p9_host_os_sprs *host_os_sprs)
+{
+ /*
+ * current->thread.xxx registers must all be restored to host
+ * values before a potential context switch, otherwise the context
+ * switch itself will overwrite current->thread.xxx with the values
+ * from the guest SPRs.
+ */
+
+ mtspr(SPRN_SPRG_VDSO_WRITE, local_paca->sprg_vdso);
+
+ if (cpu_has_feature(CPU_FTR_P9_TIDR) &&
+ current->thread.tidr != vcpu->arch.tid)
+ mtspr(SPRN_TIDR, current->thread.tidr);
+ if (host_os_sprs->iamr != vcpu->arch.iamr)
+ mtspr(SPRN_IAMR, host_os_sprs->iamr);
+ if (vcpu->arch.uamor != 0)
+ mtspr(SPRN_UAMOR, 0);
+ if (host_os_sprs->amr != vcpu->arch.amr)
+ mtspr(SPRN_AMR, host_os_sprs->amr);
+ if (current->thread.fscr != vcpu->arch.fscr)
+ mtspr(SPRN_FSCR, current->thread.fscr);
+ if (current->thread.dscr != vcpu->arch.dscr)
+ mtspr(SPRN_DSCR, current->thread.dscr);
+ if (vcpu->arch.pspb != 0)
+ mtspr(SPRN_PSPB, 0);
+
+ /* Save guest CTRL register, set runlatch to 1 */
+ if (!(vcpu->arch.ctrl & 1))
+ mtspr(SPRN_CTRLT, 1);
+
+#ifdef CONFIG_ALTIVEC
+ if (cpu_has_feature(CPU_FTR_ALTIVEC) &&
+ vcpu->arch.vrsave != current->thread.vrsave)
+ mtspr(SPRN_VRSAVE, current->thread.vrsave);
+#endif
+ if (vcpu->arch.hfscr & HFSCR_EBB) {
+ if (vcpu->arch.bescr != current->thread.bescr)
+ mtspr(SPRN_BESCR, current->thread.bescr);
+ if (vcpu->arch.ebbhr != current->thread.ebbhr)
+ mtspr(SPRN_EBBHR, current->thread.ebbhr);
+ if (vcpu->arch.ebbrr != current->thread.ebbrr)
+ mtspr(SPRN_EBBRR, current->thread.ebbrr);
+
+ if (!vcpu->arch.nested) {
+ /*
+ * This is like load_fp in context switching: turn off
+ * the facility after it wraps the u8 to try to avoid
+ * saving and restoring the registers on each partition
+ * switch.
+ */
+ vcpu->arch.load_ebb++;
+ if (!vcpu->arch.load_ebb)
+ vcpu->arch.hfscr &= ~HFSCR_EBB;
+ }
+ }
+
+ if (vcpu->arch.tar != current->thread.tar)
+ mtspr(SPRN_TAR, current->thread.tar);
+}
+EXPORT_SYMBOL_GPL(restore_p9_host_os_sprs);
+
+#ifdef CONFIG_KVM_BOOK3S_HV_P9_TIMING
+void accumulate_time(struct kvm_vcpu *vcpu, struct kvmhv_tb_accumulator *next)
+{
+ struct kvmppc_vcore *vc = vcpu->arch.vcore;
+ struct kvmhv_tb_accumulator *curr;
+ u64 tb = mftb() - vc->tb_offset_applied;
+ u64 prev_tb;
+ u64 delta;
+ u64 seq;
+
+ curr = vcpu->arch.cur_activity;
+ vcpu->arch.cur_activity = next;
+ prev_tb = vcpu->arch.cur_tb_start;
+ vcpu->arch.cur_tb_start = tb;
+
+ if (!curr)
+ return;
+
+ delta = tb - prev_tb;
+
+ seq = curr->seqcount;
+ curr->seqcount = seq + 1;
+ smp_wmb();
+ curr->tb_total += delta;
+ if (seq == 0 || delta < curr->tb_min)
+ curr->tb_min = delta;
+ if (delta > curr->tb_max)
+ curr->tb_max = delta;
+ smp_wmb();
+ curr->seqcount = seq + 2;
+}
+EXPORT_SYMBOL_GPL(accumulate_time);
+#endif
+
+static inline u64 mfslbv(unsigned int idx)
+{
+ u64 slbev;
+
+ asm volatile("slbmfev %0,%1" : "=r" (slbev) : "r" (idx));
+
+ return slbev;
+}
+
+static inline u64 mfslbe(unsigned int idx)
+{
+ u64 slbee;
+
+ asm volatile("slbmfee %0,%1" : "=r" (slbee) : "r" (idx));
+
+ return slbee;
+}
+
+static inline void mtslb(u64 slbee, u64 slbev)
+{
+ asm volatile("slbmte %0,%1" :: "r" (slbev), "r" (slbee));
+}
+
+static inline void clear_slb_entry(unsigned int idx)
+{
+ mtslb(idx, 0);
+}
+
+static inline void slb_clear_invalidate_partition(void)
+{
+ clear_slb_entry(0);
+ asm volatile(PPC_SLBIA(6));
+}
+
+/*
+ * Malicious or buggy radix guests may have inserted SLB entries
+ * (only 0..3 because radix always runs with UPRT=1), so these must
+ * be cleared here to avoid side-channels. slbmte is used rather
+ * than slbia, as it won't clear cached translations.
+ */
+static void radix_clear_slb(void)
+{
+ int i;
+
+ for (i = 0; i < 4; i++)
+ clear_slb_entry(i);
+}
+
+static void switch_mmu_to_guest_radix(struct kvm *kvm, struct kvm_vcpu *vcpu, u64 lpcr)
+{
+ struct kvm_nested_guest *nested = vcpu->arch.nested;
+ u32 lpid;
+ u32 pid;
+
+ lpid = nested ? nested->shadow_lpid : kvm->arch.lpid;
+ pid = vcpu->arch.pid;
+
+ /*
+ * Prior memory accesses to host PID Q3 must be completed before we
+ * start switching, and stores must be drained to avoid not-my-LPAR
+ * logic (see switch_mmu_to_host).
+ */
+ asm volatile("hwsync" ::: "memory");
+ isync();
+ mtspr(SPRN_LPID, lpid);
+ mtspr(SPRN_LPCR, lpcr);
+ mtspr(SPRN_PID, pid);
+ /*
+ * isync not required here because we are HRFID'ing to guest before
+ * any guest context access, which is context synchronising.
+ */
+}
+
+static void switch_mmu_to_guest_hpt(struct kvm *kvm, struct kvm_vcpu *vcpu, u64 lpcr)
+{
+ u32 lpid;
+ u32 pid;
+ int i;
+
+ lpid = kvm->arch.lpid;
+ pid = vcpu->arch.pid;
+
+ /*
+ * See switch_mmu_to_guest_radix. ptesync should not be required here
+ * even if the host is in HPT mode because speculative accesses would
+ * not cause RC updates (we are in real mode).
+ */
+ asm volatile("hwsync" ::: "memory");
+ isync();
+ mtspr(SPRN_LPID, lpid);
+ mtspr(SPRN_LPCR, lpcr);
+ mtspr(SPRN_PID, pid);
+
+ for (i = 0; i < vcpu->arch.slb_max; i++)
+ mtslb(vcpu->arch.slb[i].orige, vcpu->arch.slb[i].origv);
+ /*
+ * isync not required here, see switch_mmu_to_guest_radix.
+ */
+}
+
+static void switch_mmu_to_host(struct kvm *kvm, u32 pid)
+{
+ u32 lpid = kvm->arch.host_lpid;
+ u64 lpcr = kvm->arch.host_lpcr;
+
+ /*
+ * The guest has exited, so guest MMU context is no longer being
+ * non-speculatively accessed, but a hwsync is needed before the
+ * mtLPIDR / mtPIDR switch, in order to ensure all stores are drained,
+ * so the not-my-LPAR tlbie logic does not overlook them.
+ */
+ asm volatile("hwsync" ::: "memory");
+ isync();
+ mtspr(SPRN_PID, pid);
+ mtspr(SPRN_LPID, lpid);
+ mtspr(SPRN_LPCR, lpcr);
+ /*
+ * isync is not required after the switch, because mtmsrd with L=0
+ * is performed after this switch, which is context synchronising.
+ */
+
+ if (!radix_enabled())
+ slb_restore_bolted_realmode();
+}
+
+static void save_clear_host_mmu(struct kvm *kvm)
+{
+ if (!radix_enabled()) {
+ /*
+ * Hash host could save and restore host SLB entries to
+ * reduce SLB fault overheads of VM exits, but for now the
+ * existing code clears all entries and restores just the
+ * bolted ones when switching back to host.
+ */
+ slb_clear_invalidate_partition();
+ }
+}
+
+static void save_clear_guest_mmu(struct kvm *kvm, struct kvm_vcpu *vcpu)
+{
+ if (kvm_is_radix(kvm)) {
+ radix_clear_slb();
+ } else {
+ int i;
+ int nr = 0;
+
+ /*
+ * This must run before switching to host (radix host can't
+ * access all SLBs).
+ */
+ for (i = 0; i < vcpu->arch.slb_nr; i++) {
+ u64 slbee, slbev;
+
+ slbee = mfslbe(i);
+ if (slbee & SLB_ESID_V) {
+ slbev = mfslbv(i);
+ vcpu->arch.slb[nr].orige = slbee | i;
+ vcpu->arch.slb[nr].origv = slbev;
+ nr++;
+ }
+ }
+ vcpu->arch.slb_max = nr;
+ slb_clear_invalidate_partition();
+ }
+}
+
+static void flush_guest_tlb(struct kvm *kvm)
+{
+ unsigned long rb, set;
+
+ rb = PPC_BIT(52); /* IS = 2 */
+ if (kvm_is_radix(kvm)) {
+ /* R=1 PRS=1 RIC=2 */
+ asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
+ : : "r" (rb), "i" (1), "i" (1), "i" (2),
+ "r" (0) : "memory");
+ for (set = 1; set < kvm->arch.tlb_sets; ++set) {
+ rb += PPC_BIT(51); /* increment set number */
+ /* R=1 PRS=1 RIC=0 */
+ asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
+ : : "r" (rb), "i" (1), "i" (1), "i" (0),
+ "r" (0) : "memory");
+ }
+ asm volatile("ptesync": : :"memory");
+ // POWER9 congruence-class TLBIEL leaves ERAT. Flush it now.
+ asm volatile(PPC_RADIX_INVALIDATE_ERAT_GUEST : : :"memory");
+ } else {
+ for (set = 0; set < kvm->arch.tlb_sets; ++set) {
+ /* R=0 PRS=0 RIC=0 */
+ asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
+ : : "r" (rb), "i" (0), "i" (0), "i" (0),
+ "r" (0) : "memory");
+ rb += PPC_BIT(51); /* increment set number */
+ }
+ asm volatile("ptesync": : :"memory");
+ // POWER9 congruence-class TLBIEL leaves ERAT. Flush it now.
+ asm volatile(PPC_ISA_3_0_INVALIDATE_ERAT : : :"memory");
+ }
+}
+
+static void check_need_tlb_flush(struct kvm *kvm, int pcpu,
+ struct kvm_nested_guest *nested)
+{
+ cpumask_t *need_tlb_flush;
+ bool all_set = true;
+ int i;
+
+ if (nested)
+ need_tlb_flush = &nested->need_tlb_flush;
+ else
+ need_tlb_flush = &kvm->arch.need_tlb_flush;
+
+ if (likely(!cpumask_test_cpu(pcpu, need_tlb_flush)))
+ return;
+
+ /*
+ * Individual threads can come in here, but the TLB is shared between
+ * the 4 threads in a core, hence invalidating on one thread
+ * invalidates for all, so only invalidate the first time (if all bits
+ * were set). The others must still execute a ptesync.
+ *
+ * If a race occurs and two threads do the TLB flush, that is not a
+ * problem, just sub-optimal.
+ */
+ for (i = cpu_first_tlb_thread_sibling(pcpu);
+ i <= cpu_last_tlb_thread_sibling(pcpu);
+ i += cpu_tlb_thread_sibling_step()) {
+ if (!cpumask_test_cpu(i, need_tlb_flush)) {
+ all_set = false;
+ break;
+ }
+ }
+ if (all_set)
+ flush_guest_tlb(kvm);
+ else
+ asm volatile("ptesync" ::: "memory");
+
+ /* Clear the bit after the TLB flush */
+ cpumask_clear_cpu(pcpu, need_tlb_flush);
+}
+
+unsigned long kvmppc_msr_hard_disable_set_facilities(struct kvm_vcpu *vcpu, unsigned long msr)
+{
+ unsigned long msr_needed = 0;
+
+ msr &= ~MSR_EE;
+
+ /* MSR bits may have been cleared by context switch so must recheck */
+ if (IS_ENABLED(CONFIG_PPC_FPU))
+ msr_needed |= MSR_FP;
+ if (cpu_has_feature(CPU_FTR_ALTIVEC))
+ msr_needed |= MSR_VEC;
+ if (cpu_has_feature(CPU_FTR_VSX))
+ msr_needed |= MSR_VSX;
+ if ((cpu_has_feature(CPU_FTR_TM) ||
+ cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST)) &&
+ (vcpu->arch.hfscr & HFSCR_TM))
+ msr_needed |= MSR_TM;
+
+ /*
+ * This could be combined with MSR[RI] clearing, but that expands
+ * the unrecoverable window. It would be better to cover unrecoverable
+ * with KVM bad interrupt handling rather than use MSR[RI] at all.
+ *
+ * Much more difficult and less worthwhile to combine with IR/DR
+ * disable.
+ */
+ if ((msr & msr_needed) != msr_needed) {
+ msr |= msr_needed;
+ __mtmsrd(msr, 0);
+ } else {
+ __hard_irq_disable();
+ }
+ local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
+
+ return msr;
+}
+EXPORT_SYMBOL_GPL(kvmppc_msr_hard_disable_set_facilities);
+
+int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpcr, u64 *tb)
+{
+ struct p9_host_os_sprs host_os_sprs;
+ struct kvm *kvm = vcpu->kvm;
+ struct kvm_nested_guest *nested = vcpu->arch.nested;
+ struct kvmppc_vcore *vc = vcpu->arch.vcore;
+ s64 hdec, dec;
+ u64 purr, spurr;
+ u64 *exsave;
+ int trap;
+ unsigned long msr;
+ unsigned long host_hfscr;
+ unsigned long host_ciabr;
+ unsigned long host_dawr0;
+ unsigned long host_dawrx0;
+ unsigned long host_psscr;
+ unsigned long host_hpsscr;
+ unsigned long host_pidr;
+ unsigned long host_dawr1;
+ unsigned long host_dawrx1;
+ unsigned long dpdes;
+
+ hdec = time_limit - *tb;
+ if (hdec < 0)
+ return BOOK3S_INTERRUPT_HV_DECREMENTER;
+
+ WARN_ON_ONCE(vcpu->arch.shregs.msr & MSR_HV);
+ WARN_ON_ONCE(!(vcpu->arch.shregs.msr & MSR_ME));
+
+ vcpu->arch.ceded = 0;
+
+ /* Save MSR for restore, with EE clear. */
+ msr = mfmsr() & ~MSR_EE;
+
+ host_hfscr = mfspr(SPRN_HFSCR);
+ host_ciabr = mfspr(SPRN_CIABR);
+ host_psscr = mfspr(SPRN_PSSCR_PR);
+ if (cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST))
+ host_hpsscr = mfspr(SPRN_PSSCR);
+ host_pidr = mfspr(SPRN_PID);
+
+ if (dawr_enabled()) {
+ host_dawr0 = mfspr(SPRN_DAWR0);
+ host_dawrx0 = mfspr(SPRN_DAWRX0);
+ if (cpu_has_feature(CPU_FTR_DAWR1)) {
+ host_dawr1 = mfspr(SPRN_DAWR1);
+ host_dawrx1 = mfspr(SPRN_DAWRX1);
+ }
+ }
+
+ local_paca->kvm_hstate.host_purr = mfspr(SPRN_PURR);
+ local_paca->kvm_hstate.host_spurr = mfspr(SPRN_SPURR);
+
+ save_p9_host_os_sprs(&host_os_sprs);
+
+ msr = kvmppc_msr_hard_disable_set_facilities(vcpu, msr);
+ if (lazy_irq_pending()) {
+ trap = 0;
+ goto out;
+ }
+
+ if (unlikely(load_vcpu_state(vcpu, &host_os_sprs)))
+ msr = mfmsr(); /* MSR may have been updated */
+
+ if (vc->tb_offset) {
+ u64 new_tb = *tb + vc->tb_offset;
+ mtspr(SPRN_TBU40, new_tb);
+ if ((mftb() & 0xffffff) < (new_tb & 0xffffff)) {
+ new_tb += 0x1000000;
+ mtspr(SPRN_TBU40, new_tb);
+ }
+ *tb = new_tb;
+ vc->tb_offset_applied = vc->tb_offset;
+ }
+
+ mtspr(SPRN_VTB, vc->vtb);
+ mtspr(SPRN_PURR, vcpu->arch.purr);
+ mtspr(SPRN_SPURR, vcpu->arch.spurr);
+
+ if (vc->pcr)
+ mtspr(SPRN_PCR, vc->pcr | PCR_MASK);
+ if (vcpu->arch.doorbell_request) {
+ vcpu->arch.doorbell_request = 0;
+ mtspr(SPRN_DPDES, 1);
+ }
+
+ if (dawr_enabled()) {
+ if (vcpu->arch.dawr0 != host_dawr0)
+ mtspr(SPRN_DAWR0, vcpu->arch.dawr0);
+ if (vcpu->arch.dawrx0 != host_dawrx0)
+ mtspr(SPRN_DAWRX0, vcpu->arch.dawrx0);
+ if (cpu_has_feature(CPU_FTR_DAWR1)) {
+ if (vcpu->arch.dawr1 != host_dawr1)
+ mtspr(SPRN_DAWR1, vcpu->arch.dawr1);
+ if (vcpu->arch.dawrx1 != host_dawrx1)
+ mtspr(SPRN_DAWRX1, vcpu->arch.dawrx1);
+ }
+ }
+ if (vcpu->arch.ciabr != host_ciabr)
+ mtspr(SPRN_CIABR, vcpu->arch.ciabr);
+
+ if (cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST)) {
+ mtspr(SPRN_PSSCR, vcpu->arch.psscr | PSSCR_EC |
+ (local_paca->kvm_hstate.fake_suspend << PSSCR_FAKE_SUSPEND_LG));
+ } else {
+ if (vcpu->arch.psscr != host_psscr)
+ mtspr(SPRN_PSSCR_PR, vcpu->arch.psscr);
+ }
+
+ mtspr(SPRN_HFSCR, vcpu->arch.hfscr);
+
+ mtspr(SPRN_HSRR0, vcpu->arch.regs.nip);
+ mtspr(SPRN_HSRR1, (vcpu->arch.shregs.msr & ~MSR_HV) | MSR_ME);
+
+ /*
+ * On POWER9 DD2.1 and below, sometimes on a Hypervisor Data Storage
+ * Interrupt (HDSI) the HDSISR is not updated at all.
+ *
+ * To work around this we put a canary value into the HDSISR before
+ * returning to a guest and then check for this canary when we take a
+ * HDSI. If we find the canary on a HDSI, we know the hardware didn't
+ * update the HDSISR. In this case we return to the guest to retake the
+ * HDSI which should correctly update the HDSISR the second time HDSI
+ * entry.
+ *
+ * The "radix prefetch bug" test can be used to test for this bug, as
+ * it also exists for DD2.1 and below.
+ */
+ if (cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG))
+ mtspr(SPRN_HDSISR, HDSISR_CANARY);
+
+ mtspr(SPRN_SPRG0, vcpu->arch.shregs.sprg0);
+ mtspr(SPRN_SPRG1, vcpu->arch.shregs.sprg1);
+ mtspr(SPRN_SPRG2, vcpu->arch.shregs.sprg2);
+ mtspr(SPRN_SPRG3, vcpu->arch.shregs.sprg3);
+
+ /*
+ * It might be preferable to load_vcpu_state here, in order to get the
+ * GPR/FP register loads executing in parallel with the previous mtSPR
+ * instructions, but for now that can't be done because the TM handling
+ * in load_vcpu_state can change some SPRs and vcpu state (nip, msr).
+ * But TM could be split out if this would be a significant benefit.
+ */
+
+ /*
+ * MSR[RI] does not need to be cleared (and is not, for radix guests
+ * with no prefetch bug), because in_guest is set. If we take a SRESET
+ * or MCE with in_guest set but still in HV mode, then
+ * kvmppc_p9_bad_interrupt handles the interrupt, which effectively
+ * clears MSR[RI] and doesn't return.
+ */
+ WRITE_ONCE(local_paca->kvm_hstate.in_guest, KVM_GUEST_MODE_HV_P9);
+ barrier(); /* Open in_guest critical section */
+
+ /*
+ * Hash host, hash guest, or radix guest with prefetch bug, all have
+ * to disable the MMU before switching to guest MMU state.
+ */
+ if (!radix_enabled() || !kvm_is_radix(kvm) ||
+ cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG))
+ __mtmsrd(msr & ~(MSR_IR|MSR_DR|MSR_RI), 0);
+
+ save_clear_host_mmu(kvm);
+
+ if (kvm_is_radix(kvm))
+ switch_mmu_to_guest_radix(kvm, vcpu, lpcr);
+ else
+ switch_mmu_to_guest_hpt(kvm, vcpu, lpcr);
+
+ /* TLBIEL uses LPID=LPIDR, so run this after setting guest LPID */
+ check_need_tlb_flush(kvm, vc->pcpu, nested);
+
+ /*
+ * P9 suppresses the HDEC exception when LPCR[HDICE] = 0,
+ * so set guest LPCR (with HDICE) before writing HDEC.
+ */
+ mtspr(SPRN_HDEC, hdec);
+
+ mtspr(SPRN_DEC, vcpu->arch.dec_expires - *tb);
+
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+tm_return_to_guest:
+#endif
+ mtspr(SPRN_DAR, vcpu->arch.shregs.dar);
+ mtspr(SPRN_DSISR, vcpu->arch.shregs.dsisr);
+ mtspr(SPRN_SRR0, vcpu->arch.shregs.srr0);
+ mtspr(SPRN_SRR1, vcpu->arch.shregs.srr1);
+
+ switch_pmu_to_guest(vcpu, &host_os_sprs);
+ accumulate_time(vcpu, &vcpu->arch.in_guest);
+
+ kvmppc_p9_enter_guest(vcpu);
+
+ accumulate_time(vcpu, &vcpu->arch.guest_exit);
+ switch_pmu_to_host(vcpu, &host_os_sprs);
+
+ /* XXX: Could get these from r11/12 and paca exsave instead */
+ vcpu->arch.shregs.srr0 = mfspr(SPRN_SRR0);
+ vcpu->arch.shregs.srr1 = mfspr(SPRN_SRR1);
+ vcpu->arch.shregs.dar = mfspr(SPRN_DAR);
+ vcpu->arch.shregs.dsisr = mfspr(SPRN_DSISR);
+
+ /* 0x2 bit for HSRR is only used by PR and P7/8 HV paths, clear it */
+ trap = local_paca->kvm_hstate.scratch0 & ~0x2;
+
+ if (likely(trap > BOOK3S_INTERRUPT_MACHINE_CHECK))
+ exsave = local_paca->exgen;
+ else if (trap == BOOK3S_INTERRUPT_SYSTEM_RESET)
+ exsave = local_paca->exnmi;
+ else /* trap == 0x200 */
+ exsave = local_paca->exmc;
+
+ vcpu->arch.regs.gpr[1] = local_paca->kvm_hstate.scratch1;
+ vcpu->arch.regs.gpr[3] = local_paca->kvm_hstate.scratch2;
+
+ /*
+ * After reading machine check regs (DAR, DSISR, SRR0/1) and hstate
+ * scratch (which we need to move into exsave to make re-entrant vs
+ * SRESET/MCE), register state is protected from reentrancy. However
+ * the timebase, MMU, and other state are still set to the guest's, so don't
+ * enable MSR[RI] here. It gets enabled at the end, after in_guest
+ * is cleared.
+ *
+ * It is possible an NMI could come in here, which is why it is
+ * important to save the above state early so it can be debugged.
+ */
+
+ vcpu->arch.regs.gpr[9] = exsave[EX_R9/sizeof(u64)];
+ vcpu->arch.regs.gpr[10] = exsave[EX_R10/sizeof(u64)];
+ vcpu->arch.regs.gpr[11] = exsave[EX_R11/sizeof(u64)];
+ vcpu->arch.regs.gpr[12] = exsave[EX_R12/sizeof(u64)];
+ vcpu->arch.regs.gpr[13] = exsave[EX_R13/sizeof(u64)];
+ vcpu->arch.ppr = exsave[EX_PPR/sizeof(u64)];
+ vcpu->arch.cfar = exsave[EX_CFAR/sizeof(u64)];
+ vcpu->arch.regs.ctr = exsave[EX_CTR/sizeof(u64)];
+
+ vcpu->arch.last_inst = KVM_INST_FETCH_FAILED;
+
+ if (unlikely(trap == BOOK3S_INTERRUPT_MACHINE_CHECK)) {
+ vcpu->arch.fault_dar = exsave[EX_DAR/sizeof(u64)];
+ vcpu->arch.fault_dsisr = exsave[EX_DSISR/sizeof(u64)];
+ kvmppc_realmode_machine_check(vcpu);
+
+ } else if (unlikely(trap == BOOK3S_INTERRUPT_HMI)) {
+ kvmppc_p9_realmode_hmi_handler(vcpu);
+
+ } else if (trap == BOOK3S_INTERRUPT_H_EMUL_ASSIST) {
+ vcpu->arch.emul_inst = mfspr(SPRN_HEIR);
+
+ } else if (trap == BOOK3S_INTERRUPT_H_DATA_STORAGE) {
+ vcpu->arch.fault_dar = exsave[EX_DAR/sizeof(u64)];
+ vcpu->arch.fault_dsisr = exsave[EX_DSISR/sizeof(u64)];
+ vcpu->arch.fault_gpa = mfspr(SPRN_ASDR);
+
+ } else if (trap == BOOK3S_INTERRUPT_H_INST_STORAGE) {
+ vcpu->arch.fault_gpa = mfspr(SPRN_ASDR);
+
+ } else if (trap == BOOK3S_INTERRUPT_H_FAC_UNAVAIL) {
+ vcpu->arch.hfscr = mfspr(SPRN_HFSCR);
+
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ /*
+ * Softpatch interrupt for transactional memory emulation cases
+ * on POWER9 DD2.2. This is early in the guest exit path - we
+ * haven't saved registers or done a treclaim yet.
+ */
+ } else if (trap == BOOK3S_INTERRUPT_HV_SOFTPATCH) {
+ vcpu->arch.emul_inst = mfspr(SPRN_HEIR);
+
+ /*
+ * The cases we want to handle here are those where the guest
+ * is in real suspend mode and is trying to transition to
+ * transactional mode.
+ */
+ if (!local_paca->kvm_hstate.fake_suspend &&
+ (vcpu->arch.shregs.msr & MSR_TS_S)) {
+ if (kvmhv_p9_tm_emulation_early(vcpu)) {
+ /*
+ * Go straight back into the guest with the
+ * new NIP/MSR as set by TM emulation.
+ */
+ mtspr(SPRN_HSRR0, vcpu->arch.regs.nip);
+ mtspr(SPRN_HSRR1, vcpu->arch.shregs.msr);
+ goto tm_return_to_guest;
+ }
+ }
+#endif
+ }
+
+ /* Advance host PURR/SPURR by the amount used by guest */
+ purr = mfspr(SPRN_PURR);
+ spurr = mfspr(SPRN_SPURR);
+ local_paca->kvm_hstate.host_purr += purr - vcpu->arch.purr;
+ local_paca->kvm_hstate.host_spurr += spurr - vcpu->arch.spurr;
+ vcpu->arch.purr = purr;
+ vcpu->arch.spurr = spurr;
+
+ vcpu->arch.ic = mfspr(SPRN_IC);
+ vcpu->arch.pid = mfspr(SPRN_PID);
+ vcpu->arch.psscr = mfspr(SPRN_PSSCR_PR);
+
+ vcpu->arch.shregs.sprg0 = mfspr(SPRN_SPRG0);
+ vcpu->arch.shregs.sprg1 = mfspr(SPRN_SPRG1);
+ vcpu->arch.shregs.sprg2 = mfspr(SPRN_SPRG2);
+ vcpu->arch.shregs.sprg3 = mfspr(SPRN_SPRG3);
+
+ dpdes = mfspr(SPRN_DPDES);
+ if (dpdes)
+ vcpu->arch.doorbell_request = 1;
+
+ vc->vtb = mfspr(SPRN_VTB);
+
+ dec = mfspr(SPRN_DEC);
+ if (!(lpcr & LPCR_LD)) /* Sign extend if not using large decrementer */
+ dec = (s32) dec;
+ *tb = mftb();
+ vcpu->arch.dec_expires = dec + *tb;
+
+ if (vc->tb_offset_applied) {
+ u64 new_tb = *tb - vc->tb_offset_applied;
+ mtspr(SPRN_TBU40, new_tb);
+ if ((mftb() & 0xffffff) < (new_tb & 0xffffff)) {
+ new_tb += 0x1000000;
+ mtspr(SPRN_TBU40, new_tb);
+ }
+ *tb = new_tb;
+ vc->tb_offset_applied = 0;
+ }
+
+ save_clear_guest_mmu(kvm, vcpu);
+ switch_mmu_to_host(kvm, host_pidr);
+
+ /*
+ * Enable MSR here in order to have facilities enabled to save
+ * guest registers. This enables the MMU (if we were in realmode), so
+ * it must only be done after the MMU is switched back to the host, to avoid
+ * the P9_RADIX_PREFETCH_BUG or hash guest context.
+ */
+ if (IS_ENABLED(CONFIG_PPC_TRANSACTIONAL_MEM) &&
+ vcpu->arch.shregs.msr & MSR_TS_MASK)
+ msr |= MSR_TS_S;
+ __mtmsrd(msr, 0);
+
+ store_vcpu_state(vcpu);
+
+ mtspr(SPRN_PURR, local_paca->kvm_hstate.host_purr);
+ mtspr(SPRN_SPURR, local_paca->kvm_hstate.host_spurr);
+
+ if (cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST)) {
+ /* Preserve PSSCR[FAKE_SUSPEND] until we've called kvmppc_save_tm_hv */
+ mtspr(SPRN_PSSCR, host_hpsscr |
+ (local_paca->kvm_hstate.fake_suspend << PSSCR_FAKE_SUSPEND_LG));
+ }
+
+ mtspr(SPRN_HFSCR, host_hfscr);
+ if (vcpu->arch.ciabr != host_ciabr)
+ mtspr(SPRN_CIABR, host_ciabr);
+
+ if (dawr_enabled()) {
+ if (vcpu->arch.dawr0 != host_dawr0)
+ mtspr(SPRN_DAWR0, host_dawr0);
+ if (vcpu->arch.dawrx0 != host_dawrx0)
+ mtspr(SPRN_DAWRX0, host_dawrx0);
+ if (cpu_has_feature(CPU_FTR_DAWR1)) {
+ if (vcpu->arch.dawr1 != host_dawr1)
+ mtspr(SPRN_DAWR1, host_dawr1);
+ if (vcpu->arch.dawrx1 != host_dawrx1)
+ mtspr(SPRN_DAWRX1, host_dawrx1);
+ }
+ }
+
+ if (dpdes)
+ mtspr(SPRN_DPDES, 0);
+ if (vc->pcr)
+ mtspr(SPRN_PCR, PCR_MASK);
+
+ /* HDEC must be at least as large as DEC, so decrementer_max fits */
+ mtspr(SPRN_HDEC, decrementer_max);
+
+ timer_rearm_host_dec(*tb);
+
+ restore_p9_host_os_sprs(vcpu, &host_os_sprs);
+
+ barrier(); /* Close in_guest critical section */
+ WRITE_ONCE(local_paca->kvm_hstate.in_guest, KVM_GUEST_MODE_NONE);
+ /* Interrupts are recoverable at this point */
+
+ /*
+ * cp_abort is required if the processor supports local copy-paste
+ * to clear the copy buffer that was under control of the guest.
+ */
+ if (cpu_has_feature(CPU_FTR_ARCH_31))
+ asm volatile(PPC_CP_ABORT);
+
+out:
+ return trap;
+}
+EXPORT_SYMBOL_GPL(kvmhv_vcpu_entry_p9);
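
kvmhv_vcpu_entry_p9() applies and removes the vcore timebase offset with the same three-instruction TBU40 sequence in two places (and the HMI handler later in this patch repeats it). SPRN_TBU40 only loads the upper 40 bits of the timebase while the low 24 bits keep running, so if those low bits are behind the target value the result would land below the intended timebase; the sequence then bumps the upper bits by one (0x1000000) and writes again. A hypothetical helper capturing that idiom, shown only as a sketch:

static inline void my_set_tb_upper40(u64 new_tb)
{
        mtspr(SPRN_TBU40, new_tb);              /* loads upper 40 bits only */
        if ((mftb() & 0xffffff) < (new_tb & 0xffffff)) {
                new_tb += 0x1000000;            /* carry into the upper bits */
                mtspr(SPRN_TBU40, new_tb);
        }
}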
diff --git a/arch/powerpc/kvm/book3s_hv_p9_perf.c b/arch/powerpc/kvm/book3s_hv_p9_perf.c
new file mode 100644
index 000000000000..44d24cca3df1
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_hv_p9_perf.c
@@ -0,0 +1,219 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <asm/kvm_ppc.h>
+#include <asm/pmc.h>
+
+#include "book3s_hv.h"
+
+static void freeze_pmu(unsigned long mmcr0, unsigned long mmcra)
+{
+ if (!(mmcr0 & MMCR0_FC))
+ goto do_freeze;
+ if (mmcra & MMCRA_SAMPLE_ENABLE)
+ goto do_freeze;
+ if (cpu_has_feature(CPU_FTR_ARCH_31)) {
+ if (!(mmcr0 & MMCR0_PMCCEXT))
+ goto do_freeze;
+ if (!(mmcra & MMCRA_BHRB_DISABLE))
+ goto do_freeze;
+ }
+ return;
+
+do_freeze:
+ mmcr0 = MMCR0_FC;
+ mmcra = 0;
+ if (cpu_has_feature(CPU_FTR_ARCH_31)) {
+ mmcr0 |= MMCR0_PMCCEXT;
+ mmcra = MMCRA_BHRB_DISABLE;
+ }
+
+ mtspr(SPRN_MMCR0, mmcr0);
+ mtspr(SPRN_MMCRA, mmcra);
+ isync();
+}
+
+void switch_pmu_to_guest(struct kvm_vcpu *vcpu,
+ struct p9_host_os_sprs *host_os_sprs)
+{
+ struct lppaca *lp;
+ int load_pmu = 1;
+
+ lp = vcpu->arch.vpa.pinned_addr;
+ if (lp)
+ load_pmu = lp->pmcregs_in_use;
+
+ /* Save host */
+ if (ppc_get_pmu_inuse()) {
+ /* POWER9, POWER10 do not implement HPMC or SPMC */
+
+ host_os_sprs->mmcr0 = mfspr(SPRN_MMCR0);
+ host_os_sprs->mmcra = mfspr(SPRN_MMCRA);
+
+ freeze_pmu(host_os_sprs->mmcr0, host_os_sprs->mmcra);
+
+ host_os_sprs->pmc1 = mfspr(SPRN_PMC1);
+ host_os_sprs->pmc2 = mfspr(SPRN_PMC2);
+ host_os_sprs->pmc3 = mfspr(SPRN_PMC3);
+ host_os_sprs->pmc4 = mfspr(SPRN_PMC4);
+ host_os_sprs->pmc5 = mfspr(SPRN_PMC5);
+ host_os_sprs->pmc6 = mfspr(SPRN_PMC6);
+ host_os_sprs->mmcr1 = mfspr(SPRN_MMCR1);
+ host_os_sprs->mmcr2 = mfspr(SPRN_MMCR2);
+ host_os_sprs->sdar = mfspr(SPRN_SDAR);
+ host_os_sprs->siar = mfspr(SPRN_SIAR);
+ host_os_sprs->sier1 = mfspr(SPRN_SIER);
+
+ if (cpu_has_feature(CPU_FTR_ARCH_31)) {
+ host_os_sprs->mmcr3 = mfspr(SPRN_MMCR3);
+ host_os_sprs->sier2 = mfspr(SPRN_SIER2);
+ host_os_sprs->sier3 = mfspr(SPRN_SIER3);
+ }
+ }
+
+#ifdef CONFIG_PPC_PSERIES
+ /* After saving PMU, before loading guest PMU, flip pmcregs_in_use */
+ if (kvmhv_on_pseries()) {
+ barrier();
+ get_lppaca()->pmcregs_in_use = load_pmu;
+ barrier();
+ }
+#endif
+
+ /*
+ * Load guest. If the VPA said the PMCs are not in use but the guest
+ * tried to access them anyway, HFSCR[PM] will be set by the HFAC
+ * fault so we can make forward progress.
+ */
+ if (load_pmu || (vcpu->arch.hfscr & HFSCR_PM)) {
+ mtspr(SPRN_PMC1, vcpu->arch.pmc[0]);
+ mtspr(SPRN_PMC2, vcpu->arch.pmc[1]);
+ mtspr(SPRN_PMC3, vcpu->arch.pmc[2]);
+ mtspr(SPRN_PMC4, vcpu->arch.pmc[3]);
+ mtspr(SPRN_PMC5, vcpu->arch.pmc[4]);
+ mtspr(SPRN_PMC6, vcpu->arch.pmc[5]);
+ mtspr(SPRN_MMCR1, vcpu->arch.mmcr[1]);
+ mtspr(SPRN_MMCR2, vcpu->arch.mmcr[2]);
+ mtspr(SPRN_SDAR, vcpu->arch.sdar);
+ mtspr(SPRN_SIAR, vcpu->arch.siar);
+ mtspr(SPRN_SIER, vcpu->arch.sier[0]);
+
+ if (cpu_has_feature(CPU_FTR_ARCH_31)) {
+ mtspr(SPRN_MMCR3, vcpu->arch.mmcr[3]);
+ mtspr(SPRN_SIER2, vcpu->arch.sier[1]);
+ mtspr(SPRN_SIER3, vcpu->arch.sier[2]);
+ }
+
+ /* Set MMCRA then MMCR0 last */
+ mtspr(SPRN_MMCRA, vcpu->arch.mmcra);
+ mtspr(SPRN_MMCR0, vcpu->arch.mmcr[0]);
+ /* No isync necessary because we're starting counters */
+
+ if (!vcpu->arch.nested &&
+ (vcpu->arch.hfscr_permitted & HFSCR_PM))
+ vcpu->arch.hfscr |= HFSCR_PM;
+ }
+}
+EXPORT_SYMBOL_GPL(switch_pmu_to_guest);
+
+void switch_pmu_to_host(struct kvm_vcpu *vcpu,
+ struct p9_host_os_sprs *host_os_sprs)
+{
+ struct lppaca *lp;
+ int save_pmu = 1;
+
+ lp = vcpu->arch.vpa.pinned_addr;
+ if (lp)
+ save_pmu = lp->pmcregs_in_use;
+ if (IS_ENABLED(CONFIG_KVM_BOOK3S_HV_NESTED_PMU_WORKAROUND)) {
+ /*
+ * Save pmu if this guest is capable of running nested guests.
+ * This option is for old L1s that do not set their
+ * lppaca->pmcregs_in_use properly when entering their L2.
+ */
+ save_pmu |= nesting_enabled(vcpu->kvm);
+ }
+
+ if (save_pmu) {
+ vcpu->arch.mmcr[0] = mfspr(SPRN_MMCR0);
+ vcpu->arch.mmcra = mfspr(SPRN_MMCRA);
+
+ freeze_pmu(vcpu->arch.mmcr[0], vcpu->arch.mmcra);
+
+ vcpu->arch.pmc[0] = mfspr(SPRN_PMC1);
+ vcpu->arch.pmc[1] = mfspr(SPRN_PMC2);
+ vcpu->arch.pmc[2] = mfspr(SPRN_PMC3);
+ vcpu->arch.pmc[3] = mfspr(SPRN_PMC4);
+ vcpu->arch.pmc[4] = mfspr(SPRN_PMC5);
+ vcpu->arch.pmc[5] = mfspr(SPRN_PMC6);
+ vcpu->arch.mmcr[1] = mfspr(SPRN_MMCR1);
+ vcpu->arch.mmcr[2] = mfspr(SPRN_MMCR2);
+ vcpu->arch.sdar = mfspr(SPRN_SDAR);
+ vcpu->arch.siar = mfspr(SPRN_SIAR);
+ vcpu->arch.sier[0] = mfspr(SPRN_SIER);
+
+ if (cpu_has_feature(CPU_FTR_ARCH_31)) {
+ vcpu->arch.mmcr[3] = mfspr(SPRN_MMCR3);
+ vcpu->arch.sier[1] = mfspr(SPRN_SIER2);
+ vcpu->arch.sier[2] = mfspr(SPRN_SIER3);
+ }
+
+ } else if (vcpu->arch.hfscr & HFSCR_PM) {
+ /*
+ * The guest accessed PMC SPRs without specifying they should
+ * be preserved, or it cleared pmcregs_in_use after the last
+ * access. Just ensure they are frozen.
+ */
+ freeze_pmu(mfspr(SPRN_MMCR0), mfspr(SPRN_MMCRA));
+
+ /*
+ * Demand-fault PMU register access in the guest.
+ *
+ * This is used to grab the guest's VPA pmcregs_in_use value
+ * and reflect it into the host's VPA in the case of a nested
+ * hypervisor.
+ *
+ * It also avoids having to zero-out SPRs after each guest
+ * exit to avoid side-channels.
+ *
+ * This is cleared here when we exit the guest, so later HFSCR
+ * interrupt handling can add it back to run the guest with
+ * PM enabled next time.
+ */
+ if (!vcpu->arch.nested)
+ vcpu->arch.hfscr &= ~HFSCR_PM;
+ } /* otherwise the PMU should still be frozen */
+
+#ifdef CONFIG_PPC_PSERIES
+ if (kvmhv_on_pseries()) {
+ barrier();
+ get_lppaca()->pmcregs_in_use = ppc_get_pmu_inuse();
+ barrier();
+ }
+#endif
+
+ if (ppc_get_pmu_inuse()) {
+ mtspr(SPRN_PMC1, host_os_sprs->pmc1);
+ mtspr(SPRN_PMC2, host_os_sprs->pmc2);
+ mtspr(SPRN_PMC3, host_os_sprs->pmc3);
+ mtspr(SPRN_PMC4, host_os_sprs->pmc4);
+ mtspr(SPRN_PMC5, host_os_sprs->pmc5);
+ mtspr(SPRN_PMC6, host_os_sprs->pmc6);
+ mtspr(SPRN_MMCR1, host_os_sprs->mmcr1);
+ mtspr(SPRN_MMCR2, host_os_sprs->mmcr2);
+ mtspr(SPRN_SDAR, host_os_sprs->sdar);
+ mtspr(SPRN_SIAR, host_os_sprs->siar);
+ mtspr(SPRN_SIER, host_os_sprs->sier1);
+
+ if (cpu_has_feature(CPU_FTR_ARCH_31)) {
+ mtspr(SPRN_MMCR3, host_os_sprs->mmcr3);
+ mtspr(SPRN_SIER2, host_os_sprs->sier2);
+ mtspr(SPRN_SIER3, host_os_sprs->sier3);
+ }
+
+ /* Set MMCRA then MMCR0 last */
+ mtspr(SPRN_MMCRA, host_os_sprs->mmcra);
+ mtspr(SPRN_MMCR0, host_os_sprs->mmcr0);
+ isync();
+ }
+}
+EXPORT_SYMBOL_GPL(switch_pmu_to_host);
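
switch_pmu_to_guest()/switch_pmu_to_host() are driven by the pmcregs_in_use flag in the guest's VPA (lppaca): the guest advertises whether its PMU registers need to be context-switched, and the HFSCR[PM] demand-fault path covers guests that touch the PMU without saying so. A guest-side sketch of that handshake follows; it is an assumption about how a cooperative pseries guest would use the flag (see <asm/lppaca.h>), not code from this patch.

static inline void my_guest_pmu_begin(void)
{
        get_lppaca()->pmcregs_in_use = 1;
        barrier();      /* flag is set before any PMU SPR access */
}

static inline void my_guest_pmu_end(void)
{
        barrier();      /* last PMU SPR access precedes clearing the flag */
        get_lppaca()->pmcregs_in_use = 0;
}

With the flag clear, the host side can skip the full PMC/MMCR save and simply keep the PMU frozen on exit.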
diff --git a/arch/powerpc/kvm/book3s_hv_ras.c b/arch/powerpc/kvm/book3s_hv_ras.c
index 79f7d07ef674..ccfd96965630 100644
--- a/arch/powerpc/kvm/book3s_hv_ras.c
+++ b/arch/powerpc/kvm/book3s_hv_ras.c
@@ -65,10 +65,9 @@ static void reload_slb(struct kvm_vcpu *vcpu)
* On POWER7, see if we can handle a machine check that occurred inside
* the guest in real mode, without switching to the host partition.
*/
-static void kvmppc_realmode_mc_power7(struct kvm_vcpu *vcpu)
+static long kvmppc_realmode_mc_power7(struct kvm_vcpu *vcpu)
{
unsigned long srr1 = vcpu->arch.shregs.msr;
- struct machine_check_event mce_evt;
long handled = 1;
if (srr1 & SRR1_MC_LDSTERR) {
@@ -106,6 +105,21 @@ static void kvmppc_realmode_mc_power7(struct kvm_vcpu *vcpu)
handled = 0;
}
+ return handled;
+}
+
+void kvmppc_realmode_machine_check(struct kvm_vcpu *vcpu)
+{
+ struct machine_check_event mce_evt;
+ long handled;
+
+ if (vcpu->kvm->arch.fwnmi_enabled) {
+ /* FWNMI guests handle their own recovery */
+ handled = 0;
+ } else {
+ handled = kvmppc_realmode_mc_power7(vcpu);
+ }
+
/*
* Now get the event and stash it in the vcpu struct so it can
* be handled by the primary thread in virtual mode. We can't
@@ -122,11 +136,60 @@ static void kvmppc_realmode_mc_power7(struct kvm_vcpu *vcpu)
vcpu->arch.mce_evt = mce_evt;
}
-void kvmppc_realmode_machine_check(struct kvm_vcpu *vcpu)
+
+long kvmppc_p9_realmode_hmi_handler(struct kvm_vcpu *vcpu)
{
- kvmppc_realmode_mc_power7(vcpu);
+ struct kvmppc_vcore *vc = vcpu->arch.vcore;
+ long ret = 0;
+
+ /*
+ * Unapply and clear the offset first. That way, if the TB was not
+ * resynced then it will remain in host-offset, and if it was resynced
+ * then it is brought into host-offset. Then the tb offset is
+ * re-applied before continuing with the KVM exit.
+ *
+ * This way, we don't need to actually know whether or not OPAL resynced
+ * the timebase or do any of the complicated dance that the P7/8
+ * path requires.
+ */
+ if (vc->tb_offset_applied) {
+ u64 new_tb = mftb() - vc->tb_offset_applied;
+ mtspr(SPRN_TBU40, new_tb);
+ if ((mftb() & 0xffffff) < (new_tb & 0xffffff)) {
+ new_tb += 0x1000000;
+ mtspr(SPRN_TBU40, new_tb);
+ }
+ vc->tb_offset_applied = 0;
+ }
+
+ local_paca->hmi_irqs++;
+
+ if (hmi_handle_debugtrig(NULL) >= 0) {
+ ret = 1;
+ goto out;
+ }
+
+ if (ppc_md.hmi_exception_early)
+ ppc_md.hmi_exception_early(NULL);
+
+out:
+ if (vc->tb_offset) {
+ u64 new_tb = mftb() + vc->tb_offset;
+ mtspr(SPRN_TBU40, new_tb);
+ if ((mftb() & 0xffffff) < (new_tb & 0xffffff)) {
+ new_tb += 0x1000000;
+ mtspr(SPRN_TBU40, new_tb);
+ }
+ vc->tb_offset_applied = vc->tb_offset;
+ }
+
+ return ret;
}
+/*
+ * The following subcore HMI handling is all only for pre-POWER9 CPUs.
+ */
+
/* Check if dynamic split is in force and return subcore size accordingly. */
static inline int kvmppc_cur_subcore_size(void)
{
@@ -244,7 +307,7 @@ long kvmppc_realmode_hmi_handler(void)
{
bool resync_req;
- __this_cpu_inc(irq_stat.hmi_exceptions);
+ local_paca->hmi_irqs++;
if (hmi_handle_debugtrig(NULL) >= 0)
return 1;
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index 220305454c23..5a05953ae13f 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -23,20 +23,9 @@
#include <asm/pte-walk.h>
/* Translate address of a vmalloc'd thing to a linear map address */
-static void *real_vmalloc_addr(void *x)
+static void *real_vmalloc_addr(void *addr)
{
- unsigned long addr = (unsigned long) x;
- pte_t *p;
- /*
- * assume we don't have huge pages in vmalloc space...
- * So don't worry about THP collapse/split. Called
- * Only in realmode with MSR_EE = 0, hence won't need irq_save/restore.
- */
- p = find_init_mm_pte(addr, NULL);
- if (!p || !pte_present(*p))
- return NULL;
- addr = (pte_pfn(*p) << PAGE_SHIFT) | (addr & ~PAGE_MASK);
- return __va(addr);
+ return __va(ppc_find_vmap_phys((unsigned long)addr));
}
/* Return 1 if we need to do a global tlbie, 0 if we can use tlbiel */
@@ -57,17 +46,15 @@ static int global_invalidates(struct kvm *kvm)
else
global = 1;
+ /* LPID has been switched to host if in virt mode so can't do local */
+ if (!global && (mfmsr() & (MSR_IR|MSR_DR)))
+ global = 1;
+
if (!global) {
/* any other core might now have stale TLB entries... */
smp_wmb();
cpumask_setall(&kvm->arch.need_tlb_flush);
cpu = local_paca->kvm_hstate.kvm_vcore->pcpu;
- /*
- * On POWER9, threads are independent but the TLB is shared,
- * so use the bit for the first thread to represent the core.
- */
- if (cpu_has_feature(CPU_FTR_ARCH_300))
- cpu = cpu_first_thread_sibling(cpu);
cpumask_clear_cpu(cpu, &kvm->arch.need_tlb_flush);
}
@@ -210,10 +197,19 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
pte_t *ptep;
unsigned int writing;
unsigned long mmu_seq;
- unsigned long rcbits, irq_flags = 0;
+ unsigned long rcbits;
if (kvm_is_radix(kvm))
return H_FUNCTION;
+ /*
+ * The HPTE gets used by compute_tlbie_rb() to set TLBIE bits, so
+ * these functions should work together -- we must ensure a guest cannot
+ * cause problems with the TLBIE that KVM executes.
+ */
+ if ((pteh >> HPTE_V_SSIZE_SHIFT) & 0x2) {
+ /* B=0b1x is a reserved value, disallow it. */
+ return H_PARAMETER;
+ }
psize = kvmppc_actual_pgsz(pteh, ptel);
if (!psize)
return H_PARAMETER;
@@ -223,7 +219,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
g_ptel = ptel;
/* used later to detect if we might have been invalidated */
- mmu_seq = kvm->mmu_notifier_seq;
+ mmu_seq = kvm->mmu_invalidate_seq;
smp_rmb();
/* Find the memslot (if any) for this address */
@@ -248,17 +244,9 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
/* Translate to host virtual address */
hva = __gfn_to_hva_memslot(memslot, gfn);
- /*
- * If we had a page table table change after lookup, we would
- * retry via mmu_notifier_retry.
- */
- if (!realmode)
- local_irq_save(irq_flags);
- /*
- * If called in real mode we have MSR_EE = 0. Otherwise
- * we disable irq above.
- */
- ptep = __find_linux_pte(pgdir, hva, NULL, &hpage_shift);
+
+ arch_spin_lock(&kvm->mmu_lock.rlock.raw_lock);
+ ptep = find_kvm_host_pte(kvm, mmu_seq, hva, &hpage_shift);
if (ptep) {
pte_t pte;
unsigned int host_pte_size;
@@ -272,8 +260,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
* to <= host page size, if host is using hugepage
*/
if (host_pte_size < psize) {
- if (!realmode)
- local_irq_restore(flags);
+ arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock);
return H_PARAMETER;
}
pte = kvmppc_read_update_linux_pte(ptep, writing);
@@ -287,8 +274,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
pa |= gpa & ~PAGE_MASK;
}
}
- if (!realmode)
- local_irq_restore(irq_flags);
+ arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock);
ptel &= HPTE_R_KEY | HPTE_R_PP0 | (psize-1);
ptel |= pa;
@@ -380,7 +366,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
rmap = real_vmalloc_addr(rmap);
lock_rmap(rmap);
/* Check for pending invalidations under the rmap chain lock */
- if (mmu_notifier_retry(kvm, mmu_seq)) {
+ if (mmu_invalidate_retry(kvm, mmu_seq)) {
/* inval in progress, write a non-present HPTE */
pteh |= HPTE_V_ABSENT;
pteh &= ~HPTE_V_VALID;
@@ -419,6 +405,7 @@ long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
vcpu->arch.pgdir, true,
&vcpu->arch.regs.gpr[4]);
}
+EXPORT_SYMBOL_GPL(kvmppc_h_enter);
#ifdef __BIG_ENDIAN__
#define LOCK_TOKEN (*(u32 *)(&get_paca()->lock_token))
@@ -563,6 +550,7 @@ long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,
return kvmppc_do_h_remove(vcpu->kvm, flags, pte_index, avpn,
&vcpu->arch.regs.gpr[4]);
}
+EXPORT_SYMBOL_GPL(kvmppc_h_remove);
long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
{
@@ -681,10 +669,10 @@ long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
return ret;
}
+EXPORT_SYMBOL_GPL(kvmppc_h_bulk_remove);
long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
- unsigned long pte_index, unsigned long avpn,
- unsigned long va)
+ unsigned long pte_index, unsigned long avpn)
{
struct kvm *kvm = vcpu->kvm;
__be64 *hpte;
@@ -752,6 +740,7 @@ long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
return H_SUCCESS;
}
+EXPORT_SYMBOL_GPL(kvmppc_h_protect);
long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
unsigned long pte_index)
@@ -792,6 +781,7 @@ long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
}
return H_SUCCESS;
}
+EXPORT_SYMBOL_GPL(kvmppc_h_read);
long kvmppc_h_clear_ref(struct kvm_vcpu *vcpu, unsigned long flags,
unsigned long pte_index)
@@ -840,6 +830,7 @@ long kvmppc_h_clear_ref(struct kvm_vcpu *vcpu, unsigned long flags,
unlock_hpte(hpte, v & ~HPTE_V_HVLOCK);
return ret;
}
+EXPORT_SYMBOL_GPL(kvmppc_h_clear_ref);
long kvmppc_h_clear_mod(struct kvm_vcpu *vcpu, unsigned long flags,
unsigned long pte_index)
@@ -887,9 +878,10 @@ long kvmppc_h_clear_mod(struct kvm_vcpu *vcpu, unsigned long flags,
unlock_hpte(hpte, v & ~HPTE_V_HVLOCK);
return ret;
}
+EXPORT_SYMBOL_GPL(kvmppc_h_clear_mod);
-static int kvmppc_get_hpa(struct kvm_vcpu *vcpu, unsigned long gpa,
- int writing, unsigned long *hpa,
+static int kvmppc_get_hpa(struct kvm_vcpu *vcpu, unsigned long mmu_seq,
+ unsigned long gpa, int writing, unsigned long *hpa,
struct kvm_memory_slot **memslot_p)
{
struct kvm *kvm = vcpu->kvm;
@@ -908,7 +900,7 @@ static int kvmppc_get_hpa(struct kvm_vcpu *vcpu, unsigned long gpa,
hva = __gfn_to_hva_memslot(memslot, gfn);
/* Try to find the host pte for that virtual address */
- ptep = __find_linux_pte(vcpu->arch.pgdir, hva, NULL, &shift);
+ ptep = find_kvm_host_pte(kvm, mmu_seq, hva, &shift);
if (!ptep)
return H_TOO_HARD;
pte = kvmppc_read_update_linux_pte(ptep, writing);
@@ -940,19 +932,14 @@ static long kvmppc_do_h_page_init_zero(struct kvm_vcpu *vcpu,
int i;
/* Used later to detect if we might have been invalidated */
- mmu_seq = kvm->mmu_notifier_seq;
+ mmu_seq = kvm->mmu_invalidate_seq;
smp_rmb();
- ret = kvmppc_get_hpa(vcpu, dest, 1, &pa, &memslot);
- if (ret != H_SUCCESS)
- return ret;
+ arch_spin_lock(&kvm->mmu_lock.rlock.raw_lock);
- /* Check if we've been invalidated */
- raw_spin_lock(&kvm->mmu_lock.rlock);
- if (mmu_notifier_retry(kvm, mmu_seq)) {
- ret = H_TOO_HARD;
+ ret = kvmppc_get_hpa(vcpu, mmu_seq, dest, 1, &pa, &memslot);
+ if (ret != H_SUCCESS)
goto out_unlock;
- }
/* Zero the page */
for (i = 0; i < SZ_4K; i += L1_CACHE_BYTES, pa += L1_CACHE_BYTES)
@@ -960,7 +947,7 @@ static long kvmppc_do_h_page_init_zero(struct kvm_vcpu *vcpu,
kvmppc_update_dirty_map(memslot, dest >> PAGE_SHIFT, PAGE_SIZE);
out_unlock:
- raw_spin_unlock(&kvm->mmu_lock.rlock);
+ arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock);
return ret;
}
@@ -973,22 +960,17 @@ static long kvmppc_do_h_page_init_copy(struct kvm_vcpu *vcpu,
long ret = H_SUCCESS;
/* Used later to detect if we might have been invalidated */
- mmu_seq = kvm->mmu_notifier_seq;
+ mmu_seq = kvm->mmu_invalidate_seq;
smp_rmb();
- ret = kvmppc_get_hpa(vcpu, dest, 1, &dest_pa, &dest_memslot);
- if (ret != H_SUCCESS)
- return ret;
- ret = kvmppc_get_hpa(vcpu, src, 0, &src_pa, NULL);
+ arch_spin_lock(&kvm->mmu_lock.rlock.raw_lock);
+ ret = kvmppc_get_hpa(vcpu, mmu_seq, dest, 1, &dest_pa, &dest_memslot);
if (ret != H_SUCCESS)
- return ret;
+ goto out_unlock;
- /* Check if we've been invalidated */
- raw_spin_lock(&kvm->mmu_lock.rlock);
- if (mmu_notifier_retry(kvm, mmu_seq)) {
- ret = H_TOO_HARD;
+ ret = kvmppc_get_hpa(vcpu, mmu_seq, src, 0, &src_pa, NULL);
+ if (ret != H_SUCCESS)
goto out_unlock;
- }
/* Copy the page */
memcpy((void *)dest_pa, (void *)src_pa, SZ_4K);
@@ -996,7 +978,7 @@ static long kvmppc_do_h_page_init_copy(struct kvm_vcpu *vcpu,
kvmppc_update_dirty_map(dest_memslot, dest >> PAGE_SHIFT, PAGE_SIZE);
out_unlock:
- raw_spin_unlock(&kvm->mmu_lock.rlock);
+ arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock);
return ret;
}
@@ -1260,7 +1242,7 @@ long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr,
status &= ~DSISR_NOHPTE; /* DSISR_NOHPTE == SRR1_ISI_NOPT */
if (!data) {
if (gr & (HPTE_R_N | HPTE_R_G))
- return status | SRR1_ISI_N_OR_G;
+ return status | SRR1_ISI_N_G_OR_CIP;
if (!hpte_read_permission(pp, slb_v & key))
return status | SRR1_ISI_PROT;
} else if (status & DSISR_ISSTORE) {
@@ -1315,3 +1297,4 @@ long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr,
return -1; /* send fault up to host kernel mode */
}
+EXPORT_SYMBOL_GPL(kvmppc_hpte_hv_fault);
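The H_PAGE_INIT helpers in the hunks above reorder their invalidation check: instead of resolving the host physical address and then re-checking mmu_notifier_retry() under kvm->mmu_lock, they snapshot the renamed kvm->mmu_invalidate_seq, take the lock, and pass the sequence number into kvmppc_get_hpa() so the find_kvm_host_pte() lookup is validated against concurrent invalidations. The fragment below is only a sketch of that caller ordering, reusing names from the hunks and eliding the actual zero/copy work; it assumes the kernel context of the patched file.

/*
 * Sketch of the ordering used by the H_PAGE_INIT helpers above (not a
 * drop-in function): snapshot the invalidate sequence number, take the
 * MMU lock, then do the HPA lookup with the sequence number so
 * find_kvm_host_pte() can reject stale translations.
 */
static long h_page_op_sketch(struct kvm_vcpu *vcpu, unsigned long dest)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvm_memory_slot *memslot;
	unsigned long mmu_seq, pa;
	long ret;

	mmu_seq = kvm->mmu_invalidate_seq;	/* snapshot before the lookup */
	smp_rmb();				/* pairs with the invalidation side */

	arch_spin_lock(&kvm->mmu_lock.rlock.raw_lock);
	ret = kvmppc_get_hpa(vcpu, mmu_seq, dest, 1, &pa, &memslot);
	if (ret != H_SUCCESS)
		goto out_unlock;

	/* ... zero or copy the 4K page at pa while the lock is held ... */

out_unlock:
	arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock);
	return ret;
}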
diff --git a/arch/powerpc/kvm/book3s_hv_rm_xics.c b/arch/powerpc/kvm/book3s_hv_rm_xics.c
index 287d5911df0f..e165bfa842bf 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_xics.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_xics.c
@@ -8,6 +8,7 @@
#include <linux/kvm_host.h>
#include <linux/err.h>
#include <linux/kernel_stat.h>
+#include <linux/pgtable.h>
#include <asm/kvm_book3s.h>
#include <asm/kvm_ppc.h>
@@ -15,7 +16,6 @@
#include <asm/xics.h>
#include <asm/synch.h>
#include <asm/cputhreads.h>
-#include <asm/pgtable.h>
#include <asm/ppc-opcode.h>
#include <asm/pnv-pci.h>
#include <asm/opal.h>
@@ -141,13 +141,6 @@ static void icp_rm_set_vcpu_irq(struct kvm_vcpu *vcpu,
return;
}
- if (xive_enabled() && kvmhv_on_pseries()) {
- /* No XICS access or hypercalls available, too hard */
- this_icp->rm_action |= XICS_RM_KICK_VCPU;
- this_icp->rm_kick_target = vcpu;
- return;
- }
-
/*
* Check if the core is loaded,
* if not, find an available host core to post to wake the VCPU,
@@ -486,6 +479,11 @@ static void icp_rm_down_cppr(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
}
}
+unsigned long xics_rm_h_xirr_x(struct kvm_vcpu *vcpu)
+{
+ vcpu->arch.regs.gpr[5] = get_tb();
+ return xics_rm_h_xirr(vcpu);
+}
unsigned long xics_rm_h_xirr(struct kvm_vcpu *vcpu)
{
@@ -713,6 +711,7 @@ static int ics_rm_eoi(struct kvm_vcpu *vcpu, u32 irq)
icp->rm_eoied_irq = irq;
}
+ /* Handle passthrough interrupts */
if (state->host_irq) {
++vcpu->stat.pthru_all;
if (state->intr_cpu != -1) {
@@ -764,22 +763,14 @@ int xics_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
return ics_rm_eoi(vcpu, irq);
}
-unsigned long eoi_rc;
+static unsigned long eoi_rc;
-static void icp_eoi(struct irq_chip *c, u32 hwirq, __be32 xirr, bool *again)
+static void icp_eoi(struct irq_data *d, u32 hwirq, __be32 xirr, bool *again)
{
void __iomem *xics_phys;
int64_t rc;
- if (kvmhv_on_pseries()) {
- unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
-
- iosync();
- plpar_hcall_raw(H_EOI, retbuf, hwirq);
- return;
- }
-
- rc = pnv_opal_pci_msi_eoi(c, hwirq);
+ rc = pnv_opal_pci_msi_eoi(d);
if (rc)
eoi_rc = rc;
@@ -887,8 +878,7 @@ long kvmppc_deliver_irq_passthru(struct kvm_vcpu *vcpu,
icp_rm_deliver_irq(xics, icp, irq, false);
/* EOI the interrupt */
- icp_eoi(irq_desc_get_chip(irq_map->desc), irq_map->r_hwirq, xirr,
- again);
+ icp_eoi(irq_desc_get_irq_data(irq_map->desc), irq_map->r_hwirq, xirr, again);
if (check_too_hard(xics, icp) == H_TOO_HARD)
return 2;
@@ -898,7 +888,7 @@ long kvmppc_deliver_irq_passthru(struct kvm_vcpu *vcpu,
/* --- Non-real mode XICS-related built-in routines --- */
-/**
+/*
* Host Operations poked by RM KVM
*/
static void rm_host_ipi_action(int action, void *data)
diff --git a/arch/powerpc/kvm/book3s_hv_rm_xive.c b/arch/powerpc/kvm/book3s_hv_rm_xive.c
deleted file mode 100644
index 174d75e476fa..000000000000
--- a/arch/powerpc/kvm/book3s_hv_rm_xive.c
+++ /dev/null
@@ -1,47 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/kernel.h>
-#include <linux/kvm_host.h>
-#include <linux/err.h>
-#include <linux/kernel_stat.h>
-
-#include <asm/kvm_book3s.h>
-#include <asm/kvm_ppc.h>
-#include <asm/hvcall.h>
-#include <asm/xics.h>
-#include <asm/debug.h>
-#include <asm/synch.h>
-#include <asm/cputhreads.h>
-#include <asm/pgtable.h>
-#include <asm/ppc-opcode.h>
-#include <asm/pnv-pci.h>
-#include <asm/opal.h>
-#include <asm/smp.h>
-#include <asm/asm-prototypes.h>
-#include <asm/xive.h>
-#include <asm/xive-regs.h>
-
-#include "book3s_xive.h"
-
-/* XXX */
-#include <asm/udbg.h>
-//#define DBG(fmt...) udbg_printf(fmt)
-#define DBG(fmt...) do { } while(0)
-
-static inline void __iomem *get_tima_phys(void)
-{
- return local_paca->kvm_hstate.xive_tima_phys;
-}
-
-#undef XIVE_RUNTIME_CHECKS
-#define X_PFX xive_rm_
-#define X_STATIC
-#define X_STAT_PFX stat_rm_
-#define __x_tima get_tima_phys()
-#define __x_eoi_page(xd) ((void __iomem *)((xd)->eoi_page))
-#define __x_trig_page(xd) ((void __iomem *)((xd)->trig_page))
-#define __x_writeb __raw_rm_writeb
-#define __x_readw __raw_rm_readw
-#define __x_readq __raw_rm_readq
-#define __x_writeq __raw_rm_writeq
-
-#include "book3s_xive_template.c"
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index dbc2fecc37f0..37f50861dd98 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -25,18 +25,10 @@
#include <asm/export.h>
#include <asm/tm.h>
#include <asm/opal.h>
-#include <asm/xive-regs.h>
#include <asm/thread_info.h>
#include <asm/asm-compat.h>
#include <asm/feature-fixups.h>
#include <asm/cpuidle.h>
-#include <asm/ultravisor-api.h>
-
-/* Sign-extend HDEC if not on POWER9 */
-#define EXTEND_HDEC(reg) \
-BEGIN_FTR_SECTION; \
- extsw reg, reg; \
-END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
/* Values in HSTATE_NAPPING(r13) */
#define NAPPING_CEDE 1
@@ -44,21 +36,27 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
#define NAPPING_UNSPLIT 3
/* Stack frame offsets for kvmppc_hv_entry */
-#define SFS 208
+#define SFS 160
#define STACK_SLOT_TRAP (SFS-4)
-#define STACK_SLOT_SHORT_PATH (SFS-8)
#define STACK_SLOT_TID (SFS-16)
#define STACK_SLOT_PSSCR (SFS-24)
#define STACK_SLOT_PID (SFS-32)
#define STACK_SLOT_IAMR (SFS-40)
#define STACK_SLOT_CIABR (SFS-48)
-#define STACK_SLOT_DAWR (SFS-56)
-#define STACK_SLOT_DAWRX (SFS-64)
+#define STACK_SLOT_DAWR0 (SFS-56)
+#define STACK_SLOT_DAWRX0 (SFS-64)
#define STACK_SLOT_HFSCR (SFS-72)
#define STACK_SLOT_AMR (SFS-80)
#define STACK_SLOT_UAMOR (SFS-88)
-/* the following is used by the P9 short path */
-#define STACK_SLOT_NVGPRS (SFS-152) /* 18 gprs */
+#define STACK_SLOT_FSCR (SFS-96)
+
+/*
+ * Use the last LPID (all implemented LPID bits = 1) for partition switching.
+ * This is reserved in the LPID allocator. POWER7 only implements 0x3ff, but
+ * we write 0xfff into the LPID SPR anyway, which seems to work and just
+ * ignores the top bits.
+ */
+#define LPID_RSVD 0xfff
/*
* Call kvmppc_hv_entry in real mode.
@@ -85,19 +83,6 @@ _GLOBAL_TOC(kvmppc_hv_entry_trampoline)
RFI_TO_KERNEL
kvmppc_call_hv_entry:
-BEGIN_FTR_SECTION
- /* On P9, do LPCR setting, if necessary */
- ld r3, HSTATE_SPLIT_MODE(r13)
- cmpdi r3, 0
- beq 46f
- lwz r4, KVM_SPLIT_DO_SET(r3)
- cmpwi r4, 0
- beq 46f
- bl kvmhv_p9_set_lpcr
- nop
-46:
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
-
ld r4, HSTATE_KVM_VCPU(r13)
bl kvmppc_hv_entry
@@ -147,15 +132,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
/* Return the trap number on this thread as the return value */
mr r3, r12
- /*
- * If we came back from the guest via a relocation-on interrupt,
- * we will be in virtual mode at this point, which makes it a
- * little easier to get back to the caller.
- */
- mfmsr r0
- andi. r0, r0, MSR_IR /* in real mode? */
- bne .Lvirt_return
-
/* RFI into the highmem handler */
mfmsr r6
li r0, MSR_RI
@@ -165,11 +141,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
mtsrr1 r7
RFI_TO_KERNEL
- /* Virtual-mode return */
-.Lvirt_return:
- mtlr r8
- blr
-
kvmppc_primary_no_guest:
/* We handle this much like a ceded vcpu */
/* put the HDEC into the DEC, since HDEC interrupts don't wake us */
@@ -256,7 +227,7 @@ kvm_novcpu_wakeup:
/* See if our timeslice has expired (HDEC is negative) */
mfspr r0, SPRN_HDEC
- EXTEND_HDEC(r0)
+ extsw r0, r0
li r12, BOOK3S_INTERRUPT_HV_DECREMENTER
cmpdi r0, 0
blt kvm_novcpu_exit
@@ -266,14 +237,14 @@ kvm_novcpu_wakeup:
cmpdi r4, 0
beq kvmppc_primary_no_guest
-#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
+#ifdef CONFIG_KVM_BOOK3S_HV_P8_TIMING
addi r3, r4, VCPU_TB_RMENTRY
bl kvmhv_start_timing
#endif
b kvmppc_got_guest
kvm_novcpu_exit:
-#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
+#ifdef CONFIG_KVM_BOOK3S_HV_P8_TIMING
ld r4, HSTATE_KVM_VCPU(r13)
cmpdi r4, 0
beq 13f
@@ -292,13 +263,16 @@ kvm_novcpu_exit:
* r3 contains the SRR1 wakeup value, SRR1 is trashed.
*/
_GLOBAL(idle_kvm_start_guest)
- ld r4,PACAEMERGSP(r13)
mfcr r5
mflr r0
- std r1,0(r4)
- std r5,8(r4)
- std r0,16(r4)
- subi r1,r4,STACK_FRAME_OVERHEAD
+ std r5, 8(r1) // Save CR in caller's frame
+ std r0, 16(r1) // Save LR in caller's frame
+ // Create frame on emergency stack
+ ld r4, PACAEMERGSP(r13)
+ stdu r1, -SWITCH_FRAME_SIZE(r4)
+ // Switch to new frame on emergency stack
+ mr r1, r4
+ std r3, 32(r1) // Save SRR1 wakeup value
SAVE_NVGPRS(r1)
/*
@@ -350,6 +324,10 @@ kvm_unsplit_wakeup:
kvm_secondary_got_guest:
+ // About to go to guest, clear saved SRR1
+ li r0, 0
+ std r0, 32(r1)
+
/* Set HSTATE_DSCR(r13) to something sensible */
ld r6, PACA_DSCR_DEFAULT(r13)
std r6, HSTATE_DSCR(r13)
@@ -358,14 +336,12 @@ kvm_secondary_got_guest:
lbz r4, HSTATE_PTID(r13)
cmpwi r4, 0
bne 63f
- LOAD_REG_ADDR(r6, decrementer_max)
- ld r6, 0(r6)
+ lis r6,0x7fff /* MAX_INT@h */
mtspr SPRN_HDEC, r6
/* and set per-LPAR registers, if doing dynamic micro-threading */
ld r6, HSTATE_SPLIT_MODE(r13)
cmpdi r6, 0
beq 63f
-BEGIN_FTR_SECTION
ld r0, KVM_SPLIT_RPR(r6)
mtspr SPRN_RPR, r0
ld r0, KVM_SPLIT_PMMAR(r6)
@@ -373,16 +349,6 @@ BEGIN_FTR_SECTION
ld r0, KVM_SPLIT_LDBAR(r6)
mtspr SPRN_LDBAR, r0
isync
-FTR_SECTION_ELSE
- /* On P9 we use the split_info for coordinating LPCR changes */
- lwz r4, KVM_SPLIT_DO_SET(r6)
- cmpwi r4, 0
- beq 1f
- mr r3, r6
- bl kvmhv_p9_set_lpcr
- nop
-1:
-ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
63:
/* Order load of vcpu after load of vcore */
lwsync
@@ -441,30 +407,24 @@ kvm_no_guest:
mfspr r4, SPRN_LPCR
rlwimi r4, r3, 0, LPCR_PECE0 | LPCR_PECE1
mtspr SPRN_LPCR, r4
- /* set up r3 for return */
- mfspr r3,SPRN_SRR1
+ // Return SRR1 wakeup value, or 0 if we went into the guest
+ ld r3, 32(r1)
REST_NVGPRS(r1)
- addi r1, r1, STACK_FRAME_OVERHEAD
- ld r0, 16(r1)
- ld r5, 8(r1)
- ld r1, 0(r1)
+ ld r1, 0(r1) // Switch back to caller stack
+ ld r0, 16(r1) // Reload LR
+ ld r5, 8(r1) // Reload CR
mtlr r0
mtcr r5
blr
-53: HMT_LOW
+53:
+ HMT_LOW
ld r5, HSTATE_KVM_VCORE(r13)
cmpdi r5, 0
bne 60f
ld r3, HSTATE_SPLIT_MODE(r13)
cmpdi r3, 0
beq kvm_no_guest
- lwz r0, KVM_SPLIT_DO_SET(r3)
- cmpwi r0, 0
- bne kvmhv_do_set
- lwz r0, KVM_SPLIT_DO_RESTORE(r3)
- cmpwi r0, 0
- bne kvmhv_do_restore
lbz r0, KVM_SPLIT_DO_NAP(r3)
cmpwi r0, 0
beq kvm_no_guest
@@ -477,19 +437,6 @@ kvm_no_guest:
stb r0, HSTATE_HWTHREAD_STATE(r13)
b kvm_no_guest
-kvmhv_do_set:
- /* Set LPCR, LPIDR etc. on P9 */
- HMT_MEDIUM
- bl kvmhv_p9_set_lpcr
- nop
- b kvm_no_guest
-
-kvmhv_do_restore:
- HMT_MEDIUM
- bl kvmhv_p9_restore_lpcr
- nop
- b kvm_no_guest
-
/*
* Here the primary thread is trying to return the core to
* whole-core mode, so we need to nap.
@@ -527,7 +474,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
/* Set kvm_split_mode.napped[tid] = 1 */
ld r3, HSTATE_SPLIT_MODE(r13)
li r0, 1
- lbz r4, HSTATE_TID(r13)
+ lhz r4, PACAPACAINDEX(r13)
+ clrldi r4, r4, 61 /* micro-threading => P8 => 8 threads/core */
addi r4, r4, KVM_SPLIT_NAPPED
stbx r0, r3, r4
/* Check the do_nap flag again after setting napped[] */
@@ -575,7 +523,7 @@ kvmppc_hv_entry:
li r6, KVM_GUEST_MODE_HOST_HV
stb r6, HSTATE_IN_GUEST(r13)
-#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
+#ifdef CONFIG_KVM_BOOK3S_HV_P8_TIMING
/* Store initial timestamp */
cmpdi r4, 0
beq 1f
@@ -609,13 +557,11 @@ kvmppc_hv_entry:
bne 10f
lwz r7,KVM_LPID(r9)
-BEGIN_FTR_SECTION
ld r6,KVM_SDR1(r9)
li r0,LPID_RSVD /* switch to reserved LPID */
mtspr SPRN_LPID,r0
ptesync
mtspr SPRN_SDR1,r6 /* switch to partition page table */
-END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
mtspr SPRN_LPID,r7
isync
@@ -696,24 +642,16 @@ kvmppc_got_guest:
/* Save host values of some registers */
BEGIN_FTR_SECTION
- mfspr r5, SPRN_TIDR
- mfspr r6, SPRN_PSSCR
- mfspr r7, SPRN_PID
- std r5, STACK_SLOT_TID(r1)
- std r6, STACK_SLOT_PSSCR(r1)
- std r7, STACK_SLOT_PID(r1)
- mfspr r5, SPRN_HFSCR
- std r5, STACK_SLOT_HFSCR(r1)
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
-BEGIN_FTR_SECTION
mfspr r5, SPRN_CIABR
- mfspr r6, SPRN_DAWR
- mfspr r7, SPRN_DAWRX
+ mfspr r6, SPRN_DAWR0
+ mfspr r7, SPRN_DAWRX0
mfspr r8, SPRN_IAMR
std r5, STACK_SLOT_CIABR(r1)
- std r6, STACK_SLOT_DAWR(r1)
- std r7, STACK_SLOT_DAWRX(r1)
+ std r6, STACK_SLOT_DAWR0(r1)
+ std r7, STACK_SLOT_DAWRX0(r1)
std r8, STACK_SLOT_IAMR(r1)
+ mfspr r5, SPRN_FSCR
+ std r5, STACK_SLOT_FSCR(r1)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
mfspr r5, SPRN_AMR
@@ -732,13 +670,9 @@ BEGIN_FTR_SECTION
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-/*
- * Branch around the call if both CPU_FTR_TM and
- * CPU_FTR_P9_TM_HV_ASSIST are off.
- */
BEGIN_FTR_SECTION
b 91f
-END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
+END_FTR_SECTION_IFCLR(CPU_FTR_TM)
/*
* NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS (but not CR)
*/
@@ -801,10 +735,10 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
lbz r5, 0(r5)
cmpdi r5, 0
beq 1f
- ld r5, VCPU_DAWR(r4)
- ld r6, VCPU_DAWRX(r4)
- mtspr SPRN_DAWR, r5
- mtspr SPRN_DAWRX, r6
+ ld r5, VCPU_DAWR0(r4)
+ ld r6, VCPU_DAWRX0(r4)
+ mtspr SPRN_DAWR0, r5
+ mtspr SPRN_DAWRX0, r6
1:
ld r7, VCPU_CIABR(r4)
ld r8, VCPU_TAR(r4)
@@ -822,7 +756,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
mtspr SPRN_BESCR, r6
mtspr SPRN_PID, r7
mtspr SPRN_WORT, r8
-BEGIN_FTR_SECTION
/* POWER8-only registers */
ld r5, VCPU_TCSCR(r4)
ld r6, VCPU_ACOP(r4)
@@ -833,18 +766,6 @@ BEGIN_FTR_SECTION
mtspr SPRN_CSIGR, r7
mtspr SPRN_TACR, r8
nop
-FTR_SECTION_ELSE
- /* POWER9-only registers */
- ld r5, VCPU_TID(r4)
- ld r6, VCPU_PSSCR(r4)
- lbz r8, HSTATE_FAKE_SUSPEND(r13)
- oris r6, r6, PSSCR_EC@h /* This makes stop trap to HV */
- rldimi r6, r8, PSSCR_FAKE_SUSPEND_LG, 63 - PSSCR_FAKE_SUSPEND_LG
- ld r7, VCPU_HFSCR(r4)
- mtspr SPRN_TIDR, r5
- mtspr SPRN_PSSCR, r6
- mtspr SPRN_HFSCR, r7
-ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
8:
ld r5, VCPU_SPRG0(r4)
@@ -865,17 +786,14 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
/* Restore AMR and UAMOR, set AMOR to all 1s */
ld r5,VCPU_AMR(r4)
ld r6,VCPU_UAMOR(r4)
- li r7,-1
mtspr SPRN_AMR,r5
mtspr SPRN_UAMOR,r6
- mtspr SPRN_AMOR,r7
- /* Restore state of CTRL run bit; assume 1 on entry */
+ /* Restore state of CTRL run bit; the host currently has it set to 1 */
lwz r5,VCPU_CTRL(r4)
andi. r5,r5,1
bne 4f
- mfspr r6,SPRN_CTRLF
- clrrdi r6,r6,1
+ li r6,0
mtspr SPRN_CTRLT,r6
4:
/* Secondary threads wait for primary to have done partition switch */
@@ -904,28 +822,20 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
* Set the decrementer to the guest decrementer.
*/
ld r8,VCPU_DEC_EXPIRES(r4)
- /* r8 is a host timebase value here, convert to guest TB */
- ld r5,HSTATE_KVM_VCORE(r13)
- ld r6,VCORE_TB_OFFSET_APPL(r5)
- add r8,r8,r6
mftb r7
subf r3,r7,r8
mtspr SPRN_DEC,r3
/* Check if HDEC expires soon */
mfspr r3, SPRN_HDEC
- EXTEND_HDEC(r3)
+ extsw r3, r3
cmpdi r3, 512 /* 1 microsecond */
blt hdec_soon
- /* For hash guest, clear out and reload the SLB */
- ld r6, VCPU_KVM(r4)
- lbz r0, KVM_RADIX(r6)
- cmpwi r0, 0
- bne 9f
+ /* Clear out and reload the SLB */
li r6, 0
slbmte r6, r6
- slbia
+ PPC_SLBIA(6)
ptesync
/* Load up guest SLB entries (N.B. slb_max will be 0 for radix) */
@@ -941,96 +851,9 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
bdnz 1b
9:
-#ifdef CONFIG_KVM_XICS
- /* We are entering the guest on that thread, push VCPU to XIVE */
- ld r11, VCPU_XIVE_SAVED_STATE(r4)
- li r9, TM_QW1_OS
- lwz r8, VCPU_XIVE_CAM_WORD(r4)
- cmpwi r8, 0
- beq no_xive
- li r7, TM_QW1_OS + TM_WORD2
- mfmsr r0
- andi. r0, r0, MSR_DR /* in real mode? */
- beq 2f
- ld r10, HSTATE_XIVE_TIMA_VIRT(r13)
- cmpldi cr1, r10, 0
- beq cr1, no_xive
- eieio
- stdx r11,r9,r10
- stwx r8,r7,r10
- b 3f
-2: ld r10, HSTATE_XIVE_TIMA_PHYS(r13)
- cmpldi cr1, r10, 0
- beq cr1, no_xive
- eieio
- stdcix r11,r9,r10
- stwcix r8,r7,r10
-3: li r9, 1
- stb r9, VCPU_XIVE_PUSHED(r4)
- eieio
-
- /*
- * We clear the irq_pending flag. There is a small chance of a
- * race vs. the escalation interrupt happening on another
- * processor setting it again, but the only consequence is to
- * cause a spurrious wakeup on the next H_CEDE which is not an
- * issue.
- */
- li r0,0
- stb r0, VCPU_IRQ_PENDING(r4)
-
- /*
- * In single escalation mode, if the escalation interrupt is
- * on, we mask it.
- */
- lbz r0, VCPU_XIVE_ESC_ON(r4)
- cmpwi cr1, r0,0
- beq cr1, 1f
- li r9, XIVE_ESB_SET_PQ_01
- beq 4f /* in real mode? */
- ld r10, VCPU_XIVE_ESC_VADDR(r4)
- ldx r0, r10, r9
- b 5f
-4: ld r10, VCPU_XIVE_ESC_RADDR(r4)
- ldcix r0, r10, r9
-5: sync
-
- /* We have a possible subtle race here: The escalation interrupt might
- * have fired and be on its way to the host queue while we mask it,
- * and if we unmask it early enough (re-cede right away), there is
- * a theorical possibility that it fires again, thus landing in the
- * target queue more than once which is a big no-no.
- *
- * Fortunately, solving this is rather easy. If the above load setting
- * PQ to 01 returns a previous value where P is set, then we know the
- * escalation interrupt is somewhere on its way to the host. In that
- * case we simply don't clear the xive_esc_on flag below. It will be
- * eventually cleared by the handler for the escalation interrupt.
- *
- * Then, when doing a cede, we check that flag again before re-enabling
- * the escalation interrupt, and if set, we abort the cede.
- */
- andi. r0, r0, XIVE_ESB_VAL_P
- bne- 1f
-
- /* Now P is 0, we can clear the flag */
- li r0, 0
- stb r0, VCPU_XIVE_ESC_ON(r4)
-1:
-no_xive:
-#endif /* CONFIG_KVM_XICS */
-
- li r0, 0
- stw r0, STACK_SLOT_SHORT_PATH(r1)
-
deliver_guest_interrupt: /* r4 = vcpu, r13 = paca */
/* Check if we can deliver an external or decrementer interrupt now */
ld r0, VCPU_PENDING_EXC(r4)
-BEGIN_FTR_SECTION
- /* On POWER9, also check for emulated doorbell interrupt */
- lbz r3, VCPU_DBELL_REQ(r4)
- or r0, r0, r3
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
cmpdi r0, 0
beq 71f
mr r3, r4
@@ -1042,7 +865,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
mtspr SPRN_SRR0, r6
mtspr SPRN_SRR1, r7
-fast_guest_entry_c:
ld r10, VCPU_PC(r4)
ld r11, VCPU_MSR(r4)
/* r11 = vcpu->arch.msr & ~MSR_HV */
@@ -1072,7 +894,7 @@ fast_guest_return:
li r9, KVM_GUEST_MODE_GUEST_HV
stb r9, HSTATE_IN_GUEST(r13)
-#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
+#ifdef CONFIG_KVM_BOOK3S_HV_P8_TIMING
/* Accumulate timing */
addi r3, r4, VCPU_TB_GUEST
bl kvmhv_accumulate_time
@@ -1104,18 +926,8 @@ BEGIN_FTR_SECTION
mtspr SPRN_PPR, r0
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
-/* Move canary into DSISR to check for later */
-BEGIN_FTR_SECTION
- li r0, 0x7fff
- mtspr SPRN_HDSISR, r0
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
-
- ld r6, VCPU_KVM(r4)
- lbz r7, KVM_SECURE_GUEST(r6)
- cmpdi r7, 0
ld r6, VCPU_GPR(R6)(r4)
ld r7, VCPU_GPR(R7)(r4)
- bne ret_to_ultra
ld r0, VCPU_CR(r4)
mtcr r0
@@ -1126,103 +938,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
ld r4, VCPU_GPR(R4)(r4)
HRFI_TO_GUEST
b .
-/*
- * Use UV_RETURN ultracall to return control back to the Ultravisor after
- * processing an hypercall or interrupt that was forwarded (a.k.a. reflected)
- * to the Hypervisor.
- *
- * All registers have already been loaded, except:
- * R0 = hcall result
- * R2 = SRR1, so UV can detect a synthesized interrupt (if any)
- * R3 = UV_RETURN
- */
-ret_to_ultra:
- ld r0, VCPU_CR(r4)
- mtcr r0
-
- ld r0, VCPU_GPR(R3)(r4)
- mfspr r2, SPRN_SRR1
- li r3, 0
- ori r3, r3, UV_RETURN
- ld r4, VCPU_GPR(R4)(r4)
- sc 2
-
-/*
- * Enter the guest on a P9 or later system where we have exactly
- * one vcpu per vcore and we don't need to go to real mode
- * (which implies that host and guest are both using radix MMU mode).
- * r3 = vcpu pointer
- * Most SPRs and all the VSRs have been loaded already.
- */
-_GLOBAL(__kvmhv_vcpu_entry_p9)
-EXPORT_SYMBOL_GPL(__kvmhv_vcpu_entry_p9)
- mflr r0
- std r0, PPC_LR_STKOFF(r1)
- stdu r1, -SFS(r1)
-
- li r0, 1
- stw r0, STACK_SLOT_SHORT_PATH(r1)
-
- std r3, HSTATE_KVM_VCPU(r13)
- mfcr r4
- stw r4, SFS+8(r1)
-
- std r1, HSTATE_HOST_R1(r13)
-
- reg = 14
- .rept 18
- std reg, STACK_SLOT_NVGPRS + ((reg - 14) * 8)(r1)
- reg = reg + 1
- .endr
-
- reg = 14
- .rept 18
- ld reg, __VCPU_GPR(reg)(r3)
- reg = reg + 1
- .endr
-
- mfmsr r10
- std r10, HSTATE_HOST_MSR(r13)
-
- mr r4, r3
- b fast_guest_entry_c
-guest_exit_short_path:
-
- li r0, KVM_GUEST_MODE_NONE
- stb r0, HSTATE_IN_GUEST(r13)
-
- reg = 14
- .rept 18
- std reg, __VCPU_GPR(reg)(r9)
- reg = reg + 1
- .endr
-
- reg = 14
- .rept 18
- ld reg, STACK_SLOT_NVGPRS + ((reg - 14) * 8)(r1)
- reg = reg + 1
- .endr
-
- lwz r4, SFS+8(r1)
- mtcr r4
-
- mr r3, r12 /* trap number */
-
- addi r1, r1, SFS
- ld r0, PPC_LR_STKOFF(r1)
- mtlr r0
-
- /* If we are in real mode, do a rfid to get back to the caller */
- mfmsr r4
- andi. r5, r4, MSR_IR
- bnelr
- rldicl r5, r4, 64 - MSR_TS_S_LG, 62 /* extract TS field */
- mtspr SPRN_SRR0, r0
- ld r10, HSTATE_HOST_MSR(r13)
- rldimi r10, r5, MSR_TS_S_LG, 63 - MSR_TS_T_LG
- mtspr SPRN_SRR1, r10
- RFI_TO_KERNEL
- b .
secondary_too_late:
li r12, 0
@@ -1230,7 +945,7 @@ secondary_too_late:
cmpdi r4, 0
beq 11f
stw r12, VCPU_TRAP(r4)
-#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
+#ifdef CONFIG_KVM_BOOK3S_HV_P8_TIMING
addi r3, r4, VCPU_TB_RMEXIT
bl kvmhv_accumulate_time
#endif
@@ -1244,7 +959,7 @@ hdec_soon:
li r12, BOOK3S_INTERRUPT_HV_DECREMENTER
12: stw r12, VCPU_TRAP(r4)
mr r9, r4
-#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
+#ifdef CONFIG_KVM_BOOK3S_HV_P8_TIMING
addi r3, r4, VCPU_TB_RMEXIT
bl kvmhv_accumulate_time
#endif
@@ -1263,22 +978,16 @@ hdec_soon:
kvmppc_interrupt_hv:
/*
* Register contents:
+ * R9 = HSTATE_IN_GUEST
* R12 = (guest CR << 32) | interrupt vector
* R13 = PACA
* guest R12 saved in shadow VCPU SCRATCH0
- * guest CTR saved in shadow VCPU SCRATCH1 if RELOCATABLE
* guest R13 saved in SPRN_SCRATCH0
+ * guest R9 saved in HSTATE_SCRATCH2
*/
- std r9, HSTATE_SCRATCH2(r13)
- lbz r9, HSTATE_IN_GUEST(r13)
- cmpwi r9, KVM_GUEST_MODE_HOST_HV
- beq kvmppc_bad_host_intr
-#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
- cmpwi r9, KVM_GUEST_MODE_GUEST
- ld r9, HSTATE_SCRATCH2(r13)
- beq kvmppc_interrupt_pr
-#endif
/* We're now back in the host but in guest MMU context */
+ cmpwi r9,KVM_GUEST_MODE_HOST_HV
+ beq kvmppc_bad_host_intr
li r9, KVM_GUEST_MODE_HOST_HV
stb r9, HSTATE_IN_GUEST(r13)
@@ -1315,7 +1024,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
/* Restore R1/R2 so we can handle faults */
ld r1, HSTATE_HOST_R1(r13)
- ld r2, PACATOC(r13)
+ LOAD_PACA_TOC()
mfspr r10, SPRN_SRR0
mfspr r11, SPRN_SRR1
@@ -1347,7 +1056,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
li r0, MSR_RI
mtmsrd r0, 1
-#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
+#ifdef CONFIG_KVM_BOOK3S_HV_P8_TIMING
addi r3, r9, VCPU_TB_RMINTR
mr r4, r9
bl kvmhv_accumulate_time
@@ -1367,12 +1076,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
11: stw r3,VCPU_HEIR(r9)
/* these are volatile across C function calls */
-#ifdef CONFIG_RELOCATABLE
- ld r3, HSTATE_SCRATCH1(r13)
- mtctr r3
-#else
mfctr r3
-#endif
mfxer r4
std r3, VCPU_CTR(r9)
std r4, VCPU_XER(r9)
@@ -1391,17 +1095,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
cmpwi r12, BOOK3S_INTERRUPT_H_INST_STORAGE
beq kvmppc_hisi
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
- /* For softpatch interrupt, go off and do TM instruction emulation */
- cmpwi r12, BOOK3S_INTERRUPT_HV_SOFTPATCH
- beq kvmppc_tm_emul
-#endif
-
/* See if this is a leftover HDEC interrupt */
cmpwi r12,BOOK3S_INTERRUPT_HV_DECREMENTER
bne 2f
mfspr r3,SPRN_HDEC
- EXTEND_HDEC(r3)
+ extsw r3, r3
cmpdi r3,0
mr r4,r9
bge fast_guest_return
@@ -1413,14 +1111,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
/* Hypervisor doorbell - exit only if host IPI flag set */
cmpwi r12, BOOK3S_INTERRUPT_H_DOORBELL
bne 3f
-BEGIN_FTR_SECTION
- PPC_MSGSYNC
- lwsync
- /* always exit if we're running a nested guest */
- ld r0, VCPU_NESTED(r9)
- cmpdi r0, 0
- bne guest_exit_cont
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
lbz r0, HSTATE_HOST_IPI(r13)
cmpwi r0, 0
beq maybe_reenter_guest
@@ -1445,67 +1135,21 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */
-#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
+#ifdef CONFIG_KVM_BOOK3S_HV_P8_TIMING
addi r3, r9, VCPU_TB_RMEXIT
mr r4, r9
bl kvmhv_accumulate_time
#endif
-#ifdef CONFIG_KVM_XICS
- /* We are exiting, pull the VP from the XIVE */
- lbz r0, VCPU_XIVE_PUSHED(r9)
- cmpwi cr0, r0, 0
- beq 1f
- li r7, TM_SPC_PULL_OS_CTX
- li r6, TM_QW1_OS
- mfmsr r0
- andi. r0, r0, MSR_DR /* in real mode? */
- beq 2f
- ld r10, HSTATE_XIVE_TIMA_VIRT(r13)
- cmpldi cr0, r10, 0
- beq 1f
- /* First load to pull the context, we ignore the value */
- eieio
- lwzx r11, r7, r10
- /* Second load to recover the context state (Words 0 and 1) */
- ldx r11, r6, r10
- b 3f
-2: ld r10, HSTATE_XIVE_TIMA_PHYS(r13)
- cmpldi cr0, r10, 0
- beq 1f
- /* First load to pull the context, we ignore the value */
- eieio
- lwzcix r11, r7, r10
- /* Second load to recover the context state (Words 0 and 1) */
- ldcix r11, r6, r10
-3: std r11, VCPU_XIVE_SAVED_STATE(r9)
- /* Fixup some of the state for the next load */
- li r10, 0
- li r0, 0xff
- stb r10, VCPU_XIVE_PUSHED(r9)
- stb r10, (VCPU_XIVE_SAVED_STATE+3)(r9)
- stb r0, (VCPU_XIVE_SAVED_STATE+4)(r9)
- eieio
-1:
-#endif /* CONFIG_KVM_XICS */
/*
* Possibly flush the link stack here, before we do a blr in
- * guest_exit_short_path.
+ * kvmhv_switch_to_host.
*/
1: nop
patch_site 1b patch__call_kvm_flush_link_stack
- /* If we came in through the P9 short path, go back out to C now */
- lwz r0, STACK_SLOT_SHORT_PATH(r1)
- cmpwi r0, 0
- bne guest_exit_short_path
-
/* For hash guest, read the guest SLB and save it away */
- ld r5, VCPU_KVM(r9)
- lbz r0, KVM_RADIX(r5)
li r5, 0
- cmpwi r0, 0
- bne 3f /* for radix, save 0 entries */
lwz r0,VCPU_SLB_NR(r9) /* number of entries in SLB */
mtctr r0
li r6,0
@@ -1524,14 +1168,11 @@ guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */
/* Finally clear out the SLB */
li r0,0
slbmte r0,r0
- slbia
+ PPC_SLBIA(6)
ptesync
-3: stw r5,VCPU_SLB_MAX(r9)
+ stw r5,VCPU_SLB_MAX(r9)
/* load host SLB entries */
-BEGIN_MMU_FTR_SECTION
- b 0f
-END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
ld r8,PACA_SLBSHADOWPTR(r13)
.rept SLB_NUM_BOLTED
@@ -1544,7 +1185,6 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
slbmte r6,r5
1: addi r8,r8,16
.endr
-0:
guest_bypass:
stw r12, STACK_SLOT_TRAP(r1)
@@ -1554,17 +1194,8 @@ guest_bypass:
ld r3, HSTATE_KVM_VCORE(r13)
mfspr r5,SPRN_DEC
mftb r6
- /* On P9, if the guest has large decr enabled, don't sign extend */
-BEGIN_FTR_SECTION
- ld r4, VCORE_LPCR(r3)
- andis. r4, r4, LPCR_LD@h
- bne 16f
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
extsw r5,r5
16: add r5,r5,r6
- /* r5 is a guest timebase value here, convert to host TB */
- ld r4,VCORE_TB_OFFSET_APPL(r3)
- subf r5,r4,r5
std r5,VCPU_DEC_EXPIRES(r9)
/* Increment exit count, poke other threads to exit */
@@ -1578,12 +1209,12 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
stw r0, VCPU_CPU(r9)
stw r0, VCPU_THREAD_CPU(r9)
- /* Save guest CTRL register, set runlatch to 1 */
+ /* Save guest CTRL register, set runlatch to 1 if it was clear */
mfspr r6,SPRN_CTRLF
stw r6,VCPU_CTRL(r9)
andi. r0,r6,1
bne 4f
- ori r6,r6,1
+ li r6,1
mtspr SPRN_CTRLT,r6
4:
/*
@@ -1633,7 +1264,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
std r6, VCPU_BESCR(r9)
stw r7, VCPU_GUEST_PID(r9)
std r8, VCPU_WORT(r9)
-BEGIN_FTR_SECTION
mfspr r5, SPRN_TCSCR
mfspr r6, SPRN_ACOP
mfspr r7, SPRN_CSIGR
@@ -1642,17 +1272,10 @@ BEGIN_FTR_SECTION
std r6, VCPU_ACOP(r9)
std r7, VCPU_CSIGR(r9)
std r8, VCPU_TACR(r9)
-FTR_SECTION_ELSE
- mfspr r5, SPRN_TIDR
- mfspr r6, SPRN_PSSCR
- std r5, VCPU_TID(r9)
- rldicl r6, r6, 4, 50 /* r6 &= PSSCR_GUEST_VIS */
- rotldi r6, r6, 60
- std r6, VCPU_PSSCR(r9)
- /* Restore host HFSCR value */
- ld r7, STACK_SLOT_HFSCR(r1)
- mtspr SPRN_HFSCR, r7
-ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
+BEGIN_FTR_SECTION
+ ld r5, STACK_SLOT_FSCR(r1)
+ mtspr SPRN_FSCR, r5
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
/*
* Restore various registers to 0, where non-zero values
* set by the guest could disrupt the host.
@@ -1660,13 +1283,11 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
li r0, 0
mtspr SPRN_PSPB, r0
mtspr SPRN_WORT, r0
-BEGIN_FTR_SECTION
mtspr SPRN_TCSCR, r0
/* Set MMCRS to 1<<31 to freeze and disable the SPMC counters */
li r0, 1
sldi r0, r0, 31
mtspr SPRN_MMCRS, r0
-END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
/* Save and restore AMR, IAMR and UAMOR before turning on the MMU */
ld r8, STACK_SLOT_IAMR(r1)
@@ -1723,13 +1344,9 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
bl kvmppc_save_fp
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-/*
- * Branch around the call if both CPU_FTR_TM and
- * CPU_FTR_P9_TM_HV_ASSIST are off.
- */
BEGIN_FTR_SECTION
b 91f
-END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
+END_FTR_SECTION_IFCLR(CPU_FTR_TM)
/*
* NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS (but not CR)
*/
@@ -1765,76 +1382,16 @@ END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
/* Restore host values of some registers */
BEGIN_FTR_SECTION
ld r5, STACK_SLOT_CIABR(r1)
- ld r6, STACK_SLOT_DAWR(r1)
- ld r7, STACK_SLOT_DAWRX(r1)
+ ld r6, STACK_SLOT_DAWR0(r1)
+ ld r7, STACK_SLOT_DAWRX0(r1)
mtspr SPRN_CIABR, r5
/*
* If the DAWR doesn't work, it's ok to write these here as
* this value should always be zero
*/
- mtspr SPRN_DAWR, r6
- mtspr SPRN_DAWRX, r7
+ mtspr SPRN_DAWR0, r6
+ mtspr SPRN_DAWRX0, r7
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
-BEGIN_FTR_SECTION
- ld r5, STACK_SLOT_TID(r1)
- ld r6, STACK_SLOT_PSSCR(r1)
- ld r7, STACK_SLOT_PID(r1)
- mtspr SPRN_TIDR, r5
- mtspr SPRN_PSSCR, r6
- mtspr SPRN_PID, r7
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
-
-#ifdef CONFIG_PPC_RADIX_MMU
- /*
- * Are we running hash or radix ?
- */
- ld r5, VCPU_KVM(r9)
- lbz r0, KVM_RADIX(r5)
- cmpwi cr2, r0, 0
- beq cr2, 2f
-
- /*
- * Radix: do eieio; tlbsync; ptesync sequence in case we
- * interrupted the guest between a tlbie and a ptesync.
- */
- eieio
- tlbsync
- ptesync
-
-BEGIN_FTR_SECTION
- /* Radix: Handle the case where the guest used an illegal PID */
- LOAD_REG_ADDR(r4, mmu_base_pid)
- lwz r3, VCPU_GUEST_PID(r9)
- lwz r5, 0(r4)
- cmpw cr0,r3,r5
- blt 2f
-
- /*
- * Illegal PID, the HW might have prefetched and cached in the TLB
- * some translations for the LPID 0 / guest PID combination which
- * Linux doesn't know about, so we need to flush that PID out of
- * the TLB. First we need to set LPIDR to 0 so tlbiel applies to
- * the right context.
- */
- li r0,0
- mtspr SPRN_LPID,r0
- isync
-
- /* Then do a congruence class local flush */
- ld r6,VCPU_KVM(r9)
- lwz r0,KVM_TLB_SETS(r6)
- mtctr r0
- li r7,0x400 /* IS field = 0b01 */
- ptesync
- sldi r0,r3,32 /* RS has PID */
-1: PPC_TLBIEL(7,0,2,1,1) /* RIC=2, PRS=1, R=1 */
- addi r7,r7,0x1000
- bdnz 1b
- ptesync
-END_FTR_SECTION_IFSET(CPU_FTR_P9_RADIX_PREFETCH_BUG)
-
-2:
-#endif /* CONFIG_PPC_RADIX_MMU */
/*
* POWER7/POWER8 guest -> host partition switch code.
@@ -1871,13 +1428,11 @@ kvmhv_switch_to_host:
/* Primary thread switches back to host partition */
lwz r7,KVM_HOST_LPID(r4)
-BEGIN_FTR_SECTION
ld r6,KVM_HOST_SDR1(r4)
li r8,LPID_RSVD /* switch to reserved LPID */
mtspr SPRN_LPID,r8
ptesync
mtspr SPRN_SDR1,r6 /* switch to host page table */
-END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
mtspr SPRN_LPID,r7
isync
@@ -1936,25 +1491,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
19: lis r8,0x7fff /* MAX_INT@h */
mtspr SPRN_HDEC,r8
-16:
-BEGIN_FTR_SECTION
- /* On POWER9 with HPT-on-radix we need to wait for all other threads */
- ld r3, HSTATE_SPLIT_MODE(r13)
- cmpdi r3, 0
- beq 47f
- lwz r8, KVM_SPLIT_DO_RESTORE(r3)
- cmpwi r8, 0
- beq 47f
- bl kvmhv_p9_restore_lpcr
- nop
- b 48f
-47:
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
- ld r8,KVM_HOST_LPCR(r4)
+16: ld r8,KVM_HOST_LPCR(r4)
mtspr SPRN_LPCR,r8
isync
-48:
-#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
+
+#ifdef CONFIG_KVM_BOOK3S_HV_P8_TIMING
/* Finish timing, if we have a vcpu */
ld r4, HSTATE_KVM_VCPU(r13)
cmpdi r4, 0
@@ -2056,42 +1597,6 @@ maybe_reenter_guest:
blt deliver_guest_interrupt
b guest_exit_cont
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-/*
- * Softpatch interrupt for transactional memory emulation cases
- * on POWER9 DD2.2. This is early in the guest exit path - we
- * haven't saved registers or done a treclaim yet.
- */
-kvmppc_tm_emul:
- /* Save instruction image in HEIR */
- mfspr r3, SPRN_HEIR
- stw r3, VCPU_HEIR(r9)
-
- /*
- * The cases we want to handle here are those where the guest
- * is in real suspend mode and is trying to transition to
- * transactional mode.
- */
- lbz r0, HSTATE_FAKE_SUSPEND(r13)
- cmpwi r0, 0 /* keep exiting guest if in fake suspend */
- bne guest_exit_cont
- rldicl r3, r11, 64 - MSR_TS_S_LG, 62
- cmpwi r3, 1 /* or if not in suspend state */
- bne guest_exit_cont
-
- /* Call C code to do the emulation */
- mr r3, r9
- bl kvmhv_p9_tm_emulation_early
- nop
- ld r9, HSTATE_KVM_VCPU(r13)
- li r12, BOOK3S_INTERRUPT_HV_SOFTPATCH
- cmpwi r3, 0
- beq guest_exit_cont /* continue exiting if not handled */
- ld r10, VCPU_PC(r9)
- ld r11, VCPU_MSR(r9)
- b fast_interrupt_c_return /* go back to guest if handled */
-#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
-
/*
* Check whether an HDSI is an HPTE not found fault or something else.
* If it is an HPTE not found fault that is due to the guest accessing
@@ -2100,26 +1605,13 @@ kvmppc_tm_emul:
* reflect the HDSI to the guest as a DSI.
*/
kvmppc_hdsi:
- ld r3, VCPU_KVM(r9)
- lbz r0, KVM_RADIX(r3)
mfspr r4, SPRN_HDAR
mfspr r6, SPRN_HDSISR
-BEGIN_FTR_SECTION
- /* Look for DSISR canary. If we find it, retry instruction */
- cmpdi r6, 0x7fff
- beq 6f
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
- cmpwi r0, 0
- bne .Lradix_hdsi /* on radix, just save DAR/DSISR/ASDR */
/* HPTE not found fault or protection fault? */
andis. r0, r6, (DSISR_NOHPTE | DSISR_PROTFAULT)@h
beq 1f /* if not, send it to the guest */
andi. r0, r11, MSR_DR /* data relocation enabled? */
beq 3f
-BEGIN_FTR_SECTION
- mfspr r5, SPRN_ASDR /* on POWER9, use ASDR to get VSID */
- b 4f
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
clrrdi r0, r4, 28
PPC_SLBFEE_DOT(R5, R0) /* if so, look up SLB */
li r0, BOOK3S_INTERRUPT_DATA_SEGMENT
@@ -2187,31 +1679,15 @@ fast_interrupt_c_return:
stb r0, HSTATE_IN_GUEST(r13)
b guest_exit_cont
-.Lradix_hdsi:
- std r4, VCPU_FAULT_DAR(r9)
- stw r6, VCPU_FAULT_DSISR(r9)
-.Lradix_hisi:
- mfspr r5, SPRN_ASDR
- std r5, VCPU_FAULT_GPA(r9)
- b guest_exit_cont
-
/*
* Similarly for an HISI, reflect it to the guest as an ISI unless
* it is an HPTE not found fault for a page that we have paged out.
*/
kvmppc_hisi:
- ld r3, VCPU_KVM(r9)
- lbz r0, KVM_RADIX(r3)
- cmpwi r0, 0
- bne .Lradix_hisi /* for radix, just save ASDR */
andis. r0, r11, SRR1_ISI_NOPT@h
beq 1f
andi. r0, r11, MSR_IR /* instruction relocation enabled? */
beq 3f
-BEGIN_FTR_SECTION
- mfspr r5, SPRN_ASDR /* on POWER9, use ASDR to get VSID */
- b 4f
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
clrrdi r0, r10, 28
PPC_SLBFEE_DOT(R5, R0) /* if so, look up SLB */
li r0, BOOK3S_INTERRUPT_INST_SEGMENT
@@ -2259,10 +1735,6 @@ hcall_try_real_mode:
andi. r0,r11,MSR_PR
/* sc 1 from userspace - reflect to guest syscall */
bne sc_1_fast_return
- /* sc 1 from nested guest - give it to L1 to handle */
- ld r0, VCPU_NESTED(r9)
- cmpdi r0, 0
- bne guest_exit_cont
clrrdi r3,r3,2
cmpldi r3,hcall_real_table_end - hcall_real_table
bge guest_exit_cont
@@ -2320,13 +1792,8 @@ hcall_real_table:
.long DOTSYM(kvmppc_h_clear_mod) - hcall_real_table
.long DOTSYM(kvmppc_h_clear_ref) - hcall_real_table
.long DOTSYM(kvmppc_h_protect) - hcall_real_table
-#ifdef CONFIG_SPAPR_TCE_IOMMU
- .long DOTSYM(kvmppc_h_get_tce) - hcall_real_table
- .long DOTSYM(kvmppc_rm_h_put_tce) - hcall_real_table
-#else
.long 0 /* 0x1c */
.long 0 /* 0x20 */
-#endif
.long 0 /* 0x24 - H_SET_SPRG0 */
.long DOTSYM(kvmppc_h_set_dabr) - hcall_real_table
.long DOTSYM(kvmppc_rm_h_page_init) - hcall_real_table
@@ -2344,11 +1811,11 @@ hcall_real_table:
.long 0 /* 0x5c */
.long 0 /* 0x60 */
#ifdef CONFIG_KVM_XICS
- .long DOTSYM(kvmppc_rm_h_eoi) - hcall_real_table
- .long DOTSYM(kvmppc_rm_h_cppr) - hcall_real_table
- .long DOTSYM(kvmppc_rm_h_ipi) - hcall_real_table
- .long DOTSYM(kvmppc_rm_h_ipoll) - hcall_real_table
- .long DOTSYM(kvmppc_rm_h_xirr) - hcall_real_table
+ .long DOTSYM(xics_rm_h_eoi) - hcall_real_table
+ .long DOTSYM(xics_rm_h_cppr) - hcall_real_table
+ .long DOTSYM(xics_rm_h_ipi) - hcall_real_table
+ .long 0 /* 0x70 - H_IPOLL */
+ .long DOTSYM(xics_rm_h_xirr) - hcall_real_table
#else
.long 0 /* 0x64 - H_EOI */
.long 0 /* 0x68 - H_CPPR */
@@ -2404,13 +1871,8 @@ hcall_real_table:
.long 0 /* 0x12c */
.long 0 /* 0x130 */
.long DOTSYM(kvmppc_h_set_xdabr) - hcall_real_table
-#ifdef CONFIG_SPAPR_TCE_IOMMU
- .long DOTSYM(kvmppc_rm_h_stuff_tce) - hcall_real_table
- .long DOTSYM(kvmppc_rm_h_put_tce_indirect) - hcall_real_table
-#else
.long 0 /* 0x138 */
.long 0 /* 0x13c */
-#endif
.long 0 /* 0x140 */
.long 0 /* 0x144 */
.long 0 /* 0x148 */
@@ -2523,15 +1985,15 @@ hcall_real_table:
.long 0 /* 0x2f4 */
.long 0 /* 0x2f8 */
#ifdef CONFIG_KVM_XICS
- .long DOTSYM(kvmppc_rm_h_xirr_x) - hcall_real_table
+ .long DOTSYM(xics_rm_h_xirr_x) - hcall_real_table
#else
.long 0 /* 0x2fc - H_XIRR_X*/
#endif
- .long DOTSYM(kvmppc_h_random) - hcall_real_table
+ .long DOTSYM(kvmppc_rm_h_random) - hcall_real_table
.globl hcall_real_table_end
hcall_real_table_end:
-_GLOBAL(kvmppc_h_set_xdabr)
+_GLOBAL_TOC(kvmppc_h_set_xdabr)
EXPORT_SYMBOL_GPL(kvmppc_h_set_xdabr)
andi. r0, r5, DABRX_USER | DABRX_KERNEL
beq 6f
@@ -2541,7 +2003,7 @@ EXPORT_SYMBOL_GPL(kvmppc_h_set_xdabr)
6: li r3, H_PARAMETER
blr
-_GLOBAL(kvmppc_h_set_dabr)
+_GLOBAL_TOC(kvmppc_h_set_dabr)
EXPORT_SYMBOL_GPL(kvmppc_h_set_dabr)
li r5, DABRX_USER | DABRX_KERNEL
3:
@@ -2572,8 +2034,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
rlwimi r5, r4, 5, DAWRX_DR | DAWRX_DW
rlwimi r5, r4, 2, DAWRX_WT
clrrdi r4, r4, 3
- std r4, VCPU_DAWR(r3)
- std r5, VCPU_DAWRX(r3)
+ std r4, VCPU_DAWR0(r3)
+ std r5, VCPU_DAWRX0(r3)
/*
* If came in through the real mode hcall handler then it is necessary
* to write the registers since the return path won't. Otherwise it is
@@ -2583,8 +2045,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
mfmsr r6
andi. r6, r6, MSR_DR /* in real mode? */
bne 4f
- mtspr SPRN_DAWR, r4
- mtspr SPRN_DAWRX, r5
+ mtspr SPRN_DAWR0, r4
+ mtspr SPRN_DAWRX0, r5
4: li r3, 0
blr
@@ -2658,13 +2120,9 @@ _GLOBAL(kvmppc_h_cede) /* r3 = vcpu pointer, r11 = msr, r13 = paca */
bl kvmppc_save_fp
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-/*
- * Branch around the call if both CPU_FTR_TM and
- * CPU_FTR_P9_TM_HV_ASSIST are off.
- */
BEGIN_FTR_SECTION
b 91f
-END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
+END_FTR_SECTION_IFCLR(CPU_FTR_TM)
/*
* NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS (but not CR)
*/
@@ -2684,15 +2142,8 @@ END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
mfspr r3, SPRN_DEC
mfspr r4, SPRN_HDEC
mftb r5
-BEGIN_FTR_SECTION
- /* On P9 check whether the guest has large decrementer mode enabled */
- ld r6, HSTATE_KVM_VCORE(r13)
- ld r6, VCORE_LPCR(r6)
- andis. r6, r6, LPCR_LD@h
- bne 68f
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
extsw r3, r3
-68: EXTEND_HDEC(r4)
+ extsw r4, r4
cmpd r3, r4
ble 67f
mtspr SPRN_DEC, r4
@@ -2700,12 +2151,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
/* save expiry time of guest decrementer */
add r3, r3, r5
ld r4, HSTATE_KVM_VCPU(r13)
- ld r5, HSTATE_KVM_VCORE(r13)
- ld r6, VCORE_TB_OFFSET_APPL(r5)
- subf r3, r6, r3 /* convert to host TB value */
std r3, VCPU_DEC_EXPIRES(r4)
-#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
+#ifdef CONFIG_KVM_BOOK3S_HV_P8_TIMING
ld r4, HSTATE_KVM_VCPU(r13)
addi r3, r4, VCPU_TB_CEDE
bl kvmhv_accumulate_time
@@ -2723,8 +2171,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
* Also clear the runlatch bit before napping.
*/
kvm_do_nap:
- mfspr r0, SPRN_CTRLF
- clrrdi r0, r0, 1
+ li r0,0
mtspr SPRN_CTRLT, r0
li r0,1
@@ -2737,31 +2184,13 @@ BEGIN_FTR_SECTION
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
kvm_nap_sequence: /* desired LPCR value in r5 */
-BEGIN_FTR_SECTION
- /*
- * PSSCR bits: exit criterion = 1 (wakeup based on LPCR at sreset)
- * enable state loss = 1 (allow SMT mode switch)
- * requested level = 0 (just stop dispatching)
- */
- lis r3, (PSSCR_EC | PSSCR_ESL)@h
- /* Set LPCR_PECE_HVEE bit to enable wakeup by HV interrupts */
- li r4, LPCR_PECE_HVEE@higher
- sldi r4, r4, 32
- or r5, r5, r4
-FTR_SECTION_ELSE
li r3, PNV_THREAD_NAP
-ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_300)
mtspr SPRN_LPCR,r5
isync
-BEGIN_FTR_SECTION
- bl isa300_idle_stop_mayloss
-FTR_SECTION_ELSE
bl isa206_idle_insn_mayloss
-ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_300)
- mfspr r0, SPRN_CTRLF
- ori r0, r0, 1
+ li r0,1
mtspr SPRN_CTRLT, r0
mtspr SPRN_SRR1, r3
@@ -2792,19 +2221,15 @@ kvm_end_cede:
/* get vcpu pointer */
ld r4, HSTATE_KVM_VCPU(r13)
-#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
+#ifdef CONFIG_KVM_BOOK3S_HV_P8_TIMING
addi r3, r4, VCPU_TB_RMINTR
bl kvmhv_accumulate_time
#endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-/*
- * Branch around the call if both CPU_FTR_TM and
- * CPU_FTR_P9_TM_HV_ASSIST are off.
- */
BEGIN_FTR_SECTION
b 91f
-END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
+END_FTR_SECTION_IFCLR(CPU_FTR_TM)
/*
* NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS (but not CR)
*/
@@ -2822,9 +2247,6 @@ END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
/* Restore guest decrementer */
ld r3, VCPU_DEC_EXPIRES(r4)
- ld r5, HSTATE_KVM_VCORE(r13)
- ld r6, VCORE_TB_OFFSET_APPL(r5)
- add r3, r3, r6 /* convert host TB to guest TB value */
mftb r7
subf r3, r7, r3
mtspr SPRN_DEC, r3
@@ -2894,42 +2316,7 @@ kvm_cede_prodded:
/* we've ceded but we want to give control to the host */
kvm_cede_exit:
ld r9, HSTATE_KVM_VCPU(r13)
-#ifdef CONFIG_KVM_XICS
- /* are we using XIVE with single escalation? */
- ld r10, VCPU_XIVE_ESC_VADDR(r9)
- cmpdi r10, 0
- beq 3f
- li r6, XIVE_ESB_SET_PQ_00
- /*
- * If we still have a pending escalation, abort the cede,
- * and we must set PQ to 10 rather than 00 so that we don't
- * potentially end up with two entries for the escalation
- * interrupt in the XIVE interrupt queue. In that case
- * we also don't want to set xive_esc_on to 1 here in
- * case we race with xive_esc_irq().
- */
- lbz r5, VCPU_XIVE_ESC_ON(r9)
- cmpwi r5, 0
- beq 4f
- li r0, 0
- stb r0, VCPU_CEDED(r9)
- li r6, XIVE_ESB_SET_PQ_10
- b 5f
-4: li r0, 1
- stb r0, VCPU_XIVE_ESC_ON(r9)
- /* make sure store to xive_esc_on is seen before xive_esc_irq runs */
- sync
-5: /* Enable XIVE escalation */
- mfmsr r0
- andi. r0, r0, MSR_DR /* in real mode? */
- beq 1f
- ldx r0, r10, r6
- b 2f
-1: ld r10, VCPU_XIVE_ESC_RADDR(r9)
- ldcix r0, r10, r6
-2: sync
-#endif /* CONFIG_KVM_XICS */
-3: b guest_exit_cont
+ b guest_exit_cont
/* Try to do machine check recovery in real mode */
machine_check_realmode:
@@ -3006,10 +2393,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
PPC_MSGCLR(6)
/* see if it's a host IPI */
li r3, 1
-BEGIN_FTR_SECTION
- PPC_MSGSYNC
- lwsync
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
lbz r0, HSTATE_HOST_IPI(r13)
cmpwi r0, 0
bnelr
@@ -3139,7 +2522,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_P9_TM_HV_ASSIST)
/* The following code handles the fake_suspend = 1 case */
mflr r0
std r0, PPC_LR_STKOFF(r1)
- stdu r1, -PPC_MIN_STKFRM(r1)
+ stdu r1, -TM_FRAME_SIZE(r1)
/* Turn on TM. */
mfmsr r8
@@ -3154,10 +2537,42 @@ BEGIN_FTR_SECTION
END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_XER_SO_BUG)
nop
+ /*
+ * It's possible that treclaim. may modify registers, if we have lost
+ * track of fake-suspend state in the guest due to it using rfscv.
+ * Save and restore registers in case this occurs.
+ */
+ mfspr r3, SPRN_DSCR
+ mfspr r4, SPRN_XER
+ mfspr r5, SPRN_AMR
+ /* SPRN_TAR would need to be saved here if the kernel ever used it */
+ mfcr r12
+ SAVE_NVGPRS(r1)
+ SAVE_GPR(2, r1)
+ SAVE_GPR(3, r1)
+ SAVE_GPR(4, r1)
+ SAVE_GPR(5, r1)
+ stw r12, 8(r1)
+ std r1, HSTATE_HOST_R1(r13)
+
/* We have to treclaim here because that's the only way to do S->N */
li r3, TM_CAUSE_KVM_RESCHED
TRECLAIM(R3)
+ GET_PACA(r13)
+ ld r1, HSTATE_HOST_R1(r13)
+ REST_GPR(2, r1)
+ REST_GPR(3, r1)
+ REST_GPR(4, r1)
+ REST_GPR(5, r1)
+ lwz r12, 8(r1)
+ REST_NVGPRS(r1)
+ mtspr SPRN_DSCR, r3
+ mtspr SPRN_XER, r4
+ mtspr SPRN_AMR, r5
+ mtcr r12
+ HMT_MEDIUM
+
/*
* We were in fake suspend, so we are not going to save the
* register state as the guest checkpointed state (since
@@ -3185,7 +2600,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_XER_SO_BUG)
std r5, VCPU_TFHAR(r9)
std r6, VCPU_TFIAR(r9)
- addi r1, r1, PPC_MIN_STKFRM
+ addi r1, r1, TM_FRAME_SIZE
ld r0, PPC_LR_STKOFF(r1)
mtlr r0
blr
@@ -3258,7 +2673,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_P9_TM_HV_ASSIST)
* r12 is (CR << 32) | vector
* r13 points to our PACA
* r12 is saved in HSTATE_SCRATCH0(r13)
- * ctr is saved in HSTATE_SCRATCH1(r13) if RELOCATABLE
* r9 is saved in HSTATE_SCRATCH2(r13)
* r13 is saved in HSPRG1
* cfar is saved in HSTATE_CFAR(r13)
@@ -3277,8 +2691,7 @@ kvmppc_bad_host_intr:
std r0, GPR0(r1)
std r9, GPR1(r1)
std r2, GPR2(r1)
- SAVE_4GPRS(3, r1)
- SAVE_2GPRS(7, r1)
+ SAVE_GPRS(3, 8, r1)
srdi r0, r12, 32
clrldi r12, r12, 32
std r0, _CCR(r1)
@@ -3301,90 +2714,30 @@ kvmppc_bad_host_intr:
ld r9, HSTATE_SCRATCH2(r13)
ld r12, HSTATE_SCRATCH0(r13)
GET_SCRATCH0(r0)
- SAVE_4GPRS(9, r1)
+ SAVE_GPRS(9, 12, r1)
std r0, GPR13(r1)
SAVE_NVGPRS(r1)
ld r5, HSTATE_CFAR(r13)
std r5, ORIG_GPR3(r1)
mflr r3
-#ifdef CONFIG_RELOCATABLE
- ld r4, HSTATE_SCRATCH1(r13)
-#else
mfctr r4
-#endif
mfxer r5
lbz r6, PACAIRQSOFTMASK(r13)
std r3, _LINK(r1)
std r4, _CTR(r1)
std r5, _XER(r1)
std r6, SOFTE(r1)
- ld r2, PACATOC(r13)
- LOAD_REG_IMMEDIATE(3, 0x7265677368657265)
+ LOAD_PACA_TOC()
+ LOAD_REG_IMMEDIATE(3, STACK_FRAME_REGS_MARKER)
std r3, STACK_FRAME_OVERHEAD-16(r1)
/*
- * On POWER9 do a minimal restore of the MMU and call C code,
- * which will print a message and panic.
* XXX On POWER7 and POWER8, we just spin here since we don't
* know what the other threads are doing (and we don't want to
* coordinate with them) - but at least we now have register state
* in memory that we might be able to look at from another CPU.
*/
-BEGIN_FTR_SECTION
b .
-END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
- ld r9, HSTATE_KVM_VCPU(r13)
- ld r10, VCPU_KVM(r9)
-
- li r0, 0
- mtspr SPRN_AMR, r0
- mtspr SPRN_IAMR, r0
- mtspr SPRN_CIABR, r0
- mtspr SPRN_DAWRX, r0
-
-BEGIN_MMU_FTR_SECTION
- b 4f
-END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
-
- slbmte r0, r0
- slbia
- ptesync
- ld r8, PACA_SLBSHADOWPTR(r13)
- .rept SLB_NUM_BOLTED
- li r3, SLBSHADOW_SAVEAREA
- LDX_BE r5, r8, r3
- addi r3, r3, 8
- LDX_BE r6, r8, r3
- andis. r7, r5, SLB_ESID_V@h
- beq 3f
- slbmte r6, r5
-3: addi r8, r8, 16
- .endr
-
-4: lwz r7, KVM_HOST_LPID(r10)
- mtspr SPRN_LPID, r7
- mtspr SPRN_PID, r0
- ld r8, KVM_HOST_LPCR(r10)
- mtspr SPRN_LPCR, r8
- isync
- li r0, KVM_GUEST_MODE_NONE
- stb r0, HSTATE_IN_GUEST(r13)
-
- /*
- * Turn on the MMU and jump to C code
- */
- bcl 20, 31, .+4
-5: mflr r3
- addi r3, r3, 9f - 5b
- li r4, -1
- rldimi r3, r4, 62, 0 /* ensure 0xc000000000000000 bits are set */
- ld r4, PACAKMSR(r13)
- mtspr SPRN_SRR0, r3
- mtspr SPRN_SRR1, r4
- RFI_TO_KERNEL
-9: addi r3, r1, STACK_FRAME_OVERHEAD
- bl kvmppc_bad_interrupt
- b 9b
/*
* This mimics the MSR transition on IRQ delivery. The new guest MSR is taken
@@ -3404,10 +2757,11 @@ kvmppc_msr_interrupt:
blr
/*
+ * void kvmhv_load_guest_pmu(struct kvm_vcpu *vcpu)
+ *
* Load up guest PMU state. R3 points to the vcpu struct.
*/
-_GLOBAL(kvmhv_load_guest_pmu)
-EXPORT_SYMBOL_GPL(kvmhv_load_guest_pmu)
+kvmhv_load_guest_pmu:
mr r4, r3
mflr r0
li r3, 1
@@ -3434,7 +2788,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
mtspr SPRN_PMC6, r9
ld r3, VCPU_MMCR(r4)
ld r5, VCPU_MMCR + 8(r4)
- ld r6, VCPU_MMCR + 16(r4)
+ ld r6, VCPU_MMCRA(r4)
ld r7, VCPU_SIAR(r4)
ld r8, VCPU_SDAR(r4)
mtspr SPRN_MMCR1, r5
@@ -3442,18 +2796,16 @@ END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
mtspr SPRN_SIAR, r7
mtspr SPRN_SDAR, r8
BEGIN_FTR_SECTION
- ld r5, VCPU_MMCR + 24(r4)
+ ld r5, VCPU_MMCR + 16(r4)
ld r6, VCPU_SIER(r4)
mtspr SPRN_MMCR2, r5
mtspr SPRN_SIER, r6
-BEGIN_FTR_SECTION_NESTED(96)
lwz r7, VCPU_PMC + 24(r4)
lwz r8, VCPU_PMC + 28(r4)
- ld r9, VCPU_MMCR + 32(r4)
+ ld r9, VCPU_MMCRS(r4)
mtspr SPRN_SPMC1, r7
mtspr SPRN_SPMC2, r8
mtspr SPRN_MMCRS, r9
-END_FTR_SECTION_NESTED(CPU_FTR_ARCH_300, 0, 96)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
mtspr SPRN_MMCR0, r3
isync
@@ -3461,10 +2813,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
blr
/*
+ * void kvmhv_load_host_pmu(void)
+ *
* Reload host PMU state saved in the PACA by kvmhv_save_host_pmu.
*/
-_GLOBAL(kvmhv_load_host_pmu)
-EXPORT_SYMBOL_GPL(kvmhv_load_host_pmu)
+kvmhv_load_host_pmu:
mflr r0
lbz r4, PACA_PMCINUSE(r13) /* is the host using the PMU? */
cmpwi r4, 0
@@ -3508,11 +2861,12 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
23: blr
/*
+ * void kvmhv_save_guest_pmu(struct kvm_vcpu *vcpu, bool pmu_in_use)
+ *
* Save guest PMU state into the vcpu struct.
* r3 = vcpu, r4 = full save flag (PMU in use flag set in VPA)
*/
-_GLOBAL(kvmhv_save_guest_pmu)
-EXPORT_SYMBOL_GPL(kvmhv_save_guest_pmu)
+kvmhv_save_guest_pmu:
mr r9, r3
mr r8, r4
BEGIN_FTR_SECTION
@@ -3557,9 +2911,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
mfspr r8, SPRN_SDAR
std r4, VCPU_MMCR(r9)
std r5, VCPU_MMCR + 8(r9)
- std r6, VCPU_MMCR + 16(r9)
+ std r6, VCPU_MMCRA(r9)
BEGIN_FTR_SECTION
- std r10, VCPU_MMCR + 24(r9)
+ std r10, VCPU_MMCR + 16(r9)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
std r7, VCPU_SIAR(r9)
std r8, VCPU_SDAR(r9)
@@ -3578,16 +2932,14 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
BEGIN_FTR_SECTION
mfspr r5, SPRN_SIER
std r5, VCPU_SIER(r9)
-BEGIN_FTR_SECTION_NESTED(96)
mfspr r6, SPRN_SPMC1
mfspr r7, SPRN_SPMC2
mfspr r8, SPRN_MMCRS
stw r6, VCPU_PMC + 24(r9)
stw r7, VCPU_PMC + 28(r9)
- std r8, VCPU_MMCR + 32(r9)
+ std r8, VCPU_MMCRS(r9)
lis r4, 0x8000
mtspr SPRN_MMCRS, r4
-END_FTR_SECTION_NESTED(CPU_FTR_ARCH_300, 0, 96)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
22: blr
@@ -3609,7 +2961,7 @@ kvmppc_fix_pmao:
isync
blr
-#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
+#ifdef CONFIG_KVM_BOOK3S_HV_P8_TIMING
/*
* Start timing an activity
* r3 = pointer to time accumulation struct, r4 = vcpu
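The hcall_real_table earlier in this file's diff holds one 32-bit offset per hypercall number (H_* numbers step by 4, so the hcall number doubles as the byte offset of a .long entry), with 0 meaning no real-mode handler, as in the new ".long 0 /* 0x1c */" and "/* 0x70 - H_IPOLL */" slots. The real lookup is done in assembly in hcall_try_real_mode; the C fragment below is only a rough model of that bounds-check-and-index logic, with made-up offsets and a hypothetical hcall_real_lookup() name.

/* Rough, standalone model of the hcall_real_table lookup (offsets are dummies). */
#include <stdint.h>
#include <stdio.h>

/* Offsets from the table base; 0 means "no real-mode handler". */
static const int32_t hcall_real_table[] = {
	[0x04 / 4] = 0x100,	/* e.g. H_REMOVE -> kvmppc_h_remove (dummy offset) */
	[0x08 / 4] = 0x180,	/* e.g. H_ENTER  -> kvmppc_h_enter  (dummy offset) */
	[0x1c / 4] = 0,		/* 0x1c: no real-mode handler after this patch */
};

/* Returns the handler offset, or -1 if the hcall must go up to the host. */
static long hcall_real_lookup(unsigned long hcall_nr)
{
	unsigned long off = hcall_nr & ~3UL;		/* clrrdi r3,r3,2 */

	if (off >= sizeof(hcall_real_table))		/* cmpldi r3,table_end-table */
		return -1;
	if (hcall_real_table[off / 4] == 0)		/* .long 0 = unimplemented */
		return -1;
	return hcall_real_table[off / 4];
}

int main(void)
{
	printf("%ld %ld %ld\n", hcall_real_lookup(0x04),
	       hcall_real_lookup(0x1c), hcall_real_lookup(0x300));
	return 0;
}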
diff --git a/arch/powerpc/kvm/book3s_hv_tm.c b/arch/powerpc/kvm/book3s_hv_tm.c
index 0db937497169..866cadd70094 100644
--- a/arch/powerpc/kvm/book3s_hv_tm.c
+++ b/arch/powerpc/kvm/book3s_hv_tm.c
@@ -3,6 +3,8 @@
* Copyright 2017 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kvm_host.h>
#include <asm/kvm_ppc.h>
@@ -44,7 +46,27 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
u64 newmsr, bescr;
int ra, rs;
- switch (instr & 0xfc0007ff) {
+ /*
+ * The TM softpatch interrupt sets NIP to the instruction following
+ * the faulting instruction, which is not executed. Rewind nip to the
+ * faulting instruction so it looks like a normal synchronous
+ * interrupt, then update nip in the places where the instruction is
+ * emulated.
+ */
+ vcpu->arch.regs.nip -= 4;
+
+ /*
+ * rfid, rfebb, and mtmsrd encode bit 31 = 0 since it's a reserved bit
+ * in these instructions, so masking bit 31 out doesn't change these
+ * instructions. For the treclaim., tsr., and trechkpt. instructions,
+ * bit 31 = 0 is an invalid form per the ISA; however, the P9 UM
+ * (section 4.6.10, Book II Invalid Forms) states that ignoring bit 31
+ * is an acceptable way to handle these invalid forms. Moreover, for
+ * emulation purposes both forms (with and without bit 31 set) can
+ * generate a softpatch interrupt. Hence both forms are handled below
+ * for these instructions so they behave the same way.
+ */
+ switch (instr & PO_XOP_OPCODE_MASK) {
case PPC_INST_RFID:
/* XXX do we need to check for PR=0 here? */
newmsr = vcpu->arch.shregs.srr1;
@@ -54,7 +76,7 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
(newmsr & MSR_TM)));
newmsr = sanitize_msr(newmsr);
vcpu->arch.shregs.msr = newmsr;
- vcpu->arch.cfar = vcpu->arch.regs.nip - 4;
+ vcpu->arch.cfar = vcpu->arch.regs.nip;
vcpu->arch.regs.nip = vcpu->arch.shregs.srr0;
return RESUME_GUEST;
@@ -66,14 +88,15 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
}
/* check EBB facility is available */
if (!(vcpu->arch.hfscr & HFSCR_EBB)) {
- /* generate an illegal instruction interrupt */
- kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
- return RESUME_GUEST;
+ vcpu->arch.hfscr &= ~HFSCR_INTR_CAUSE;
+ vcpu->arch.hfscr |= (u64)FSCR_EBB_LG << 56;
+ vcpu->arch.trap = BOOK3S_INTERRUPT_H_FAC_UNAVAIL;
+ return -1; /* rerun host interrupt handler */
}
if ((msr & MSR_PR) && !(vcpu->arch.fscr & FSCR_EBB)) {
/* generate a facility unavailable interrupt */
- vcpu->arch.fscr = (vcpu->arch.fscr & ~(0xffull << 56)) |
- ((u64)FSCR_EBB_LG << 56);
+ vcpu->arch.fscr &= ~FSCR_INTR_CAUSE;
+ vcpu->arch.fscr |= (u64)FSCR_EBB_LG << 56;
kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_FAC_UNAVAIL);
return RESUME_GUEST;
}
@@ -87,7 +110,7 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
vcpu->arch.bescr = bescr;
msr = (msr & ~MSR_TS_MASK) | MSR_TS_T;
vcpu->arch.shregs.msr = msr;
- vcpu->arch.cfar = vcpu->arch.regs.nip - 4;
+ vcpu->arch.cfar = vcpu->arch.regs.nip;
vcpu->arch.regs.nip = vcpu->arch.ebbrr;
return RESUME_GUEST;
@@ -103,9 +126,11 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
newmsr = (newmsr & ~MSR_LE) | (msr & MSR_LE);
newmsr = sanitize_msr(newmsr);
vcpu->arch.shregs.msr = newmsr;
+ vcpu->arch.regs.nip += 4;
return RESUME_GUEST;
- case PPC_INST_TSR:
+ /* ignore bit 31, see comment above */
+ case (PPC_INST_TSR & PO_XOP_OPCODE_MASK):
/* check for PR=1 and arch 2.06 bit set in PCR */
if ((msr & MSR_PR) && (vcpu->arch.vcore->pcr & PCR_ARCH_206)) {
/* generate an illegal instruction interrupt */
@@ -114,14 +139,15 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
}
/* check for TM disabled in the HFSCR or MSR */
if (!(vcpu->arch.hfscr & HFSCR_TM)) {
- /* generate an illegal instruction interrupt */
- kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
- return RESUME_GUEST;
+ vcpu->arch.hfscr &= ~HFSCR_INTR_CAUSE;
+ vcpu->arch.hfscr |= (u64)FSCR_TM_LG << 56;
+ vcpu->arch.trap = BOOK3S_INTERRUPT_H_FAC_UNAVAIL;
+ return -1; /* rerun host interrupt handler */
}
if (!(msr & MSR_TM)) {
/* generate a facility unavailable interrupt */
- vcpu->arch.fscr = (vcpu->arch.fscr & ~(0xffull << 56)) |
- ((u64)FSCR_TM_LG << 56);
+ vcpu->arch.fscr &= ~FSCR_INTR_CAUSE;
+ vcpu->arch.fscr |= (u64)FSCR_TM_LG << 56;
kvmppc_book3s_queue_irqprio(vcpu,
BOOK3S_INTERRUPT_FAC_UNAVAIL);
return RESUME_GUEST;
@@ -138,19 +164,22 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
msr = (msr & ~MSR_TS_MASK) | MSR_TS_S;
}
vcpu->arch.shregs.msr = msr;
+ vcpu->arch.regs.nip += 4;
return RESUME_GUEST;
- case PPC_INST_TRECLAIM:
+ /* ignore bit 31, see comment above */
+ case (PPC_INST_TRECLAIM & PO_XOP_OPCODE_MASK):
/* check for TM disabled in the HFSCR or MSR */
if (!(vcpu->arch.hfscr & HFSCR_TM)) {
- /* generate an illegal instruction interrupt */
- kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
- return RESUME_GUEST;
+ vcpu->arch.hfscr &= ~HFSCR_INTR_CAUSE;
+ vcpu->arch.hfscr |= (u64)FSCR_TM_LG << 56;
+ vcpu->arch.trap = BOOK3S_INTERRUPT_H_FAC_UNAVAIL;
+ return -1; /* rerun host interrupt handler */
}
if (!(msr & MSR_TM)) {
/* generate a facility unavailable interrupt */
- vcpu->arch.fscr = (vcpu->arch.fscr & ~(0xffull << 56)) |
- ((u64)FSCR_TM_LG << 56);
+ vcpu->arch.fscr &= ~FSCR_INTR_CAUSE;
+ vcpu->arch.fscr |= (u64)FSCR_TM_LG << 56;
kvmppc_book3s_queue_irqprio(vcpu,
BOOK3S_INTERRUPT_FAC_UNAVAIL);
return RESUME_GUEST;
@@ -174,20 +203,23 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) |
(((msr & MSR_TS_MASK) >> MSR_TS_S_LG) << 29);
vcpu->arch.shregs.msr &= ~MSR_TS_MASK;
+ vcpu->arch.regs.nip += 4;
return RESUME_GUEST;
- case PPC_INST_TRECHKPT:
+ /* ignore bit 31, see comment above */
+ case (PPC_INST_TRECHKPT & PO_XOP_OPCODE_MASK):
/* XXX do we need to check for PR=0 here? */
/* check for TM disabled in the HFSCR or MSR */
if (!(vcpu->arch.hfscr & HFSCR_TM)) {
- /* generate an illegal instruction interrupt */
- kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
- return RESUME_GUEST;
+ vcpu->arch.hfscr &= ~HFSCR_INTR_CAUSE;
+ vcpu->arch.hfscr |= (u64)FSCR_TM_LG << 56;
+ vcpu->arch.trap = BOOK3S_INTERRUPT_H_FAC_UNAVAIL;
+ return -1; /* rerun host interrupt handler */
}
if (!(msr & MSR_TM)) {
/* generate a facility unavailable interrupt */
- vcpu->arch.fscr = (vcpu->arch.fscr & ~(0xffull << 56)) |
- ((u64)FSCR_TM_LG << 56);
+ vcpu->arch.fscr &= ~FSCR_INTR_CAUSE;
+ vcpu->arch.fscr |= (u64)FSCR_TM_LG << 56;
kvmppc_book3s_queue_irqprio(vcpu,
BOOK3S_INTERRUPT_FAC_UNAVAIL);
return RESUME_GUEST;
@@ -204,10 +236,13 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) |
(((msr & MSR_TS_MASK) >> MSR_TS_S_LG) << 29);
vcpu->arch.shregs.msr = msr | MSR_TS_S;
+ vcpu->arch.regs.nip += 4;
return RESUME_GUEST;
}
/* What should we do here? We didn't recognize the instruction */
- WARN_ON_ONCE(1);
+ kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
+ pr_warn_ratelimited("Unrecognized TM-related instruction %#x for emulation", instr);
+
return RESUME_GUEST;
}
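
(Editor's note, not part of the patch.) Several of the emulated cases above now advance the guest nip past the emulated instruction (vcpu->arch.regs.nip += 4) before returning RESUME_GUEST. A minimal standalone sketch of that advance-after-emulate pattern, using hypothetical instruction words:

#include <stdint.h>
#include <stdio.h>

struct cpu { uint64_t nip; };

/* Emulate one fixed-width instruction, then step the PC by 4 bytes,
 * mirroring the "nip += 4; return RESUME_GUEST;" pattern above. */
static void emulate_one(struct cpu *c, uint32_t instr)
{
	printf("emulating %#x at nip %#llx\n", instr, (unsigned long long)c->nip);
	c->nip += 4;	/* every PowerPC instruction is 4 bytes wide */
}

int main(void)
{
	struct cpu c = { .nip = 0x1000 };
	uint32_t prog[] = { 0x60000000, 0x60000000 };	/* two nops, illustrative only */

	for (unsigned int i = 0; i < 2; i++)
		emulate_one(&c, prog[i]);
	printf("final nip %#llx\n", (unsigned long long)c.nip);
	return 0;
}
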
diff --git a/arch/powerpc/kvm/book3s_hv_tm_builtin.c b/arch/powerpc/kvm/book3s_hv_tm_builtin.c
index 217246279dfa..fad931f224ef 100644
--- a/arch/powerpc/kvm/book3s_hv_tm_builtin.c
+++ b/arch/powerpc/kvm/book3s_hv_tm_builtin.c
@@ -23,7 +23,18 @@ int kvmhv_p9_tm_emulation_early(struct kvm_vcpu *vcpu)
u64 newmsr, msr, bescr;
int rs;
- switch (instr & 0xfc0007ff) {
+ /*
+ * rfid, rfebb, and mtmsrd encode bit 31 = 0 since it's a reserved bit
+ * in these instructions, so masking bit 31 out doesn't change these
+ * instructions. For the tsr. instruction, if bit 31 = 0 then it is per the
+ * ISA an invalid form; however, the P9 UM, in section 4.6.10 Book II Invalid
+ * Forms, states specifically that ignoring bit 31 is an acceptable way
+ * to handle TM-related invalid forms that have bit 31 = 0. Moreover,
+ * for emulation purposes both forms (with and without bit 31 set) can
+ * generate a softpatch interrupt. Hence both forms are handled below
+ * for tsr. to make them behave the same way.
+ */
+ switch (instr & PO_XOP_OPCODE_MASK) {
case PPC_INST_RFID:
/* XXX do we need to check for PR=0 here? */
newmsr = vcpu->arch.shregs.srr1;
@@ -73,7 +84,8 @@ int kvmhv_p9_tm_emulation_early(struct kvm_vcpu *vcpu)
vcpu->arch.shregs.msr = newmsr;
return 1;
- case PPC_INST_TSR:
+ /* ignore bit 31, see comment above */
+ case (PPC_INST_TSR & PO_XOP_OPCODE_MASK):
/* we know the MSR has the TS field = S (0b01) here */
msr = vcpu->arch.shregs.msr;
/* check for PR=1 and arch 2.06 bit set in PCR */
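
(Editor's note, not part of the patch.) The switch above dispatches on the primary and extended opcode with the bit-31 position masked out, so the bit 31 = 0 and bit 31 = 1 encodings of tsr. fall through to the same case label. A standalone sketch of that masking; the mask and instruction word below are assumed illustrative values, not quotes of the kernel's ppc-opcode.h definitions:

#include <stdint.h>
#include <stdio.h>

/* Assumed for illustration: primary opcode plus extended opcode, with the
 * low bit (IBM bit 31) cleared so both encodings compare equal. */
#define PO_XOP_MASK	0xfc0007feu
#define INST_TSR_DOT	0x7c0005ddu	/* hypothetical tsr. encoding, bit 31 = 1 */

static const char *classify(uint32_t instr)
{
	switch (instr & PO_XOP_MASK) {
	case (INST_TSR_DOT & PO_XOP_MASK):	/* matches both bit-31 forms */
		return "tsr";
	default:
		return "unrecognized";
	}
}

int main(void)
{
	printf("%s\n", classify(INST_TSR_DOT));		/* valid form, bit 31 = 1 */
	printf("%s\n", classify(INST_TSR_DOT & ~1u));	/* invalid form, bit 31 = 0 */
	return 0;
}
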
diff --git a/arch/powerpc/kvm/book3s_hv_uvmem.c b/arch/powerpc/kvm/book3s_hv_uvmem.c
index 79b1202b1c62..e2f11f9c3f2a 100644
--- a/arch/powerpc/kvm/book3s_hv_uvmem.c
+++ b/arch/powerpc/kvm/book3s_hv_uvmem.c
@@ -47,7 +47,7 @@
* Locking order
*
* 1. kvm->srcu - Protects KVM memslots
- * 2. kvm->mm->mmap_sem - find_vma, migrate_vma_pages and helpers, ksm_madvise
+ * 2. kvm->mm->mmap_lock - find_vma, migrate_vma_pages and helpers, ksm_madvise
* 3. kvm->arch.uvmem_lock - protects read/writes to uvmem slots thus acting
* as sync-points for page-in/out
*/
@@ -90,15 +90,138 @@
#include <linux/migrate.h>
#include <linux/kvm_host.h>
#include <linux/ksm.h>
+#include <linux/of.h>
+#include <linux/memremap.h>
#include <asm/ultravisor.h>
#include <asm/mman.h>
#include <asm/kvm_ppc.h>
+#include <asm/kvm_book3s_uvmem.h>
static struct dev_pagemap kvmppc_uvmem_pgmap;
static unsigned long *kvmppc_uvmem_bitmap;
static DEFINE_SPINLOCK(kvmppc_uvmem_bitmap_lock);
-#define KVMPPC_UVMEM_PFN (1UL << 63)
+/*
+ * States of a GFN
+ * ---------------
+ * The GFN can be in one of the following states.
+ *
+ * (a) Secure - The GFN is secure. The GFN is associated with
+ * a Secure VM, the contents of the GFN are not accessible
+ * to the Hypervisor. This GFN can be backed by a secure-PFN,
+ * or can be backed by a normal-PFN with contents encrypted.
+ * The former is true when the GFN is paged-in into the
+ * ultravisor. The latter is true when the GFN is paged-out
+ * of the ultravisor.
+ *
+ * (b) Shared - The GFN is shared. The GFN is associated with a
+ * secure VM. The contents of the GFN are accessible to the
+ * Hypervisor. This GFN is backed by a normal-PFN and its
+ * content is un-encrypted.
+ *
+ * (c) Normal - The GFN is normal. The GFN is associated with
+ * a normal VM. The contents of the GFN are accessible to
+ * the Hypervisor. Its content is never encrypted.
+ *
+ * States of a VM.
+ * ---------------
+ *
+ * Normal VM: A VM whose contents are always accessible to
+ * the hypervisor. All its GFNs are normal-GFNs.
+ *
+ * Secure VM: A VM whose contents are not accessible to the
+ * hypervisor without the VM's consent. Its GFNs are
+ * either Shared-GFN or Secure-GFNs.
+ *
+ * Transient VM: A Normal VM that is transitioning to secure VM.
+ * The transition starts on successful return of
+ * H_SVM_INIT_START, and ends on successful return
+ * of H_SVM_INIT_DONE. This transient VM can have GFNs
+ * in any of the three states; i.e Secure-GFN, Shared-GFN,
+ * and Normal-GFN. The VM never executes in this state
+ * in supervisor-mode.
+ *
+ * Memory slot State.
+ * -----------------------------
+ * The state of a memory slot mirrors the state of the
+ * VM the memory slot is associated with.
+ *
+ * VM State transition.
+ * --------------------
+ *
+ * A VM always starts in Normal Mode.
+ *
+ * H_SVM_INIT_START moves the VM into transient state. During this
+ * time the Ultravisor may request some of its GFNs to be shared or
+ * secured. So its GFNs can be in one of the three GFN states.
+ *
+ * H_SVM_INIT_DONE moves the VM entirely from transient state to
+ * secure-state. At this point any left-over normal-GFNs are
+ * transitioned to Secure-GFN.
+ *
+ * H_SVM_INIT_ABORT moves the transient VM back to normal VM.
+ * All its GFNs are moved to Normal-GFNs.
+ *
+ * UV_TERMINATE transitions the secure-VM back to normal-VM. All
+ * the secure-GFNs and shared-GFNs are transitioned to normal-GFNs.
+ * Note: The contents of the normal-GFN are undefined at this point.
+ *
+ * GFN state implementation:
+ * -------------------------
+ *
+ * Secure GFN is associated with a secure-PFN; also called uvmem_pfn,
+ * when the GFN is paged-in. Its pfn[] has KVMPPC_GFN_UVMEM_PFN flag
+ * set, and contains the value of the secure-PFN.
+ * It is associated with a normal-PFN; also called mem_pfn, when
+ * the GFN is paged-out. Its pfn[] has KVMPPC_GFN_MEM_PFN flag set.
+ * The value of the normal-PFN is not tracked.
+ *
+ * Shared GFN is associated with a normal-PFN. Its pfn[] has
+ * KVMPPC_GFN_SHARED flag set. The value of the normal-PFN
+ * is not tracked.
+ *
+ * Normal GFN is associated with normal-PFN. Its pfn[] has
+ * no flag set. The value of the normal-PFN is not tracked.
+ *
+ * Life cycle of a GFN
+ * --------------------
+ *
+ * --------------------------------------------------------------
+ * | | Share | Unshare | SVM |H_SVM_INIT_DONE|
+ * | |operation |operation | abort/ | |
+ * | | | | terminate | |
+ * -------------------------------------------------------------
+ * | | | | | |
+ * | Secure | Shared | Secure |Normal |Secure |
+ * | | | | | |
+ * | Shared | Shared | Secure |Normal |Shared |
+ * | | | | | |
+ * | Normal | Shared | Secure |Normal |Secure |
+ * --------------------------------------------------------------
+ *
+ * Life cycle of a VM
+ * --------------------
+ *
+ * --------------------------------------------------------------------
+ * | | start | H_SVM_ |H_SVM_ |H_SVM_ |UV_SVM_ |
+ * | | VM |INIT_START|INIT_DONE|INIT_ABORT |TERMINATE |
+ * | | | | | | |
+ * --------------------------------------------------------------------
+ * | | | | | | |
+ * | Normal | Normal | Transient|Error |Error |Normal |
+ * | | | | | | |
+ * | Secure | Error | Error |Error |Error |Normal |
+ * | | | | | | |
+ * |Transient| N/A | Error |Secure |Normal |Normal |
+ * --------------------------------------------------------------------
+ */
+
+#define KVMPPC_GFN_UVMEM_PFN (1UL << 63)
+#define KVMPPC_GFN_MEM_PFN (1UL << 62)
+#define KVMPPC_GFN_SHARED (1UL << 61)
+#define KVMPPC_GFN_SECURE (KVMPPC_GFN_UVMEM_PFN | KVMPPC_GFN_MEM_PFN)
+#define KVMPPC_GFN_FLAG_MASK (KVMPPC_GFN_SECURE | KVMPPC_GFN_SHARED)
+#define KVMPPC_GFN_PFN_MASK (~KVMPPC_GFN_FLAG_MASK)
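
(Editor's note, not part of the patch.) Each pfns[] entry encodes a GFN state flag in its top bits and, for the uvmem case only, the backing device PFN in the remaining low bits. A small userspace-style sketch of that encoding, using the same bit positions as the defines above (1ULL is used so the sketch stays 64-bit clean regardless of the host's unsigned long width):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Same bit layout as the KVMPPC_GFN_* defines above. */
#define GFN_UVMEM_PFN	(1ULL << 63)
#define GFN_MEM_PFN	(1ULL << 62)
#define GFN_SHARED	(1ULL << 61)
#define GFN_FLAG_MASK	(GFN_UVMEM_PFN | GFN_MEM_PFN | GFN_SHARED)
#define GFN_PFN_MASK	(~GFN_FLAG_MASK)

/* Encode: a secure GFN backed by a device (uvmem) PFN. */
static uint64_t mark_uvmem(uint64_t uvmem_pfn)
{
	return GFN_UVMEM_PFN | (uvmem_pfn & GFN_PFN_MASK);
}

/* Decode: is this entry a secure GFN backed by a secure PFN? */
static bool is_uvmem(uint64_t entry, uint64_t *uvmem_pfn)
{
	if (!(entry & GFN_UVMEM_PFN))
		return false;
	*uvmem_pfn = entry & GFN_PFN_MASK;
	return true;
}

int main(void)
{
	uint64_t pfn, entry = mark_uvmem(0x1234);

	if (is_uvmem(entry, &pfn))
		printf("uvmem pfn = 0x%llx\n", (unsigned long long)pfn);
	return 0;
}
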
struct kvmppc_uvmem_slot {
struct list_head list;
@@ -106,13 +229,22 @@ struct kvmppc_uvmem_slot {
unsigned long base_pfn;
unsigned long *pfns;
};
-
struct kvmppc_uvmem_page_pvt {
struct kvm *kvm;
unsigned long gpa;
bool skip_page_out;
+ bool remove_gfn;
};
+bool kvmppc_uvmem_available(void)
+{
+ /*
+ * If kvmppc_uvmem_bitmap != NULL, then there is an ultravisor
+ * and our data structures have been initialized successfully.
+ */
+ return !!kvmppc_uvmem_bitmap;
+}
+
int kvmppc_uvmem_slot_init(struct kvm *kvm, const struct kvm_memory_slot *slot)
{
struct kvmppc_uvmem_slot *p;
@@ -120,7 +252,7 @@ int kvmppc_uvmem_slot_init(struct kvm *kvm, const struct kvm_memory_slot *slot)
p = kzalloc(sizeof(*p), GFP_KERNEL);
if (!p)
return -ENOMEM;
- p->pfns = vzalloc(array_size(slot->npages, sizeof(*p->pfns)));
+ p->pfns = vcalloc(slot->npages, sizeof(*p->pfns));
if (!p->pfns) {
kfree(p);
return -ENOMEM;
@@ -154,8 +286,8 @@ void kvmppc_uvmem_slot_free(struct kvm *kvm, const struct kvm_memory_slot *slot)
mutex_unlock(&kvm->arch.uvmem_lock);
}
-static void kvmppc_uvmem_pfn_insert(unsigned long gfn, unsigned long uvmem_pfn,
- struct kvm *kvm)
+static void kvmppc_mark_gfn(unsigned long gfn, struct kvm *kvm,
+ unsigned long flag, unsigned long uvmem_pfn)
{
struct kvmppc_uvmem_slot *p;
@@ -163,24 +295,41 @@ static void kvmppc_uvmem_pfn_insert(unsigned long gfn, unsigned long uvmem_pfn,
if (gfn >= p->base_pfn && gfn < p->base_pfn + p->nr_pfns) {
unsigned long index = gfn - p->base_pfn;
- p->pfns[index] = uvmem_pfn | KVMPPC_UVMEM_PFN;
+ if (flag == KVMPPC_GFN_UVMEM_PFN)
+ p->pfns[index] = uvmem_pfn | flag;
+ else
+ p->pfns[index] = flag;
return;
}
}
}
-static void kvmppc_uvmem_pfn_remove(unsigned long gfn, struct kvm *kvm)
+/* mark the GFN as secure-GFN associated with @uvmem_pfn device-PFN. */
+static void kvmppc_gfn_secure_uvmem_pfn(unsigned long gfn,
+ unsigned long uvmem_pfn, struct kvm *kvm)
{
- struct kvmppc_uvmem_slot *p;
+ kvmppc_mark_gfn(gfn, kvm, KVMPPC_GFN_UVMEM_PFN, uvmem_pfn);
+}
- list_for_each_entry(p, &kvm->arch.uvmem_pfns, list) {
- if (gfn >= p->base_pfn && gfn < p->base_pfn + p->nr_pfns) {
- p->pfns[gfn - p->base_pfn] = 0;
- return;
- }
- }
+/* mark the GFN as secure-GFN associated with a memory-PFN. */
+static void kvmppc_gfn_secure_mem_pfn(unsigned long gfn, struct kvm *kvm)
+{
+ kvmppc_mark_gfn(gfn, kvm, KVMPPC_GFN_MEM_PFN, 0);
+}
+
+/* mark the GFN as a shared GFN. */
+static void kvmppc_gfn_shared(unsigned long gfn, struct kvm *kvm)
+{
+ kvmppc_mark_gfn(gfn, kvm, KVMPPC_GFN_SHARED, 0);
+}
+
+/* mark the GFN as a non-existent GFN. */
+static void kvmppc_gfn_remove(unsigned long gfn, struct kvm *kvm)
+{
+ kvmppc_mark_gfn(gfn, kvm, 0, 0);
}
+/* return true, if the GFN is a secure-GFN backed by a secure-PFN */
static bool kvmppc_gfn_is_uvmem_pfn(unsigned long gfn, struct kvm *kvm,
unsigned long *uvmem_pfn)
{
@@ -190,10 +339,10 @@ static bool kvmppc_gfn_is_uvmem_pfn(unsigned long gfn, struct kvm *kvm,
if (gfn >= p->base_pfn && gfn < p->base_pfn + p->nr_pfns) {
unsigned long index = gfn - p->base_pfn;
- if (p->pfns[index] & KVMPPC_UVMEM_PFN) {
+ if (p->pfns[index] & KVMPPC_GFN_UVMEM_PFN) {
if (uvmem_pfn)
*uvmem_pfn = p->pfns[index] &
- ~KVMPPC_UVMEM_PFN;
+ KVMPPC_GFN_PFN_MASK;
return true;
} else
return false;
@@ -202,12 +351,120 @@ static bool kvmppc_gfn_is_uvmem_pfn(unsigned long gfn, struct kvm *kvm,
return false;
}
+/*
+ * Starting from *gfn, search for the next available GFN that has not yet
+ * been transitioned to a secure GFN. Return the value of that GFN in *gfn.
+ * If such a GFN is found, return true, else return false.
+ *
+ * Must be called with kvm->arch.uvmem_lock held.
+ */
+static bool kvmppc_next_nontransitioned_gfn(const struct kvm_memory_slot *memslot,
+ struct kvm *kvm, unsigned long *gfn)
+{
+ struct kvmppc_uvmem_slot *p = NULL, *iter;
+ bool ret = false;
+ unsigned long i;
+
+ list_for_each_entry(iter, &kvm->arch.uvmem_pfns, list)
+ if (*gfn >= iter->base_pfn && *gfn < iter->base_pfn + iter->nr_pfns) {
+ p = iter;
+ break;
+ }
+ if (!p)
+ return ret;
+ /*
+ * The code below assumes a one-to-one correspondence between
+ * kvmppc_uvmem_slot and memslot.
+ */
+ for (i = *gfn; i < p->base_pfn + p->nr_pfns; i++) {
+ unsigned long index = i - p->base_pfn;
+
+ if (!(p->pfns[index] & KVMPPC_GFN_FLAG_MASK)) {
+ *gfn = i;
+ ret = true;
+ break;
+ }
+ }
+ return ret;
+}
+
+static int kvmppc_memslot_page_merge(struct kvm *kvm,
+ const struct kvm_memory_slot *memslot, bool merge)
+{
+ unsigned long gfn = memslot->base_gfn;
+ unsigned long end, start = gfn_to_hva(kvm, gfn);
+ int ret = 0;
+ struct vm_area_struct *vma;
+ int merge_flag = (merge) ? MADV_MERGEABLE : MADV_UNMERGEABLE;
+
+ if (kvm_is_error_hva(start))
+ return H_STATE;
+
+ end = start + (memslot->npages << PAGE_SHIFT);
+
+ mmap_write_lock(kvm->mm);
+ do {
+ vma = find_vma_intersection(kvm->mm, start, end);
+ if (!vma) {
+ ret = H_STATE;
+ break;
+ }
+ ret = ksm_madvise(vma, vma->vm_start, vma->vm_end,
+ merge_flag, &vma->vm_flags);
+ if (ret) {
+ ret = H_STATE;
+ break;
+ }
+ start = vma->vm_end;
+ } while (end > vma->vm_end);
+
+ mmap_write_unlock(kvm->mm);
+ return ret;
+}
+
+static void __kvmppc_uvmem_memslot_delete(struct kvm *kvm,
+ const struct kvm_memory_slot *memslot)
+{
+ uv_unregister_mem_slot(kvm->arch.lpid, memslot->id);
+ kvmppc_uvmem_slot_free(kvm, memslot);
+ kvmppc_memslot_page_merge(kvm, memslot, true);
+}
+
+static int __kvmppc_uvmem_memslot_create(struct kvm *kvm,
+ const struct kvm_memory_slot *memslot)
+{
+ int ret = H_PARAMETER;
+
+ if (kvmppc_memslot_page_merge(kvm, memslot, false))
+ return ret;
+
+ if (kvmppc_uvmem_slot_init(kvm, memslot))
+ goto out1;
+
+ ret = uv_register_mem_slot(kvm->arch.lpid,
+ memslot->base_gfn << PAGE_SHIFT,
+ memslot->npages * PAGE_SIZE,
+ 0, memslot->id);
+ if (ret < 0) {
+ ret = H_PARAMETER;
+ goto out;
+ }
+ return 0;
+out:
+ kvmppc_uvmem_slot_free(kvm, memslot);
+out1:
+ kvmppc_memslot_page_merge(kvm, memslot, true);
+ return ret;
+}
+
unsigned long kvmppc_h_svm_init_start(struct kvm *kvm)
{
struct kvm_memslots *slots;
- struct kvm_memory_slot *memslot;
+ struct kvm_memory_slot *memslot, *m;
int ret = H_SUCCESS;
- int srcu_idx;
+ int srcu_idx, bkt;
+
+ kvm->arch.secure_guest = KVMPPC_SECURE_INIT_START;
if (!kvmppc_uvmem_bitmap)
return H_UNSUPPORTED;
@@ -216,37 +473,125 @@ unsigned long kvmppc_h_svm_init_start(struct kvm *kvm)
if (!kvm_is_radix(kvm))
return H_UNSUPPORTED;
+ /* NAK the transition to secure if not enabled */
+ if (!kvm->arch.svm_enabled)
+ return H_AUTHORITY;
+
srcu_idx = srcu_read_lock(&kvm->srcu);
+
+ /* register the memslot */
slots = kvm_memslots(kvm);
- kvm_for_each_memslot(memslot, slots) {
- if (kvmppc_uvmem_slot_init(kvm, memslot)) {
- ret = H_PARAMETER;
- goto out;
- }
- ret = uv_register_mem_slot(kvm->arch.lpid,
- memslot->base_gfn << PAGE_SHIFT,
- memslot->npages * PAGE_SIZE,
- 0, memslot->id);
- if (ret < 0) {
- kvmppc_uvmem_slot_free(kvm, memslot);
- ret = H_PARAMETER;
- goto out;
+ kvm_for_each_memslot(memslot, bkt, slots) {
+ ret = __kvmppc_uvmem_memslot_create(kvm, memslot);
+ if (ret)
+ break;
+ }
+
+ if (ret) {
+ slots = kvm_memslots(kvm);
+ kvm_for_each_memslot(m, bkt, slots) {
+ if (m == memslot)
+ break;
+ __kvmppc_uvmem_memslot_delete(kvm, memslot);
}
}
- kvm->arch.secure_guest |= KVMPPC_SECURE_INIT_START;
-out:
+
srcu_read_unlock(&kvm->srcu, srcu_idx);
return ret;
}
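
(Editor's note, not part of the patch.) The memslot registration above follows a create-all-or-roll-back pattern: slots are registered one at a time, and on the first failure everything registered before the failing slot is unwound again. A generic sketch of that pattern, with create_one/delete_one standing in for the __kvmppc_uvmem_memslot_create/_delete helpers:

#include <stdio.h>

struct slot { int id; };

/* Stand-ins for the real helpers; the slot with id 3 is made to fail. */
static int create_one(struct slot *s)	{ return s->id == 3 ? -1 : 0; }
static void delete_one(struct slot *s)	{ printf("rolled back slot %d\n", s->id); }

static int create_all(struct slot *slots, int n)
{
	int i, ret = 0;

	for (i = 0; i < n; i++) {
		ret = create_one(&slots[i]);
		if (ret)
			break;
	}
	if (ret) {
		/* undo only the slots created before the failure */
		while (i-- > 0)
			delete_one(&slots[i]);
	}
	return ret;
}

int main(void)
{
	struct slot slots[] = { {1}, {2}, {3}, {4} };

	return create_all(slots, 4) ? 1 : 0;
}
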
-unsigned long kvmppc_h_svm_init_done(struct kvm *kvm)
+/*
+ * Provision a new page on HV side and copy over the contents
+ * from secure memory using UV_PAGE_OUT uvcall.
+ * Caller must hold kvm->arch.uvmem_lock.
+ */
+static int __kvmppc_svm_page_out(struct vm_area_struct *vma,
+ unsigned long start,
+ unsigned long end, unsigned long page_shift,
+ struct kvm *kvm, unsigned long gpa, struct page *fault_page)
{
- if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START))
- return H_UNSUPPORTED;
+ unsigned long src_pfn, dst_pfn = 0;
+ struct migrate_vma mig = { 0 };
+ struct page *dpage, *spage;
+ struct kvmppc_uvmem_page_pvt *pvt;
+ unsigned long pfn;
+ int ret = U_SUCCESS;
- kvm->arch.secure_guest |= KVMPPC_SECURE_INIT_DONE;
- pr_info("LPID %d went secure\n", kvm->arch.lpid);
- return H_SUCCESS;
+ memset(&mig, 0, sizeof(mig));
+ mig.vma = vma;
+ mig.start = start;
+ mig.end = end;
+ mig.src = &src_pfn;
+ mig.dst = &dst_pfn;
+ mig.pgmap_owner = &kvmppc_uvmem_pgmap;
+ mig.flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE;
+ mig.fault_page = fault_page;
+
+ /* The requested page is already paged-out, nothing to do */
+ if (!kvmppc_gfn_is_uvmem_pfn(gpa >> page_shift, kvm, NULL))
+ return ret;
+
+ ret = migrate_vma_setup(&mig);
+ if (ret)
+ return -1;
+
+ spage = migrate_pfn_to_page(*mig.src);
+ if (!spage || !(*mig.src & MIGRATE_PFN_MIGRATE))
+ goto out_finalize;
+
+ if (!is_zone_device_page(spage))
+ goto out_finalize;
+
+ dpage = alloc_page_vma(GFP_HIGHUSER, vma, start);
+ if (!dpage) {
+ ret = -1;
+ goto out_finalize;
+ }
+
+ lock_page(dpage);
+ pvt = spage->zone_device_data;
+ pfn = page_to_pfn(dpage);
+
+ /*
+ * This function is used in two cases:
+ * - When HV touches a secure page, for which we do UV_PAGE_OUT
+ * - When a secure page is converted to shared page, we *get*
+ * the page to essentially unmap the device page. In this
+ * case we skip page-out.
+ */
+ if (!pvt->skip_page_out)
+ ret = uv_page_out(kvm->arch.lpid, pfn << page_shift,
+ gpa, 0, page_shift);
+
+ if (ret == U_SUCCESS)
+ *mig.dst = migrate_pfn(pfn);
+ else {
+ unlock_page(dpage);
+ __free_page(dpage);
+ goto out_finalize;
+ }
+
+ migrate_vma_pages(&mig);
+
+out_finalize:
+ migrate_vma_finalize(&mig);
+ return ret;
+}
+
+static inline int kvmppc_svm_page_out(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end,
+ unsigned long page_shift,
+ struct kvm *kvm, unsigned long gpa,
+ struct page *fault_page)
+{
+ int ret;
+
+ mutex_lock(&kvm->arch.uvmem_lock);
+ ret = __kvmppc_svm_page_out(vma, start, end, page_shift, kvm, gpa,
+ fault_page);
+ mutex_unlock(&kvm->arch.uvmem_lock);
+
+ return ret;
}
/*
@@ -257,38 +602,58 @@ unsigned long kvmppc_h_svm_init_done(struct kvm *kvm)
* fault on them, do fault time migration to replace the device PTEs in
* QEMU page table with normal PTEs from newly allocated pages.
*/
-void kvmppc_uvmem_drop_pages(const struct kvm_memory_slot *free,
+void kvmppc_uvmem_drop_pages(const struct kvm_memory_slot *slot,
struct kvm *kvm, bool skip_page_out)
{
int i;
struct kvmppc_uvmem_page_pvt *pvt;
- unsigned long pfn, uvmem_pfn;
- unsigned long gfn = free->base_gfn;
+ struct page *uvmem_page;
+ struct vm_area_struct *vma = NULL;
+ unsigned long uvmem_pfn, gfn;
+ unsigned long addr;
- for (i = free->npages; i; --i, ++gfn) {
- struct page *uvmem_page;
+ mmap_read_lock(kvm->mm);
+
+ addr = slot->userspace_addr;
+
+ gfn = slot->base_gfn;
+ for (i = slot->npages; i; --i, ++gfn, addr += PAGE_SIZE) {
+
+ /* Fetch the VMA if addr is not in the latest fetched one */
+ if (!vma || addr >= vma->vm_end) {
+ vma = vma_lookup(kvm->mm, addr);
+ if (!vma) {
+ pr_err("Can't find VMA for gfn:0x%lx\n", gfn);
+ break;
+ }
+ }
mutex_lock(&kvm->arch.uvmem_lock);
- if (!kvmppc_gfn_is_uvmem_pfn(gfn, kvm, &uvmem_pfn)) {
- mutex_unlock(&kvm->arch.uvmem_lock);
- continue;
+
+ if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, &uvmem_pfn)) {
+ uvmem_page = pfn_to_page(uvmem_pfn);
+ pvt = uvmem_page->zone_device_data;
+ pvt->skip_page_out = skip_page_out;
+ pvt->remove_gfn = true;
+
+ if (__kvmppc_svm_page_out(vma, addr, addr + PAGE_SIZE,
+ PAGE_SHIFT, kvm, pvt->gpa, NULL))
+ pr_err("Can't page out gpa:0x%lx addr:0x%lx\n",
+ pvt->gpa, addr);
+ } else {
+ /* Remove the shared flag if any */
+ kvmppc_gfn_remove(gfn, kvm);
}
- uvmem_page = pfn_to_page(uvmem_pfn);
- pvt = uvmem_page->zone_device_data;
- pvt->skip_page_out = skip_page_out;
mutex_unlock(&kvm->arch.uvmem_lock);
-
- pfn = gfn_to_pfn(kvm, gfn);
- if (is_error_noslot_pfn(pfn))
- continue;
- kvm_release_pfn_clean(pfn);
}
+
+ mmap_read_unlock(kvm->mm);
}
unsigned long kvmppc_h_svm_init_abort(struct kvm *kvm)
{
- int srcu_idx;
+ int srcu_idx, bkt;
struct kvm_memory_slot *memslot;
/*
@@ -303,7 +668,7 @@ unsigned long kvmppc_h_svm_init_abort(struct kvm *kvm)
srcu_idx = srcu_read_lock(&kvm->srcu);
- kvm_for_each_memslot(memslot, kvm_memslots(kvm))
+ kvm_for_each_memslot(memslot, bkt, kvm_memslots(kvm))
kvmppc_uvmem_drop_pages(memslot, kvm, false);
srcu_read_unlock(&kvm->srcu, srcu_idx);
@@ -329,9 +694,9 @@ static struct page *kvmppc_uvmem_get_page(unsigned long gpa, struct kvm *kvm)
struct kvmppc_uvmem_page_pvt *pvt;
unsigned long pfn_last, pfn_first;
- pfn_first = kvmppc_uvmem_pgmap.res.start >> PAGE_SHIFT;
+ pfn_first = kvmppc_uvmem_pgmap.range.start >> PAGE_SHIFT;
pfn_last = pfn_first +
- (resource_size(&kvmppc_uvmem_pgmap.res) >> PAGE_SHIFT);
+ (range_len(&kvmppc_uvmem_pgmap.range) >> PAGE_SHIFT);
spin_lock(&kvmppc_uvmem_bitmap_lock);
bit = find_first_zero_bit(kvmppc_uvmem_bitmap,
@@ -346,15 +711,14 @@ static struct page *kvmppc_uvmem_get_page(unsigned long gpa, struct kvm *kvm)
goto out_clear;
uvmem_pfn = bit + pfn_first;
- kvmppc_uvmem_pfn_insert(gpa >> PAGE_SHIFT, uvmem_pfn, kvm);
+ kvmppc_gfn_secure_uvmem_pfn(gpa >> PAGE_SHIFT, uvmem_pfn, kvm);
pvt->gpa = gpa;
pvt->kvm = kvm;
dpage = pfn_to_page(uvmem_pfn);
dpage->zone_device_data = pvt;
- get_page(dpage);
- lock_page(dpage);
+ zone_device_page_init(dpage);
return dpage;
out_clear:
spin_lock(&kvmppc_uvmem_bitmap_lock);
@@ -365,16 +729,17 @@ out:
}
/*
- * Alloc a PFN from private device memory pool and copy page from normal
- * memory to secure memory using UV_PAGE_IN uvcall.
+ * Alloc a PFN from private device memory pool. If @pagein is true,
+ * copy page from normal memory to secure memory using UV_PAGE_IN uvcall.
*/
-static int
-kvmppc_svm_page_in(struct vm_area_struct *vma, unsigned long start,
- unsigned long end, unsigned long gpa, struct kvm *kvm,
- unsigned long page_shift, bool *downgrade)
+static int kvmppc_svm_page_in(struct vm_area_struct *vma,
+ unsigned long start,
+ unsigned long end, unsigned long gpa, struct kvm *kvm,
+ unsigned long page_shift,
+ bool pagein)
{
unsigned long src_pfn, dst_pfn = 0;
- struct migrate_vma mig;
+ struct migrate_vma mig = { 0 };
struct page *spage;
unsigned long pfn;
struct page *dpage;
@@ -386,18 +751,7 @@ kvmppc_svm_page_in(struct vm_area_struct *vma, unsigned long start,
mig.end = end;
mig.src = &src_pfn;
mig.dst = &dst_pfn;
-
- /*
- * We come here with mmap_sem write lock held just for
- * ksm_madvise(), otherwise we only need read mmap_sem.
- * Hence downgrade to read lock once ksm_madvise() is done.
- */
- ret = ksm_madvise(vma, vma->vm_start, vma->vm_end,
- MADV_UNMERGEABLE, &vma->vm_flags);
- downgrade_write(&kvm->mm->mmap_sem);
- *downgrade = true;
- if (ret)
- return ret;
+ mig.flags = MIGRATE_VMA_SELECT_SYSTEM;
ret = migrate_vma_setup(&mig);
if (ret)
@@ -414,19 +768,98 @@ kvmppc_svm_page_in(struct vm_area_struct *vma, unsigned long start,
goto out_finalize;
}
- pfn = *mig.src >> MIGRATE_PFN_SHIFT;
- spage = migrate_pfn_to_page(*mig.src);
- if (spage)
- uv_page_in(kvm->arch.lpid, pfn << page_shift, gpa, 0,
- page_shift);
+ if (pagein) {
+ pfn = *mig.src >> MIGRATE_PFN_SHIFT;
+ spage = migrate_pfn_to_page(*mig.src);
+ if (spage) {
+ ret = uv_page_in(kvm->arch.lpid, pfn << page_shift,
+ gpa, 0, page_shift);
+ if (ret)
+ goto out_finalize;
+ }
+ }
- *mig.dst = migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
+ *mig.dst = migrate_pfn(page_to_pfn(dpage));
migrate_vma_pages(&mig);
out_finalize:
migrate_vma_finalize(&mig);
return ret;
}
+static int kvmppc_uv_migrate_mem_slot(struct kvm *kvm,
+ const struct kvm_memory_slot *memslot)
+{
+ unsigned long gfn = memslot->base_gfn;
+ struct vm_area_struct *vma;
+ unsigned long start, end;
+ int ret = 0;
+
+ mmap_read_lock(kvm->mm);
+ mutex_lock(&kvm->arch.uvmem_lock);
+ while (kvmppc_next_nontransitioned_gfn(memslot, kvm, &gfn)) {
+ ret = H_STATE;
+ start = gfn_to_hva(kvm, gfn);
+ if (kvm_is_error_hva(start))
+ break;
+
+ end = start + (1UL << PAGE_SHIFT);
+ vma = find_vma_intersection(kvm->mm, start, end);
+ if (!vma || vma->vm_start > start || vma->vm_end < end)
+ break;
+
+ ret = kvmppc_svm_page_in(vma, start, end,
+ (gfn << PAGE_SHIFT), kvm, PAGE_SHIFT, false);
+ if (ret) {
+ ret = H_STATE;
+ break;
+ }
+
+ /* relinquish the cpu if needed */
+ cond_resched();
+ }
+ mutex_unlock(&kvm->arch.uvmem_lock);
+ mmap_read_unlock(kvm->mm);
+ return ret;
+}
+
+unsigned long kvmppc_h_svm_init_done(struct kvm *kvm)
+{
+ struct kvm_memslots *slots;
+ struct kvm_memory_slot *memslot;
+ int srcu_idx, bkt;
+ long ret = H_SUCCESS;
+
+ if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START))
+ return H_UNSUPPORTED;
+
+ /* migrate any unmoved normal pfn to device pfns */
+ srcu_idx = srcu_read_lock(&kvm->srcu);
+ slots = kvm_memslots(kvm);
+ kvm_for_each_memslot(memslot, bkt, slots) {
+ ret = kvmppc_uv_migrate_mem_slot(kvm, memslot);
+ if (ret) {
+ /*
+ * The pages will remain transitioned.
+ * It's the caller's responsibility to
+ * terminate the VM, which will undo
+ * all state of the VM. Till then
+ * this VM is in an erroneous state.
+ * Its KVMPPC_SECURE_INIT_DONE will
+ * remain unset.
+ */
+ ret = H_STATE;
+ goto out;
+ }
+ }
+
+ kvm->arch.secure_guest |= KVMPPC_SECURE_INIT_DONE;
+ pr_info("LPID %d went secure\n", kvm->arch.lpid);
+
+out:
+ srcu_read_unlock(&kvm->srcu, srcu_idx);
+ return ret;
+}
+
/*
* Shares the page with HV, thus making it a normal page.
*
@@ -436,8 +869,8 @@ out_finalize:
* In the former case, uses dev_pagemap_ops.migrate_to_ram handler
* to unmap the device page from QEMU's page tables.
*/
-static unsigned long
-kvmppc_share_page(struct kvm *kvm, unsigned long gpa, unsigned long page_shift)
+static unsigned long kvmppc_share_page(struct kvm *kvm, unsigned long gpa,
+ unsigned long page_shift)
{
int ret = H_PARAMETER;
@@ -454,6 +887,11 @@ kvmppc_share_page(struct kvm *kvm, unsigned long gpa, unsigned long page_shift)
uvmem_page = pfn_to_page(uvmem_pfn);
pvt = uvmem_page->zone_device_data;
pvt->skip_page_out = true;
+ /*
+ * do not drop the GFN. It is a valid GFN
+ * that is transitioned to a shared GFN.
+ */
+ pvt->remove_gfn = false;
}
retry:
@@ -467,12 +905,16 @@ retry:
uvmem_page = pfn_to_page(uvmem_pfn);
pvt = uvmem_page->zone_device_data;
pvt->skip_page_out = true;
+ pvt->remove_gfn = false; /* it continues to be a valid GFN */
kvm_release_pfn_clean(pfn);
goto retry;
}
- if (!uv_page_in(kvm->arch.lpid, pfn << page_shift, gpa, 0, page_shift))
+ if (!uv_page_in(kvm->arch.lpid, pfn << page_shift, gpa, 0,
+ page_shift)) {
+ kvmppc_gfn_shared(gfn, kvm);
ret = H_SUCCESS;
+ }
kvm_release_pfn_clean(pfn);
mutex_unlock(&kvm->arch.uvmem_lock);
out:
@@ -486,11 +928,10 @@ out:
* H_PAGE_IN_SHARED flag makes the page shared which means that the same
* memory in is visible from both UV and HV.
*/
-unsigned long
-kvmppc_h_svm_page_in(struct kvm *kvm, unsigned long gpa,
- unsigned long flags, unsigned long page_shift)
+unsigned long kvmppc_h_svm_page_in(struct kvm *kvm, unsigned long gpa,
+ unsigned long flags,
+ unsigned long page_shift)
{
- bool downgrade = false;
unsigned long start, end;
struct vm_area_struct *vma;
int srcu_idx;
@@ -511,7 +952,7 @@ kvmppc_h_svm_page_in(struct kvm *kvm, unsigned long gpa,
ret = H_PARAMETER;
srcu_idx = srcu_read_lock(&kvm->srcu);
- down_write(&kvm->mm->mmap_sem);
+ mmap_read_lock(kvm->mm);
start = gfn_to_hva(kvm, gfn);
if (kvm_is_error_hva(start))
@@ -527,95 +968,20 @@ kvmppc_h_svm_page_in(struct kvm *kvm, unsigned long gpa,
if (!vma || vma->vm_start > start || vma->vm_end < end)
goto out_unlock;
- if (!kvmppc_svm_page_in(vma, start, end, gpa, kvm, page_shift,
- &downgrade))
- ret = H_SUCCESS;
+ if (kvmppc_svm_page_in(vma, start, end, gpa, kvm, page_shift,
+ true))
+ goto out_unlock;
+
+ ret = H_SUCCESS;
+
out_unlock:
mutex_unlock(&kvm->arch.uvmem_lock);
out:
- if (downgrade)
- up_read(&kvm->mm->mmap_sem);
- else
- up_write(&kvm->mm->mmap_sem);
+ mmap_read_unlock(kvm->mm);
srcu_read_unlock(&kvm->srcu, srcu_idx);
return ret;
}
-/*
- * Provision a new page on HV side and copy over the contents
- * from secure memory using UV_PAGE_OUT uvcall.
- */
-static int
-kvmppc_svm_page_out(struct vm_area_struct *vma, unsigned long start,
- unsigned long end, unsigned long page_shift,
- struct kvm *kvm, unsigned long gpa)
-{
- unsigned long src_pfn, dst_pfn = 0;
- struct migrate_vma mig;
- struct page *dpage, *spage;
- struct kvmppc_uvmem_page_pvt *pvt;
- unsigned long pfn;
- int ret = U_SUCCESS;
-
- memset(&mig, 0, sizeof(mig));
- mig.vma = vma;
- mig.start = start;
- mig.end = end;
- mig.src = &src_pfn;
- mig.dst = &dst_pfn;
-
- mutex_lock(&kvm->arch.uvmem_lock);
- /* The requested page is already paged-out, nothing to do */
- if (!kvmppc_gfn_is_uvmem_pfn(gpa >> page_shift, kvm, NULL))
- goto out;
-
- ret = migrate_vma_setup(&mig);
- if (ret)
- goto out;
-
- spage = migrate_pfn_to_page(*mig.src);
- if (!spage || !(*mig.src & MIGRATE_PFN_MIGRATE))
- goto out_finalize;
-
- if (!is_zone_device_page(spage))
- goto out_finalize;
-
- dpage = alloc_page_vma(GFP_HIGHUSER, vma, start);
- if (!dpage) {
- ret = -1;
- goto out_finalize;
- }
-
- lock_page(dpage);
- pvt = spage->zone_device_data;
- pfn = page_to_pfn(dpage);
-
- /*
- * This function is used in two cases:
- * - When HV touches a secure page, for which we do UV_PAGE_OUT
- * - When a secure page is converted to shared page, we *get*
- * the page to essentially unmap the device page. In this
- * case we skip page-out.
- */
- if (!pvt->skip_page_out)
- ret = uv_page_out(kvm->arch.lpid, pfn << page_shift,
- gpa, 0, page_shift);
-
- if (ret == U_SUCCESS)
- *mig.dst = migrate_pfn(pfn) | MIGRATE_PFN_LOCKED;
- else {
- unlock_page(dpage);
- __free_page(dpage);
- goto out_finalize;
- }
-
- migrate_vma_pages(&mig);
-out_finalize:
- migrate_vma_finalize(&mig);
-out:
- mutex_unlock(&kvm->arch.uvmem_lock);
- return ret;
-}
/*
* Fault handler callback that gets called when HV touches any page that
@@ -631,7 +997,7 @@ static vm_fault_t kvmppc_uvmem_migrate_to_ram(struct vm_fault *vmf)
if (kvmppc_svm_page_out(vmf->vma, vmf->address,
vmf->address + PAGE_SIZE, PAGE_SHIFT,
- pvt->kvm, pvt->gpa))
+ pvt->kvm, pvt->gpa, vmf->page))
return VM_FAULT_SIGBUS;
else
return 0;
@@ -640,13 +1006,14 @@ static vm_fault_t kvmppc_uvmem_migrate_to_ram(struct vm_fault *vmf)
/*
* Release the device PFN back to the pool
*
- * Gets called when secure page becomes a normal page during H_SVM_PAGE_OUT.
+ * Gets called when a secure GFN transitions from a secure-PFN
+ * to a normal PFN during H_SVM_PAGE_OUT.
* Gets called with kvm->arch.uvmem_lock held.
*/
static void kvmppc_uvmem_page_free(struct page *page)
{
unsigned long pfn = page_to_pfn(page) -
- (kvmppc_uvmem_pgmap.res.start >> PAGE_SHIFT);
+ (kvmppc_uvmem_pgmap.range.start >> PAGE_SHIFT);
struct kvmppc_uvmem_page_pvt *pvt;
spin_lock(&kvmppc_uvmem_bitmap_lock);
@@ -655,7 +1022,10 @@ static void kvmppc_uvmem_page_free(struct page *page)
pvt = page->zone_device_data;
page->zone_device_data = NULL;
- kvmppc_uvmem_pfn_remove(pvt->gpa >> PAGE_SHIFT, pvt->kvm);
+ if (pvt->remove_gfn)
+ kvmppc_gfn_remove(pvt->gpa >> PAGE_SHIFT, pvt->kvm);
+ else
+ kvmppc_gfn_secure_mem_pfn(pvt->gpa >> PAGE_SHIFT, pvt->kvm);
kfree(pvt);
}
@@ -688,7 +1058,7 @@ kvmppc_h_svm_page_out(struct kvm *kvm, unsigned long gpa,
ret = H_PARAMETER;
srcu_idx = srcu_read_lock(&kvm->srcu);
- down_read(&kvm->mm->mmap_sem);
+ mmap_read_lock(kvm->mm);
start = gfn_to_hva(kvm, gfn);
if (kvm_is_error_hva(start))
goto out;
@@ -698,10 +1068,10 @@ kvmppc_h_svm_page_out(struct kvm *kvm, unsigned long gpa,
if (!vma || vma->vm_start > start || vma->vm_end < end)
goto out;
- if (!kvmppc_svm_page_out(vma, start, end, page_shift, kvm, gpa))
+ if (!kvmppc_svm_page_out(vma, start, end, page_shift, kvm, gpa, NULL))
ret = H_SUCCESS;
out:
- up_read(&kvm->mm->mmap_sem);
+ mmap_read_unlock(kvm->mm);
srcu_read_unlock(&kvm->srcu, srcu_idx);
return ret;
}
@@ -727,6 +1097,21 @@ out:
return (ret == U_SUCCESS) ? RESUME_GUEST : -EFAULT;
}
+int kvmppc_uvmem_memslot_create(struct kvm *kvm, const struct kvm_memory_slot *new)
+{
+ int ret = __kvmppc_uvmem_memslot_create(kvm, new);
+
+ if (!ret)
+ ret = kvmppc_uv_migrate_mem_slot(kvm, new);
+
+ return ret;
+}
+
+void kvmppc_uvmem_memslot_delete(struct kvm *kvm, const struct kvm_memory_slot *old)
+{
+ __kvmppc_uvmem_memslot_delete(kvm, old);
+}
+
static u64 kvmppc_get_secmem_size(void)
{
struct device_node *np;
@@ -734,6 +1119,20 @@ static u64 kvmppc_get_secmem_size(void)
const __be32 *prop;
u64 size = 0;
+ /*
+ * First try the new ibm,secure-memory nodes which supersede the
+ * secure-memory-ranges property.
+ * If we found some, no need to read the deprecated ones.
+ */
+ for_each_compatible_node(np, NULL, "ibm,secure-memory") {
+ prop = of_get_property(np, "reg", &len);
+ if (!prop)
+ continue;
+ size += of_read_number(prop + 2, 2);
+ }
+ if (size)
+ return size;
+
np = of_find_compatible_node(NULL, NULL, "ibm,uv-firmware");
if (!np)
goto out;
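
(Editor's note, not part of the patch.) The loop added above sums the size field of each ibm,secure-memory node's reg property by reading two 32-bit cells starting at offset 2, i.e. assuming two address cells followed by two size cells. A standalone sketch of that cell decoding, with a hypothetical reg value and a minimal stand-in for of_read_number():

#include <arpa/inet.h>	/* htonl/ntohl: build and read big-endian cells */
#include <stdint.h>
#include <stdio.h>

/* Minimal stand-in for of_read_number(): fold @size big-endian 32-bit
 * cells into one 64-bit value, most significant cell first. */
static uint64_t read_number(const uint32_t *cell, int size)
{
	uint64_t r = 0;

	while (size--)
		r = (r << 32) | ntohl(*cell++);
	return r;
}

int main(void)
{
	/* Hypothetical reg = <addr-hi addr-lo size-hi size-lo>: two address
	 * cells followed by two size cells, stored big-endian as in a DT. */
	uint32_t reg[4] = {
		htonl(0x0), htonl(0x20000000),	/* address */
		htonl(0x0), htonl(0x10000000),	/* size: 256 MiB */
	};
	uint64_t size = read_number(reg + 2, 2);	/* same "prop + 2" offset as above */

	printf("secure memory size: 0x%llx bytes\n", (unsigned long long)size);
	return 0;
}
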
@@ -777,8 +1176,12 @@ int kvmppc_uvmem_init(void)
}
kvmppc_uvmem_pgmap.type = MEMORY_DEVICE_PRIVATE;
- kvmppc_uvmem_pgmap.res = *res;
+ kvmppc_uvmem_pgmap.range.start = res->start;
+ kvmppc_uvmem_pgmap.range.end = res->end;
+ kvmppc_uvmem_pgmap.nr_range = 1;
kvmppc_uvmem_pgmap.ops = &kvmppc_uvmem_ops;
+ /* just one global instance: */
+ kvmppc_uvmem_pgmap.owner = &kvmppc_uvmem_pgmap;
addr = memremap_pages(&kvmppc_uvmem_pgmap, NUMA_NO_NODE);
if (IS_ERR(addr)) {
ret = PTR_ERR(addr);
@@ -806,8 +1209,11 @@ out:
void kvmppc_uvmem_free(void)
{
+ if (!kvmppc_uvmem_bitmap)
+ return;
+
memunmap_pages(&kvmppc_uvmem_pgmap);
- release_mem_region(kvmppc_uvmem_pgmap.res.start,
- resource_size(&kvmppc_uvmem_pgmap.res));
+ release_mem_region(kvmppc_uvmem_pgmap.range.start,
+ range_len(&kvmppc_uvmem_pgmap.range));
kfree(kvmppc_uvmem_bitmap);
}
diff --git a/arch/powerpc/kvm/book3s_interrupts.S b/arch/powerpc/kvm/book3s_interrupts.S
index f7ad99d972ce..f4bec2fc51aa 100644
--- a/arch/powerpc/kvm/book3s_interrupts.S
+++ b/arch/powerpc/kvm/book3s_interrupts.S
@@ -15,7 +15,7 @@
#include <asm/asm-compat.h>
#if defined(CONFIG_PPC_BOOK3S_64)
-#ifdef PPC64_ELF_ABI_v2
+#ifdef CONFIG_PPC64_ELF_ABI_V2
#define FUNC(name) name
#else
#define FUNC(name) GLUE(.,name)
@@ -26,7 +26,7 @@
#define FUNC(name) name
#define GET_SHADOW_VCPU(reg) lwz reg, (THREAD + THREAD_KVM_SVCPU)(r2)
-#endif /* CONFIG_PPC_BOOK3S_XX */
+#endif /* CONFIG_PPC_BOOK3S_64 */
#define VCPU_LOAD_NVGPRS(vcpu) \
PPC_LL r14, VCPU_GPR(R14)(vcpu); \
@@ -55,8 +55,7 @@
****************************************************************************/
/* Registers:
- * r3: kvm_run pointer
- * r4: vcpu pointer
+ * r3: vcpu pointer
*/
_GLOBAL(__kvmppc_vcpu_run)
@@ -68,8 +67,8 @@ kvm_start_entry:
/* Save host state to the stack */
PPC_STLU r1, -SWITCH_FRAME_SIZE(r1)
- /* Save r3 (kvm_run) and r4 (vcpu) */
- SAVE_2GPRS(3, r1)
+ /* Save r3 (vcpu) */
+ SAVE_GPR(3, r1)
/* Save non-volatile registers (r14 - r31) */
SAVE_NVGPRS(r1)
@@ -82,47 +81,46 @@ kvm_start_entry:
PPC_STL r0, _LINK(r1)
/* Load non-volatile guest state from the vcpu */
- VCPU_LOAD_NVGPRS(r4)
+ VCPU_LOAD_NVGPRS(r3)
kvm_start_lightweight:
/* Copy registers into shadow vcpu so we can access them in real mode */
- mr r3, r4
bl FUNC(kvmppc_copy_to_svcpu)
nop
- REST_GPR(4, r1)
+ REST_GPR(3, r1)
#ifdef CONFIG_PPC_BOOK3S_64
/* Get the dcbz32 flag */
- PPC_LL r3, VCPU_HFLAGS(r4)
- rldicl r3, r3, 0, 63 /* r3 &= 1 */
- stb r3, HSTATE_RESTORE_HID5(r13)
+ PPC_LL r0, VCPU_HFLAGS(r3)
+ rldicl r0, r0, 0, 63 /* r0 &= 1 */
+ stb r0, HSTATE_RESTORE_HID5(r13)
/* Load up guest SPRG3 value, since it's user readable */
- lwz r3, VCPU_SHAREDBE(r4)
- cmpwi r3, 0
- ld r5, VCPU_SHARED(r4)
+ lbz r4, VCPU_SHAREDBE(r3)
+ cmpwi r4, 0
+ ld r5, VCPU_SHARED(r3)
beq sprg3_little_endian
sprg3_big_endian:
#ifdef __BIG_ENDIAN__
- ld r3, VCPU_SHARED_SPRG3(r5)
+ ld r4, VCPU_SHARED_SPRG3(r5)
#else
addi r5, r5, VCPU_SHARED_SPRG3
- ldbrx r3, 0, r5
+ ldbrx r4, 0, r5
#endif
b after_sprg3_load
sprg3_little_endian:
#ifdef __LITTLE_ENDIAN__
- ld r3, VCPU_SHARED_SPRG3(r5)
+ ld r4, VCPU_SHARED_SPRG3(r5)
#else
addi r5, r5, VCPU_SHARED_SPRG3
- ldbrx r3, 0, r5
+ ldbrx r4, 0, r5
#endif
after_sprg3_load:
- mtspr SPRN_SPRG3, r3
+ mtspr SPRN_SPRG3, r4
#endif /* CONFIG_PPC_BOOK3S_64 */
- PPC_LL r4, VCPU_SHADOW_MSR(r4) /* get shadow_msr */
+ PPC_LL r4, VCPU_SHADOW_MSR(r3) /* get shadow_msr */
/* Jump to segment patching handler and into our guest */
bl FUNC(kvmppc_entry_trampoline)
@@ -146,7 +144,7 @@ after_sprg3_load:
*
*/
- PPC_LL r3, GPR4(r1) /* vcpu pointer */
+ PPC_LL r3, GPR3(r1) /* vcpu pointer */
/*
* kvmppc_copy_from_svcpu can clobber volatile registers, save
@@ -169,7 +167,7 @@ after_sprg3_load:
#endif /* CONFIG_PPC_BOOK3S_64 */
/* R7 = vcpu */
- PPC_LL r7, GPR4(r1)
+ PPC_LL r7, GPR3(r1)
PPC_STL r14, VCPU_GPR(R14)(r7)
PPC_STL r15, VCPU_GPR(R15)(r7)
@@ -190,11 +188,11 @@ after_sprg3_load:
PPC_STL r30, VCPU_GPR(R30)(r7)
PPC_STL r31, VCPU_GPR(R31)(r7)
- /* Pass the exit number as 3rd argument to kvmppc_handle_exit */
- lwz r5, VCPU_TRAP(r7)
+ /* Pass the exit number as 2nd argument to kvmppc_handle_exit */
+ lwz r4, VCPU_TRAP(r7)
- /* Restore r3 (kvm_run) and r4 (vcpu) */
- REST_2GPRS(3, r1)
+ /* Restore r3 (vcpu) */
+ REST_GPR(3, r1)
bl FUNC(kvmppc_handle_exit_pr)
/* If RESUME_GUEST, get back in the loop */
@@ -223,11 +221,11 @@ kvm_loop_heavyweight:
PPC_LL r4, _LINK(r1)
PPC_STL r4, (PPC_LR_STKOFF + SWITCH_FRAME_SIZE)(r1)
- /* Load vcpu and cpu_run */
- REST_2GPRS(3, r1)
+ /* Load vcpu */
+ REST_GPR(3, r1)
/* Load non-volatile guest state from the vcpu */
- VCPU_LOAD_NVGPRS(r4)
+ VCPU_LOAD_NVGPRS(r3)
/* Jump back into the beginning of this function */
b kvm_start_lightweight
@@ -235,7 +233,7 @@ kvm_loop_heavyweight:
kvm_loop_lightweight:
/* We'll need the vcpu pointer */
- REST_GPR(4, r1)
+ REST_GPR(3, r1)
/* Jump back into the beginning of this function */
b kvm_start_lightweight
diff --git a/arch/powerpc/kvm/book3s_paired_singles.c b/arch/powerpc/kvm/book3s_paired_singles.c
index bf0282775e37..a11436720a8c 100644
--- a/arch/powerpc/kvm/book3s_paired_singles.c
+++ b/arch/powerpc/kvm/book3s_paired_singles.c
@@ -169,7 +169,7 @@ static void kvmppc_inject_pf(struct kvm_vcpu *vcpu, ulong eaddr, bool is_store)
kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DATA_STORAGE);
}
-static int kvmppc_emulate_fpr_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
+static int kvmppc_emulate_fpr_load(struct kvm_vcpu *vcpu,
int rs, ulong addr, int ls_type)
{
int emulated = EMULATE_FAIL;
@@ -188,7 +188,7 @@ static int kvmppc_emulate_fpr_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
kvmppc_inject_pf(vcpu, addr, false);
goto done_load;
} else if (r == EMULATE_DO_MMIO) {
- emulated = kvmppc_handle_load(run, vcpu, KVM_MMIO_REG_FPR | rs,
+ emulated = kvmppc_handle_load(vcpu, KVM_MMIO_REG_FPR | rs,
len, 1);
goto done_load;
}
@@ -213,7 +213,7 @@ done_load:
return emulated;
}
-static int kvmppc_emulate_fpr_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
+static int kvmppc_emulate_fpr_store(struct kvm_vcpu *vcpu,
int rs, ulong addr, int ls_type)
{
int emulated = EMULATE_FAIL;
@@ -248,7 +248,7 @@ static int kvmppc_emulate_fpr_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
if (r < 0) {
kvmppc_inject_pf(vcpu, addr, true);
} else if (r == EMULATE_DO_MMIO) {
- emulated = kvmppc_handle_store(run, vcpu, val, len, 1);
+ emulated = kvmppc_handle_store(vcpu, val, len, 1);
} else {
emulated = EMULATE_DONE;
}
@@ -259,7 +259,7 @@ static int kvmppc_emulate_fpr_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
return emulated;
}
-static int kvmppc_emulate_psq_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
+static int kvmppc_emulate_psq_load(struct kvm_vcpu *vcpu,
int rs, ulong addr, bool w, int i)
{
int emulated = EMULATE_FAIL;
@@ -279,12 +279,12 @@ static int kvmppc_emulate_psq_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
kvmppc_inject_pf(vcpu, addr, false);
goto done_load;
} else if ((r == EMULATE_DO_MMIO) && w) {
- emulated = kvmppc_handle_load(run, vcpu, KVM_MMIO_REG_FPR | rs,
+ emulated = kvmppc_handle_load(vcpu, KVM_MMIO_REG_FPR | rs,
4, 1);
vcpu->arch.qpr[rs] = tmp[1];
goto done_load;
} else if (r == EMULATE_DO_MMIO) {
- emulated = kvmppc_handle_load(run, vcpu, KVM_MMIO_REG_FQPR | rs,
+ emulated = kvmppc_handle_load(vcpu, KVM_MMIO_REG_FQPR | rs,
8, 1);
goto done_load;
}
@@ -302,7 +302,7 @@ done_load:
return emulated;
}
-static int kvmppc_emulate_psq_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
+static int kvmppc_emulate_psq_store(struct kvm_vcpu *vcpu,
int rs, ulong addr, bool w, int i)
{
int emulated = EMULATE_FAIL;
@@ -318,10 +318,10 @@ static int kvmppc_emulate_psq_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
if (r < 0) {
kvmppc_inject_pf(vcpu, addr, true);
} else if ((r == EMULATE_DO_MMIO) && w) {
- emulated = kvmppc_handle_store(run, vcpu, tmp[0], 4, 1);
+ emulated = kvmppc_handle_store(vcpu, tmp[0], 4, 1);
} else if (r == EMULATE_DO_MMIO) {
u64 val = ((u64)tmp[0] << 32) | tmp[1];
- emulated = kvmppc_handle_store(run, vcpu, val, 8, 1);
+ emulated = kvmppc_handle_store(vcpu, val, 8, 1);
} else {
emulated = EMULATE_DONE;
}
@@ -618,7 +618,7 @@ static int kvmppc_ps_one_in(struct kvm_vcpu *vcpu, bool rc,
return EMULATE_DONE;
}
-int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
+int kvmppc_emulate_paired_single(struct kvm_vcpu *vcpu)
{
u32 inst;
enum emulation_result emulated = EMULATE_DONE;
@@ -680,7 +680,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
int i = inst_get_field(inst, 17, 19);
addr += get_d_signext(inst);
- emulated = kvmppc_emulate_psq_load(run, vcpu, ax_rd, addr, w, i);
+ emulated = kvmppc_emulate_psq_load(vcpu, ax_rd, addr, w, i);
break;
}
case OP_PSQ_LU:
@@ -690,7 +690,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
int i = inst_get_field(inst, 17, 19);
addr += get_d_signext(inst);
- emulated = kvmppc_emulate_psq_load(run, vcpu, ax_rd, addr, w, i);
+ emulated = kvmppc_emulate_psq_load(vcpu, ax_rd, addr, w, i);
if (emulated == EMULATE_DONE)
kvmppc_set_gpr(vcpu, ax_ra, addr);
@@ -703,7 +703,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
int i = inst_get_field(inst, 17, 19);
addr += get_d_signext(inst);
- emulated = kvmppc_emulate_psq_store(run, vcpu, ax_rd, addr, w, i);
+ emulated = kvmppc_emulate_psq_store(vcpu, ax_rd, addr, w, i);
break;
}
case OP_PSQ_STU:
@@ -713,7 +713,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
int i = inst_get_field(inst, 17, 19);
addr += get_d_signext(inst);
- emulated = kvmppc_emulate_psq_store(run, vcpu, ax_rd, addr, w, i);
+ emulated = kvmppc_emulate_psq_store(vcpu, ax_rd, addr, w, i);
if (emulated == EMULATE_DONE)
kvmppc_set_gpr(vcpu, ax_ra, addr);
@@ -733,7 +733,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
int i = inst_get_field(inst, 22, 24);
addr += kvmppc_get_gpr(vcpu, ax_rb);
- emulated = kvmppc_emulate_psq_load(run, vcpu, ax_rd, addr, w, i);
+ emulated = kvmppc_emulate_psq_load(vcpu, ax_rd, addr, w, i);
break;
}
case OP_4X_PS_CMPO0:
@@ -747,7 +747,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
int i = inst_get_field(inst, 22, 24);
addr += kvmppc_get_gpr(vcpu, ax_rb);
- emulated = kvmppc_emulate_psq_load(run, vcpu, ax_rd, addr, w, i);
+ emulated = kvmppc_emulate_psq_load(vcpu, ax_rd, addr, w, i);
if (emulated == EMULATE_DONE)
kvmppc_set_gpr(vcpu, ax_ra, addr);
@@ -824,7 +824,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
int i = inst_get_field(inst, 22, 24);
addr += kvmppc_get_gpr(vcpu, ax_rb);
- emulated = kvmppc_emulate_psq_store(run, vcpu, ax_rd, addr, w, i);
+ emulated = kvmppc_emulate_psq_store(vcpu, ax_rd, addr, w, i);
break;
}
case OP_4XW_PSQ_STUX:
@@ -834,7 +834,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
int i = inst_get_field(inst, 22, 24);
addr += kvmppc_get_gpr(vcpu, ax_rb);
- emulated = kvmppc_emulate_psq_store(run, vcpu, ax_rd, addr, w, i);
+ emulated = kvmppc_emulate_psq_store(vcpu, ax_rd, addr, w, i);
if (emulated == EMULATE_DONE)
kvmppc_set_gpr(vcpu, ax_ra, addr);
@@ -922,7 +922,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) + full_d;
- emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd, addr,
+ emulated = kvmppc_emulate_fpr_load(vcpu, ax_rd, addr,
FPU_LS_SINGLE);
break;
}
@@ -930,7 +930,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
ulong addr = kvmppc_get_gpr(vcpu, ax_ra) + full_d;
- emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd, addr,
+ emulated = kvmppc_emulate_fpr_load(vcpu, ax_rd, addr,
FPU_LS_SINGLE);
if (emulated == EMULATE_DONE)
@@ -941,7 +941,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) + full_d;
- emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd, addr,
+ emulated = kvmppc_emulate_fpr_load(vcpu, ax_rd, addr,
FPU_LS_DOUBLE);
break;
}
@@ -949,7 +949,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
ulong addr = kvmppc_get_gpr(vcpu, ax_ra) + full_d;
- emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd, addr,
+ emulated = kvmppc_emulate_fpr_load(vcpu, ax_rd, addr,
FPU_LS_DOUBLE);
if (emulated == EMULATE_DONE)
@@ -960,7 +960,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) + full_d;
- emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd, addr,
+ emulated = kvmppc_emulate_fpr_store(vcpu, ax_rd, addr,
FPU_LS_SINGLE);
break;
}
@@ -968,7 +968,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
ulong addr = kvmppc_get_gpr(vcpu, ax_ra) + full_d;
- emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd, addr,
+ emulated = kvmppc_emulate_fpr_store(vcpu, ax_rd, addr,
FPU_LS_SINGLE);
if (emulated == EMULATE_DONE)
@@ -979,7 +979,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) + full_d;
- emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd, addr,
+ emulated = kvmppc_emulate_fpr_store(vcpu, ax_rd, addr,
FPU_LS_DOUBLE);
break;
}
@@ -987,7 +987,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
ulong addr = kvmppc_get_gpr(vcpu, ax_ra) + full_d;
- emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd, addr,
+ emulated = kvmppc_emulate_fpr_store(vcpu, ax_rd, addr,
FPU_LS_DOUBLE);
if (emulated == EMULATE_DONE)
@@ -1001,7 +1001,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
ulong addr = ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0;
addr += kvmppc_get_gpr(vcpu, ax_rb);
- emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd,
+ emulated = kvmppc_emulate_fpr_load(vcpu, ax_rd,
addr, FPU_LS_SINGLE);
break;
}
@@ -1010,7 +1010,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
ulong addr = kvmppc_get_gpr(vcpu, ax_ra) +
kvmppc_get_gpr(vcpu, ax_rb);
- emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd,
+ emulated = kvmppc_emulate_fpr_load(vcpu, ax_rd,
addr, FPU_LS_SINGLE);
if (emulated == EMULATE_DONE)
@@ -1022,7 +1022,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) +
kvmppc_get_gpr(vcpu, ax_rb);
- emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd,
+ emulated = kvmppc_emulate_fpr_load(vcpu, ax_rd,
addr, FPU_LS_DOUBLE);
break;
}
@@ -1031,7 +1031,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
ulong addr = kvmppc_get_gpr(vcpu, ax_ra) +
kvmppc_get_gpr(vcpu, ax_rb);
- emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd,
+ emulated = kvmppc_emulate_fpr_load(vcpu, ax_rd,
addr, FPU_LS_DOUBLE);
if (emulated == EMULATE_DONE)
@@ -1043,7 +1043,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) +
kvmppc_get_gpr(vcpu, ax_rb);
- emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd,
+ emulated = kvmppc_emulate_fpr_store(vcpu, ax_rd,
addr, FPU_LS_SINGLE);
break;
}
@@ -1052,7 +1052,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
ulong addr = kvmppc_get_gpr(vcpu, ax_ra) +
kvmppc_get_gpr(vcpu, ax_rb);
- emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd,
+ emulated = kvmppc_emulate_fpr_store(vcpu, ax_rd,
addr, FPU_LS_SINGLE);
if (emulated == EMULATE_DONE)
@@ -1064,7 +1064,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) +
kvmppc_get_gpr(vcpu, ax_rb);
- emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd,
+ emulated = kvmppc_emulate_fpr_store(vcpu, ax_rd,
addr, FPU_LS_DOUBLE);
break;
}
@@ -1073,7 +1073,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
ulong addr = kvmppc_get_gpr(vcpu, ax_ra) +
kvmppc_get_gpr(vcpu, ax_rb);
- emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd,
+ emulated = kvmppc_emulate_fpr_store(vcpu, ax_rd,
addr, FPU_LS_DOUBLE);
if (emulated == EMULATE_DONE)
@@ -1085,7 +1085,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) +
kvmppc_get_gpr(vcpu, ax_rb);
- emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd,
+ emulated = kvmppc_emulate_fpr_store(vcpu, ax_rd,
addr,
FPU_LS_SINGLE_LOW);
break;
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index db3a87319642..9fc4dd8f66eb 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -25,6 +25,7 @@
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <linux/uaccess.h>
+#include <asm/interrupt.h>
#include <asm/io.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
@@ -136,12 +137,15 @@ static void kvmppc_core_vcpu_load_pr(struct kvm_vcpu *vcpu, int cpu)
svcpu->slb_max = to_book3s(vcpu)->slb_shadow_max;
svcpu->in_use = 0;
svcpu_put(svcpu);
-#endif
/* Disable AIL if supported */
- if (cpu_has_feature(CPU_FTR_HVMODE) &&
- cpu_has_feature(CPU_FTR_ARCH_207S))
- mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~LPCR_AIL);
+ if (cpu_has_feature(CPU_FTR_HVMODE)) {
+ if (cpu_has_feature(CPU_FTR_ARCH_207S))
+ mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~LPCR_AIL);
+ if (cpu_has_feature(CPU_FTR_ARCH_300) && (current->thread.fscr & FSCR_SCV))
+ mtspr(SPRN_FSCR, mfspr(SPRN_FSCR) & ~FSCR_SCV);
+ }
+#endif
vcpu->cpu = smp_processor_id();
#ifdef CONFIG_PPC_BOOK3S_32
@@ -164,6 +168,14 @@ static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu)
memcpy(to_book3s(vcpu)->slb_shadow, svcpu->slb, sizeof(svcpu->slb));
to_book3s(vcpu)->slb_shadow_max = svcpu->slb_max;
svcpu_put(svcpu);
+
+ /* Enable AIL if supported */
+ if (cpu_has_feature(CPU_FTR_HVMODE)) {
+ if (cpu_has_feature(CPU_FTR_ARCH_207S))
+ mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) | LPCR_AIL_3);
+ if (cpu_has_feature(CPU_FTR_ARCH_300) && (current->thread.fscr & FSCR_SCV))
+ mtspr(SPRN_FSCR, mfspr(SPRN_FSCR) | FSCR_SCV);
+ }
#endif
if (kvmppc_is_split_real(vcpu))
@@ -173,11 +185,6 @@ static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu)
kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);
kvmppc_save_tm_pr(vcpu);
- /* Enable AIL if supported */
- if (cpu_has_feature(CPU_FTR_HVMODE) &&
- cpu_has_feature(CPU_FTR_ARCH_207S))
- mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) | LPCR_AIL_3);
-
vcpu->cpu = -1;
}
@@ -239,7 +246,7 @@ static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu)
smsr |= (guest_msr & vcpu->arch.guest_owned_ext);
/* 64-bit Process MSR values */
#ifdef CONFIG_PPC_BOOK3S_64
- smsr |= MSR_ISF | MSR_HV;
+ smsr |= MSR_HV;
#endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
@@ -425,61 +432,39 @@ static int kvmppc_core_check_requests_pr(struct kvm_vcpu *vcpu)
}
/************* MMU Notifiers *************/
-static void do_kvm_unmap_hva(struct kvm *kvm, unsigned long start,
- unsigned long end)
+static bool do_kvm_unmap_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
- long i;
+ unsigned long i;
struct kvm_vcpu *vcpu;
- struct kvm_memslots *slots;
- struct kvm_memory_slot *memslot;
- slots = kvm_memslots(kvm);
- kvm_for_each_memslot(memslot, slots) {
- unsigned long hva_start, hva_end;
- gfn_t gfn, gfn_end;
+ kvm_for_each_vcpu(i, vcpu, kvm)
+ kvmppc_mmu_pte_pflush(vcpu, range->start << PAGE_SHIFT,
+ range->end << PAGE_SHIFT);
- hva_start = max(start, memslot->userspace_addr);
- hva_end = min(end, memslot->userspace_addr +
- (memslot->npages << PAGE_SHIFT));
- if (hva_start >= hva_end)
- continue;
- /*
- * {gfn(page) | page intersects with [hva_start, hva_end)} =
- * {gfn, gfn+1, ..., gfn_end-1}.
- */
- gfn = hva_to_gfn_memslot(hva_start, memslot);
- gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);
- kvm_for_each_vcpu(i, vcpu, kvm)
- kvmppc_mmu_pte_pflush(vcpu, gfn << PAGE_SHIFT,
- gfn_end << PAGE_SHIFT);
- }
+ return false;
}
-static int kvm_unmap_hva_range_pr(struct kvm *kvm, unsigned long start,
- unsigned long end)
+static bool kvm_unmap_gfn_range_pr(struct kvm *kvm, struct kvm_gfn_range *range)
{
- do_kvm_unmap_hva(kvm, start, end);
-
- return 0;
+ return do_kvm_unmap_gfn(kvm, range);
}
-static int kvm_age_hva_pr(struct kvm *kvm, unsigned long start,
- unsigned long end)
+static bool kvm_age_gfn_pr(struct kvm *kvm, struct kvm_gfn_range *range)
{
/* XXX could be more clever ;) */
- return 0;
+ return false;
}
-static int kvm_test_age_hva_pr(struct kvm *kvm, unsigned long hva)
+static bool kvm_test_age_gfn_pr(struct kvm *kvm, struct kvm_gfn_range *range)
{
/* XXX could be more clever ;) */
- return 0;
+ return false;
}
-static void kvm_set_spte_hva_pr(struct kvm *kvm, unsigned long hva, pte_t pte)
+static bool kvm_set_spte_gfn_pr(struct kvm *kvm, struct kvm_gfn_range *range)
{
/* The page will get remapped properly on its next fault */
- do_kvm_unmap_hva(kvm, hva, hva + PAGE_SIZE);
+ return do_kvm_unmap_gfn(kvm, range);
}
/*****************************************/
@@ -513,9 +498,8 @@ static void kvmppc_set_msr_pr(struct kvm_vcpu *vcpu, u64 msr)
if (msr & MSR_POW) {
if (!vcpu->arch.pending_exceptions) {
- kvm_vcpu_block(vcpu);
- kvm_clear_request(KVM_REQ_UNHALT, vcpu);
- vcpu->stat.halt_wakeup++;
+ kvm_vcpu_halt(vcpu);
+ vcpu->stat.generic.halt_wakeup++;
/* Unset POW bit after we woke up */
msr &= ~MSR_POW;
@@ -569,7 +553,7 @@ static void kvmppc_set_msr_pr(struct kvm_vcpu *vcpu, u64 msr)
#endif
}
-void kvmppc_set_pvr_pr(struct kvm_vcpu *vcpu, u32 pvr)
+static void kvmppc_set_pvr_pr(struct kvm_vcpu *vcpu, u32 pvr)
{
u32 host_pvr;
@@ -700,7 +684,7 @@ static bool kvmppc_visible_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
return kvm_is_visible_gfn(vcpu->kvm, gpa >> PAGE_SHIFT);
}
-int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
+static int kvmppc_handle_pagefault(struct kvm_vcpu *vcpu,
ulong eaddr, int vec)
{
bool data = (vec == BOOK3S_INTERRUPT_DATA_STORAGE);
@@ -740,7 +724,7 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
(vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) &&
((pte.raddr & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS))
pte.raddr &= ~SPLIT_HACK_MASK;
- /* fall through */
+ fallthrough;
case MSR_IR:
vcpu->arch.mmu.esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid);
@@ -795,7 +779,7 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
/* The guest's PTE is not mapped yet. Map on the host */
if (kvmppc_mmu_map_page(vcpu, &pte, iswrite) == -EIO) {
/* Exit KVM if mapping failed */
- run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
return RESUME_HOST;
}
if (data)
@@ -808,7 +792,7 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
vcpu->stat.mmio_exits++;
vcpu->arch.paddr_accessed = pte.raddr;
vcpu->arch.vaddr_accessed = pte.eaddr;
- r = kvmppc_emulate_mmio(run, vcpu);
+ r = kvmppc_emulate_mmio(vcpu);
if ( r == RESUME_HOST_NV )
r = RESUME_HOST;
}
@@ -992,7 +976,7 @@ static void kvmppc_emulate_fac(struct kvm_vcpu *vcpu, ulong fac)
enum emulation_result er = EMULATE_FAIL;
if (!(kvmppc_get_msr(vcpu) & MSR_PR))
- er = kvmppc_emulate_instruction(vcpu->run, vcpu);
+ er = kvmppc_emulate_instruction(vcpu);
if ((er != EMULATE_DONE) && (er != EMULATE_AGAIN)) {
/* Couldn't emulate, trigger interrupt in guest */
@@ -1058,6 +1042,8 @@ static int kvmppc_handle_fac(struct kvm_vcpu *vcpu, ulong fac)
void kvmppc_set_fscr(struct kvm_vcpu *vcpu, u64 fscr)
{
+ if (fscr & FSCR_SCV)
+ fscr &= ~FSCR_SCV; /* SCV must not be enabled */
if ((vcpu->arch.fscr & FSCR_TAR) && !(fscr & FSCR_TAR)) {
/* TAR got dropped, drop it in shadow too */
kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);
@@ -1089,8 +1075,7 @@ static void kvmppc_clear_debug(struct kvm_vcpu *vcpu)
}
}
-static int kvmppc_exit_pr_progint(struct kvm_run *run, struct kvm_vcpu *vcpu,
- unsigned int exit_nr)
+static int kvmppc_exit_pr_progint(struct kvm_vcpu *vcpu, unsigned int exit_nr)
{
enum emulation_result er;
ulong flags;
@@ -1124,7 +1109,7 @@ static int kvmppc_exit_pr_progint(struct kvm_run *run, struct kvm_vcpu *vcpu,
}
vcpu->stat.emulated_inst_exits++;
- er = kvmppc_emulate_instruction(run, vcpu);
+ er = kvmppc_emulate_instruction(vcpu);
switch (er) {
case EMULATE_DONE:
r = RESUME_GUEST_NV;
@@ -1139,7 +1124,7 @@ static int kvmppc_exit_pr_progint(struct kvm_run *run, struct kvm_vcpu *vcpu,
r = RESUME_GUEST;
break;
case EMULATE_DO_MMIO:
- run->exit_reason = KVM_EXIT_MMIO;
+ vcpu->run->exit_reason = KVM_EXIT_MMIO;
r = RESUME_HOST_NV;
break;
case EMULATE_EXIT_USER:
@@ -1152,9 +1137,9 @@ static int kvmppc_exit_pr_progint(struct kvm_run *run, struct kvm_vcpu *vcpu,
return r;
}
-int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
- unsigned int exit_nr)
+int kvmppc_handle_exit_pr(struct kvm_vcpu *vcpu, unsigned int exit_nr)
{
+ struct kvm_run *run = vcpu->run;
int r = RESUME_HOST;
int s;
@@ -1198,7 +1183,7 @@ int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
/* only care about PTEG not found errors, but leave NX alone */
if (shadow_srr1 & 0x40000000) {
int idx = srcu_read_lock(&vcpu->kvm->srcu);
- r = kvmppc_handle_pagefault(run, vcpu, kvmppc_get_pc(vcpu), exit_nr);
+ r = kvmppc_handle_pagefault(vcpu, kvmppc_get_pc(vcpu), exit_nr);
srcu_read_unlock(&vcpu->kvm->srcu, idx);
vcpu->stat.sp_instruc++;
} else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
@@ -1248,7 +1233,7 @@ int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
*/
if (fault_dsisr & (DSISR_NOHPTE | DSISR_PROTFAULT)) {
int idx = srcu_read_lock(&vcpu->kvm->srcu);
- r = kvmppc_handle_pagefault(run, vcpu, dar, exit_nr);
+ r = kvmppc_handle_pagefault(vcpu, dar, exit_nr);
srcu_read_unlock(&vcpu->kvm->srcu, idx);
} else {
kvmppc_core_queue_data_storage(vcpu, dar, fault_dsisr);
@@ -1292,7 +1277,7 @@ int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
break;
case BOOK3S_INTERRUPT_PROGRAM:
case BOOK3S_INTERRUPT_H_EMUL_ASSIST:
- r = kvmppc_exit_pr_progint(run, vcpu, exit_nr);
+ r = kvmppc_exit_pr_progint(vcpu, exit_nr);
break;
case BOOK3S_INTERRUPT_SYSCALL:
{
@@ -1301,7 +1286,7 @@ int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
/* Get last sc for papr */
if (vcpu->arch.papr_enabled) {
- /* The sc instuction points SRR0 to the next inst */
+ /* The sc instruction points SRR0 to the next inst */
emul = kvmppc_get_last_inst(vcpu, INST_SC, &last_sc);
if (emul != EMULATE_DONE) {
kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) - 4);
@@ -1370,7 +1355,7 @@ int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
emul = kvmppc_get_last_inst(vcpu, INST_GENERIC,
&last_inst);
if (emul == EMULATE_DONE)
- r = kvmppc_exit_pr_progint(run, vcpu, exit_nr);
+ r = kvmppc_exit_pr_progint(vcpu, exit_nr);
else
r = RESUME_GUEST;
@@ -1795,7 +1780,7 @@ static int kvmppc_core_vcpu_create_pr(struct kvm_vcpu *vcpu)
vcpu->arch.shadow_msr = MSR_USER64 & ~MSR_LE;
- err = kvmppc_mmu_init(vcpu);
+ err = kvmppc_mmu_init_pr(vcpu);
if (err < 0)
goto free_shared_page;
@@ -1825,16 +1810,13 @@ static void kvmppc_core_vcpu_free_pr(struct kvm_vcpu *vcpu)
vfree(vcpu_book3s);
}
-static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
+static int kvmppc_vcpu_run_pr(struct kvm_vcpu *vcpu)
{
int ret;
-#ifdef CONFIG_ALTIVEC
- unsigned long uninitialized_var(vrsave);
-#endif
/* Check if we can run the vcpu at all */
if (!vcpu->arch.sane) {
- kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
ret = -EINVAL;
goto out;
}
@@ -1861,7 +1843,7 @@ static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
kvmppc_fix_ee_before_entry();
- ret = __kvmppc_vcpu_run(kvm_run, vcpu);
+ ret = __kvmppc_vcpu_run(vcpu);
kvmppc_clear_debug(vcpu);
@@ -1874,6 +1856,7 @@ static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
/* Make sure we save the guest TAR/EBB/DSCR state */
kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);
+ srr_regs_clobbered();
out:
vcpu->mode = OUTSIDE_GUEST_MODE;
return ret;
@@ -1885,7 +1868,6 @@ out:
static int kvm_vm_ioctl_get_dirty_log_pr(struct kvm *kvm,
struct kvm_dirty_log *log)
{
- struct kvm_memslots *slots;
struct kvm_memory_slot *memslot;
struct kvm_vcpu *vcpu;
ulong ga, ga_end;
@@ -1895,15 +1877,12 @@ static int kvm_vm_ioctl_get_dirty_log_pr(struct kvm *kvm,
mutex_lock(&kvm->slots_lock);
- r = kvm_get_dirty_log(kvm, log, &is_dirty);
+ r = kvm_get_dirty_log(kvm, log, &is_dirty, &memslot);
if (r)
goto out;
/* If nothing is dirty, don't bother messing with page tables. */
if (is_dirty) {
- slots = kvm_memslots(kvm);
- memslot = id_to_memslot(slots, log->slot);
-
ga = memslot->base_gfn << PAGE_SHIFT;
ga_end = ga + (memslot->npages << PAGE_SHIFT);
@@ -1927,34 +1906,26 @@ static void kvmppc_core_flush_memslot_pr(struct kvm *kvm,
}
static int kvmppc_core_prepare_memory_region_pr(struct kvm *kvm,
- struct kvm_memory_slot *memslot,
- const struct kvm_userspace_memory_region *mem)
+ const struct kvm_memory_slot *old,
+ struct kvm_memory_slot *new,
+ enum kvm_mr_change change)
{
return 0;
}
static void kvmppc_core_commit_memory_region_pr(struct kvm *kvm,
- const struct kvm_userspace_memory_region *mem,
- const struct kvm_memory_slot *old,
+ struct kvm_memory_slot *old,
const struct kvm_memory_slot *new,
enum kvm_mr_change change)
{
return;
}
-static void kvmppc_core_free_memslot_pr(struct kvm_memory_slot *free,
- struct kvm_memory_slot *dont)
+static void kvmppc_core_free_memslot_pr(struct kvm_memory_slot *slot)
{
return;
}
-static int kvmppc_core_create_memslot_pr(struct kvm_memory_slot *slot,
- unsigned long npages)
-{
- return 0;
-}
-
-
#ifdef CONFIG_PPC64
static int kvm_vm_ioctl_get_smmu_info_pr(struct kvm *kvm,
struct kvm_ppc_smmu_info *info)
@@ -2094,13 +2065,11 @@ static struct kvmppc_ops kvm_ops_pr = {
.flush_memslot = kvmppc_core_flush_memslot_pr,
.prepare_memory_region = kvmppc_core_prepare_memory_region_pr,
.commit_memory_region = kvmppc_core_commit_memory_region_pr,
- .unmap_hva_range = kvm_unmap_hva_range_pr,
- .age_hva = kvm_age_hva_pr,
- .test_age_hva = kvm_test_age_hva_pr,
- .set_spte_hva = kvm_set_spte_hva_pr,
- .mmu_destroy = kvmppc_mmu_destroy_pr,
+ .unmap_gfn_range = kvm_unmap_gfn_range_pr,
+ .age_gfn = kvm_age_gfn_pr,
+ .test_age_gfn = kvm_test_age_gfn_pr,
+ .set_spte_gfn = kvm_set_spte_gfn_pr,
.free_memslot = kvmppc_core_free_memslot_pr,
- .create_memslot = kvmppc_core_create_memslot_pr,
.init_vm = kvmppc_core_init_vm_pr,
.destroy_vm = kvmppc_core_destroy_vm_pr,
.get_smmu_info = kvm_vm_ioctl_get_smmu_info_pr,
diff --git a/arch/powerpc/kvm/book3s_pr_papr.c b/arch/powerpc/kvm/book3s_pr_papr.c
index 031c8015864a..b2c89e850d7a 100644
--- a/arch/powerpc/kvm/book3s_pr_papr.c
+++ b/arch/powerpc/kvm/book3s_pr_papr.c
@@ -281,6 +281,22 @@ static int kvmppc_h_pr_logical_ci_store(struct kvm_vcpu *vcpu)
return EMULATE_DONE;
}
+static int kvmppc_h_pr_set_mode(struct kvm_vcpu *vcpu)
+{
+ unsigned long mflags = kvmppc_get_gpr(vcpu, 4);
+ unsigned long resource = kvmppc_get_gpr(vcpu, 5);
+
+ if (resource == H_SET_MODE_RESOURCE_ADDR_TRANS_MODE) {
+ /* KVM PR does not provide AIL!=0 to guests */
+ if (mflags == 0)
+ kvmppc_set_gpr(vcpu, 3, H_SUCCESS);
+ else
+ kvmppc_set_gpr(vcpu, 3, H_UNSUPPORTED_FLAG_START - 63);
+ return EMULATE_DONE;
+ }
+ return EMULATE_FAIL;
+}
+
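For reference, a minimal userspace sketch (not part of this patch) of the decision kvmppc_h_pr_set_mode() implements for the address-translation-mode resource: PR KVM only accepts AIL=0, and any other mode is reported through the per-flag unsupported-flag error range. The hcall constants below are stand-ins, not the kernel's definitions.

    #include <stdio.h>

    /* Stand-in values for illustration; the kernel's hvcall headers define
     * the real ones. */
    #define H_SUCCESS			0L
    #define H_UNSUPPORTED_FLAG_START	-256L

    static long set_mode_addr_trans(unsigned long mflags)
    {
    	/* KVM PR cannot offer AIL != 0, so only mode 0 succeeds. */
    	if (mflags == 0)
    		return H_SUCCESS;
    	/* Any other mode maps into the per-flag "unsupported" error range. */
    	return H_UNSUPPORTED_FLAG_START - 63;
    }

    int main(void)
    {
    	printf("AIL=0 -> %ld, AIL=2 -> %ld\n",
    	       set_mode_addr_trans(0), set_mode_addr_trans(2));
    	return 0;
    }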
#ifdef CONFIG_SPAPR_TCE_IOMMU
static int kvmppc_h_pr_put_tce(struct kvm_vcpu *vcpu)
{
@@ -376,14 +392,15 @@ int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd)
return kvmppc_h_pr_stuff_tce(vcpu);
case H_CEDE:
kvmppc_set_msr_fast(vcpu, kvmppc_get_msr(vcpu) | MSR_EE);
- kvm_vcpu_block(vcpu);
- kvm_clear_request(KVM_REQ_UNHALT, vcpu);
- vcpu->stat.halt_wakeup++;
+ kvm_vcpu_halt(vcpu);
+ vcpu->stat.generic.halt_wakeup++;
return EMULATE_DONE;
case H_LOGICAL_CI_LOAD:
return kvmppc_h_pr_logical_ci_load(vcpu);
case H_LOGICAL_CI_STORE:
return kvmppc_h_pr_logical_ci_store(vcpu);
+ case H_SET_MODE:
+ return kvmppc_h_pr_set_mode(vcpu);
case H_XIRR:
case H_CPPR:
case H_EOI:
@@ -415,12 +432,16 @@ int kvmppc_hcall_impl_pr(unsigned long cmd)
case H_REMOVE:
case H_PROTECT:
case H_BULK_REMOVE:
+#ifdef CONFIG_SPAPR_TCE_IOMMU
+ case H_GET_TCE:
case H_PUT_TCE:
case H_PUT_TCE_INDIRECT:
case H_STUFF_TCE:
+#endif
case H_CEDE:
case H_LOGICAL_CI_LOAD:
case H_LOGICAL_CI_STORE:
+ case H_SET_MODE:
#ifdef CONFIG_KVM_XICS
case H_XIRR:
case H_CPPR:
@@ -445,8 +466,12 @@ static unsigned int default_hcall_list[] = {
H_REMOVE,
H_PROTECT,
H_BULK_REMOVE,
+#ifdef CONFIG_SPAPR_TCE_IOMMU
+ H_GET_TCE,
H_PUT_TCE,
+#endif
H_CEDE,
+ H_SET_MODE,
#ifdef CONFIG_KVM_XICS
H_XIRR,
H_CPPR,
diff --git a/arch/powerpc/kvm/book3s_rmhandlers.S b/arch/powerpc/kvm/book3s_rmhandlers.S
index 3dc129a254b5..03886ca24498 100644
--- a/arch/powerpc/kvm/book3s_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_rmhandlers.S
@@ -26,7 +26,7 @@
#if defined(CONFIG_PPC_BOOK3S_64)
-#ifdef PPC64_ELF_ABI_v2
+#ifdef CONFIG_PPC64_ELF_ABI_V2
#define FUNC(name) name
#else
#define FUNC(name) GLUE(.,name)
@@ -36,8 +36,8 @@
#define FUNC(name) name
-#define RFI_TO_KERNEL RFI
-#define RFI_TO_GUEST RFI
+#define RFI_TO_KERNEL rfi
+#define RFI_TO_GUEST rfi
.macro INTERRUPT_TRAMPOLINE intno
diff --git a/arch/powerpc/kvm/book3s_rtas.c b/arch/powerpc/kvm/book3s_rtas.c
index 26b25994c969..6808bda0dbc1 100644
--- a/arch/powerpc/kvm/book3s_rtas.c
+++ b/arch/powerpc/kvm/book3s_rtas.c
@@ -229,7 +229,9 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu)
*/
args_phys = kvmppc_get_gpr(vcpu, 4) & KVM_PAM;
+ kvm_vcpu_srcu_read_lock(vcpu);
rc = kvm_read_guest(vcpu->kvm, args_phys, &args, sizeof(args));
+ kvm_vcpu_srcu_read_unlock(vcpu);
if (rc)
goto fail;
@@ -240,6 +242,17 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu)
* value so we can restore it on the way out.
*/
orig_rets = args.rets;
+ if (be32_to_cpu(args.nargs) >= ARRAY_SIZE(args.args)) {
+ /*
+ * Don't overflow our args array: ensure there is room for
+ * at least rets[0] (even if the call specifies 0 nret).
+ *
+ * Each handler must then check for the correct nargs and nret
+ * values, but they may always return failure in rets[0].
+ */
+ rc = -EINVAL;
+ goto fail;
+ }
args.rets = &args.args[be32_to_cpu(args.nargs)];
mutex_lock(&vcpu->kvm->arch.rtas_token_lock);
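The nargs check added above is the standard "validate a guest-controlled count before using it as an index" pattern. A stripped-down sketch, assuming a fixed-size array and eliding the big-endian conversion (names and the array size are illustrative, not the kernel's):

    #include <errno.h>
    #include <stdint.h>

    #define MAX_ARGS 16	/* stand-in for ARRAY_SIZE(args.args) */

    struct rtas_args_sketch {
    	uint32_t nargs;			/* guest-controlled */
    	uint32_t args[MAX_ARGS];
    	uint32_t *rets;
    };

    static int point_rets(struct rtas_args_sketch *a)
    {
    	/*
    	 * Reject counts that would place rets at or past the end of args[],
    	 * so rets[0] stays writable even when the call declares nret = 0.
    	 */
    	if (a->nargs >= MAX_ARGS)
    		return -EINVAL;
    	a->rets = &a->args[a->nargs];
    	return 0;
    }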
@@ -267,9 +280,17 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu)
fail:
/*
* We only get here if the guest has called RTAS with a bogus
- * args pointer. That means we can't get to the args, and so we
- * can't fail the RTAS call. So fail right out to userspace,
- * which should kill the guest.
+ * args pointer or nargs/nret values that would overflow the
+ * array. That means we can't get to the args, and so we can't
+ * fail the RTAS call. So fail right out to userspace, which
+ * should kill the guest.
+ *
+ * SLOF should actually pass the hcall return value from the
+ * rtas handler call in r3, so enter_rtas could be modified to
+ * return a failure indication in r3 and we could return such
+ * errors to the guest rather than failing to host userspace.
+ * However old guests that don't test for failure could then
+ * continue silently after errors, so for now we won't do this.
*/
return rc;
}
diff --git a/arch/powerpc/kvm/book3s_segment.S b/arch/powerpc/kvm/book3s_segment.S
index 0169bab544dd..202046a83fc1 100644
--- a/arch/powerpc/kvm/book3s_segment.S
+++ b/arch/powerpc/kvm/book3s_segment.S
@@ -164,19 +164,15 @@ kvmppc_interrupt_pr:
/* 64-bit entry. Register usage at this point:
*
* SPRG_SCRATCH0 = guest R13
+ * R9 = HSTATE_IN_GUEST
* R12 = (guest CR << 32) | exit handler id
* R13 = PACA
* HSTATE.SCRATCH0 = guest R12
- * HSTATE.SCRATCH1 = guest CTR if RELOCATABLE
+ * HSTATE.SCRATCH2 = guest R9
*/
#ifdef CONFIG_PPC64
/* Match 32-bit entry */
-#ifdef CONFIG_RELOCATABLE
- std r9, HSTATE_SCRATCH2(r13)
- ld r9, HSTATE_SCRATCH1(r13)
- mtctr r9
- ld r9, HSTATE_SCRATCH2(r13)
-#endif
+ ld r9,HSTATE_SCRATCH2(r13)
rotldi r12, r12, 32 /* Flip R12 halves for stw */
stw r12, HSTATE_SCRATCH1(r13) /* CR is now in the low half */
srdi r12, r12, 32 /* shift trap into low half */
diff --git a/arch/powerpc/kvm/book3s_xics.c b/arch/powerpc/kvm/book3s_xics.c
index 381bf8dea193..589a8f257120 100644
--- a/arch/powerpc/kvm/book3s_xics.c
+++ b/arch/powerpc/kvm/book3s_xics.c
@@ -10,13 +10,13 @@
#include <linux/gfp.h>
#include <linux/anon_inodes.h>
#include <linux/spinlock.h>
-
+#include <linux/debugfs.h>
#include <linux/uaccess.h>
+
#include <asm/kvm_book3s.h>
#include <asm/kvm_ppc.h>
#include <asm/hvcall.h>
#include <asm/xics.h>
-#include <asm/debugfs.h>
#include <asm/time.h>
#include <linux/seq_file.h>
@@ -462,7 +462,7 @@ static void icp_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
* new guy. We cannot assume that the rejected interrupt is less
* favored than the new one, and thus doesn't need to be delivered,
* because by the time we exit icp_try_to_deliver() the target
- * processor may well have alrady consumed & completed it, and thus
+ * processor may well have already consumed & completed it, and thus
* the rejected interrupt might actually be already acceptable.
*/
if (icp_try_to_deliver(icp, new_irq, state->priority, &reject)) {
@@ -473,7 +473,7 @@ static void icp_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
arch_spin_unlock(&ics->lock);
local_irq_restore(flags);
new_irq = reject;
- check_resend = 0;
+ check_resend = false;
goto again;
}
} else {
@@ -501,7 +501,7 @@ static void icp_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
state->resend = 0;
arch_spin_unlock(&ics->lock);
local_irq_restore(flags);
- check_resend = 0;
+ check_resend = false;
goto again;
}
}
@@ -942,8 +942,8 @@ static int xics_debug_show(struct seq_file *m, void *private)
struct kvmppc_xics *xics = m->private;
struct kvm *kvm = xics->kvm;
struct kvm_vcpu *vcpu;
- int icsid, i;
- unsigned long flags;
+ int icsid;
+ unsigned long flags, i;
unsigned long t_rm_kick_vcpu, t_rm_check_resend;
unsigned long t_rm_notify_eoi;
unsigned long t_reject, t_check_resend;
@@ -1016,19 +1016,10 @@ DEFINE_SHOW_ATTRIBUTE(xics_debug);
static void xics_debugfs_init(struct kvmppc_xics *xics)
{
- char *name;
-
- name = kasprintf(GFP_KERNEL, "kvm-xics-%p", xics);
- if (!name) {
- pr_err("%s: no memory for name\n", __func__);
- return;
- }
-
- xics->dentry = debugfs_create_file(name, 0444, powerpc_debugfs_root,
+ xics->dentry = debugfs_create_file("xics", 0444, xics->kvm->debugfs_dentry,
xics, &xics_debug_fops);
- pr_debug("%s: created %s\n", __func__, name);
- kfree(name);
+ pr_debug("%s: created\n", __func__);
}
static struct kvmppc_ics *kvmppc_xics_create_ics(struct kvm *kvm,
@@ -1334,47 +1325,97 @@ static int xics_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
return -ENXIO;
}
-static void kvmppc_xics_free(struct kvm_device *dev)
+/*
+ * Called when device fd is closed. kvm->lock is held.
+ */
+static void kvmppc_xics_release(struct kvm_device *dev)
{
struct kvmppc_xics *xics = dev->private;
- int i;
+ unsigned long i;
struct kvm *kvm = xics->kvm;
+ struct kvm_vcpu *vcpu;
+
+ pr_devel("Releasing xics device\n");
+
+ /*
+ * Since this is the device release function, we know that
+ * userspace does not have any open fd referring to the
+	 * device. Therefore none of the device attribute set/get
+	 * functions can be executing concurrently, and likewise the
+	 * connect_vcpu and set/clr_mapped functions cannot be
+	 * executing either.
+ */
debugfs_remove(xics->dentry);
+ /*
+ * We should clean up the vCPU interrupt presenters first.
+ */
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ /*
+ * Take vcpu->mutex to ensure that no one_reg get/set ioctl
+ * (i.e. kvmppc_xics_[gs]et_icp) can be done concurrently.
+ * Holding the vcpu->mutex also means that execution is
+ * excluded for the vcpu until the ICP was freed. When the vcpu
+ * can execute again, vcpu->arch.icp and vcpu->arch.irq_type
+ * have been cleared and the vcpu will not be going into the
+ * XICS code anymore.
+ */
+ mutex_lock(&vcpu->mutex);
+ kvmppc_xics_free_icp(vcpu);
+ mutex_unlock(&vcpu->mutex);
+ }
+
if (kvm)
kvm->arch.xics = NULL;
- for (i = 0; i <= xics->max_icsid; i++)
+ for (i = 0; i <= xics->max_icsid; i++) {
kfree(xics->ics[i]);
- kfree(xics);
+ xics->ics[i] = NULL;
+ }
+ /*
+	 * A reference to the kvmppc_xics pointer is now kept under
+	 * the xics_device pointer of the machine for reuse. For now it
+	 * is freed when the VM is destroyed, until we fix all the
+	 * execution paths.
+ */
kfree(dev);
}
+static struct kvmppc_xics *kvmppc_xics_get_device(struct kvm *kvm)
+{
+ struct kvmppc_xics **kvm_xics_device = &kvm->arch.xics_device;
+ struct kvmppc_xics *xics = *kvm_xics_device;
+
+ if (!xics) {
+ xics = kzalloc(sizeof(*xics), GFP_KERNEL);
+ *kvm_xics_device = xics;
+ } else {
+ memset(xics, 0, sizeof(*xics));
+ }
+
+ return xics;
+}
+
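kvmppc_xics_get_device() turns the XICS state into a per-VM lazy singleton: the object survives a device-fd release so it can be reused on a later create, which is why kvmppc_xics_release() above only clears the ICS pointers instead of freeing the structure. A generic sketch of that allocate-once/reset-on-reuse pattern (the struct and names here are illustrative only):

    #include <stdlib.h>
    #include <string.h>

    struct xics_sketch { int dummy[4]; };	/* illustrative stand-in */

    /* One cached instance per VM: allocate on first create, zero and reuse
     * on later creates, free only when the VM itself goes away. */
    static struct xics_sketch *get_device(struct xics_sketch **cached)
    {
    	struct xics_sketch *d = *cached;

    	if (!d) {
    		d = calloc(1, sizeof(*d));
    		*cached = d;		/* may stay NULL on allocation failure */
    	} else {
    		memset(d, 0, sizeof(*d));
    	}
    	return d;
    }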
static int kvmppc_xics_create(struct kvm_device *dev, u32 type)
{
struct kvmppc_xics *xics;
struct kvm *kvm = dev->kvm;
- int ret = 0;
- xics = kzalloc(sizeof(*xics), GFP_KERNEL);
+ pr_devel("Creating xics for partition\n");
+
+ /* Already there ? */
+ if (kvm->arch.xics)
+ return -EEXIST;
+
+ xics = kvmppc_xics_get_device(kvm);
if (!xics)
return -ENOMEM;
dev->private = xics;
xics->dev = dev;
xics->kvm = kvm;
-
- /* Already there ? */
- if (kvm->arch.xics)
- ret = -EEXIST;
- else
- kvm->arch.xics = xics;
-
- if (ret) {
- kfree(xics);
- return ret;
- }
+ kvm->arch.xics = xics;
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
if (cpu_has_feature(CPU_FTR_ARCH_206) &&
@@ -1390,7 +1431,7 @@ static int kvmppc_xics_create(struct kvm_device *dev, u32 type)
static void kvmppc_xics_init(struct kvm_device *dev)
{
- struct kvmppc_xics *xics = (struct kvmppc_xics *)dev->private;
+ struct kvmppc_xics *xics = dev->private;
xics_debugfs_init(xics);
}
@@ -1399,7 +1440,7 @@ struct kvm_device_ops kvm_xics_ops = {
.name = "kvm-xics",
.create = kvmppc_xics_create,
.init = kvmppc_xics_init,
- .destroy = kvmppc_xics_free,
+ .release = kvmppc_xics_release,
.set_attr = xics_set_attr,
.get_attr = xics_get_attr,
.has_attr = xics_has_attr,
@@ -1415,7 +1456,7 @@ int kvmppc_xics_connect_vcpu(struct kvm_device *dev, struct kvm_vcpu *vcpu,
return -EPERM;
if (xics->kvm != vcpu->kvm)
return -EPERM;
- if (vcpu->arch.irq_type)
+ if (vcpu->arch.irq_type != KVMPPC_IRQ_DEFAULT)
return -EBUSY;
r = kvmppc_xics_create_icp(vcpu, xcpu);
diff --git a/arch/powerpc/kvm/book3s_xics.h b/arch/powerpc/kvm/book3s_xics.h
index 6231f76bdd66..08fb0843faf5 100644
--- a/arch/powerpc/kvm/book3s_xics.h
+++ b/arch/powerpc/kvm/book3s_xics.h
@@ -116,7 +116,7 @@ static inline struct kvmppc_icp *kvmppc_xics_find_server(struct kvm *kvm,
u32 nr)
{
struct kvm_vcpu *vcpu = NULL;
- int i;
+ unsigned long i;
kvm_for_each_vcpu(i, vcpu, kvm) {
if (vcpu->arch.icp && nr == vcpu->arch.icp->server_num)
@@ -143,6 +143,7 @@ static inline struct kvmppc_ics *kvmppc_xics_find_ics(struct kvmppc_xics *xics,
}
extern unsigned long xics_rm_h_xirr(struct kvm_vcpu *vcpu);
+extern unsigned long xics_rm_h_xirr_x(struct kvm_vcpu *vcpu);
extern int xics_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
unsigned long mfrr);
extern int xics_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr);
diff --git a/arch/powerpc/kvm/book3s_xive.c b/arch/powerpc/kvm/book3s_xive.c
index 85215e79db42..4ca23644f752 100644
--- a/arch/powerpc/kvm/book3s_xive.c
+++ b/arch/powerpc/kvm/book3s_xive.c
@@ -14,6 +14,7 @@
#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/uaccess.h>
+#include <linux/irqdomain.h>
#include <asm/kvm_book3s.h>
#include <asm/kvm_ppc.h>
#include <asm/hvcall.h>
@@ -21,7 +22,6 @@
#include <asm/xive.h>
#include <asm/xive-regs.h>
#include <asm/debug.h>
-#include <asm/debugfs.h>
#include <asm/time.h>
#include <asm/opal.h>
@@ -30,27 +30,629 @@
#include "book3s_xive.h"
-
-/*
- * Virtual mode variants of the hcalls for use on radix/radix
- * with AIL. They require the VCPU's VP to be "pushed"
- *
- * We still instantiate them here because we use some of the
- * generated utility functions as well in this file.
- */
-#define XIVE_RUNTIME_CHECKS
-#define X_PFX xive_vm_
-#define X_STATIC static
-#define X_STAT_PFX stat_vm_
-#define __x_tima xive_tima
#define __x_eoi_page(xd) ((void __iomem *)((xd)->eoi_mmio))
#define __x_trig_page(xd) ((void __iomem *)((xd)->trig_mmio))
-#define __x_writeb __raw_writeb
-#define __x_readw __raw_readw
-#define __x_readq __raw_readq
-#define __x_writeq __raw_writeq
-#include "book3s_xive_template.c"
+/* Dummy interrupt used when taking interrupts out of a queue in H_CPPR */
+#define XICS_DUMMY 1
+
+static void xive_vm_ack_pending(struct kvmppc_xive_vcpu *xc)
+{
+ u8 cppr;
+ u16 ack;
+
+ /*
+ * Ensure any previous store to CPPR is ordered vs.
+ * the subsequent loads from PIPR or ACK.
+ */
+ eieio();
+
+ /* Perform the acknowledge OS to register cycle. */
+ ack = be16_to_cpu(__raw_readw(xive_tima + TM_SPC_ACK_OS_REG));
+
+ /* Synchronize subsequent queue accesses */
+ mb();
+
+ /* XXX Check grouping level */
+
+ /* Anything ? */
+ if (!((ack >> 8) & TM_QW1_NSR_EO))
+ return;
+
+ /* Grab CPPR of the most favored pending interrupt */
+ cppr = ack & 0xff;
+ if (cppr < 8)
+ xc->pending |= 1 << cppr;
+
+ /* Check consistency */
+ if (cppr >= xc->hw_cppr)
+ pr_warn("KVM-XIVE: CPU %d odd ack CPPR, got %d at %d\n",
+ smp_processor_id(), cppr, xc->hw_cppr);
+
+ /*
+ * Update our image of the HW CPPR. We don't yet modify
+ * xc->cppr, this will be done as we scan for interrupts
+ * in the queues.
+ */
+ xc->hw_cppr = cppr;
+}
+
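The acknowledge cycle above packs two things into one 16-bit read: the low byte is the CPPR of the most favored pending interrupt and the high byte carries the notification (NSR/EO) bit. A hedged sketch of the unpacking, with the bit value a stand-in for TM_QW1_NSR_EO:

    #include <stdbool.h>
    #include <stdint.h>

    #define NSR_EO	0x80	/* stand-in for TM_QW1_NSR_EO */

    /*
     * Returns true and records the priority when the OS ack reports a newly
     * presented interrupt; the low byte is the CPPR, the high byte carries
     * the notification bit.
     */
    static bool parse_os_ack(uint16_t ack, uint8_t *pending, uint8_t *hw_cppr)
    {
    	uint8_t cppr;

    	if (!((ack >> 8) & NSR_EO))
    		return false;		/* nothing was presented */

    	cppr = ack & 0xff;
    	if (cppr < 8)
    		*pending |= 1 << cppr;	/* remember the pending priority */
    	*hw_cppr = cppr;
    	return true;
    }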
+static u8 xive_vm_esb_load(struct xive_irq_data *xd, u32 offset)
+{
+ u64 val;
+
+ if (offset == XIVE_ESB_SET_PQ_10 && xd->flags & XIVE_IRQ_FLAG_STORE_EOI)
+ offset |= XIVE_ESB_LD_ST_MO;
+
+ val = __raw_readq(__x_eoi_page(xd) + offset);
+#ifdef __LITTLE_ENDIAN__
+ val >>= 64-8;
+#endif
+ return (u8)val;
+}
+
+
+static void xive_vm_source_eoi(u32 hw_irq, struct xive_irq_data *xd)
+{
+ /* If the XIVE supports the new "store EOI facility, use it */
+	/* If the XIVE supports the new "store EOI" facility, use it */
+ __raw_writeq(0, __x_eoi_page(xd) + XIVE_ESB_STORE_EOI);
+ else if (xd->flags & XIVE_IRQ_FLAG_LSI) {
+ /*
+ * For LSIs the HW EOI cycle is used rather than PQ bits,
+		 * as they are automatically re-triggered in HW when still
+ * pending.
+ */
+ __raw_readq(__x_eoi_page(xd) + XIVE_ESB_LOAD_EOI);
+ } else {
+ uint64_t eoi_val;
+
+ /*
+ * Otherwise for EOI, we use the special MMIO that does
+ * a clear of both P and Q and returns the old Q,
+ * except for LSIs where we use the "EOI cycle" special
+ * load.
+ *
+ * This allows us to then do a re-trigger if Q was set
+		 * rather than synthesizing an interrupt in software
+ */
+ eoi_val = xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_00);
+
+ /* Re-trigger if needed */
+ if ((eoi_val & 1) && __x_trig_page(xd))
+ __raw_writeq(0, __x_trig_page(xd));
+ }
+}
+
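xive_vm_source_eoi() picks one of three mutually exclusive EOI strategies from the source flags: a store-EOI page, the LSI load-EOI cycle, or a PQ=00 load whose returned Q bit says whether a coalesced trigger must be replayed. A boolean sketch of that decision, flag handling only with the MMIO accesses left out:

    #include <stdbool.h>
    #include <stdint.h>

    enum eoi_action { EOI_STORE, EOI_LSI_LOAD, EOI_CLEAR_PQ };

    /*
     * old_pq is the value returned by the PQ=00 load in the third case; its
     * Q bit (bit 0) means a trigger arrived while the source was masked and
     * must be replayed through the trigger page.
     */
    static enum eoi_action pick_eoi(bool store_eoi, bool lsi, uint8_t old_pq,
    				bool *retrigger)
    {
    	*retrigger = false;
    	if (store_eoi)
    		return EOI_STORE;	/* dedicated store-EOI MMIO */
    	if (lsi)
    		return EOI_LSI_LOAD;	/* HW replays LSIs still asserted */
    	*retrigger = old_pq & 1;	/* Q was set: replay the interrupt */
    	return EOI_CLEAR_PQ;
    }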
+enum {
+ scan_fetch,
+ scan_poll,
+ scan_eoi,
+};
+
+static u32 xive_vm_scan_interrupts(struct kvmppc_xive_vcpu *xc,
+ u8 pending, int scan_type)
+{
+ u32 hirq = 0;
+ u8 prio = 0xff;
+
+ /* Find highest pending priority */
+ while ((xc->mfrr != 0xff || pending != 0) && hirq == 0) {
+ struct xive_q *q;
+ u32 idx, toggle;
+ __be32 *qpage;
+
+ /*
+ * If pending is 0 this will return 0xff which is what
+ * we want
+ */
+ prio = ffs(pending) - 1;
+
+ /* Don't scan past the guest cppr */
+ if (prio >= xc->cppr || prio > 7) {
+ if (xc->mfrr < xc->cppr) {
+ prio = xc->mfrr;
+ hirq = XICS_IPI;
+ }
+ break;
+ }
+
+ /* Grab queue and pointers */
+ q = &xc->queues[prio];
+ idx = q->idx;
+ toggle = q->toggle;
+
+ /*
+ * Snapshot the queue page. The test further down for EOI
+ * must use the same "copy" that was used by __xive_read_eq
+ * since qpage can be set concurrently and we don't want
+ * to miss an EOI.
+ */
+ qpage = READ_ONCE(q->qpage);
+
+skip_ipi:
+ /*
+ * Try to fetch from the queue. Will return 0 for a
+ * non-queueing priority (ie, qpage = 0).
+ */
+ hirq = __xive_read_eq(qpage, q->msk, &idx, &toggle);
+
+ /*
+		 * If this was a signal for an MFRR change done by
+ * H_IPI we skip it. Additionally, if we were fetching
+ * we EOI it now, thus re-enabling reception of a new
+ * such signal.
+ *
+ * We also need to do that if prio is 0 and we had no
+ * page for the queue. In this case, we have non-queued
+ * IPI that needs to be EOId.
+ *
+ * This is safe because if we have another pending MFRR
+ * change that wasn't observed above, the Q bit will have
+ * been set and another occurrence of the IPI will trigger.
+ */
+ if (hirq == XICS_IPI || (prio == 0 && !qpage)) {
+ if (scan_type == scan_fetch) {
+ xive_vm_source_eoi(xc->vp_ipi,
+ &xc->vp_ipi_data);
+ q->idx = idx;
+ q->toggle = toggle;
+ }
+ /* Loop back on same queue with updated idx/toggle */
+ WARN_ON(hirq && hirq != XICS_IPI);
+ if (hirq)
+ goto skip_ipi;
+ }
+
+ /* If it's the dummy interrupt, continue searching */
+ if (hirq == XICS_DUMMY)
+ goto skip_ipi;
+
+ /* Clear the pending bit if the queue is now empty */
+ if (!hirq) {
+ pending &= ~(1 << prio);
+
+ /*
+ * Check if the queue count needs adjusting due to
+ * interrupts being moved away.
+ */
+ if (atomic_read(&q->pending_count)) {
+ int p = atomic_xchg(&q->pending_count, 0);
+
+ if (p) {
+ WARN_ON(p > atomic_read(&q->count));
+ atomic_sub(p, &q->count);
+ }
+ }
+ }
+
+ /*
+		 * If the most favored prio we found pending is equally or
+		 * less favored than a pending IPI, we return the IPI
+		 * instead.
+ */
+ if (prio >= xc->mfrr && xc->mfrr < xc->cppr) {
+ prio = xc->mfrr;
+ hirq = XICS_IPI;
+ break;
+ }
+
+ /* If fetching, update queue pointers */
+ if (scan_type == scan_fetch) {
+ q->idx = idx;
+ q->toggle = toggle;
+ }
+ }
+
+ /* If we are just taking a "peek", do nothing else */
+ if (scan_type == scan_poll)
+ return hirq;
+
+ /* Update the pending bits */
+ xc->pending = pending;
+
+ /*
+ * If this is an EOI that's it, no CPPR adjustment done here,
+ * all we needed was cleanup the stale pending bits and check
+ * if there's anything left.
+ */
+ if (scan_type == scan_eoi)
+ return hirq;
+
+ /*
+ * If we found an interrupt, adjust what the guest CPPR should
+ * be as if we had just fetched that interrupt from HW.
+ *
+ * Note: This can only make xc->cppr smaller as the previous
+ * loop will only exit with hirq != 0 if prio is lower than
+ * the current xc->cppr. Thus we don't need to re-check xc->mfrr
+ * for pending IPIs.
+ */
+ if (hirq)
+ xc->cppr = prio;
+ /*
+ * If it was an IPI the HW CPPR might have been lowered too much
+ * as the HW interrupt we use for IPIs is routed to priority 0.
+ *
+ * We re-sync it here.
+ */
+ if (xc->cppr != xc->hw_cppr) {
+ xc->hw_cppr = xc->cppr;
+ __raw_writeb(xc->cppr, xive_tima + TM_QW1_OS + TM_CPPR);
+ }
+
+ return hirq;
+}
+
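Stripped of the queue walking and EOI bookkeeping, the selection rule the loop above implements is "take the lowest set bit of the pending mask, unless it is masked by the CPPR or an MFRR-backed IPI is at least as favored". A standalone sketch of that rule (the source numbers are stand-ins, not the kernel's values):

    #include <stdint.h>
    #include <strings.h>	/* ffs() */

    #define XICS_IPI	2	/* stand-in source numbers, for illustration */
    #define QUEUED_IRQ	0x1000

    /* Lower value = more favored; mfrr == 0xff means "no IPI requested". */
    static uint32_t pick_source(uint8_t pending, uint8_t cppr, uint8_t mfrr)
    {
    	int prio = pending ? ffs(pending) - 1 : 0xff;

    	/* Priorities at or above the CPPR are masked by the guest. */
    	if (prio >= cppr || prio > 7)
    		prio = 0xff;

    	/* A pending IPI beats an equally or less favored queue entry. */
    	if (mfrr < cppr && mfrr <= prio)
    		return XICS_IPI;

    	return prio == 0xff ? 0 : QUEUED_IRQ + prio;	/* 0 = nothing */
    }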
+static unsigned long xive_vm_h_xirr(struct kvm_vcpu *vcpu)
+{
+ struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
+ u8 old_cppr;
+ u32 hirq;
+
+ pr_devel("H_XIRR\n");
+
+ xc->stat_vm_h_xirr++;
+
+ /* First collect pending bits from HW */
+ xive_vm_ack_pending(xc);
+
+ pr_devel(" new pending=0x%02x hw_cppr=%d cppr=%d\n",
+ xc->pending, xc->hw_cppr, xc->cppr);
+
+ /* Grab previous CPPR and reverse map it */
+ old_cppr = xive_prio_to_guest(xc->cppr);
+
+ /* Scan for actual interrupts */
+ hirq = xive_vm_scan_interrupts(xc, xc->pending, scan_fetch);
+
+ pr_devel(" got hirq=0x%x hw_cppr=%d cppr=%d\n",
+ hirq, xc->hw_cppr, xc->cppr);
+
+ /* That should never hit */
+ if (hirq & 0xff000000)
+ pr_warn("XIVE: Weird guest interrupt number 0x%08x\n", hirq);
+
+ /*
+ * XXX We could check if the interrupt is masked here and
+ * filter it. If we chose to do so, we would need to do:
+ *
+ * if (masked) {
+ * lock();
+ * if (masked) {
+ * old_Q = true;
+ * hirq = 0;
+ * }
+ * unlock();
+ * }
+ */
+
+ /* Return interrupt and old CPPR in GPR4 */
+ vcpu->arch.regs.gpr[4] = hirq | (old_cppr << 24);
+
+ return H_SUCCESS;
+}
+
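The value placed in GPR4 above follows the XICS XIRR convention: the previous CPPR in the top byte and the 24-bit source number below it, which is also why an hirq with any of the top eight bits set is flagged as "weird". A small pack/unpack sketch:

    #include <stdint.h>

    /* XIRR layout: bits 31:24 = CPPR at fetch time, bits 23:0 = source. */
    static inline uint32_t xirr_pack(uint8_t cppr, uint32_t hirq)
    {
    	return (hirq & 0x00ffffff) | ((uint32_t)cppr << 24);
    }

    static inline uint8_t xirr_cppr(uint32_t xirr)
    {
    	return xirr >> 24;
    }

    static inline uint32_t xirr_source(uint32_t xirr)
    {
    	return xirr & 0x00ffffff;
    }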
+static unsigned long xive_vm_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server)
+{
+ struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
+ u8 pending = xc->pending;
+ u32 hirq;
+
+ pr_devel("H_IPOLL(server=%ld)\n", server);
+
+ xc->stat_vm_h_ipoll++;
+
+ /* Grab the target VCPU if not the current one */
+ if (xc->server_num != server) {
+ vcpu = kvmppc_xive_find_server(vcpu->kvm, server);
+ if (!vcpu)
+ return H_PARAMETER;
+ xc = vcpu->arch.xive_vcpu;
+
+ /* Scan all priorities */
+ pending = 0xff;
+ } else {
+ /* Grab pending interrupt if any */
+ __be64 qw1 = __raw_readq(xive_tima + TM_QW1_OS);
+ u8 pipr = be64_to_cpu(qw1) & 0xff;
+
+ if (pipr < 8)
+ pending |= 1 << pipr;
+ }
+
+ hirq = xive_vm_scan_interrupts(xc, pending, scan_poll);
+
+ /* Return interrupt and old CPPR in GPR4 */
+ vcpu->arch.regs.gpr[4] = hirq | (xc->cppr << 24);
+
+ return H_SUCCESS;
+}
+
+static void xive_vm_push_pending_to_hw(struct kvmppc_xive_vcpu *xc)
+{
+ u8 pending, prio;
+
+ pending = xc->pending;
+ if (xc->mfrr != 0xff) {
+ if (xc->mfrr < 8)
+ pending |= 1 << xc->mfrr;
+ else
+ pending |= 0x80;
+ }
+ if (!pending)
+ return;
+ prio = ffs(pending) - 1;
+
+ __raw_writeb(prio, xive_tima + TM_SPC_SET_OS_PENDING);
+}
+
+static void xive_vm_scan_for_rerouted_irqs(struct kvmppc_xive *xive,
+ struct kvmppc_xive_vcpu *xc)
+{
+ unsigned int prio;
+
+ /* For each priority that is now masked */
+ for (prio = xc->cppr; prio < KVMPPC_XIVE_Q_COUNT; prio++) {
+ struct xive_q *q = &xc->queues[prio];
+ struct kvmppc_xive_irq_state *state;
+ struct kvmppc_xive_src_block *sb;
+ u32 idx, toggle, entry, irq, hw_num;
+ struct xive_irq_data *xd;
+ __be32 *qpage;
+ u16 src;
+
+ idx = q->idx;
+ toggle = q->toggle;
+ qpage = READ_ONCE(q->qpage);
+ if (!qpage)
+ continue;
+
+ /* For each interrupt in the queue */
+ for (;;) {
+ entry = be32_to_cpup(qpage + idx);
+
+ /* No more ? */
+ if ((entry >> 31) == toggle)
+ break;
+ irq = entry & 0x7fffffff;
+
+ /* Skip dummies and IPIs */
+ if (irq == XICS_DUMMY || irq == XICS_IPI)
+ goto next;
+ sb = kvmppc_xive_find_source(xive, irq, &src);
+ if (!sb)
+ goto next;
+ state = &sb->irq_state[src];
+
+ /* Has it been rerouted ? */
+ if (xc->server_num == state->act_server)
+ goto next;
+
+ /*
+			 * All right, it *has* been re-routed; kill it from
+ * the queue.
+ */
+ qpage[idx] = cpu_to_be32((entry & 0x80000000) | XICS_DUMMY);
+
+ /* Find the HW interrupt */
+ kvmppc_xive_select_irq(state, &hw_num, &xd);
+
+			/* If it's not an LSI, set PQ to 11; the EOI will force a resend */
+ if (!(xd->flags & XIVE_IRQ_FLAG_LSI))
+ xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_11);
+
+ /* EOI the source */
+ xive_vm_source_eoi(hw_num, xd);
+
+next:
+ idx = (idx + 1) & q->msk;
+ if (idx == 0)
+ toggle ^= 1;
+ }
+ }
+}
+
+static int xive_vm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
+{
+ struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
+ struct kvmppc_xive *xive = vcpu->kvm->arch.xive;
+ u8 old_cppr;
+
+ pr_devel("H_CPPR(cppr=%ld)\n", cppr);
+
+ xc->stat_vm_h_cppr++;
+
+ /* Map CPPR */
+ cppr = xive_prio_from_guest(cppr);
+
+ /* Remember old and update SW state */
+ old_cppr = xc->cppr;
+ xc->cppr = cppr;
+
+ /*
+ * Order the above update of xc->cppr with the subsequent
+ * read of xc->mfrr inside push_pending_to_hw()
+ */
+ smp_mb();
+
+ if (cppr > old_cppr) {
+ /*
+		 * We are masking less, so we need to look for pending things
+		 * to deliver and set the VP pending bits accordingly, to
+		 * trigger a new interrupt; otherwise we might miss MFRR
+		 * changes for which we have optimized out sending an IPI
+		 * signal.
+ */
+ xive_vm_push_pending_to_hw(xc);
+ } else {
+ /*
+ * We are masking more, we need to check the queue for any
+ * interrupt that has been routed to another CPU, take
+ * it out (replace it with the dummy) and retrigger it.
+ *
+ * This is necessary since those interrupts may otherwise
+ * never be processed, at least not until this CPU restores
+ * its CPPR.
+ *
+ * This is in theory racy vs. HW adding new interrupts to
+ * the queue. In practice this works because the interesting
+ * cases are when the guest has done a set_xive() to move the
+ * interrupt away, which flushes the xive, followed by the
+ * target CPU doing a H_CPPR. So any new interrupt coming into
+ * the queue must still be routed to us and isn't a source
+ * of concern.
+ */
+ xive_vm_scan_for_rerouted_irqs(xive, xc);
+ }
+
+ /* Apply new CPPR */
+ xc->hw_cppr = cppr;
+ __raw_writeb(cppr, xive_tima + TM_QW1_OS + TM_CPPR);
+
+ return H_SUCCESS;
+}
+
+static int xive_vm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
+{
+ struct kvmppc_xive *xive = vcpu->kvm->arch.xive;
+ struct kvmppc_xive_src_block *sb;
+ struct kvmppc_xive_irq_state *state;
+ struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
+ struct xive_irq_data *xd;
+ u8 new_cppr = xirr >> 24;
+ u32 irq = xirr & 0x00ffffff, hw_num;
+ u16 src;
+ int rc = 0;
+
+ pr_devel("H_EOI(xirr=%08lx)\n", xirr);
+
+ xc->stat_vm_h_eoi++;
+
+ xc->cppr = xive_prio_from_guest(new_cppr);
+
+ /*
+	 * IPIs are synthesized from MFRR and thus don't need
+ * any special EOI handling. The underlying interrupt
+ * used to signal MFRR changes is EOId when fetched from
+ * the queue.
+ */
+ if (irq == XICS_IPI || irq == 0) {
+ /*
+ * This barrier orders the setting of xc->cppr vs.
+		 * subsequent test of xc->mfrr done inside
+ * scan_interrupts and push_pending_to_hw
+ */
+ smp_mb();
+ goto bail;
+ }
+
+ /* Find interrupt source */
+ sb = kvmppc_xive_find_source(xive, irq, &src);
+ if (!sb) {
+ pr_devel(" source not found !\n");
+ rc = H_PARAMETER;
+ /* Same as above */
+ smp_mb();
+ goto bail;
+ }
+ state = &sb->irq_state[src];
+ kvmppc_xive_select_irq(state, &hw_num, &xd);
+
+ state->in_eoi = true;
+
+ /*
+	 * This barrier orders both the setting of in_eoi above vs.
+	 * the subsequent test of guest_priority, and the setting
+	 * of xc->cppr vs. the subsequent test of xc->mfrr done inside
+ * scan_interrupts and push_pending_to_hw
+ */
+ smp_mb();
+
+again:
+ if (state->guest_priority == MASKED) {
+ arch_spin_lock(&sb->lock);
+ if (state->guest_priority != MASKED) {
+ arch_spin_unlock(&sb->lock);
+ goto again;
+ }
+ pr_devel(" EOI on saved P...\n");
+
+ /* Clear old_p, that will cause unmask to perform an EOI */
+ state->old_p = false;
+
+ arch_spin_unlock(&sb->lock);
+ } else {
+ pr_devel(" EOI on source...\n");
+
+ /* Perform EOI on the source */
+ xive_vm_source_eoi(hw_num, xd);
+
+ /* If it's an emulated LSI, check level and resend */
+ if (state->lsi && state->asserted)
+ __raw_writeq(0, __x_trig_page(xd));
+
+ }
+
+ /*
+ * This barrier orders the above guest_priority check
+ * and spin_lock/unlock with clearing in_eoi below.
+ *
+ * It also has to be a full mb() as it must ensure
+ * the MMIOs done in source_eoi() are completed before
+ * state->in_eoi is visible.
+ */
+ mb();
+ state->in_eoi = false;
+bail:
+
+ /* Re-evaluate pending IRQs and update HW */
+ xive_vm_scan_interrupts(xc, xc->pending, scan_eoi);
+ xive_vm_push_pending_to_hw(xc);
+ pr_devel(" after scan pending=%02x\n", xc->pending);
+
+ /* Apply new CPPR */
+ xc->hw_cppr = xc->cppr;
+ __raw_writeb(xc->cppr, xive_tima + TM_QW1_OS + TM_CPPR);
+
+ return rc;
+}
+
+static int xive_vm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
+ unsigned long mfrr)
+{
+ struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
+
+ pr_devel("H_IPI(server=%08lx,mfrr=%ld)\n", server, mfrr);
+
+ xc->stat_vm_h_ipi++;
+
+ /* Find target */
+ vcpu = kvmppc_xive_find_server(vcpu->kvm, server);
+ if (!vcpu)
+ return H_PARAMETER;
+ xc = vcpu->arch.xive_vcpu;
+
+ /* Locklessly write over MFRR */
+ xc->mfrr = mfrr;
+
+ /*
+ * The load of xc->cppr below and the subsequent MMIO store
+ * to the IPI must happen after the above mfrr update is
+ * globally visible so that:
+ *
+ * - Synchronize with another CPU doing an H_EOI or a H_CPPR
+ * updating xc->cppr then reading xc->mfrr.
+ *
+ * - The target of the IPI sees the xc->mfrr update
+ */
+ mb();
+
+	/* Shoot the IPI if more favored than the target CPPR */
+ if (mfrr < xc->cppr)
+ __raw_writeq(0, __x_trig_page(&xc->vp_ipi_data));
+
+ return H_SUCCESS;
+}
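The barrier comment above describes a store-then-check handshake: the MFRR update must be globally visible before CPPR is sampled, so that either the sender sees a raised CPPR or the target sees the new MFRR, and the hardware IPI is only poked when it is more favored (numerically lower) than the target's CPPR. A hedged C11 sketch of the same pattern, with seq_cst fences standing in for the kernel's mb():

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    struct icp_sketch {
    	_Atomic uint8_t mfrr;	/* 0xff = no IPI requested */
    	_Atomic uint8_t cppr;	/* current processor priority */
    };

    /* Returns true when the caller must also poke the hardware IPI source. */
    static bool post_ipi(struct icp_sketch *target, uint8_t mfrr)
    {
    	atomic_store_explicit(&target->mfrr, mfrr, memory_order_relaxed);

    	/* Publish the MFRR before sampling CPPR (the kernel uses mb()). */
    	atomic_thread_fence(memory_order_seq_cst);

    	return mfrr < atomic_load_explicit(&target->cppr,
    					   memory_order_relaxed);
    }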
/*
* We leave a gap of a couple of interrupts in the queue to
@@ -58,6 +660,25 @@
*/
#define XIVE_Q_GAP 2
+static bool kvmppc_xive_vcpu_has_save_restore(struct kvm_vcpu *vcpu)
+{
+ struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
+
+ /* Check enablement at VP level */
+ return xc->vp_cam & TM_QW1W2_HO;
+}
+
+bool kvmppc_xive_check_save_restore(struct kvm_vcpu *vcpu)
+{
+ struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
+ struct kvmppc_xive *xive = xc->xive;
+
+ if (xive->flags & KVMPPC_XIVE_FLAG_SAVE_RESTORE)
+ return kvmppc_xive_vcpu_has_save_restore(vcpu);
+
+ return true;
+}
+
/*
* Push a vcpu's context to the XIVE on guest entry.
* This assumes we are in virtual mode (MMU on)
@@ -76,7 +697,8 @@ void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu)
return;
eieio();
- __raw_writeq(vcpu->arch.xive_saved_state.w01, tima + TM_QW1_OS);
+ if (!kvmppc_xive_vcpu_has_save_restore(vcpu))
+ __raw_writeq(vcpu->arch.xive_saved_state.w01, tima + TM_QW1_OS);
__raw_writel(vcpu->arch.xive_cam_word, tima + TM_QW1_OS + TM_WORD2);
vcpu->arch.xive_pushed = 1;
eieio();
@@ -104,7 +726,7 @@ void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu)
* interrupt might have fired and be on its way to the
* host queue while we mask it, and if we unmask it
* early enough (re-cede right away), there is a
- * theorical possibility that it fires again, thus
+ * theoretical possibility that it fires again, thus
* landing in the target queue more than once which is
* a big no-no.
*
@@ -128,6 +750,75 @@ void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu)
EXPORT_SYMBOL_GPL(kvmppc_xive_push_vcpu);
/*
+ * Pull a vcpu's context from the XIVE on guest exit.
+ * This assumes we are in virtual mode (MMU on)
+ */
+void kvmppc_xive_pull_vcpu(struct kvm_vcpu *vcpu)
+{
+ void __iomem *tima = local_paca->kvm_hstate.xive_tima_virt;
+
+ if (!vcpu->arch.xive_pushed)
+ return;
+
+ /*
+ * Should not have been pushed if there is no tima
+ */
+ if (WARN_ON(!tima))
+ return;
+
+ eieio();
+ /* First load to pull the context, we ignore the value */
+ __raw_readl(tima + TM_SPC_PULL_OS_CTX);
+ /* Second load to recover the context state (Words 0 and 1) */
+ if (!kvmppc_xive_vcpu_has_save_restore(vcpu))
+ vcpu->arch.xive_saved_state.w01 = __raw_readq(tima + TM_QW1_OS);
+
+ /* Fixup some of the state for the next load */
+ vcpu->arch.xive_saved_state.lsmfb = 0;
+ vcpu->arch.xive_saved_state.ack = 0xff;
+ vcpu->arch.xive_pushed = 0;
+ eieio();
+}
+EXPORT_SYMBOL_GPL(kvmppc_xive_pull_vcpu);
+
+bool kvmppc_xive_rearm_escalation(struct kvm_vcpu *vcpu)
+{
+ void __iomem *esc_vaddr = (void __iomem *)vcpu->arch.xive_esc_vaddr;
+ bool ret = true;
+
+ if (!esc_vaddr)
+ return ret;
+
+ /* we are using XIVE with single escalation */
+
+ if (vcpu->arch.xive_esc_on) {
+ /*
+ * If we still have a pending escalation, abort the cede,
+ * and we must set PQ to 10 rather than 00 so that we don't
+ * potentially end up with two entries for the escalation
+ * interrupt in the XIVE interrupt queue. In that case
+ * we also don't want to set xive_esc_on to 1 here in
+ * case we race with xive_esc_irq().
+ */
+ ret = false;
+ /*
+ * The escalation interrupts are special as we don't EOI them.
+ * There is no need to use the load-after-store ordering offset
+ * to set PQ to 10 as we won't use StoreEOI.
+ */
+ __raw_readq(esc_vaddr + XIVE_ESB_SET_PQ_10);
+ } else {
+ vcpu->arch.xive_esc_on = true;
+ mb();
+ __raw_readq(esc_vaddr + XIVE_ESB_SET_PQ_00);
+ }
+ mb();
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(kvmppc_xive_rearm_escalation);
+
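kvmppc_xive_rearm_escalation() reduces to two ESB transitions: PQ=10 parks the escalation (and the cede is refused) when a previous occurrence is still in flight, PQ=00 re-arms it otherwise. A state-only sketch of that decision, with the MMIO loads replaced by a plain assignment:

    #include <stdbool.h>

    enum esb_pq { PQ_00 = 0, PQ_10 = 2 };	/* only the two states used here */

    /*
     * Returns false when the cede must be aborted because an escalation
     * interrupt is still in flight for this vCPU.
     */
    static bool rearm_escalation(bool *esc_on, enum esb_pq *esb)
    {
    	if (*esc_on) {
    		*esb = PQ_10;	/* park it; avoid queuing a second entry */
    		return false;
    	}
    	*esc_on = true;		/* ordered before the ESB access in the kernel */
    	*esb = PQ_00;		/* re-enable the escalation source */
    	return true;
    }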
+/*
* This is a simple trigger for a generic XIVE IRQ. This must
* only be called for interrupts that support a trigger page
*/
@@ -152,7 +843,7 @@ static irqreturn_t xive_esc_irq(int irq, void *data)
vcpu->arch.irq_pending = 1;
smp_mb();
- if (vcpu->arch.ceded)
+ if (vcpu->arch.ceded || vcpu->arch.nested)
kvmppc_fast_vcpu_kick(vcpu);
/* Since we have the no-EOI flag, the interrupt is effectively
@@ -219,7 +910,7 @@ int kvmppc_xive_attach_escalation(struct kvm_vcpu *vcpu, u8 prio,
/* In single escalation mode, we grab the ESB MMIO of the
* interrupt and mask it. Also populate the VCPU v/raddr
* of the ESB page for use by asm entry/exit code. Finally
- * set the XIVE_IRQ_NO_EOI flag which will prevent the
+ * set the XIVE_IRQ_FLAG_NO_EOI flag which will prevent the
* core code from performing an EOI on the escalation
* interrupt, thus leaving it effectively masked after
* it fires once.
@@ -231,7 +922,7 @@ int kvmppc_xive_attach_escalation(struct kvm_vcpu *vcpu, u8 prio,
xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_01);
vcpu->arch.xive_esc_raddr = xd->eoi_page;
vcpu->arch.xive_esc_vaddr = (__force u64)xd->eoi_mmio;
- xd->flags |= XIVE_IRQ_NO_EOI;
+ xd->flags |= XIVE_IRQ_FLAG_NO_EOI;
}
return 0;
@@ -282,7 +973,8 @@ static int xive_check_provisioning(struct kvm *kvm, u8 prio)
{
struct kvmppc_xive *xive = kvm->arch.xive;
struct kvm_vcpu *vcpu;
- int i, rc;
+ unsigned long i;
+ int rc;
lockdep_assert_held(&xive->lock);
@@ -297,9 +989,9 @@ static int xive_check_provisioning(struct kvm *kvm, u8 prio)
if (!vcpu->arch.xive_vcpu)
continue;
rc = xive_provision_queue(vcpu, prio);
- if (rc == 0 && !xive->single_escalation)
+ if (rc == 0 && !kvmppc_xive_has_single_escalation(xive))
kvmppc_xive_attach_escalation(vcpu, prio,
- xive->single_escalation);
+ kvmppc_xive_has_single_escalation(xive));
if (rc)
return rc;
}
@@ -353,7 +1045,8 @@ static int xive_try_pick_queue(struct kvm_vcpu *vcpu, u8 prio)
int kvmppc_xive_select_target(struct kvm *kvm, u32 *server, u8 prio)
{
struct kvm_vcpu *vcpu;
- int i, rc;
+ unsigned long i;
+ int rc;
/* Locate target server */
vcpu = kvmppc_xive_find_server(kvm, *server);
@@ -419,37 +1112,16 @@ static u8 xive_lock_and_mask(struct kvmppc_xive *xive,
/* Get the right irq */
kvmppc_xive_select_irq(state, &hw_num, &xd);
- /*
- * If the interrupt is marked as needing masking via
- * firmware, we do it here. Firmware masking however
- * is "lossy", it won't return the old p and q bits
- * and won't set the interrupt to a state where it will
- * record queued ones. If this is an issue we should do
- * lazy masking instead.
- *
- * For now, we work around this in unmask by forcing
- * an interrupt whenever we unmask a non-LSI via FW
- * (if ever).
- */
- if (xd->flags & OPAL_XIVE_IRQ_MASK_VIA_FW) {
- xive_native_configure_irq(hw_num,
- kvmppc_xive_vp(xive, state->act_server),
- MASKED, state->number);
- /* set old_p so we can track if an H_EOI was done */
- state->old_p = true;
- state->old_q = false;
- } else {
- /* Set PQ to 10, return old P and old Q and remember them */
- val = xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_10);
- state->old_p = !!(val & 2);
- state->old_q = !!(val & 1);
+ /* Set PQ to 10, return old P and old Q and remember them */
+ val = xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_10);
+ state->old_p = !!(val & 2);
+ state->old_q = !!(val & 1);
- /*
- * Synchronize hardware to sensure the queues are updated
- * when masking
- */
- xive_native_sync_source(hw_num);
- }
+ /*
+	 * Synchronize hardware to ensure the queues are updated when
+ * masking
+ */
+ xive_native_sync_source(hw_num);
return old_prio;
}
@@ -483,23 +1155,6 @@ static void xive_finish_unmask(struct kvmppc_xive *xive,
/* Get the right irq */
kvmppc_xive_select_irq(state, &hw_num, &xd);
- /*
- * See comment in xive_lock_and_mask() concerning masking
- * via firmware.
- */
- if (xd->flags & OPAL_XIVE_IRQ_MASK_VIA_FW) {
- xive_native_configure_irq(hw_num,
- kvmppc_xive_vp(xive, state->act_server),
- state->act_priority, state->number);
- /* If an EOI is needed, do it here */
- if (!state->old_p)
- xive_vm_source_eoi(hw_num, xd);
- /* If this is not an LSI, force a trigger */
- if (!(xd->flags & OPAL_XIVE_IRQ_LSI))
- xive_irq_trigger(xd);
- goto bail;
- }
-
/* Old Q set, set PQ to 11 */
if (state->old_q)
xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_11);
@@ -572,7 +1227,7 @@ static int xive_target_interrupt(struct kvm *kvm,
/*
* Targetting rules: In order to avoid losing track of
- * pending interrupts accross mask and unmask, which would
+ * pending interrupts across mask and unmask, which would
* allow queue overflows, we implement the following rules:
*
* - Unless it was never enabled (or we run out of capacity)
@@ -894,13 +1549,13 @@ int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval)
}
int kvmppc_xive_set_mapped(struct kvm *kvm, unsigned long guest_irq,
- struct irq_desc *host_desc)
+ unsigned long host_irq)
{
struct kvmppc_xive *xive = kvm->arch.xive;
struct kvmppc_xive_src_block *sb;
struct kvmppc_xive_irq_state *state;
- struct irq_data *host_data = irq_desc_get_irq_data(host_desc);
- unsigned int host_irq = irq_desc_get_irq(host_desc);
+ struct irq_data *host_data =
+ irq_domain_get_irq_data(irq_get_default_host(), host_irq);
unsigned int hw_irq = (unsigned int)irqd_to_hwirq(host_data);
u16 idx;
u8 prio;
@@ -909,7 +1564,8 @@ int kvmppc_xive_set_mapped(struct kvm *kvm, unsigned long guest_irq,
if (!xive)
return -ENODEV;
- pr_devel("set_mapped girq 0x%lx host HW irq 0x%x...\n",guest_irq, hw_irq);
+ pr_debug("%s: GIRQ 0x%lx host IRQ %ld XIVE HW IRQ 0x%x\n",
+ __func__, guest_irq, host_irq, hw_irq);
sb = kvmppc_xive_find_source(xive, guest_irq, &idx);
if (!sb)
@@ -931,7 +1587,7 @@ int kvmppc_xive_set_mapped(struct kvm *kvm, unsigned long guest_irq,
*/
rc = irq_set_vcpu_affinity(host_irq, state);
if (rc) {
- pr_err("Failed to set VCPU affinity for irq %d\n", host_irq);
+ pr_err("Failed to set VCPU affinity for host IRQ %ld\n", host_irq);
return rc;
}
@@ -991,12 +1647,11 @@ int kvmppc_xive_set_mapped(struct kvm *kvm, unsigned long guest_irq,
EXPORT_SYMBOL_GPL(kvmppc_xive_set_mapped);
int kvmppc_xive_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
- struct irq_desc *host_desc)
+ unsigned long host_irq)
{
struct kvmppc_xive *xive = kvm->arch.xive;
struct kvmppc_xive_src_block *sb;
struct kvmppc_xive_irq_state *state;
- unsigned int host_irq = irq_desc_get_irq(host_desc);
u16 idx;
u8 prio;
int rc;
@@ -1004,7 +1659,7 @@ int kvmppc_xive_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
if (!xive)
return -ENODEV;
- pr_devel("clr_mapped girq 0x%lx...\n", guest_irq);
+ pr_debug("%s: GIRQ 0x%lx host IRQ %ld\n", __func__, guest_irq, host_irq);
sb = kvmppc_xive_find_source(xive, guest_irq, &idx);
if (!sb)
@@ -1023,7 +1678,7 @@ int kvmppc_xive_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
/*
* If old_p is set, the interrupt is pending, we switch it to
* PQ=11. This will force a resend in the host so the interrupt
- * isn't lost to whatver host driver may pick it up
+ * isn't lost to whatever host driver may pick it up
*/
if (state->old_p)
xive_vm_esb_load(state->pt_data, XIVE_ESB_SET_PQ_11);
@@ -1031,7 +1686,7 @@ int kvmppc_xive_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
/* Release the passed-through interrupt to the host */
rc = irq_set_vcpu_affinity(host_irq, NULL);
if (rc) {
- pr_err("Failed to clr VCPU affinity for irq %d\n", host_irq);
+ pr_err("Failed to clr VCPU affinity for host IRQ %ld\n", host_irq);
return rc;
}
@@ -1171,7 +1826,7 @@ void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu)
/* Free escalations */
for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
if (xc->esc_virq[i]) {
- if (xc->xive->single_escalation)
+ if (kvmppc_xive_has_single_escalation(xc->xive))
xive_cleanup_single_escalation(vcpu, xc,
xc->esc_virq[i]);
free_irq(xc->esc_virq[i], vcpu);
@@ -1214,12 +1869,9 @@ void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu)
static bool kvmppc_xive_vcpu_id_valid(struct kvmppc_xive *xive, u32 cpu)
{
/* We have a block of xive->nr_servers VPs. We just need to check
- * raw vCPU ids are below the expected limit for this guest's
- * core stride ; kvmppc_pack_vcpu_id() will pack them down to an
- * index that can be safely used to compute a VP id that belongs
- * to the VP block.
+ * packed vCPU ids are below that.
*/
- return cpu < xive->nr_servers * xive->kvm->arch.emul_smt_mode;
+ return kvmppc_pack_vcpu_id(xive->kvm, cpu) < xive->nr_servers;
}
int kvmppc_xive_compute_vp_id(struct kvmppc_xive *xive, u32 cpu, u32 *vp)
@@ -1294,6 +1946,12 @@ int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
if (r)
goto bail;
+ if (!kvmppc_xive_check_save_restore(vcpu)) {
+ pr_err("inconsistent save-restore setup for VCPU %d\n", cpu);
+ r = -EIO;
+ goto bail;
+ }
+
/* Configure VCPU fields for use by assembly push/pull */
vcpu->arch.xive_saved_state.w01 = cpu_to_be64(0xff000000);
vcpu->arch.xive_cam_word = cpu_to_be32(xc->vp_cam | TM_QW1W2_VO);
@@ -1315,7 +1973,7 @@ int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
* Enable the VP first as the single escalation mode will
* affect escalation interrupts numbering
*/
- r = xive_native_enable_vp(xc->vp_id, xive->single_escalation);
+ r = xive_native_enable_vp(xc->vp_id, kvmppc_xive_has_single_escalation(xive));
if (r) {
pr_err("Failed to enable VP in OPAL, err %d\n", r);
goto bail;
@@ -1332,15 +1990,15 @@ int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
struct xive_q *q = &xc->queues[i];
/* Single escalation, no queue 7 */
- if (i == 7 && xive->single_escalation)
+ if (i == 7 && kvmppc_xive_has_single_escalation(xive))
break;
/* Is queue already enabled ? Provision it */
if (xive->qmap & (1 << i)) {
r = xive_provision_queue(vcpu, i);
- if (r == 0 && !xive->single_escalation)
+ if (r == 0 && !kvmppc_xive_has_single_escalation(xive))
kvmppc_xive_attach_escalation(
- vcpu, i, xive->single_escalation);
+ vcpu, i, kvmppc_xive_has_single_escalation(xive));
if (r)
goto bail;
} else {
@@ -1355,7 +2013,7 @@ int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
}
/* If not done above, attach priority 0 escalation */
- r = kvmppc_xive_attach_escalation(vcpu, 0, xive->single_escalation);
+ r = kvmppc_xive_attach_escalation(vcpu, 0, kvmppc_xive_has_single_escalation(xive));
if (r)
goto bail;
@@ -1468,7 +2126,8 @@ static void xive_pre_save_queue(struct kvmppc_xive *xive, struct xive_q *q)
static void xive_pre_save_scan(struct kvmppc_xive *xive)
{
struct kvm_vcpu *vcpu = NULL;
- int i, j;
+ unsigned long i;
+ int j;
/*
* See comment in xive_get_source() about how this
@@ -1649,7 +2308,7 @@ static bool xive_check_delayed_irq(struct kvmppc_xive *xive, u32 irq)
{
struct kvm *kvm = xive->kvm;
struct kvm_vcpu *vcpu = NULL;
- int i;
+ unsigned long i;
kvm_for_each_vcpu(i, vcpu, kvm) {
struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
@@ -1854,9 +2513,9 @@ int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
return -EINVAL;
if ((level == 1 && state->lsi) || level == KVM_INTERRUPT_SET_LEVEL)
- state->asserted = 1;
+ state->asserted = true;
else if (level == 0 || level == KVM_INTERRUPT_UNSET) {
- state->asserted = 0;
+ state->asserted = false;
return 0;
}
@@ -1877,7 +2536,7 @@ int kvmppc_xive_set_nr_servers(struct kvmppc_xive *xive, u64 addr)
pr_devel("%s nr_servers=%u\n", __func__, nr_servers);
- if (!nr_servers || nr_servers > KVM_MAX_VCPU_ID)
+ if (!nr_servers || nr_servers > KVM_MAX_VCPU_IDS)
return -EINVAL;
mutex_lock(&xive->lock);
@@ -1986,7 +2645,7 @@ static void kvmppc_xive_release(struct kvm_device *dev)
struct kvmppc_xive *xive = dev->private;
struct kvm *kvm = xive->kvm;
struct kvm_vcpu *vcpu;
- int i;
+ unsigned long i;
pr_devel("Releasing xive device\n");
@@ -2110,12 +2769,46 @@ static int kvmppc_xive_create(struct kvm_device *dev, u32 type)
*/
xive->nr_servers = KVM_MAX_VCPUS;
- xive->single_escalation = xive_native_has_single_escalation();
+ if (xive_native_has_single_escalation())
+ xive->flags |= KVMPPC_XIVE_FLAG_SINGLE_ESCALATION;
+
+ if (xive_native_has_save_restore())
+ xive->flags |= KVMPPC_XIVE_FLAG_SAVE_RESTORE;
kvm->arch.xive = xive;
return 0;
}
+int kvmppc_xive_xics_hcall(struct kvm_vcpu *vcpu, u32 req)
+{
+ struct kvmppc_vcore *vc = vcpu->arch.vcore;
+
+ /* The VM should have configured XICS mode before doing XICS hcalls. */
+ if (!kvmppc_xics_enabled(vcpu))
+ return H_TOO_HARD;
+
+ switch (req) {
+ case H_XIRR:
+ return xive_vm_h_xirr(vcpu);
+ case H_CPPR:
+ return xive_vm_h_cppr(vcpu, kvmppc_get_gpr(vcpu, 4));
+ case H_EOI:
+ return xive_vm_h_eoi(vcpu, kvmppc_get_gpr(vcpu, 4));
+ case H_IPI:
+ return xive_vm_h_ipi(vcpu, kvmppc_get_gpr(vcpu, 4),
+ kvmppc_get_gpr(vcpu, 5));
+ case H_IPOLL:
+ return xive_vm_h_ipoll(vcpu, kvmppc_get_gpr(vcpu, 4));
+ case H_XIRR_X:
+ xive_vm_h_xirr(vcpu);
+ kvmppc_set_gpr(vcpu, 5, get_tb() + vc->tb_offset);
+ return H_SUCCESS;
+ }
+
+ return H_UNSUPPORTED;
+}
+EXPORT_SYMBOL_GPL(kvmppc_xive_xics_hcall);
+
int kvmppc_xive_debug_show_queues(struct seq_file *m, struct kvm_vcpu *vcpu)
{
struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
@@ -2128,9 +2821,8 @@ int kvmppc_xive_debug_show_queues(struct seq_file *m, struct kvm_vcpu *vcpu)
if (!q->qpage && !xc->esc_virq[i])
continue;
- seq_printf(m, " [q%d]: ", i);
-
if (q->qpage) {
+ seq_printf(m, " q[%d]: ", i);
idx = q->idx;
i0 = be32_to_cpup(q->qpage + idx);
idx = (idx + 1) & q->msk;
@@ -2144,16 +2836,54 @@ int kvmppc_xive_debug_show_queues(struct seq_file *m, struct kvm_vcpu *vcpu)
irq_data_get_irq_handler_data(d);
u64 pq = xive_vm_esb_load(xd, XIVE_ESB_GET);
- seq_printf(m, "E:%c%c I(%d:%llx:%llx)",
- (pq & XIVE_ESB_VAL_P) ? 'P' : 'p',
- (pq & XIVE_ESB_VAL_Q) ? 'Q' : 'q',
- xc->esc_virq[i], pq, xd->eoi_page);
+ seq_printf(m, " ESC %d %c%c EOI @%llx",
+ xc->esc_virq[i],
+ (pq & XIVE_ESB_VAL_P) ? 'P' : '-',
+ (pq & XIVE_ESB_VAL_Q) ? 'Q' : '-',
+ xd->eoi_page);
seq_puts(m, "\n");
}
}
return 0;
}
+void kvmppc_xive_debug_show_sources(struct seq_file *m,
+ struct kvmppc_xive_src_block *sb)
+{
+ int i;
+
+ seq_puts(m, " LISN HW/CHIP TYPE PQ EISN CPU/PRIO\n");
+ for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
+ struct kvmppc_xive_irq_state *state = &sb->irq_state[i];
+ struct xive_irq_data *xd;
+ u64 pq;
+ u32 hw_num;
+
+ if (!state->valid)
+ continue;
+
+ kvmppc_xive_select_irq(state, &hw_num, &xd);
+
+ pq = xive_vm_esb_load(xd, XIVE_ESB_GET);
+
+ seq_printf(m, "%08x %08x/%02x", state->number, hw_num,
+ xd->src_chip);
+ if (state->lsi)
+ seq_printf(m, " %cLSI", state->asserted ? '^' : ' ');
+ else
+ seq_puts(m, " MSI");
+
+ seq_printf(m, " %s %c%c %08x % 4d/%d",
+ state->ipi_number == hw_num ? "IPI" : " PT",
+ pq & XIVE_ESB_VAL_P ? 'P' : '-',
+ pq & XIVE_ESB_VAL_Q ? 'Q' : '-',
+ state->eisn, state->act_server,
+ state->act_priority);
+
+ seq_puts(m, "\n");
+ }
+}
+
static int xive_debug_show(struct seq_file *m, void *private)
{
struct kvmppc_xive *xive = m->private;
@@ -2169,12 +2899,12 @@ static int xive_debug_show(struct seq_file *m, void *private)
u64 t_vm_h_cppr = 0;
u64 t_vm_h_eoi = 0;
u64 t_vm_h_ipi = 0;
- unsigned int i;
+ unsigned long i;
if (!kvm)
return 0;
- seq_printf(m, "=========\nVCPU state\n=========\n");
+ seq_puts(m, "=========\nVCPU state\n=========\n");
kvm_for_each_vcpu(i, vcpu, kvm) {
struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
@@ -2182,11 +2912,12 @@ static int xive_debug_show(struct seq_file *m, void *private)
if (!xc)
continue;
- seq_printf(m, "cpu server %#x VP:%#x CPPR:%#x HWCPPR:%#x"
- " MFRR:%#x PEND:%#x h_xirr: R=%lld V=%lld\n",
- xc->server_num, xc->vp_id, xc->cppr, xc->hw_cppr,
- xc->mfrr, xc->pending,
- xc->stat_rm_h_xirr, xc->stat_vm_h_xirr);
+ seq_printf(m, "VCPU %d: VP:%#x/%02x\n"
+ " CPPR:%#x HWCPPR:%#x MFRR:%#x PEND:%#x h_xirr: R=%lld V=%lld\n",
+ xc->server_num, xc->vp_id, xc->vp_chip_id,
+ xc->cppr, xc->hw_cppr,
+ xc->mfrr, xc->pending,
+ xc->stat_rm_h_xirr, xc->stat_vm_h_xirr);
kvmppc_xive_debug_show_queues(m, vcpu);
@@ -2202,13 +2933,25 @@ static int xive_debug_show(struct seq_file *m, void *private)
t_vm_h_ipi += xc->stat_vm_h_ipi;
}
- seq_printf(m, "Hcalls totals\n");
+ seq_puts(m, "Hcalls totals\n");
seq_printf(m, " H_XIRR R=%10lld V=%10lld\n", t_rm_h_xirr, t_vm_h_xirr);
seq_printf(m, " H_IPOLL R=%10lld V=%10lld\n", t_rm_h_ipoll, t_vm_h_ipoll);
seq_printf(m, " H_CPPR R=%10lld V=%10lld\n", t_rm_h_cppr, t_vm_h_cppr);
seq_printf(m, " H_EOI R=%10lld V=%10lld\n", t_rm_h_eoi, t_vm_h_eoi);
seq_printf(m, " H_IPI R=%10lld V=%10lld\n", t_rm_h_ipi, t_vm_h_ipi);
+ seq_puts(m, "=========\nSources\n=========\n");
+
+ for (i = 0; i <= xive->max_sbid; i++) {
+ struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
+
+ if (sb) {
+ arch_spin_lock(&sb->lock);
+ kvmppc_xive_debug_show_sources(m, sb);
+ arch_spin_unlock(&sb->lock);
+ }
+ }
+
return 0;
}
@@ -2216,24 +2959,15 @@ DEFINE_SHOW_ATTRIBUTE(xive_debug);
static void xive_debugfs_init(struct kvmppc_xive *xive)
{
- char *name;
-
- name = kasprintf(GFP_KERNEL, "kvm-xive-%p", xive);
- if (!name) {
- pr_err("%s: no memory for name\n", __func__);
- return;
- }
-
- xive->dentry = debugfs_create_file(name, S_IRUGO, powerpc_debugfs_root,
+ xive->dentry = debugfs_create_file("xive", S_IRUGO, xive->kvm->debugfs_dentry,
xive, &xive_debug_fops);
- pr_debug("%s: created %s\n", __func__, name);
- kfree(name);
+ pr_debug("%s: created\n", __func__);
}
static void kvmppc_xive_init(struct kvm_device *dev)
{
- struct kvmppc_xive *xive = (struct kvmppc_xive *)dev->private;
+ struct kvmppc_xive *xive = dev->private;
/* Register some debug interfaces */
xive_debugfs_init(xive);
@@ -2248,21 +2982,3 @@ struct kvm_device_ops kvm_xive_ops = {
.get_attr = xive_get_attr,
.has_attr = xive_has_attr,
};
-
-void kvmppc_xive_init_module(void)
-{
- __xive_vm_h_xirr = xive_vm_h_xirr;
- __xive_vm_h_ipoll = xive_vm_h_ipoll;
- __xive_vm_h_ipi = xive_vm_h_ipi;
- __xive_vm_h_cppr = xive_vm_h_cppr;
- __xive_vm_h_eoi = xive_vm_h_eoi;
-}
-
-void kvmppc_xive_exit_module(void)
-{
- __xive_vm_h_xirr = NULL;
- __xive_vm_h_ipoll = NULL;
- __xive_vm_h_ipi = NULL;
- __xive_vm_h_cppr = NULL;
- __xive_vm_h_eoi = NULL;
-}
diff --git a/arch/powerpc/kvm/book3s_xive.h b/arch/powerpc/kvm/book3s_xive.h
index 382e3a56e789..1e48f72e8aa5 100644
--- a/arch/powerpc/kvm/book3s_xive.h
+++ b/arch/powerpc/kvm/book3s_xive.h
@@ -97,6 +97,9 @@ struct kvmppc_xive_ops {
int (*reset_mapped)(struct kvm *kvm, unsigned long guest_irq);
};
+#define KVMPPC_XIVE_FLAG_SINGLE_ESCALATION 0x1
+#define KVMPPC_XIVE_FLAG_SAVE_RESTORE 0x2
+
struct kvmppc_xive {
struct kvm *kvm;
struct kvm_device *dev;
@@ -133,7 +136,7 @@ struct kvmppc_xive {
u32 q_page_order;
/* Flags */
- u8 single_escalation;
+ u8 flags;
/* Number of entries in the VP block */
u32 nr_servers;
@@ -196,7 +199,7 @@ struct kvmppc_xive_vcpu {
static inline struct kvm_vcpu *kvmppc_xive_find_server(struct kvm *kvm, u32 nr)
{
struct kvm_vcpu *vcpu = NULL;
- int i;
+ unsigned long i;
kvm_for_each_vcpu(i, vcpu, kvm) {
if (vcpu->arch.xive_vcpu && nr == vcpu->arch.xive_vcpu->server_num)
@@ -218,6 +221,17 @@ static inline struct kvmppc_xive_src_block *kvmppc_xive_find_source(struct kvmpp
return xive->src_blocks[bid];
}
+/*
+ * When the XIVE resources are allocated at the HW level, the VP
+ * structures describing the vCPUs of a guest are distributed among
+ * the chips to optimize the PowerBUS usage. For best performance, the
+ * guest vCPUs can be pinned to match the VP structure distribution.
+ *
+ * Currently, the VP identifiers are deduced from the vCPU id using
+ * the kvmppc_pack_vcpu_id() routine which is not incorrect but not
+ * optimal either. If VSMT is used, the result is not contiguous and
+ * the constraints on HW resources described above cannot be met.
+ */
static inline u32 kvmppc_xive_vp(struct kvmppc_xive *xive, u32 server)
{
return xive->vp_base + kvmppc_pack_vcpu_id(xive->kvm, server);
@@ -226,7 +240,7 @@ static inline u32 kvmppc_xive_vp(struct kvmppc_xive *xive, u32 server)
static inline bool kvmppc_xive_vp_in_use(struct kvm *kvm, u32 vp_id)
{
struct kvm_vcpu *vcpu = NULL;
- int i;
+ unsigned long i;
kvm_for_each_vcpu(i, vcpu, kvm) {
if (vcpu->arch.xive_vcpu && vp_id == vcpu->arch.xive_vcpu->vp_id)
@@ -271,25 +285,13 @@ static inline u32 __xive_read_eq(__be32 *qpage, u32 msk, u32 *idx, u32 *toggle)
return cur & 0x7fffffff;
}
-extern unsigned long xive_rm_h_xirr(struct kvm_vcpu *vcpu);
-extern unsigned long xive_rm_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server);
-extern int xive_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
- unsigned long mfrr);
-extern int xive_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr);
-extern int xive_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr);
-
-extern unsigned long (*__xive_vm_h_xirr)(struct kvm_vcpu *vcpu);
-extern unsigned long (*__xive_vm_h_ipoll)(struct kvm_vcpu *vcpu, unsigned long server);
-extern int (*__xive_vm_h_ipi)(struct kvm_vcpu *vcpu, unsigned long server,
- unsigned long mfrr);
-extern int (*__xive_vm_h_cppr)(struct kvm_vcpu *vcpu, unsigned long cppr);
-extern int (*__xive_vm_h_eoi)(struct kvm_vcpu *vcpu, unsigned long xirr);
-
/*
* Common Xive routines for XICS-over-XIVE and XIVE native
*/
void kvmppc_xive_disable_vcpu_interrupts(struct kvm_vcpu *vcpu);
int kvmppc_xive_debug_show_queues(struct seq_file *m, struct kvm_vcpu *vcpu);
+void kvmppc_xive_debug_show_sources(struct seq_file *m,
+ struct kvmppc_xive_src_block *sb);
struct kvmppc_xive_src_block *kvmppc_xive_create_src_block(
struct kvmppc_xive *xive, int irq);
void kvmppc_xive_free_sources(struct kvmppc_xive_src_block *sb);
@@ -301,6 +303,12 @@ void xive_cleanup_single_escalation(struct kvm_vcpu *vcpu,
struct kvmppc_xive_vcpu *xc, int irq);
int kvmppc_xive_compute_vp_id(struct kvmppc_xive *xive, u32 cpu, u32 *vp);
int kvmppc_xive_set_nr_servers(struct kvmppc_xive *xive, u64 addr);
+bool kvmppc_xive_check_save_restore(struct kvm_vcpu *vcpu);
+
+static inline bool kvmppc_xive_has_single_escalation(struct kvmppc_xive *xive)
+{
+ return xive->flags & KVMPPC_XIVE_FLAG_SINGLE_ESCALATION;
+}
#endif /* CONFIG_KVM_XICS */
#endif /* _KVM_PPC_BOOK3S_XICS_H */
diff --git a/arch/powerpc/kvm/book3s_xive_native.c b/arch/powerpc/kvm/book3s_xive_native.c
index 6ef0151ff70a..5271c33fe79e 100644
--- a/arch/powerpc/kvm/book3s_xive_native.c
+++ b/arch/powerpc/kvm/book3s_xive_native.c
@@ -12,6 +12,7 @@
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/file.h>
+#include <linux/irqdomain.h>
#include <asm/uaccess.h>
#include <asm/kvm_book3s.h>
#include <asm/kvm_ppc.h>
@@ -19,7 +20,6 @@
#include <asm/xive.h>
#include <asm/xive-regs.h>
#include <asm/debug.h>
-#include <asm/debugfs.h>
#include <asm/opal.h>
#include <linux/debugfs.h>
@@ -31,8 +31,11 @@ static u8 xive_vm_esb_load(struct xive_irq_data *xd, u32 offset)
{
u64 val;
- if (xd->flags & XIVE_IRQ_FLAG_SHIFT_BUG)
- offset |= offset << 4;
+ /*
+ * The KVM XIVE native device does not use the XIVE_ESB_SET_PQ_10
+ * load operation, so there is no need to enforce load-after-store
+ * ordering.
+ */
val = in_be64(xd->eoi_mmio + offset);
return (u8)val;
@@ -89,7 +92,7 @@ void kvmppc_xive_native_cleanup_vcpu(struct kvm_vcpu *vcpu)
for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
/* Free the escalation irq */
if (xc->esc_virq[i]) {
- if (xc->xive->single_escalation)
+ if (kvmppc_xive_has_single_escalation(xc->xive))
xive_cleanup_single_escalation(vcpu, xc,
xc->esc_virq[i]);
free_irq(xc->esc_virq[i], vcpu);
@@ -164,11 +167,17 @@ int kvmppc_xive_native_connect_vcpu(struct kvm_device *dev,
goto bail;
}
+ if (!kvmppc_xive_check_save_restore(vcpu)) {
+ pr_err("inconsistent save-restore setup for VCPU %d\n", server_num);
+ rc = -EIO;
+ goto bail;
+ }
+
/*
* Enable the VP first as the single escalation mode will
* affect escalation interrupts numbering
*/
- rc = xive_native_enable_vp(xc->vp_id, xive->single_escalation);
+ rc = xive_native_enable_vp(xc->vp_id, kvmppc_xive_has_single_escalation(xive));
if (rc) {
pr_err("Failed to enable VP in OPAL: %d\n", rc);
goto bail;
@@ -200,7 +209,7 @@ static int kvmppc_xive_native_reset_mapped(struct kvm *kvm, unsigned long irq)
/*
* Clear the ESB pages of the IRQ number being mapped (or
- * unmapped) into the guest and let the the VM fault handler
+ * unmapped) into the guest and let the VM fault handler
* repopulate with the appropriate ESB pages (device or IC)
*/
pr_debug("clearing esb pages for girq 0x%lx\n", irq);
@@ -245,6 +254,13 @@ static vm_fault_t xive_native_esb_fault(struct vm_fault *vmf)
}
state = &sb->irq_state[src];
+
+ /* Some sanity checking */
+ if (!state->valid) {
+ pr_devel("%s: source %lx invalid !\n", __func__, irq);
+ return VM_FAULT_SIGBUS;
+ }
+
kvmppc_xive_select_irq(state, &hw_num, &xd);
arch_spin_lock(&sb->lock);
@@ -682,7 +698,7 @@ static int kvmppc_xive_native_set_queue_config(struct kvmppc_xive *xive,
}
rc = kvmppc_xive_attach_escalation(vcpu, priority,
- xive->single_escalation);
+ kvmppc_xive_has_single_escalation(xive));
error:
if (rc)
kvmppc_xive_native_cleanup_queue(vcpu, priority);
@@ -791,7 +807,7 @@ static int kvmppc_xive_reset(struct kvmppc_xive *xive)
{
struct kvm *kvm = xive->kvm;
struct kvm_vcpu *vcpu;
- unsigned int i;
+ unsigned long i;
pr_devel("%s\n", __func__);
@@ -809,7 +825,7 @@ static int kvmppc_xive_reset(struct kvmppc_xive *xive)
for (prio = 0; prio < KVMPPC_XIVE_Q_COUNT; prio++) {
/* Single escalation, no queue 7 */
- if (prio == 7 && xive->single_escalation)
+ if (prio == 7 && kvmppc_xive_has_single_escalation(xive))
break;
if (xc->esc_virq[prio]) {
@@ -900,7 +916,7 @@ static int kvmppc_xive_native_eq_sync(struct kvmppc_xive *xive)
{
struct kvm *kvm = xive->kvm;
struct kvm_vcpu *vcpu;
- unsigned int i;
+ unsigned long i;
pr_devel("%s\n", __func__);
@@ -1001,7 +1017,7 @@ static void kvmppc_xive_native_release(struct kvm_device *dev)
struct kvmppc_xive *xive = dev->private;
struct kvm *kvm = xive->kvm;
struct kvm_vcpu *vcpu;
- int i;
+ unsigned long i;
pr_devel("Releasing xive native device\n");
@@ -1100,7 +1116,12 @@ static int kvmppc_xive_native_create(struct kvm_device *dev, u32 type)
*/
xive->nr_servers = KVM_MAX_VCPUS;
- xive->single_escalation = xive_native_has_single_escalation();
+ if (xive_native_has_single_escalation())
+ xive->flags |= KVMPPC_XIVE_FLAG_SINGLE_ESCALATION;
+
+ if (xive_native_has_save_restore())
+ xive->flags |= KVMPPC_XIVE_FLAG_SAVE_RESTORE;
+
xive->ops = &kvmppc_xive_native_ops;
kvm->arch.xive = xive;
@@ -1193,7 +1214,7 @@ static int xive_native_debug_show(struct seq_file *m, void *private)
struct kvmppc_xive *xive = m->private;
struct kvm *kvm = xive->kvm;
struct kvm_vcpu *vcpu;
- unsigned int i;
+ unsigned long i;
if (!kvm)
return 0;
@@ -1206,53 +1227,47 @@ static int xive_native_debug_show(struct seq_file *m, void *private)
if (!xc)
continue;
- seq_printf(m, "cpu server %#x VP=%#x NSR=%02x CPPR=%02x IBP=%02x PIPR=%02x w01=%016llx w2=%08x\n",
- xc->server_num, xc->vp_id,
+ seq_printf(m, "VCPU %d: VP=%#x/%02x\n"
+ " NSR=%02x CPPR=%02x IBP=%02x PIPR=%02x w01=%016llx w2=%08x\n",
+ xc->server_num, xc->vp_id, xc->vp_chip_id,
vcpu->arch.xive_saved_state.nsr,
vcpu->arch.xive_saved_state.cppr,
vcpu->arch.xive_saved_state.ipb,
vcpu->arch.xive_saved_state.pipr,
- vcpu->arch.xive_saved_state.w01,
- (u32) vcpu->arch.xive_cam_word);
+ be64_to_cpu(vcpu->arch.xive_saved_state.w01),
+ be32_to_cpu(vcpu->arch.xive_cam_word));
kvmppc_xive_debug_show_queues(m, vcpu);
}
- return 0;
-}
+ seq_puts(m, "=========\nSources\n=========\n");
-static int xive_native_debug_open(struct inode *inode, struct file *file)
-{
- return single_open(file, xive_native_debug_show, inode->i_private);
+ for (i = 0; i <= xive->max_sbid; i++) {
+ struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
+
+ if (sb) {
+ arch_spin_lock(&sb->lock);
+ kvmppc_xive_debug_show_sources(m, sb);
+ arch_spin_unlock(&sb->lock);
+ }
+ }
+
+ return 0;
}
-static const struct file_operations xive_native_debug_fops = {
- .open = xive_native_debug_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(xive_native_debug);
static void xive_native_debugfs_init(struct kvmppc_xive *xive)
{
- char *name;
-
- name = kasprintf(GFP_KERNEL, "kvm-xive-%p", xive);
- if (!name) {
- pr_err("%s: no memory for name\n", __func__);
- return;
- }
-
- xive->dentry = debugfs_create_file(name, 0444, powerpc_debugfs_root,
+ xive->dentry = debugfs_create_file("xive", 0444, xive->kvm->debugfs_dentry,
xive, &xive_native_debug_fops);
- pr_debug("%s: created %s\n", __func__, name);
- kfree(name);
+ pr_debug("%s: created\n", __func__);
}
static void kvmppc_xive_native_init(struct kvm_device *dev)
{
- struct kvmppc_xive *xive = (struct kvmppc_xive *)dev->private;
+ struct kvmppc_xive *xive = dev->private;
/* Register some debug interfaces */
xive_native_debugfs_init(xive);
@@ -1268,13 +1283,3 @@ struct kvm_device_ops kvm_xive_native_ops = {
.has_attr = kvmppc_xive_native_has_attr,
.mmap = kvmppc_xive_native_mmap,
};
-
-void kvmppc_xive_native_init_module(void)
-{
- ;
-}
-
-void kvmppc_xive_native_exit_module(void)
-{
- ;
-}
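For context on the debugfs conversion in the two XIVE device files above: DEFINE_SHOW_ATTRIBUTE() from <linux/seq_file.h> generates the single_open()-based open handler and the file_operations that were previously spelled out by hand, and creating the file under the per-VM kvm->debugfs_dentry removes the need for a kasprintf()'d unique name. A minimal sketch of the same pattern, using a hypothetical "foo" device that is not part of this patch:

	#include <linux/debugfs.h>
	#include <linux/seq_file.h>

	struct foo {
		unsigned int state;
		struct dentry *dentry;
	};

	static int foo_debug_show(struct seq_file *m, void *private)
	{
		struct foo *foo = m->private;	/* data pointer passed to debugfs_create_file() */

		seq_printf(m, "state=%u\n", foo->state);
		return 0;
	}
	/* Generates foo_debug_open() wrapping single_open(), plus foo_debug_fops */
	DEFINE_SHOW_ATTRIBUTE(foo_debug);

	static void foo_debugfs_init(struct foo *foo, struct dentry *parent)
	{
		/* one fixed file name per parent directory, no kasprintf()/kfree() needed */
		foo->dentry = debugfs_create_file("foo", 0444, parent, foo, &foo_debug_fops);
	}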
diff --git a/arch/powerpc/kvm/book3s_xive_template.c b/arch/powerpc/kvm/book3s_xive_template.c
deleted file mode 100644
index a8a900ace1e6..000000000000
--- a/arch/powerpc/kvm/book3s_xive_template.c
+++ /dev/null
@@ -1,638 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright 2017 Benjamin Herrenschmidt, IBM Corporation
- */
-
-/* File to be included by other .c files */
-
-#define XGLUE(a,b) a##b
-#define GLUE(a,b) XGLUE(a,b)
-
-/* Dummy interrupt used when taking interrupts out of a queue in H_CPPR */
-#define XICS_DUMMY 1
-
-static void GLUE(X_PFX,ack_pending)(struct kvmppc_xive_vcpu *xc)
-{
- u8 cppr;
- u16 ack;
-
- /*
- * Ensure any previous store to CPPR is ordered vs.
- * the subsequent loads from PIPR or ACK.
- */
- eieio();
-
- /* Perform the acknowledge OS to register cycle. */
- ack = be16_to_cpu(__x_readw(__x_tima + TM_SPC_ACK_OS_REG));
-
- /* Synchronize subsequent queue accesses */
- mb();
-
- /* XXX Check grouping level */
-
- /* Anything ? */
- if (!((ack >> 8) & TM_QW1_NSR_EO))
- return;
-
- /* Grab CPPR of the most favored pending interrupt */
- cppr = ack & 0xff;
- if (cppr < 8)
- xc->pending |= 1 << cppr;
-
-#ifdef XIVE_RUNTIME_CHECKS
- /* Check consistency */
- if (cppr >= xc->hw_cppr)
- pr_warn("KVM-XIVE: CPU %d odd ack CPPR, got %d at %d\n",
- smp_processor_id(), cppr, xc->hw_cppr);
-#endif
-
- /*
- * Update our image of the HW CPPR. We don't yet modify
- * xc->cppr, this will be done as we scan for interrupts
- * in the queues.
- */
- xc->hw_cppr = cppr;
-}
-
-static u8 GLUE(X_PFX,esb_load)(struct xive_irq_data *xd, u32 offset)
-{
- u64 val;
-
- if (xd->flags & XIVE_IRQ_FLAG_SHIFT_BUG)
- offset |= offset << 4;
-
- val =__x_readq(__x_eoi_page(xd) + offset);
-#ifdef __LITTLE_ENDIAN__
- val >>= 64-8;
-#endif
- return (u8)val;
-}
-
-
-static void GLUE(X_PFX,source_eoi)(u32 hw_irq, struct xive_irq_data *xd)
-{
- /* If the XIVE supports the new "store EOI facility, use it */
- if (xd->flags & XIVE_IRQ_FLAG_STORE_EOI)
- __x_writeq(0, __x_eoi_page(xd) + XIVE_ESB_STORE_EOI);
- else if (hw_irq && xd->flags & XIVE_IRQ_FLAG_EOI_FW)
- opal_int_eoi(hw_irq);
- else if (xd->flags & XIVE_IRQ_FLAG_LSI) {
- /*
- * For LSIs the HW EOI cycle is used rather than PQ bits,
- * as they are automatically re-triggred in HW when still
- * pending.
- */
- __x_readq(__x_eoi_page(xd) + XIVE_ESB_LOAD_EOI);
- } else {
- uint64_t eoi_val;
-
- /*
- * Otherwise for EOI, we use the special MMIO that does
- * a clear of both P and Q and returns the old Q,
- * except for LSIs where we use the "EOI cycle" special
- * load.
- *
- * This allows us to then do a re-trigger if Q was set
- * rather than synthetizing an interrupt in software
- */
- eoi_val = GLUE(X_PFX,esb_load)(xd, XIVE_ESB_SET_PQ_00);
-
- /* Re-trigger if needed */
- if ((eoi_val & 1) && __x_trig_page(xd))
- __x_writeq(0, __x_trig_page(xd));
- }
-}
-
-enum {
- scan_fetch,
- scan_poll,
- scan_eoi,
-};
-
-static u32 GLUE(X_PFX,scan_interrupts)(struct kvmppc_xive_vcpu *xc,
- u8 pending, int scan_type)
-{
- u32 hirq = 0;
- u8 prio = 0xff;
-
- /* Find highest pending priority */
- while ((xc->mfrr != 0xff || pending != 0) && hirq == 0) {
- struct xive_q *q;
- u32 idx, toggle;
- __be32 *qpage;
-
- /*
- * If pending is 0 this will return 0xff which is what
- * we want
- */
- prio = ffs(pending) - 1;
-
- /* Don't scan past the guest cppr */
- if (prio >= xc->cppr || prio > 7) {
- if (xc->mfrr < xc->cppr) {
- prio = xc->mfrr;
- hirq = XICS_IPI;
- }
- break;
- }
-
- /* Grab queue and pointers */
- q = &xc->queues[prio];
- idx = q->idx;
- toggle = q->toggle;
-
- /*
- * Snapshot the queue page. The test further down for EOI
- * must use the same "copy" that was used by __xive_read_eq
- * since qpage can be set concurrently and we don't want
- * to miss an EOI.
- */
- qpage = READ_ONCE(q->qpage);
-
-skip_ipi:
- /*
- * Try to fetch from the queue. Will return 0 for a
- * non-queueing priority (ie, qpage = 0).
- */
- hirq = __xive_read_eq(qpage, q->msk, &idx, &toggle);
-
- /*
- * If this was a signal for an MFFR change done by
- * H_IPI we skip it. Additionally, if we were fetching
- * we EOI it now, thus re-enabling reception of a new
- * such signal.
- *
- * We also need to do that if prio is 0 and we had no
- * page for the queue. In this case, we have non-queued
- * IPI that needs to be EOId.
- *
- * This is safe because if we have another pending MFRR
- * change that wasn't observed above, the Q bit will have
- * been set and another occurrence of the IPI will trigger.
- */
- if (hirq == XICS_IPI || (prio == 0 && !qpage)) {
- if (scan_type == scan_fetch) {
- GLUE(X_PFX,source_eoi)(xc->vp_ipi,
- &xc->vp_ipi_data);
- q->idx = idx;
- q->toggle = toggle;
- }
- /* Loop back on same queue with updated idx/toggle */
-#ifdef XIVE_RUNTIME_CHECKS
- WARN_ON(hirq && hirq != XICS_IPI);
-#endif
- if (hirq)
- goto skip_ipi;
- }
-
- /* If it's the dummy interrupt, continue searching */
- if (hirq == XICS_DUMMY)
- goto skip_ipi;
-
- /* Clear the pending bit if the queue is now empty */
- if (!hirq) {
- pending &= ~(1 << prio);
-
- /*
- * Check if the queue count needs adjusting due to
- * interrupts being moved away.
- */
- if (atomic_read(&q->pending_count)) {
- int p = atomic_xchg(&q->pending_count, 0);
- if (p) {
-#ifdef XIVE_RUNTIME_CHECKS
- WARN_ON(p > atomic_read(&q->count));
-#endif
- atomic_sub(p, &q->count);
- }
- }
- }
-
- /*
- * If the most favoured prio we found pending is less
- * favored (or equal) than a pending IPI, we return
- * the IPI instead.
- */
- if (prio >= xc->mfrr && xc->mfrr < xc->cppr) {
- prio = xc->mfrr;
- hirq = XICS_IPI;
- break;
- }
-
- /* If fetching, update queue pointers */
- if (scan_type == scan_fetch) {
- q->idx = idx;
- q->toggle = toggle;
- }
- }
-
- /* If we are just taking a "peek", do nothing else */
- if (scan_type == scan_poll)
- return hirq;
-
- /* Update the pending bits */
- xc->pending = pending;
-
- /*
- * If this is an EOI that's it, no CPPR adjustment done here,
- * all we needed was cleanup the stale pending bits and check
- * if there's anything left.
- */
- if (scan_type == scan_eoi)
- return hirq;
-
- /*
- * If we found an interrupt, adjust what the guest CPPR should
- * be as if we had just fetched that interrupt from HW.
- *
- * Note: This can only make xc->cppr smaller as the previous
- * loop will only exit with hirq != 0 if prio is lower than
- * the current xc->cppr. Thus we don't need to re-check xc->mfrr
- * for pending IPIs.
- */
- if (hirq)
- xc->cppr = prio;
- /*
- * If it was an IPI the HW CPPR might have been lowered too much
- * as the HW interrupt we use for IPIs is routed to priority 0.
- *
- * We re-sync it here.
- */
- if (xc->cppr != xc->hw_cppr) {
- xc->hw_cppr = xc->cppr;
- __x_writeb(xc->cppr, __x_tima + TM_QW1_OS + TM_CPPR);
- }
-
- return hirq;
-}
-
-X_STATIC unsigned long GLUE(X_PFX,h_xirr)(struct kvm_vcpu *vcpu)
-{
- struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
- u8 old_cppr;
- u32 hirq;
-
- pr_devel("H_XIRR\n");
-
- xc->GLUE(X_STAT_PFX,h_xirr)++;
-
- /* First collect pending bits from HW */
- GLUE(X_PFX,ack_pending)(xc);
-
- pr_devel(" new pending=0x%02x hw_cppr=%d cppr=%d\n",
- xc->pending, xc->hw_cppr, xc->cppr);
-
- /* Grab previous CPPR and reverse map it */
- old_cppr = xive_prio_to_guest(xc->cppr);
-
- /* Scan for actual interrupts */
- hirq = GLUE(X_PFX,scan_interrupts)(xc, xc->pending, scan_fetch);
-
- pr_devel(" got hirq=0x%x hw_cppr=%d cppr=%d\n",
- hirq, xc->hw_cppr, xc->cppr);
-
-#ifdef XIVE_RUNTIME_CHECKS
- /* That should never hit */
- if (hirq & 0xff000000)
- pr_warn("XIVE: Weird guest interrupt number 0x%08x\n", hirq);
-#endif
-
- /*
- * XXX We could check if the interrupt is masked here and
- * filter it. If we chose to do so, we would need to do:
- *
- * if (masked) {
- * lock();
- * if (masked) {
- * old_Q = true;
- * hirq = 0;
- * }
- * unlock();
- * }
- */
-
- /* Return interrupt and old CPPR in GPR4 */
- vcpu->arch.regs.gpr[4] = hirq | (old_cppr << 24);
-
- return H_SUCCESS;
-}
-
-X_STATIC unsigned long GLUE(X_PFX,h_ipoll)(struct kvm_vcpu *vcpu, unsigned long server)
-{
- struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
- u8 pending = xc->pending;
- u32 hirq;
-
- pr_devel("H_IPOLL(server=%ld)\n", server);
-
- xc->GLUE(X_STAT_PFX,h_ipoll)++;
-
- /* Grab the target VCPU if not the current one */
- if (xc->server_num != server) {
- vcpu = kvmppc_xive_find_server(vcpu->kvm, server);
- if (!vcpu)
- return H_PARAMETER;
- xc = vcpu->arch.xive_vcpu;
-
- /* Scan all priorities */
- pending = 0xff;
- } else {
- /* Grab pending interrupt if any */
- __be64 qw1 = __x_readq(__x_tima + TM_QW1_OS);
- u8 pipr = be64_to_cpu(qw1) & 0xff;
- if (pipr < 8)
- pending |= 1 << pipr;
- }
-
- hirq = GLUE(X_PFX,scan_interrupts)(xc, pending, scan_poll);
-
- /* Return interrupt and old CPPR in GPR4 */
- vcpu->arch.regs.gpr[4] = hirq | (xc->cppr << 24);
-
- return H_SUCCESS;
-}
-
-static void GLUE(X_PFX,push_pending_to_hw)(struct kvmppc_xive_vcpu *xc)
-{
- u8 pending, prio;
-
- pending = xc->pending;
- if (xc->mfrr != 0xff) {
- if (xc->mfrr < 8)
- pending |= 1 << xc->mfrr;
- else
- pending |= 0x80;
- }
- if (!pending)
- return;
- prio = ffs(pending) - 1;
-
- __x_writeb(prio, __x_tima + TM_SPC_SET_OS_PENDING);
-}
-
-static void GLUE(X_PFX,scan_for_rerouted_irqs)(struct kvmppc_xive *xive,
- struct kvmppc_xive_vcpu *xc)
-{
- unsigned int prio;
-
- /* For each priority that is now masked */
- for (prio = xc->cppr; prio < KVMPPC_XIVE_Q_COUNT; prio++) {
- struct xive_q *q = &xc->queues[prio];
- struct kvmppc_xive_irq_state *state;
- struct kvmppc_xive_src_block *sb;
- u32 idx, toggle, entry, irq, hw_num;
- struct xive_irq_data *xd;
- __be32 *qpage;
- u16 src;
-
- idx = q->idx;
- toggle = q->toggle;
- qpage = READ_ONCE(q->qpage);
- if (!qpage)
- continue;
-
- /* For each interrupt in the queue */
- for (;;) {
- entry = be32_to_cpup(qpage + idx);
-
- /* No more ? */
- if ((entry >> 31) == toggle)
- break;
- irq = entry & 0x7fffffff;
-
- /* Skip dummies and IPIs */
- if (irq == XICS_DUMMY || irq == XICS_IPI)
- goto next;
- sb = kvmppc_xive_find_source(xive, irq, &src);
- if (!sb)
- goto next;
- state = &sb->irq_state[src];
-
- /* Has it been rerouted ? */
- if (xc->server_num == state->act_server)
- goto next;
-
- /*
- * Allright, it *has* been re-routed, kill it from
- * the queue.
- */
- qpage[idx] = cpu_to_be32((entry & 0x80000000) | XICS_DUMMY);
-
- /* Find the HW interrupt */
- kvmppc_xive_select_irq(state, &hw_num, &xd);
-
- /* If it's not an LSI, set PQ to 11 the EOI will force a resend */
- if (!(xd->flags & XIVE_IRQ_FLAG_LSI))
- GLUE(X_PFX,esb_load)(xd, XIVE_ESB_SET_PQ_11);
-
- /* EOI the source */
- GLUE(X_PFX,source_eoi)(hw_num, xd);
-
- next:
- idx = (idx + 1) & q->msk;
- if (idx == 0)
- toggle ^= 1;
- }
- }
-}
-
-X_STATIC int GLUE(X_PFX,h_cppr)(struct kvm_vcpu *vcpu, unsigned long cppr)
-{
- struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
- struct kvmppc_xive *xive = vcpu->kvm->arch.xive;
- u8 old_cppr;
-
- pr_devel("H_CPPR(cppr=%ld)\n", cppr);
-
- xc->GLUE(X_STAT_PFX,h_cppr)++;
-
- /* Map CPPR */
- cppr = xive_prio_from_guest(cppr);
-
- /* Remember old and update SW state */
- old_cppr = xc->cppr;
- xc->cppr = cppr;
-
- /*
- * Order the above update of xc->cppr with the subsequent
- * read of xc->mfrr inside push_pending_to_hw()
- */
- smp_mb();
-
- if (cppr > old_cppr) {
- /*
- * We are masking less, we need to look for pending things
- * to deliver and set VP pending bits accordingly to trigger
- * a new interrupt otherwise we might miss MFRR changes for
- * which we have optimized out sending an IPI signal.
- */
- GLUE(X_PFX,push_pending_to_hw)(xc);
- } else {
- /*
- * We are masking more, we need to check the queue for any
- * interrupt that has been routed to another CPU, take
- * it out (replace it with the dummy) and retrigger it.
- *
- * This is necessary since those interrupts may otherwise
- * never be processed, at least not until this CPU restores
- * its CPPR.
- *
- * This is in theory racy vs. HW adding new interrupts to
- * the queue. In practice this works because the interesting
- * cases are when the guest has done a set_xive() to move the
- * interrupt away, which flushes the xive, followed by the
- * target CPU doing a H_CPPR. So any new interrupt coming into
- * the queue must still be routed to us and isn't a source
- * of concern.
- */
- GLUE(X_PFX,scan_for_rerouted_irqs)(xive, xc);
- }
-
- /* Apply new CPPR */
- xc->hw_cppr = cppr;
- __x_writeb(cppr, __x_tima + TM_QW1_OS + TM_CPPR);
-
- return H_SUCCESS;
-}
-
-X_STATIC int GLUE(X_PFX,h_eoi)(struct kvm_vcpu *vcpu, unsigned long xirr)
-{
- struct kvmppc_xive *xive = vcpu->kvm->arch.xive;
- struct kvmppc_xive_src_block *sb;
- struct kvmppc_xive_irq_state *state;
- struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
- struct xive_irq_data *xd;
- u8 new_cppr = xirr >> 24;
- u32 irq = xirr & 0x00ffffff, hw_num;
- u16 src;
- int rc = 0;
-
- pr_devel("H_EOI(xirr=%08lx)\n", xirr);
-
- xc->GLUE(X_STAT_PFX,h_eoi)++;
-
- xc->cppr = xive_prio_from_guest(new_cppr);
-
- /*
- * IPIs are synthetized from MFRR and thus don't need
- * any special EOI handling. The underlying interrupt
- * used to signal MFRR changes is EOId when fetched from
- * the queue.
- */
- if (irq == XICS_IPI || irq == 0) {
- /*
- * This barrier orders the setting of xc->cppr vs.
- * subsquent test of xc->mfrr done inside
- * scan_interrupts and push_pending_to_hw
- */
- smp_mb();
- goto bail;
- }
-
- /* Find interrupt source */
- sb = kvmppc_xive_find_source(xive, irq, &src);
- if (!sb) {
- pr_devel(" source not found !\n");
- rc = H_PARAMETER;
- /* Same as above */
- smp_mb();
- goto bail;
- }
- state = &sb->irq_state[src];
- kvmppc_xive_select_irq(state, &hw_num, &xd);
-
- state->in_eoi = true;
-
- /*
- * This barrier orders both setting of in_eoi above vs,
- * subsequent test of guest_priority, and the setting
- * of xc->cppr vs. subsquent test of xc->mfrr done inside
- * scan_interrupts and push_pending_to_hw
- */
- smp_mb();
-
-again:
- if (state->guest_priority == MASKED) {
- arch_spin_lock(&sb->lock);
- if (state->guest_priority != MASKED) {
- arch_spin_unlock(&sb->lock);
- goto again;
- }
- pr_devel(" EOI on saved P...\n");
-
- /* Clear old_p, that will cause unmask to perform an EOI */
- state->old_p = false;
-
- arch_spin_unlock(&sb->lock);
- } else {
- pr_devel(" EOI on source...\n");
-
- /* Perform EOI on the source */
- GLUE(X_PFX,source_eoi)(hw_num, xd);
-
- /* If it's an emulated LSI, check level and resend */
- if (state->lsi && state->asserted)
- __x_writeq(0, __x_trig_page(xd));
-
- }
-
- /*
- * This barrier orders the above guest_priority check
- * and spin_lock/unlock with clearing in_eoi below.
- *
- * It also has to be a full mb() as it must ensure
- * the MMIOs done in source_eoi() are completed before
- * state->in_eoi is visible.
- */
- mb();
- state->in_eoi = false;
-bail:
-
- /* Re-evaluate pending IRQs and update HW */
- GLUE(X_PFX,scan_interrupts)(xc, xc->pending, scan_eoi);
- GLUE(X_PFX,push_pending_to_hw)(xc);
- pr_devel(" after scan pending=%02x\n", xc->pending);
-
- /* Apply new CPPR */
- xc->hw_cppr = xc->cppr;
- __x_writeb(xc->cppr, __x_tima + TM_QW1_OS + TM_CPPR);
-
- return rc;
-}
-
-X_STATIC int GLUE(X_PFX,h_ipi)(struct kvm_vcpu *vcpu, unsigned long server,
- unsigned long mfrr)
-{
- struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
-
- pr_devel("H_IPI(server=%08lx,mfrr=%ld)\n", server, mfrr);
-
- xc->GLUE(X_STAT_PFX,h_ipi)++;
-
- /* Find target */
- vcpu = kvmppc_xive_find_server(vcpu->kvm, server);
- if (!vcpu)
- return H_PARAMETER;
- xc = vcpu->arch.xive_vcpu;
-
- /* Locklessly write over MFRR */
- xc->mfrr = mfrr;
-
- /*
- * The load of xc->cppr below and the subsequent MMIO store
- * to the IPI must happen after the above mfrr update is
- * globally visible so that:
- *
- * - Synchronize with another CPU doing an H_EOI or a H_CPPR
- * updating xc->cppr then reading xc->mfrr.
- *
- * - The target of the IPI sees the xc->mfrr update
- */
- mb();
-
- /* Shoot the IPI if most favored than target cppr */
- if (mfrr < xc->cppr)
- __x_writeq(0, __x_trig_page(&xc->vp_ipi_data));
-
- return H_SUCCESS;
-}
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 7b27604adadf..7b4920e9fd26 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -20,6 +20,7 @@
#include <asm/cputable.h>
#include <linux/uaccess.h>
+#include <asm/interrupt.h>
#include <asm/kvm_ppc.h>
#include <asm/cacheflush.h>
#include <asm/dbell.h>
@@ -35,30 +36,54 @@
unsigned long kvmppc_booke_handlers;
-#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
-#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
-
-struct kvm_stats_debugfs_item debugfs_entries[] = {
- { "mmio", VCPU_STAT(mmio_exits) },
- { "sig", VCPU_STAT(signal_exits) },
- { "itlb_r", VCPU_STAT(itlb_real_miss_exits) },
- { "itlb_v", VCPU_STAT(itlb_virt_miss_exits) },
- { "dtlb_r", VCPU_STAT(dtlb_real_miss_exits) },
- { "dtlb_v", VCPU_STAT(dtlb_virt_miss_exits) },
- { "sysc", VCPU_STAT(syscall_exits) },
- { "isi", VCPU_STAT(isi_exits) },
- { "dsi", VCPU_STAT(dsi_exits) },
- { "inst_emu", VCPU_STAT(emulated_inst_exits) },
- { "dec", VCPU_STAT(dec_exits) },
- { "ext_intr", VCPU_STAT(ext_intr_exits) },
- { "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
- { "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) },
- { "halt_poll_invalid", VCPU_STAT(halt_poll_invalid) },
- { "halt_wakeup", VCPU_STAT(halt_wakeup) },
- { "doorbell", VCPU_STAT(dbell_exits) },
- { "guest doorbell", VCPU_STAT(gdbell_exits) },
- { "remote_tlb_flush", VM_STAT(remote_tlb_flush) },
- { NULL }
+const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
+ KVM_GENERIC_VM_STATS(),
+ STATS_DESC_ICOUNTER(VM, num_2M_pages),
+ STATS_DESC_ICOUNTER(VM, num_1G_pages)
+};
+
+const struct kvm_stats_header kvm_vm_stats_header = {
+ .name_size = KVM_STATS_NAME_SIZE,
+ .num_desc = ARRAY_SIZE(kvm_vm_stats_desc),
+ .id_offset = sizeof(struct kvm_stats_header),
+ .desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
+ .data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
+ sizeof(kvm_vm_stats_desc),
+};
+
+const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
+ KVM_GENERIC_VCPU_STATS(),
+ STATS_DESC_COUNTER(VCPU, sum_exits),
+ STATS_DESC_COUNTER(VCPU, mmio_exits),
+ STATS_DESC_COUNTER(VCPU, signal_exits),
+ STATS_DESC_COUNTER(VCPU, light_exits),
+ STATS_DESC_COUNTER(VCPU, itlb_real_miss_exits),
+ STATS_DESC_COUNTER(VCPU, itlb_virt_miss_exits),
+ STATS_DESC_COUNTER(VCPU, dtlb_real_miss_exits),
+ STATS_DESC_COUNTER(VCPU, dtlb_virt_miss_exits),
+ STATS_DESC_COUNTER(VCPU, syscall_exits),
+ STATS_DESC_COUNTER(VCPU, isi_exits),
+ STATS_DESC_COUNTER(VCPU, dsi_exits),
+ STATS_DESC_COUNTER(VCPU, emulated_inst_exits),
+ STATS_DESC_COUNTER(VCPU, dec_exits),
+ STATS_DESC_COUNTER(VCPU, ext_intr_exits),
+ STATS_DESC_COUNTER(VCPU, halt_successful_wait),
+ STATS_DESC_COUNTER(VCPU, dbell_exits),
+ STATS_DESC_COUNTER(VCPU, gdbell_exits),
+ STATS_DESC_COUNTER(VCPU, ld),
+ STATS_DESC_COUNTER(VCPU, st),
+ STATS_DESC_COUNTER(VCPU, pthru_all),
+ STATS_DESC_COUNTER(VCPU, pthru_host),
+ STATS_DESC_COUNTER(VCPU, pthru_bad_aff)
+};
+
+const struct kvm_stats_header kvm_vcpu_stats_header = {
+ .name_size = KVM_STATS_NAME_SIZE,
+ .num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
+ .id_offset = sizeof(struct kvm_stats_header),
+ .desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
+ .data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
+ sizeof(kvm_vcpu_stats_desc),
};
/* TODO: use vcpu_printf() */
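The debugfs_entries[] table that used to live here is replaced by the generic binary stats interface: each STATS_DESC_COUNTER(VCPU, x) entry describes a u64 field named x in the architecture's struct kvm_vcpu_stat, and the kvm_vcpu_stats_header offsets locate the descriptor table and the data block inside the buffer that KVM_GET_STATS_FD exposes to userspace. As a rough sketch (the field name below is made up for illustration), wiring in one more per-vCPU counter under this scheme amounts to:

	/* in the arch-specific struct kvm_vcpu_stat: */
	u64 my_new_exits;

	/* in kvm_vcpu_stats_desc[] above: */
	STATS_DESC_COUNTER(VCPU, my_new_exits),

	/* wherever the event is handled: */
	++vcpu->stat.my_new_exits;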
@@ -421,11 +446,11 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
case BOOKE_IRQPRIO_DATA_STORAGE:
case BOOKE_IRQPRIO_ALIGNMENT:
update_dear = true;
- /* fall through */
+ fallthrough;
case BOOKE_IRQPRIO_INST_STORAGE:
case BOOKE_IRQPRIO_PROGRAM:
update_esr = true;
- /* fall through */
+ fallthrough;
case BOOKE_IRQPRIO_ITLB_MISS:
case BOOKE_IRQPRIO_SYSCALL:
case BOOKE_IRQPRIO_FP_UNAVAIL:
@@ -459,7 +484,7 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
case BOOKE_IRQPRIO_DECREMENTER:
case BOOKE_IRQPRIO_FIT:
keep_irq = true;
- /* fall through */
+ fallthrough;
case BOOKE_IRQPRIO_EXTERNAL:
case BOOKE_IRQPRIO_DBELL:
allowed = vcpu->arch.shared->msr & MSR_EE;
@@ -501,11 +526,11 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
vcpu->arch.regs.nip = vcpu->arch.ivpr |
vcpu->arch.ivor[priority];
- if (update_esr == true)
+ if (update_esr)
kvmppc_set_esr(vcpu, vcpu->arch.queued_esr);
- if (update_dear == true)
+ if (update_dear)
kvmppc_set_dar(vcpu, vcpu->arch.queued_dear);
- if (update_epr == true) {
+ if (update_epr) {
if (vcpu->arch.epr_flags & KVMPPC_EPR_USER)
kvm_make_request(KVM_REQ_EPR_EXIT, vcpu);
else if (vcpu->arch.epr_flags & KVMPPC_EPR_KERNEL) {
@@ -693,13 +718,12 @@ int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
if (vcpu->arch.shared->msr & MSR_WE) {
local_irq_enable();
- kvm_vcpu_block(vcpu);
- kvm_clear_request(KVM_REQ_UNHALT, vcpu);
+ kvm_vcpu_halt(vcpu);
hard_irq_disable();
kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS);
r = 1;
- };
+ }
return r;
}
@@ -730,13 +754,13 @@ int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
return r;
}
-int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
+int kvmppc_vcpu_run(struct kvm_vcpu *vcpu)
{
int ret, s;
struct debug_reg debug;
if (!vcpu->arch.sane) {
- kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
return -EINVAL;
}
@@ -778,7 +802,7 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
vcpu->arch.pgdir = vcpu->kvm->mm->pgd;
kvmppc_fix_ee_before_entry();
- ret = __kvmppc_vcpu_run(kvm_run, vcpu);
+ ret = __kvmppc_vcpu_run(vcpu);
/* No need for guest_exit. It's done in handle_exit.
We also get here with interrupts enabled. */
@@ -800,11 +824,11 @@ out:
return ret;
}
-static int emulation_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
+static int emulation_exit(struct kvm_vcpu *vcpu)
{
enum emulation_result er;
- er = kvmppc_emulate_instruction(run, vcpu);
+ er = kvmppc_emulate_instruction(vcpu);
switch (er) {
case EMULATE_DONE:
/* don't overwrite subtypes, just account kvm_stats */
@@ -821,8 +845,8 @@ static int emulation_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
__func__, vcpu->arch.regs.nip, vcpu->arch.last_inst);
/* For debugging, encode the failing instruction and
* report it to userspace. */
- run->hw.hardware_exit_reason = ~0ULL << 32;
- run->hw.hardware_exit_reason |= vcpu->arch.last_inst;
+ vcpu->run->hw.hardware_exit_reason = ~0ULL << 32;
+ vcpu->run->hw.hardware_exit_reason |= vcpu->arch.last_inst;
kvmppc_core_queue_program(vcpu, ESR_PIL);
return RESUME_HOST;
@@ -834,8 +858,9 @@ static int emulation_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
}
}
-static int kvmppc_handle_debug(struct kvm_run *run, struct kvm_vcpu *vcpu)
+static int kvmppc_handle_debug(struct kvm_vcpu *vcpu)
{
+ struct kvm_run *run = vcpu->run;
struct debug_reg *dbg_reg = &(vcpu->arch.dbg_reg);
u32 dbsr = vcpu->arch.dbsr;
@@ -954,7 +979,7 @@ static void kvmppc_restart_interrupt(struct kvm_vcpu *vcpu,
}
}
-static int kvmppc_resume_inst_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
+static int kvmppc_resume_inst_load(struct kvm_vcpu *vcpu,
enum emulation_result emulated, u32 last_inst)
{
switch (emulated) {
@@ -966,8 +991,8 @@ static int kvmppc_resume_inst_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
__func__, vcpu->arch.regs.nip);
/* For debugging, encode the failing instruction and
* report it to userspace. */
- run->hw.hardware_exit_reason = ~0ULL << 32;
- run->hw.hardware_exit_reason |= last_inst;
+ vcpu->run->hw.hardware_exit_reason = ~0ULL << 32;
+ vcpu->run->hw.hardware_exit_reason |= last_inst;
kvmppc_core_queue_program(vcpu, ESR_PIL);
return RESUME_HOST;
@@ -981,9 +1006,9 @@ static int kvmppc_resume_inst_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
*
* Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
*/
-int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
- unsigned int exit_nr)
+int kvmppc_handle_exit(struct kvm_vcpu *vcpu, unsigned int exit_nr)
{
+ struct kvm_run *run = vcpu->run;
int r = RESUME_HOST;
int s;
int idx;
@@ -1016,7 +1041,21 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
}
trace_kvm_exit(exit_nr, vcpu);
- guest_exit_irqoff();
+
+ context_tracking_guest_exit();
+ if (!vtime_accounting_enabled_this_cpu()) {
+ local_irq_enable();
+ /*
+ * Service IRQs here before vtime_account_guest_exit() so any
+ * ticks that occurred while running the guest are accounted to
+ * the guest. If vtime accounting is enabled, accounting uses
+ * TB rather than ticks, so it can be done without enabling
+ * interrupts here, which has the problem that it accounts
+ * interrupt processing overhead to the host.
+ */
+ local_irq_disable();
+ }
+ vtime_account_guest_exit();
local_irq_enable();
@@ -1024,7 +1063,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
run->ready_for_interrupt_injection = 1;
if (emulated != EMULATE_DONE) {
- r = kvmppc_resume_inst_load(run, vcpu, emulated, last_inst);
+ r = kvmppc_resume_inst_load(vcpu, emulated, last_inst);
goto out;
}
@@ -1084,7 +1123,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
break;
case BOOKE_INTERRUPT_HV_PRIV:
- r = emulation_exit(run, vcpu);
+ r = emulation_exit(vcpu);
break;
case BOOKE_INTERRUPT_PROGRAM:
@@ -1094,7 +1133,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
* We are here because of an SW breakpoint instr,
* so lets return to host to handle.
*/
- r = kvmppc_handle_debug(run, vcpu);
+ r = kvmppc_handle_debug(vcpu);
run->exit_reason = KVM_EXIT_DEBUG;
kvmppc_account_exit(vcpu, DEBUG_EXITS);
break;
@@ -1115,7 +1154,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
break;
}
- r = emulation_exit(run, vcpu);
+ r = emulation_exit(vcpu);
break;
case BOOKE_INTERRUPT_FP_UNAVAIL:
@@ -1282,7 +1321,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
* actually RAM. */
vcpu->arch.paddr_accessed = gpaddr;
vcpu->arch.vaddr_accessed = eaddr;
- r = kvmppc_emulate_mmio(run, vcpu);
+ r = kvmppc_emulate_mmio(vcpu);
kvmppc_account_exit(vcpu, MMIO_EXITS);
}
@@ -1333,7 +1372,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
}
case BOOKE_INTERRUPT_DEBUG: {
- r = kvmppc_handle_debug(run, vcpu);
+ r = kvmppc_handle_debug(vcpu);
if (r == RESUME_HOST)
run->exit_reason = KVM_EXIT_DEBUG;
kvmppc_account_exit(vcpu, DEBUG_EXITS);
@@ -1747,12 +1786,12 @@ int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id,
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
@@ -1766,32 +1805,30 @@ int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
return r;
}
-int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
+void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
- return -ENOTSUPP;
+
}
-void kvmppc_core_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
- struct kvm_memory_slot *dont)
+int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
+ return -EOPNOTSUPP;
}
-int kvmppc_core_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
- unsigned long npages)
+void kvmppc_core_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
{
- return 0;
}
int kvmppc_core_prepare_memory_region(struct kvm *kvm,
- struct kvm_memory_slot *memslot,
- const struct kvm_userspace_memory_region *mem)
+ const struct kvm_memory_slot *old,
+ struct kvm_memory_slot *new,
+ enum kvm_mr_change change)
{
return 0;
}
void kvmppc_core_commit_memory_region(struct kvm *kvm,
- const struct kvm_userspace_memory_region *mem,
- const struct kvm_memory_slot *old,
+ struct kvm_memory_slot *old,
const struct kvm_memory_slot *new,
enum kvm_mr_change change)
{
@@ -2074,11 +2111,6 @@ void kvmppc_booke_vcpu_put(struct kvm_vcpu *vcpu)
kvmppc_clear_dbsr();
}
-void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
-{
- vcpu->kvm->arch.kvm_ops->mmu_destroy(vcpu);
-}
-
int kvmppc_core_init_vm(struct kvm *kvm)
{
return kvm->arch.kvm_ops->init_vm(kvm);
diff --git a/arch/powerpc/kvm/booke.h b/arch/powerpc/kvm/booke.h
index 9d3169fbce55..be9da96d9f06 100644
--- a/arch/powerpc/kvm/booke.h
+++ b/arch/powerpc/kvm/booke.h
@@ -70,7 +70,7 @@ void kvmppc_set_tcr(struct kvm_vcpu *vcpu, u32 new_tcr);
void kvmppc_set_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits);
void kvmppc_clr_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits);
-int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
+int kvmppc_booke_emulate_op(struct kvm_vcpu *vcpu,
unsigned int inst, int *advance);
int kvmppc_booke_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val);
int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val);
@@ -94,18 +94,12 @@ enum int_class {
void kvmppc_set_pending_interrupt(struct kvm_vcpu *vcpu, enum int_class type);
-extern void kvmppc_mmu_destroy_e500(struct kvm_vcpu *vcpu);
-extern int kvmppc_core_emulate_op_e500(struct kvm_run *run,
- struct kvm_vcpu *vcpu,
+extern int kvmppc_core_emulate_op_e500(struct kvm_vcpu *vcpu,
unsigned int inst, int *advance);
extern int kvmppc_core_emulate_mtspr_e500(struct kvm_vcpu *vcpu, int sprn,
ulong spr_val);
extern int kvmppc_core_emulate_mfspr_e500(struct kvm_vcpu *vcpu, int sprn,
ulong *spr_val);
-extern void kvmppc_mmu_destroy_e500(struct kvm_vcpu *vcpu);
-extern int kvmppc_core_emulate_op_e500(struct kvm_run *run,
- struct kvm_vcpu *vcpu,
- unsigned int inst, int *advance);
extern int kvmppc_core_emulate_mtspr_e500(struct kvm_vcpu *vcpu, int sprn,
ulong spr_val);
extern int kvmppc_core_emulate_mfspr_e500(struct kvm_vcpu *vcpu, int sprn,
diff --git a/arch/powerpc/kvm/booke_emulate.c b/arch/powerpc/kvm/booke_emulate.c
index 689ff5f90e9e..d8d38aca71bd 100644
--- a/arch/powerpc/kvm/booke_emulate.c
+++ b/arch/powerpc/kvm/booke_emulate.c
@@ -39,7 +39,7 @@ static void kvmppc_emul_rfci(struct kvm_vcpu *vcpu)
kvmppc_set_msr(vcpu, vcpu->arch.csrr1);
}
-int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
+int kvmppc_booke_emulate_op(struct kvm_vcpu *vcpu,
unsigned int inst, int *advance)
{
int emulated = EMULATE_DONE;
diff --git a/arch/powerpc/kvm/booke_interrupts.S b/arch/powerpc/kvm/booke_interrupts.S
index 2e56ab5a5f55..205545d820a1 100644
--- a/arch/powerpc/kvm/booke_interrupts.S
+++ b/arch/powerpc/kvm/booke_interrupts.S
@@ -223,7 +223,7 @@ _GLOBAL(kvmppc_resume_host)
lwz r3, VCPU_HOST_PID(r4)
mtspr SPRN_PID, r3
-#ifdef CONFIG_FSL_BOOKE
+#ifdef CONFIG_PPC_85xx
/* we cheat and know that Linux doesn't use PID1 which is always 0 */
lis r3, 0
mtspr SPRN_PID1, r3
@@ -237,7 +237,7 @@ _GLOBAL(kvmppc_resume_host)
/* Switch to kernel stack and jump to handler. */
LOAD_REG_ADDR(r3, kvmppc_handle_exit)
mtctr r3
- lwz r3, HOST_RUN(r1)
+ mr r3, r4
lwz r2, HOST_R2(r1)
mr r14, r4 /* Save vcpu pointer. */
@@ -337,15 +337,14 @@ heavyweight_exit:
/* Registers:
- * r3: kvm_run pointer
- * r4: vcpu pointer
+ * r3: vcpu pointer
*/
_GLOBAL(__kvmppc_vcpu_run)
stwu r1, -HOST_STACK_SIZE(r1)
- stw r1, VCPU_HOST_STACK(r4) /* Save stack pointer to vcpu. */
+ stw r1, VCPU_HOST_STACK(r3) /* Save stack pointer to vcpu. */
/* Save host state to stack. */
- stw r3, HOST_RUN(r1)
+ mr r4, r3
mflr r3
stw r3, HOST_STACK_LR(r1)
mfcr r5
@@ -407,7 +406,7 @@ lightweight_exit:
lwz r3, VCPU_SHADOW_PID(r4)
mtspr SPRN_PID, r3
-#ifdef CONFIG_FSL_BOOKE
+#ifdef CONFIG_PPC_85xx
lwz r3, VCPU_SHADOW_PID1(r4)
mtspr SPRN_PID1, r3
#endif
diff --git a/arch/powerpc/kvm/bookehv_interrupts.S b/arch/powerpc/kvm/bookehv_interrupts.S
index c577ba4b3169..8262c14fc9e6 100644
--- a/arch/powerpc/kvm/bookehv_interrupts.S
+++ b/arch/powerpc/kvm/bookehv_interrupts.S
@@ -434,9 +434,10 @@ _GLOBAL(kvmppc_resume_host)
#endif
/* Switch to kernel stack and jump to handler. */
- PPC_LL r3, HOST_RUN(r1)
+ mr r3, r4
mr r5, r14 /* intno */
mr r14, r4 /* Save vcpu pointer. */
+ mr r4, r5
bl kvmppc_handle_exit
/* Restore vcpu pointer and the nonvolatiles we used. */
@@ -525,15 +526,14 @@ heavyweight_exit:
blr
/* Registers:
- * r3: kvm_run pointer
- * r4: vcpu pointer
+ * r3: vcpu pointer
*/
_GLOBAL(__kvmppc_vcpu_run)
stwu r1, -HOST_STACK_SIZE(r1)
- PPC_STL r1, VCPU_HOST_STACK(r4) /* Save stack pointer to vcpu. */
+ PPC_STL r1, VCPU_HOST_STACK(r3) /* Save stack pointer to vcpu. */
/* Save host state to stack. */
- PPC_STL r3, HOST_RUN(r1)
+ mr r4, r3
mflr r3
mfcr r5
PPC_STL r3, HOST_STACK_LR(r1)
diff --git a/arch/powerpc/kvm/e500.c b/arch/powerpc/kvm/e500.c
index f2b4feaff6d2..c8b2b4478545 100644
--- a/arch/powerpc/kvm/e500.c
+++ b/arch/powerpc/kvm/e500.c
@@ -490,12 +490,12 @@ static struct kvmppc_ops kvm_ops_e500 = {
.vcpu_put = kvmppc_core_vcpu_put_e500,
.vcpu_create = kvmppc_core_vcpu_create_e500,
.vcpu_free = kvmppc_core_vcpu_free_e500,
- .mmu_destroy = kvmppc_mmu_destroy_e500,
.init_vm = kvmppc_core_init_vm_e500,
.destroy_vm = kvmppc_core_destroy_vm_e500,
.emulate_op = kvmppc_core_emulate_op_e500,
.emulate_mtspr = kvmppc_core_emulate_mtspr_e500,
.emulate_mfspr = kvmppc_core_emulate_mfspr_e500,
+ .create_vcpu_debugfs = kvmppc_create_vcpu_debugfs_e500,
};
static int __init kvmppc_e500_init(void)
diff --git a/arch/powerpc/kvm/e500.h b/arch/powerpc/kvm/e500.h
index c3ef751465fb..6d0d329cbb35 100644
--- a/arch/powerpc/kvm/e500.h
+++ b/arch/powerpc/kvm/e500.h
@@ -17,7 +17,7 @@
#define KVM_E500_H
#include <linux/kvm_host.h>
-#include <asm/nohash/mmu-book3e.h>
+#include <asm/nohash/mmu-e500.h>
#include <asm/tlb.h>
#include <asm/cputhreads.h>
diff --git a/arch/powerpc/kvm/e500_emulate.c b/arch/powerpc/kvm/e500_emulate.c
index 3d0d3ec5be96..051102d50c31 100644
--- a/arch/powerpc/kvm/e500_emulate.c
+++ b/arch/powerpc/kvm/e500_emulate.c
@@ -65,7 +65,7 @@ static int kvmppc_e500_emul_msgsnd(struct kvm_vcpu *vcpu, int rb)
ulong param = vcpu->arch.regs.gpr[rb];
int prio = dbell2prio(rb);
int pir = param & PPC_DBELL_PIR_MASK;
- int i;
+ unsigned long i;
struct kvm_vcpu *cvcpu;
if (prio < 0)
@@ -83,16 +83,16 @@ static int kvmppc_e500_emul_msgsnd(struct kvm_vcpu *vcpu, int rb)
}
#endif
-static int kvmppc_e500_emul_ehpriv(struct kvm_run *run, struct kvm_vcpu *vcpu,
+static int kvmppc_e500_emul_ehpriv(struct kvm_vcpu *vcpu,
unsigned int inst, int *advance)
{
int emulated = EMULATE_DONE;
switch (get_oc(inst)) {
case EHPRIV_OC_DEBUG:
- run->exit_reason = KVM_EXIT_DEBUG;
- run->debug.arch.address = vcpu->arch.regs.nip;
- run->debug.arch.status = 0;
+ vcpu->run->exit_reason = KVM_EXIT_DEBUG;
+ vcpu->run->debug.arch.address = vcpu->arch.regs.nip;
+ vcpu->run->debug.arch.status = 0;
kvmppc_account_exit(vcpu, DEBUG_EXITS);
emulated = EMULATE_EXIT_USER;
*advance = 0;
@@ -125,7 +125,7 @@ static int kvmppc_e500_emul_mftmr(struct kvm_vcpu *vcpu, unsigned int inst,
return EMULATE_FAIL;
}
-int kvmppc_core_emulate_op_e500(struct kvm_run *run, struct kvm_vcpu *vcpu,
+int kvmppc_core_emulate_op_e500(struct kvm_vcpu *vcpu,
unsigned int inst, int *advance)
{
int emulated = EMULATE_DONE;
@@ -182,8 +182,7 @@ int kvmppc_core_emulate_op_e500(struct kvm_run *run, struct kvm_vcpu *vcpu,
break;
case XOP_EHPRIV:
- emulated = kvmppc_e500_emul_ehpriv(run, vcpu, inst,
- advance);
+ emulated = kvmppc_e500_emul_ehpriv(vcpu, inst, advance);
break;
default:
@@ -197,7 +196,7 @@ int kvmppc_core_emulate_op_e500(struct kvm_run *run, struct kvm_vcpu *vcpu,
}
if (emulated == EMULATE_FAIL)
- emulated = kvmppc_booke_emulate_op(run, vcpu, inst, advance);
+ emulated = kvmppc_booke_emulate_op(vcpu, inst, advance);
return emulated;
}
diff --git a/arch/powerpc/kvm/e500_mmu.c b/arch/powerpc/kvm/e500_mmu.c
index 2d910b87e441..e131fbecdcc4 100644
--- a/arch/powerpc/kvm/e500_mmu.c
+++ b/arch/powerpc/kvm/e500_mmu.c
@@ -533,10 +533,6 @@ gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int index,
return get_tlb_raddr(gtlbe) | (eaddr & pgmask);
}
-void kvmppc_mmu_destroy_e500(struct kvm_vcpu *vcpu)
-{
-}
-
/*****************************************/
static void free_gtlb(struct kvmppc_vcpu_e500 *vcpu_e500)
diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c
index 425d13806645..05668e964140 100644
--- a/arch/powerpc/kvm/e500_mmu_host.c
+++ b/arch/powerpc/kvm/e500_mmu_host.c
@@ -339,7 +339,7 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
unsigned long flags;
/* used to check for invalidations in progress */
- mmu_seq = kvm->mmu_notifier_seq;
+ mmu_seq = kvm->mmu_invalidate_seq;
smp_rmb();
/*
@@ -355,7 +355,7 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
if (tlbsel == 1) {
struct vm_area_struct *vma;
- down_read(&kvm->mm->mmap_sem);
+ mmap_read_lock(kvm->mm);
vma = find_vma(kvm->mm, hva);
if (vma && hva >= vma->vm_start &&
@@ -422,7 +422,7 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
break;
}
} else if (vma && hva >= vma->vm_start &&
- (vma->vm_flags & VM_HUGETLB)) {
+ is_vm_hugetlb_page(vma)) {
unsigned long psize = vma_kernel_pagesize(vma);
tsize = (gtlbe->mas1 & MAS1_TSIZE_MASK) >>
@@ -441,7 +441,7 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
tsize = max(BOOK3E_PAGESZ_4K, tsize & ~1);
}
- up_read(&kvm->mm->mmap_sem);
+ mmap_read_unlock(kvm->mm);
}
if (likely(!pfnmap)) {
@@ -460,7 +460,7 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
}
spin_lock(&kvm->mmu_lock);
- if (mmu_notifier_retry(kvm, mmu_seq)) {
+ if (mmu_invalidate_retry(kvm, mmu_seq)) {
ret = -EAGAIN;
goto out;
}
@@ -721,44 +721,36 @@ int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
/************* MMU Notifiers *************/
-static int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
+static bool kvm_e500_mmu_unmap_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
- trace_kvm_unmap_hva(hva);
-
/*
* Flush all shadow tlb entries everywhere. This is slow, but
* we are 100% sure that we catch the to be unmapped page
*/
- kvm_flush_remote_tlbs(kvm);
-
- return 0;
+ return true;
}
-int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
+bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
{
- /* kvm_unmap_hva flushes everything anyways */
- kvm_unmap_hva(kvm, start);
-
- return 0;
+ return kvm_e500_mmu_unmap_gfn(kvm, range);
}
-int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
+bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
/* XXX could be more clever ;) */
- return 0;
+ return false;
}
-int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
+bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
/* XXX could be more clever ;) */
- return 0;
+ return false;
}
-int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
+bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
/* The page will get remapped properly on its next fault */
- kvm_unmap_hva(kvm, hva);
- return 0;
+ return kvm_e500_mmu_unmap_gfn(kvm, range);
}
/*****************************************/
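One note on the shadow-map changes above: mmu_notifier_seq and mmu_notifier_retry() are only renamed to mmu_invalidate_seq and mmu_invalidate_retry(); the surrounding pattern is unchanged. Roughly, with the e500-specific details elided, it reads:

	/* snapshot the invalidation counter before resolving the guest page */
	mmu_seq = kvm->mmu_invalidate_seq;
	smp_rmb();

	/* ... translate the gfn to a host pfn, possibly sleeping ... */

	spin_lock(&kvm->mmu_lock);
	if (mmu_invalidate_retry(kvm, mmu_seq)) {
		/* an invalidation raced with us: drop the mapping and refault */
		ret = -EAGAIN;
		goto out;
	}
	/* safe to install the shadow TLB entry while holding mmu_lock */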
diff --git a/arch/powerpc/kvm/e500mc.c b/arch/powerpc/kvm/e500mc.c
index e6b06cb2b92c..57e0ad6a2ca3 100644
--- a/arch/powerpc/kvm/e500mc.c
+++ b/arch/powerpc/kvm/e500mc.c
@@ -309,7 +309,7 @@ static int kvmppc_core_vcpu_create_e500mc(struct kvm_vcpu *vcpu)
BUILD_BUG_ON(offsetof(struct kvmppc_vcpu_e500, vcpu) != 0);
vcpu_e500 = to_e500(vcpu);
- /* Invalid PIR value -- this LPID dosn't have valid state on any cpu */
+ /* Invalid PIR value -- this LPID doesn't have valid state on any cpu */
vcpu->arch.oldpir = 0xffffffff;
err = kvmppc_e500_tlb_init(vcpu_e500);
@@ -376,12 +376,12 @@ static struct kvmppc_ops kvm_ops_e500mc = {
.vcpu_put = kvmppc_core_vcpu_put_e500mc,
.vcpu_create = kvmppc_core_vcpu_create_e500mc,
.vcpu_free = kvmppc_core_vcpu_free_e500mc,
- .mmu_destroy = kvmppc_mmu_destroy_e500,
.init_vm = kvmppc_core_init_vm_e500mc,
.destroy_vm = kvmppc_core_destroy_vm_e500mc,
.emulate_op = kvmppc_core_emulate_op_e500,
.emulate_mtspr = kvmppc_core_emulate_mtspr_e500,
.emulate_mfspr = kvmppc_core_emulate_mfspr_e500,
+ .create_vcpu_debugfs = kvmppc_create_vcpu_debugfs_e500,
};
static int __init kvmppc_e500mc_init(void)
@@ -399,7 +399,6 @@ static int __init kvmppc_e500mc_init(void)
* allocator.
*/
kvmppc_init_lpid(KVMPPC_NR_LPIDS/threads_per_core);
- kvmppc_claim_lpid(0); /* host */
r = kvm_init(NULL, sizeof(struct kvmppc_vcpu_e500), 0, THIS_MODULE);
if (r)
diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c
index 6fca38ca791f..ee1147c98cd8 100644
--- a/arch/powerpc/kvm/emulate.c
+++ b/arch/powerpc/kvm/emulate.c
@@ -191,7 +191,7 @@ static int kvmppc_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
/* XXX Should probably auto-generate instruction decoding for a particular core
* from opcode tables in the future. */
-int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
+int kvmppc_emulate_instruction(struct kvm_vcpu *vcpu)
{
u32 inst;
int rs, rt, sprn;
@@ -270,9 +270,9 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
* these are illegal instructions.
*/
if (inst == KVMPPC_INST_SW_BREAKPOINT) {
- run->exit_reason = KVM_EXIT_DEBUG;
- run->debug.arch.status = 0;
- run->debug.arch.address = kvmppc_get_pc(vcpu);
+ vcpu->run->exit_reason = KVM_EXIT_DEBUG;
+ vcpu->run->debug.arch.status = 0;
+ vcpu->run->debug.arch.address = kvmppc_get_pc(vcpu);
emulated = EMULATE_EXIT_USER;
advance = 0;
} else
@@ -285,7 +285,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
}
if (emulated == EMULATE_FAIL) {
- emulated = vcpu->kvm->arch.kvm_ops->emulate_op(run, vcpu, inst,
+ emulated = vcpu->kvm->arch.kvm_ops->emulate_op(vcpu, inst,
&advance);
if (emulated == EMULATE_AGAIN) {
advance = 0;
diff --git a/arch/powerpc/kvm/emulate_loadstore.c b/arch/powerpc/kvm/emulate_loadstore.c
index 1139bc56e004..cfc9114b87d0 100644
--- a/arch/powerpc/kvm/emulate_loadstore.c
+++ b/arch/powerpc/kvm/emulate_loadstore.c
@@ -71,10 +71,8 @@ static bool kvmppc_check_altivec_disabled(struct kvm_vcpu *vcpu)
*/
int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
{
- struct kvm_run *run = vcpu->run;
u32 inst;
enum emulation_result emulated = EMULATE_FAIL;
- int advance = 1;
struct instruction_op op;
/* this default type might be overwritten by subcategories */
@@ -95,19 +93,21 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
emulated = EMULATE_FAIL;
vcpu->arch.regs.msr = vcpu->arch.shared->msr;
- if (analyse_instr(&op, &vcpu->arch.regs, inst) == 0) {
+ if (analyse_instr(&op, &vcpu->arch.regs, ppc_inst(inst)) == 0) {
int type = op.type & INSTR_TYPE_MASK;
int size = GETSIZE(op.type);
+ vcpu->mmio_is_write = OP_IS_STORE(type);
+
switch (type) {
case LOAD: {
int instr_byte_swap = op.type & BYTEREV;
if (op.type & SIGNEXT)
- emulated = kvmppc_handle_loads(run, vcpu,
+ emulated = kvmppc_handle_loads(vcpu,
op.reg, size, !instr_byte_swap);
else
- emulated = kvmppc_handle_load(run, vcpu,
+ emulated = kvmppc_handle_load(vcpu,
op.reg, size, !instr_byte_swap);
if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
@@ -124,10 +124,10 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
vcpu->arch.mmio_sp64_extend = 1;
if (op.type & SIGNEXT)
- emulated = kvmppc_handle_loads(run, vcpu,
+ emulated = kvmppc_handle_loads(vcpu,
KVM_MMIO_REG_FPR|op.reg, size, 1);
else
- emulated = kvmppc_handle_load(run, vcpu,
+ emulated = kvmppc_handle_load(vcpu,
KVM_MMIO_REG_FPR|op.reg, size, 1);
if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
@@ -164,12 +164,12 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
if (size == 16) {
vcpu->arch.mmio_vmx_copy_nums = 2;
- emulated = kvmppc_handle_vmx_load(run,
- vcpu, KVM_MMIO_REG_VMX|op.reg,
+ emulated = kvmppc_handle_vmx_load(vcpu,
+ KVM_MMIO_REG_VMX|op.reg,
8, 1);
} else {
vcpu->arch.mmio_vmx_copy_nums = 1;
- emulated = kvmppc_handle_vmx_load(run, vcpu,
+ emulated = kvmppc_handle_vmx_load(vcpu,
KVM_MMIO_REG_VMX|op.reg,
size, 1);
}
@@ -217,7 +217,7 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
io_size_each = op.element_size;
}
- emulated = kvmppc_handle_vsx_load(run, vcpu,
+ emulated = kvmppc_handle_vsx_load(vcpu,
KVM_MMIO_REG_VSX|op.reg, io_size_each,
1, op.type & SIGNEXT);
break;
@@ -227,8 +227,7 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
/* if need byte reverse, op.val has been reversed by
* analyse_instr().
*/
- emulated = kvmppc_handle_store(run, vcpu, op.val,
- size, 1);
+ emulated = kvmppc_handle_store(vcpu, op.val, size, 1);
if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
kvmppc_set_gpr(vcpu, op.update_reg, op.ea);
@@ -250,7 +249,7 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
if (op.type & FPCONV)
vcpu->arch.mmio_sp64_extend = 1;
- emulated = kvmppc_handle_store(run, vcpu,
+ emulated = kvmppc_handle_store(vcpu,
VCPU_FPR(vcpu, op.reg), size, 1);
if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
@@ -290,12 +289,12 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
if (size == 16) {
vcpu->arch.mmio_vmx_copy_nums = 2;
- emulated = kvmppc_handle_vmx_store(run,
- vcpu, op.reg, 8, 1);
+ emulated = kvmppc_handle_vmx_store(vcpu,
+ op.reg, 8, 1);
} else {
vcpu->arch.mmio_vmx_copy_nums = 1;
- emulated = kvmppc_handle_vmx_store(run,
- vcpu, op.reg, size, 1);
+ emulated = kvmppc_handle_vmx_store(vcpu,
+ op.reg, size, 1);
}
break;
@@ -338,7 +337,7 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
io_size_each = op.element_size;
}
- emulated = kvmppc_handle_vsx_store(run, vcpu,
+ emulated = kvmppc_handle_vsx_store(vcpu,
op.reg, io_size_each, 1);
break;
}
@@ -357,15 +356,10 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
}
}
- if (emulated == EMULATE_FAIL) {
- advance = 0;
- kvmppc_core_queue_program(vcpu, 0);
- }
-
trace_kvm_ppc_instr(inst, kvmppc_get_pc(vcpu), emulated);
/* Advance past emulated instruction. */
- if (advance)
+ if (emulated != EMULATE_FAIL)
kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);
return emulated;
diff --git a/arch/powerpc/kvm/fpu.S b/arch/powerpc/kvm/fpu.S
index 3dfae0cb6228..315c94946bad 100644
--- a/arch/powerpc/kvm/fpu.S
+++ b/arch/powerpc/kvm/fpu.S
@@ -5,10 +5,10 @@
* Copyright (C) 2010 Alexander Graf (agraf@suse.de)
*/
+#include <linux/pgtable.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/mmu.h>
-#include <asm/pgtable.h>
#include <asm/cputable.h>
#include <asm/cache.h>
#include <asm/thread_info.h>
diff --git a/arch/powerpc/kvm/mpic.c b/arch/powerpc/kvm/mpic.c
index fe312c160d97..23e9c2bd9f27 100644
--- a/arch/powerpc/kvm/mpic.c
+++ b/arch/powerpc/kvm/mpic.c
@@ -32,7 +32,6 @@
#include <linux/uaccess.h>
#include <asm/mpic.h>
#include <asm/kvm_para.h>
-#include <asm/kvm_host.h>
#include <asm/kvm_ppc.h>
#include <kvm/iodev.h>
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 302e9dccdd6d..b850b0efa201 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -19,6 +19,7 @@
#include <linux/module.h>
#include <linux/irqbypass.h>
#include <linux/kvm_irqfd.h>
+#include <linux/of.h>
#include <asm/cputable.h>
#include <linux/uaccess.h>
#include <asm/kvm_ppc.h>
@@ -32,7 +33,7 @@
#include <asm/plpar_wrappers.h>
#endif
#include <asm/ultravisor.h>
-#include <asm/kvm_host.h>
+#include <asm/setup.h>
#include "timing.h"
#include "irq.h"
@@ -237,8 +238,7 @@ int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
break;
case EV_HCALL_TOKEN(EV_IDLE):
r = EV_SUCCESS;
- kvm_vcpu_block(vcpu);
- kvm_clear_request(KVM_REQ_UNHALT, vcpu);
+ kvm_vcpu_halt(vcpu);
break;
default:
r = EV_UNIMPLEMENTED;
@@ -280,7 +280,7 @@ out:
}
EXPORT_SYMBOL_GPL(kvmppc_sanity_check);
-int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
+int kvmppc_emulate_mmio(struct kvm_vcpu *vcpu)
{
enum emulation_result er;
int r;
@@ -296,7 +296,7 @@ int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
r = RESUME_GUEST;
break;
case EMULATE_DO_MMIO:
- run->exit_reason = KVM_EXIT_MMIO;
+ vcpu->run->exit_reason = KVM_EXIT_MMIO;
/* We must reload nonvolatiles because "update" load/store
* instructions modify register state. */
/* Future optimization: only reload non-volatiles if they were
@@ -308,9 +308,31 @@ int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
u32 last_inst;
kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
- /* XXX Deliver Program interrupt to guest. */
- pr_emerg("%s: emulation failed (%08x)\n", __func__, last_inst);
- r = RESUME_HOST;
+ kvm_debug_ratelimited("Guest access to device memory using unsupported instruction (opcode: %#08x)\n",
+ last_inst);
+
+ /*
+ * Injecting a Data Storage here is a bit more
+ * accurate since the instruction that caused the
+ * access could still be a valid one.
+ */
+ if (!IS_ENABLED(CONFIG_BOOKE)) {
+ ulong dsisr = DSISR_BADACCESS;
+
+ if (vcpu->mmio_is_write)
+ dsisr |= DSISR_ISSTORE;
+
+ kvmppc_core_queue_data_storage(vcpu, vcpu->arch.vaddr_accessed, dsisr);
+ } else {
+ /*
+ * BookE does not send a SIGBUS on a bad
+ * fault, so use a Program interrupt instead
+ * to avoid a fault loop.
+ */
+ kvmppc_core_queue_program(vcpu, 0);
+ }
+
+ r = RESUME_GUEST;
break;
}
default:
@@ -404,7 +426,10 @@ int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
return EMULATE_DONE;
}
- if (kvm_read_guest(vcpu->kvm, pte.raddr, ptr, size))
+ kvm_vcpu_srcu_read_lock(vcpu);
+ rc = kvm_read_guest(vcpu->kvm, pte.raddr, ptr, size);
+ kvm_vcpu_srcu_read_unlock(vcpu);
+ if (rc)
return EMULATE_DO_MMIO;
return EMULATE_DONE;
@@ -416,12 +441,12 @@ int kvm_arch_hardware_enable(void)
return 0;
}
-int kvm_arch_hardware_setup(void)
+int kvm_arch_hardware_setup(void *opaque)
{
return 0;
}
-int kvm_arch_check_processor_compat(void)
+int kvm_arch_check_processor_compat(void *opaque)
{
return kvmppc_core_check_processor_compat();
}
@@ -429,6 +454,8 @@ int kvm_arch_check_processor_compat(void)
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
struct kvmppc_ops *kvm_ops = NULL;
+ int r;
+
/*
* if we have both HV and PR enabled, default is HV
*/
@@ -450,20 +477,20 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
} else
goto err_out;
- if (kvm_ops->owner && !try_module_get(kvm_ops->owner))
+ if (!try_module_get(kvm_ops->owner))
return -ENOENT;
kvm->arch.kvm_ops = kvm_ops;
- return kvmppc_core_init_vm(kvm);
+ r = kvmppc_core_init_vm(kvm);
+ if (r)
+ module_put(kvm_ops->owner);
+ return r;
err_out:
return -EINVAL;
}
void kvm_arch_destroy_vm(struct kvm *kvm)
{
- unsigned int i;
- struct kvm_vcpu *vcpu;
-
#ifdef CONFIG_KVM_XICS
/*
* We call kick_all_cpus_sync() to ensure that all
@@ -474,14 +501,9 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
kick_all_cpus_sync();
#endif
- kvm_for_each_vcpu(i, vcpu, kvm)
- kvm_vcpu_destroy(vcpu);
+ kvm_destroy_vcpus(kvm);
mutex_lock(&kvm->lock);
- for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
- kvm->vcpus[i] = NULL;
-
- atomic_set(&kvm->online_vcpus, 0);
kvmppc_core_destroy_vm(kvm);
@@ -522,10 +544,10 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_IOEVENTFD:
case KVM_CAP_DEVICE_CTRL:
case KVM_CAP_IMMEDIATE_EXIT:
+ case KVM_CAP_SET_GUEST_DEBUG:
r = 1;
break;
case KVM_CAP_PPC_GUEST_DEBUG_SSTEP:
- /* fall through */
case KVM_CAP_PPC_PAIRED_SINGLES:
case KVM_CAP_PPC_OSI:
case KVM_CAP_PPC_GET_PVINFO:
@@ -609,8 +631,8 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
r = !!(hv_enabled && radix_enabled());
break;
case KVM_CAP_PPC_MMU_HASH_V3:
- r = !!(hv_enabled && cpu_has_feature(CPU_FTR_ARCH_300) &&
- cpu_has_feature(CPU_FTR_HVMODE));
+ r = !!(hv_enabled && kvmppc_hv_ops->hash_v3_possible &&
+ kvmppc_hv_ops->hash_v3_possible());
break;
case KVM_CAP_PPC_NESTED_HV:
r = !!(hv_enabled && kvmppc_hv_ops->enable_nested &&
@@ -639,15 +661,15 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
* implementations just count online CPUs.
*/
if (hv_enabled)
- r = num_present_cpus();
+ r = min_t(unsigned int, num_present_cpus(), KVM_MAX_VCPUS);
else
- r = num_online_cpus();
+ r = min_t(unsigned int, num_online_cpus(), KVM_MAX_VCPUS);
break;
case KVM_CAP_MAX_VCPUS:
r = KVM_MAX_VCPUS;
break;
case KVM_CAP_MAX_VCPU_ID:
- r = KVM_MAX_VCPU_ID;
+ r = KVM_MAX_VCPU_IDS;
break;
#ifdef CONFIG_PPC_BOOK3S_64
case KVM_CAP_PPC_GET_SMMU_INFO:
@@ -671,6 +693,36 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
(hv_enabled && cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST));
break;
#endif
+#if defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE)
+ case KVM_CAP_PPC_SECURE_GUEST:
+ r = hv_enabled && kvmppc_hv_ops->enable_svm &&
+ !kvmppc_hv_ops->enable_svm(NULL);
+ break;
+ case KVM_CAP_PPC_DAWR1:
+ r = !!(hv_enabled && kvmppc_hv_ops->enable_dawr1 &&
+ !kvmppc_hv_ops->enable_dawr1(NULL));
+ break;
+ case KVM_CAP_PPC_RPT_INVALIDATE:
+ r = 1;
+ break;
+#endif
+ case KVM_CAP_PPC_AIL_MODE_3:
+ r = 0;
+ /*
+ * KVM PR, POWER7, and some POWER9s don't support AIL=3 mode.
+ * The POWER9s can support it if the guest runs in hash mode,
+ * but QEMU doesn't necessarily query the capability in time.
+ */
+ if (hv_enabled) {
+ if (kvmhv_on_pseries()) {
+ if (pseries_reloc_on_exception())
+ r = 1;
+ } else if (cpu_has_feature(CPU_FTR_ARCH_207S) &&
+ !cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG)) {
+ r = 1;
+ }
+ }
+ break;
default:
r = 0;
break;
@@ -685,33 +737,25 @@ long kvm_arch_dev_ioctl(struct file *filp,
return -EINVAL;
}
-void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
- struct kvm_memory_slot *dont)
-{
- kvmppc_core_free_memslot(kvm, free, dont);
-}
-
-int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
- unsigned long npages)
+void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
{
- return kvmppc_core_create_memslot(kvm, slot, npages);
+ kvmppc_core_free_memslot(kvm, slot);
}
int kvm_arch_prepare_memory_region(struct kvm *kvm,
- struct kvm_memory_slot *memslot,
- const struct kvm_userspace_memory_region *mem,
+ const struct kvm_memory_slot *old,
+ struct kvm_memory_slot *new,
enum kvm_mr_change change)
{
- return kvmppc_core_prepare_memory_region(kvm, memslot, mem);
+ return kvmppc_core_prepare_memory_region(kvm, old, new, change);
}
void kvm_arch_commit_memory_region(struct kvm *kvm,
- const struct kvm_userspace_memory_region *mem,
- const struct kvm_memory_slot *old,
+ struct kvm_memory_slot *old,
const struct kvm_memory_slot *new,
enum kvm_mr_change change)
{
- kvmppc_core_commit_memory_region(kvm, mem, old, new, change);
+ kvmppc_core_commit_memory_region(kvm, old, new, change);
}
void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
@@ -741,7 +785,6 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;
- vcpu->arch.dec_expires = get_tb();
#ifdef CONFIG_KVM_EXIT_TIMING
mutex_init(&vcpu->arch.exit_timing_lock);
@@ -754,8 +797,8 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
if (err)
goto out_vcpu_uninit;
- vcpu->arch.wqp = &vcpu->wq;
- kvmppc_create_vcpu_debugfs(vcpu, vcpu->vcpu_id);
+ rcuwait_init(&vcpu->arch.wait);
+ vcpu->arch.waitp = &vcpu->arch.wait;
return 0;
out_vcpu_uninit:
@@ -772,8 +815,6 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
/* Make sure we're not using the vcpu anymore */
hrtimer_cancel(&vcpu->arch.dec_timer);
- kvmppc_remove_vcpu_debugfs(vcpu);
-
switch (vcpu->arch.irq_type) {
case KVMPPC_IRQ_MPIC:
kvmppc_mpic_disconnect_vcpu(vcpu->arch.mpic, vcpu);
@@ -1086,7 +1127,7 @@ static inline u64 sp_to_dp(u32 fprs)
preempt_disable();
enable_kernel_fp();
- asm ("lfs%U1%X1 0,%1; stfd%U0%X0 0,%0" : "=m" (fprd) : "m" (fprs)
+ asm ("lfs%U1%X1 0,%1; stfd%U0%X0 0,%0" : "=m<>" (fprd) : "m<>" (fprs)
: "fr0");
preempt_enable();
return fprd;
@@ -1098,7 +1139,7 @@ static inline u32 dp_to_sp(u64 fprd)
preempt_disable();
enable_kernel_fp();
- asm ("lfd%U1%X1 0,%1; stfs%U0%X0 0,%0" : "=m" (fprs) : "m" (fprd)
+ asm ("lfd%U1%X1 0,%1; stfs%U0%X0 0,%0" : "=m<>" (fprs) : "m<>" (fprd)
: "fr0");
preempt_enable();
return fprs;
@@ -1109,15 +1150,13 @@ static inline u32 dp_to_sp(u64 fprd)
#define dp_to_sp(x) (x)
#endif /* CONFIG_PPC_FPU */
-static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
- struct kvm_run *run)
+static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu)
{
- u64 uninitialized_var(gpr);
+ struct kvm_run *run = vcpu->run;
+ u64 gpr;
- if (run->mmio.len > sizeof(gpr)) {
- printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len);
+ if (run->mmio.len > sizeof(gpr))
return;
- }
if (!vcpu->arch.mmio_host_swabbed) {
switch (run->mmio.len) {
@@ -1221,10 +1260,11 @@ static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
}
}
-static int __kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
+static int __kvmppc_handle_load(struct kvm_vcpu *vcpu,
unsigned int rt, unsigned int bytes,
int is_default_endian, int sign_extend)
{
+ struct kvm_run *run = vcpu->run;
int idx, ret;
bool host_swabbed;
@@ -1235,10 +1275,8 @@ static int __kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
host_swabbed = !is_default_endian;
}
- if (bytes > sizeof(run->mmio.data)) {
- printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
- run->mmio.len);
- }
+ if (bytes > sizeof(run->mmio.data))
+ return EMULATE_FAIL;
run->mmio.phys_addr = vcpu->arch.paddr_accessed;
run->mmio.len = bytes;
@@ -1258,7 +1296,7 @@ static int __kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
srcu_read_unlock(&vcpu->kvm->srcu, idx);
if (!ret) {
- kvmppc_complete_mmio_load(vcpu, run);
+ kvmppc_complete_mmio_load(vcpu);
vcpu->mmio_needed = 0;
return EMULATE_DONE;
}
@@ -1266,24 +1304,24 @@ static int __kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
return EMULATE_DO_MMIO;
}
-int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
+int kvmppc_handle_load(struct kvm_vcpu *vcpu,
unsigned int rt, unsigned int bytes,
int is_default_endian)
{
- return __kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian, 0);
+ return __kvmppc_handle_load(vcpu, rt, bytes, is_default_endian, 0);
}
EXPORT_SYMBOL_GPL(kvmppc_handle_load);
/* Same as above, but sign extends */
-int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
+int kvmppc_handle_loads(struct kvm_vcpu *vcpu,
unsigned int rt, unsigned int bytes,
int is_default_endian)
{
- return __kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian, 1);
+ return __kvmppc_handle_load(vcpu, rt, bytes, is_default_endian, 1);
}
#ifdef CONFIG_VSX
-int kvmppc_handle_vsx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
+int kvmppc_handle_vsx_load(struct kvm_vcpu *vcpu,
unsigned int rt, unsigned int bytes,
int is_default_endian, int mmio_sign_extend)
{
@@ -1294,13 +1332,13 @@ int kvmppc_handle_vsx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
return EMULATE_FAIL;
while (vcpu->arch.mmio_vsx_copy_nums) {
- emulated = __kvmppc_handle_load(run, vcpu, rt, bytes,
+ emulated = __kvmppc_handle_load(vcpu, rt, bytes,
is_default_endian, mmio_sign_extend);
if (emulated != EMULATE_DONE)
break;
- vcpu->arch.paddr_accessed += run->mmio.len;
+ vcpu->arch.paddr_accessed += vcpu->run->mmio.len;
vcpu->arch.mmio_vsx_copy_nums--;
vcpu->arch.mmio_vsx_offset++;
@@ -1309,9 +1347,10 @@ int kvmppc_handle_vsx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
}
#endif /* CONFIG_VSX */
-int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
+int kvmppc_handle_store(struct kvm_vcpu *vcpu,
u64 val, unsigned int bytes, int is_default_endian)
{
+ struct kvm_run *run = vcpu->run;
void *data = run->mmio.data;
int idx, ret;
bool host_swabbed;
@@ -1323,10 +1362,8 @@ int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
host_swabbed = !is_default_endian;
}
- if (bytes > sizeof(run->mmio.data)) {
- printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
- run->mmio.len);
- }
+ if (bytes > sizeof(run->mmio.data))
+ return EMULATE_FAIL;
run->mmio.phys_addr = vcpu->arch.paddr_accessed;
run->mmio.len = bytes;
@@ -1425,7 +1462,7 @@ static inline int kvmppc_get_vsr_data(struct kvm_vcpu *vcpu, int rs, u64 *val)
return result;
}
-int kvmppc_handle_vsx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
+int kvmppc_handle_vsx_store(struct kvm_vcpu *vcpu,
int rs, unsigned int bytes, int is_default_endian)
{
u64 val;
@@ -1441,13 +1478,13 @@ int kvmppc_handle_vsx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
if (kvmppc_get_vsr_data(vcpu, rs, &val) == -1)
return EMULATE_FAIL;
- emulated = kvmppc_handle_store(run, vcpu,
+ emulated = kvmppc_handle_store(vcpu,
val, bytes, is_default_endian);
if (emulated != EMULATE_DONE)
break;
- vcpu->arch.paddr_accessed += run->mmio.len;
+ vcpu->arch.paddr_accessed += vcpu->run->mmio.len;
vcpu->arch.mmio_vsx_copy_nums--;
vcpu->arch.mmio_vsx_offset++;
@@ -1456,19 +1493,19 @@ int kvmppc_handle_vsx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
return emulated;
}
-static int kvmppc_emulate_mmio_vsx_loadstore(struct kvm_vcpu *vcpu,
- struct kvm_run *run)
+static int kvmppc_emulate_mmio_vsx_loadstore(struct kvm_vcpu *vcpu)
{
+ struct kvm_run *run = vcpu->run;
enum emulation_result emulated = EMULATE_FAIL;
int r;
vcpu->arch.paddr_accessed += run->mmio.len;
if (!vcpu->mmio_is_write) {
- emulated = kvmppc_handle_vsx_load(run, vcpu, vcpu->arch.io_gpr,
+ emulated = kvmppc_handle_vsx_load(vcpu, vcpu->arch.io_gpr,
run->mmio.len, 1, vcpu->arch.mmio_sign_extend);
} else {
- emulated = kvmppc_handle_vsx_store(run, vcpu,
+ emulated = kvmppc_handle_vsx_store(vcpu,
vcpu->arch.io_gpr, run->mmio.len, 1);
}
@@ -1492,22 +1529,22 @@ static int kvmppc_emulate_mmio_vsx_loadstore(struct kvm_vcpu *vcpu,
#endif /* CONFIG_VSX */
#ifdef CONFIG_ALTIVEC
-int kvmppc_handle_vmx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
+int kvmppc_handle_vmx_load(struct kvm_vcpu *vcpu,
unsigned int rt, unsigned int bytes, int is_default_endian)
{
enum emulation_result emulated = EMULATE_DONE;
- if (vcpu->arch.mmio_vsx_copy_nums > 2)
+ if (vcpu->arch.mmio_vmx_copy_nums > 2)
return EMULATE_FAIL;
while (vcpu->arch.mmio_vmx_copy_nums) {
- emulated = __kvmppc_handle_load(run, vcpu, rt, bytes,
+ emulated = __kvmppc_handle_load(vcpu, rt, bytes,
is_default_endian, 0);
if (emulated != EMULATE_DONE)
break;
- vcpu->arch.paddr_accessed += run->mmio.len;
+ vcpu->arch.paddr_accessed += vcpu->run->mmio.len;
vcpu->arch.mmio_vmx_copy_nums--;
vcpu->arch.mmio_vmx_offset++;
}
@@ -1515,7 +1552,7 @@ int kvmppc_handle_vmx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
return emulated;
}
-int kvmppc_get_vmx_dword(struct kvm_vcpu *vcpu, int index, u64 *val)
+static int kvmppc_get_vmx_dword(struct kvm_vcpu *vcpu, int index, u64 *val)
{
union kvmppc_one_reg reg;
int vmx_offset = 0;
@@ -1533,7 +1570,7 @@ int kvmppc_get_vmx_dword(struct kvm_vcpu *vcpu, int index, u64 *val)
return result;
}
-int kvmppc_get_vmx_word(struct kvm_vcpu *vcpu, int index, u64 *val)
+static int kvmppc_get_vmx_word(struct kvm_vcpu *vcpu, int index, u64 *val)
{
union kvmppc_one_reg reg;
int vmx_offset = 0;
@@ -1551,7 +1588,7 @@ int kvmppc_get_vmx_word(struct kvm_vcpu *vcpu, int index, u64 *val)
return result;
}
-int kvmppc_get_vmx_hword(struct kvm_vcpu *vcpu, int index, u64 *val)
+static int kvmppc_get_vmx_hword(struct kvm_vcpu *vcpu, int index, u64 *val)
{
union kvmppc_one_reg reg;
int vmx_offset = 0;
@@ -1569,7 +1606,7 @@ int kvmppc_get_vmx_hword(struct kvm_vcpu *vcpu, int index, u64 *val)
return result;
}
-int kvmppc_get_vmx_byte(struct kvm_vcpu *vcpu, int index, u64 *val)
+static int kvmppc_get_vmx_byte(struct kvm_vcpu *vcpu, int index, u64 *val)
{
union kvmppc_one_reg reg;
int vmx_offset = 0;
@@ -1587,14 +1624,14 @@ int kvmppc_get_vmx_byte(struct kvm_vcpu *vcpu, int index, u64 *val)
return result;
}
-int kvmppc_handle_vmx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
+int kvmppc_handle_vmx_store(struct kvm_vcpu *vcpu,
unsigned int rs, unsigned int bytes, int is_default_endian)
{
u64 val = 0;
unsigned int index = rs & KVM_MMIO_REG_MASK;
enum emulation_result emulated = EMULATE_DONE;
- if (vcpu->arch.mmio_vsx_copy_nums > 2)
+ if (vcpu->arch.mmio_vmx_copy_nums > 2)
return EMULATE_FAIL;
vcpu->arch.io_gpr = rs;
@@ -1622,12 +1659,12 @@ int kvmppc_handle_vmx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
return EMULATE_FAIL;
}
- emulated = kvmppc_handle_store(run, vcpu, val, bytes,
+ emulated = kvmppc_handle_store(vcpu, val, bytes,
is_default_endian);
if (emulated != EMULATE_DONE)
break;
- vcpu->arch.paddr_accessed += run->mmio.len;
+ vcpu->arch.paddr_accessed += vcpu->run->mmio.len;
vcpu->arch.mmio_vmx_copy_nums--;
vcpu->arch.mmio_vmx_offset++;
}
@@ -1635,19 +1672,19 @@ int kvmppc_handle_vmx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
return emulated;
}
-static int kvmppc_emulate_mmio_vmx_loadstore(struct kvm_vcpu *vcpu,
- struct kvm_run *run)
+static int kvmppc_emulate_mmio_vmx_loadstore(struct kvm_vcpu *vcpu)
{
+ struct kvm_run *run = vcpu->run;
enum emulation_result emulated = EMULATE_FAIL;
int r;
vcpu->arch.paddr_accessed += run->mmio.len;
if (!vcpu->mmio_is_write) {
- emulated = kvmppc_handle_vmx_load(run, vcpu,
+ emulated = kvmppc_handle_vmx_load(vcpu,
vcpu->arch.io_gpr, run->mmio.len, 1);
} else {
- emulated = kvmppc_handle_vmx_store(run, vcpu,
+ emulated = kvmppc_handle_vmx_store(vcpu,
vcpu->arch.io_gpr, run->mmio.len, 1);
}
@@ -1767,8 +1804,9 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
return r;
}
-int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
+int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
{
+ struct kvm_run *run = vcpu->run;
int r;
vcpu_load(vcpu);
@@ -1776,7 +1814,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
if (vcpu->mmio_needed) {
vcpu->mmio_needed = 0;
if (!vcpu->mmio_is_write)
- kvmppc_complete_mmio_load(vcpu, run);
+ kvmppc_complete_mmio_load(vcpu);
#ifdef CONFIG_VSX
if (vcpu->arch.mmio_vsx_copy_nums > 0) {
vcpu->arch.mmio_vsx_copy_nums--;
@@ -1784,7 +1822,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
}
if (vcpu->arch.mmio_vsx_copy_nums > 0) {
- r = kvmppc_emulate_mmio_vsx_loadstore(vcpu, run);
+ r = kvmppc_emulate_mmio_vsx_loadstore(vcpu);
if (r == RESUME_HOST) {
vcpu->mmio_needed = 1;
goto out;
@@ -1798,7 +1836,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
}
if (vcpu->arch.mmio_vmx_copy_nums > 0) {
- r = kvmppc_emulate_mmio_vmx_loadstore(vcpu, run);
+ r = kvmppc_emulate_mmio_vmx_loadstore(vcpu);
if (r == RESUME_HOST) {
vcpu->mmio_needed = 1;
goto out;
@@ -1831,13 +1869,21 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
if (run->immediate_exit)
r = -EINTR;
else
- r = kvmppc_vcpu_run(run, vcpu);
+ r = kvmppc_vcpu_run(vcpu);
kvm_sigset_deactivate(vcpu);
#ifdef CONFIG_ALTIVEC
out:
#endif
+
+ /*
+ * We're already returning to userspace, don't pass the
+ * RESUME_HOST flags along.
+ */
+ if (r > 0)
+ r = 0;
+
vcpu_put(vcpu);
return r;
}
@@ -2037,9 +2083,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
{
struct kvm_enable_cap cap;
r = -EFAULT;
- vcpu_load(vcpu);
if (copy_from_user(&cap, argp, sizeof(cap)))
goto out;
+ vcpu_load(vcpu);
r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
vcpu_put(vcpu);
break;
@@ -2063,9 +2109,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
case KVM_DIRTY_TLB: {
struct kvm_dirty_tlb dirty;
r = -EFAULT;
- vcpu_load(vcpu);
if (copy_from_user(&dirty, argp, sizeof(dirty)))
goto out;
+ vcpu_load(vcpu);
r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty);
vcpu_put(vcpu);
break;
@@ -2176,6 +2222,20 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
r = kvm->arch.kvm_ops->enable_nested(kvm);
break;
#endif
+#if defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE)
+ case KVM_CAP_PPC_SECURE_GUEST:
+ r = -EINVAL;
+ if (!is_kvmppc_hv_enabled(kvm) || !kvm->arch.kvm_ops->enable_svm)
+ break;
+ r = kvm->arch.kvm_ops->enable_svm(kvm);
+ break;
+ case KVM_CAP_PPC_DAWR1:
+ r = -EINVAL;
+ if (!is_kvmppc_hv_enabled(kvm) || !kvm->arch.kvm_ops->enable_dawr1)
+ break;
+ r = kvm->arch.kvm_ops->enable_dawr1(kvm);
+ break;
+#endif
default:
r = -EINVAL;
break;
@@ -2436,41 +2496,37 @@ out:
return r;
}
-static unsigned long lpid_inuse[BITS_TO_LONGS(KVMPPC_NR_LPIDS)];
+static DEFINE_IDA(lpid_inuse);
static unsigned long nr_lpids;
long kvmppc_alloc_lpid(void)
{
- long lpid;
+ int lpid;
- do {
- lpid = find_first_zero_bit(lpid_inuse, KVMPPC_NR_LPIDS);
- if (lpid >= nr_lpids) {
+ /* The host LPID must always be 0 (allocation starts at 1) */
+ lpid = ida_alloc_range(&lpid_inuse, 1, nr_lpids - 1, GFP_KERNEL);
+ if (lpid < 0) {
+ if (lpid == -ENOMEM)
+ pr_err("%s: Out of memory\n", __func__);
+ else
pr_err("%s: No LPIDs free\n", __func__);
- return -ENOMEM;
- }
- } while (test_and_set_bit(lpid, lpid_inuse));
+ return -ENOMEM;
+ }
return lpid;
}
EXPORT_SYMBOL_GPL(kvmppc_alloc_lpid);
-void kvmppc_claim_lpid(long lpid)
-{
- set_bit(lpid, lpid_inuse);
-}
-EXPORT_SYMBOL_GPL(kvmppc_claim_lpid);
-
void kvmppc_free_lpid(long lpid)
{
- clear_bit(lpid, lpid_inuse);
+ ida_free(&lpid_inuse, lpid);
}
EXPORT_SYMBOL_GPL(kvmppc_free_lpid);
+/* nr_lpids_param includes the host LPID */
void kvmppc_init_lpid(unsigned long nr_lpids_param)
{
- nr_lpids = min_t(unsigned long, KVMPPC_NR_LPIDS, nr_lpids_param);
- memset(lpid_inuse, 0, sizeof(lpid_inuse));
+ nr_lpids = nr_lpids_param;
}
EXPORT_SYMBOL_GPL(kvmppc_init_lpid);
@@ -2480,3 +2536,16 @@ int kvm_arch_init(void *opaque)
}
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_ppc_instr);
+
+void kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu, struct dentry *debugfs_dentry)
+{
+ if (vcpu->kvm->arch.kvm_ops->create_vcpu_debugfs)
+ vcpu->kvm->arch.kvm_ops->create_vcpu_debugfs(vcpu, debugfs_dentry);
+}
+
+int kvm_arch_create_vm_debugfs(struct kvm *kvm)
+{
+ if (kvm->arch.kvm_ops->create_vm_debugfs)
+ kvm->arch.kvm_ops->create_vm_debugfs(kvm);
+ return 0;
+}
diff --git a/arch/powerpc/kvm/timing.c b/arch/powerpc/kvm/timing.c
index bfe4f106cffc..25071331f8c1 100644
--- a/arch/powerpc/kvm/timing.c
+++ b/arch/powerpc/kvm/timing.c
@@ -204,30 +204,10 @@ static const struct file_operations kvmppc_exit_timing_fops = {
.release = single_release,
};
-void kvmppc_create_vcpu_debugfs(struct kvm_vcpu *vcpu, unsigned int id)
+int kvmppc_create_vcpu_debugfs_e500(struct kvm_vcpu *vcpu,
+ struct dentry *debugfs_dentry)
{
- static char dbg_fname[50];
- struct dentry *debugfs_file;
-
- snprintf(dbg_fname, sizeof(dbg_fname), "vm%u_vcpu%u_timing",
- current->pid, id);
- debugfs_file = debugfs_create_file(dbg_fname, 0666,
- kvm_debugfs_dir, vcpu,
- &kvmppc_exit_timing_fops);
-
- if (!debugfs_file) {
- printk(KERN_ERR"%s: error creating debugfs file %s\n",
- __func__, dbg_fname);
- return;
- }
-
- vcpu->arch.debugfs_exit_timing = debugfs_file;
-}
-
-void kvmppc_remove_vcpu_debugfs(struct kvm_vcpu *vcpu)
-{
- if (vcpu->arch.debugfs_exit_timing) {
- debugfs_remove(vcpu->arch.debugfs_exit_timing);
- vcpu->arch.debugfs_exit_timing = NULL;
- }
+ debugfs_create_file("timing", 0666, debugfs_dentry,
+ vcpu, &kvmppc_exit_timing_fops);
+ return 0;
}
diff --git a/arch/powerpc/kvm/timing.h b/arch/powerpc/kvm/timing.h
index ace65f9fed30..45817ab82bb4 100644
--- a/arch/powerpc/kvm/timing.h
+++ b/arch/powerpc/kvm/timing.h
@@ -10,13 +10,12 @@
#define __POWERPC_KVM_EXITTIMING_H__
#include <linux/kvm_host.h>
-#include <asm/kvm_host.h>
#ifdef CONFIG_KVM_EXIT_TIMING
void kvmppc_init_timing_stats(struct kvm_vcpu *vcpu);
void kvmppc_update_timing_stats(struct kvm_vcpu *vcpu);
-void kvmppc_create_vcpu_debugfs(struct kvm_vcpu *vcpu, unsigned int id);
-void kvmppc_remove_vcpu_debugfs(struct kvm_vcpu *vcpu);
+int kvmppc_create_vcpu_debugfs_e500(struct kvm_vcpu *vcpu,
+ struct dentry *debugfs_dentry);
static inline void kvmppc_set_exit_type(struct kvm_vcpu *vcpu, int type)
{
@@ -27,9 +26,11 @@ static inline void kvmppc_set_exit_type(struct kvm_vcpu *vcpu, int type)
/* if exit timing is not configured there is no need to build the c file */
static inline void kvmppc_init_timing_stats(struct kvm_vcpu *vcpu) {}
static inline void kvmppc_update_timing_stats(struct kvm_vcpu *vcpu) {}
-static inline void kvmppc_create_vcpu_debugfs(struct kvm_vcpu *vcpu,
- unsigned int id) {}
-static inline void kvmppc_remove_vcpu_debugfs(struct kvm_vcpu *vcpu) {}
+static inline int kvmppc_create_vcpu_debugfs_e500(struct kvm_vcpu *vcpu,
+ struct dentry *debugfs_dentry)
+{
+ return 0;
+}
static inline void kvmppc_set_exit_type(struct kvm_vcpu *vcpu, int type) {}
#endif /* CONFIG_KVM_EXIT_TIMING */
diff --git a/arch/powerpc/kvm/tm.S b/arch/powerpc/kvm/tm.S
index 3bf17c854be4..2158f61e317f 100644
--- a/arch/powerpc/kvm/tm.S
+++ b/arch/powerpc/kvm/tm.S
@@ -110,7 +110,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_HV_ASSIST)
mtmsrd r2, 1
/* Reload TOC pointer. */
- ld r2, PACATOC(r13)
+ LOAD_PACA_TOC()
/* Save all but r0-r2, r9 & r13 */
reg = 3
diff --git a/arch/powerpc/kvm/trace_booke.h b/arch/powerpc/kvm/trace_booke.h
index 3837842986aa..eff6e82dbcd4 100644
--- a/arch/powerpc/kvm/trace_booke.h
+++ b/arch/powerpc/kvm/trace_booke.h
@@ -69,21 +69,6 @@ TRACE_EVENT(kvm_exit,
)
);
-TRACE_EVENT(kvm_unmap_hva,
- TP_PROTO(unsigned long hva),
- TP_ARGS(hva),
-
- TP_STRUCT__entry(
- __field( unsigned long, hva )
- ),
-
- TP_fast_assign(
- __entry->hva = hva;
- ),
-
- TP_printk("unmap hva 0x%lx\n", __entry->hva)
-);
-
TRACE_EVENT(kvm_booke206_stlb_write,
TP_PROTO(__u32 mas0, __u32 mas8, __u32 mas1, __u64 mas2, __u64 mas7_3),
TP_ARGS(mas0, mas8, mas1, mas2, mas7_3),
diff --git a/arch/powerpc/kvm/trace_hv.h b/arch/powerpc/kvm/trace_hv.h
index 8a1e3b0047f1..8d57c8428531 100644
--- a/arch/powerpc/kvm/trace_hv.h
+++ b/arch/powerpc/kvm/trace_hv.h
@@ -89,11 +89,12 @@
{H_CREATE_RPT, "H_CREATE_RPT"}, \
{H_REMOVE_RPT, "H_REMOVE_RPT"}, \
{H_REGISTER_RPAGES, "H_REGISTER_RPAGES"}, \
- {H_DISABLE_AND_GETC, "H_DISABLE_AND_GETC"}, \
+ {H_DISABLE_AND_GET, "H_DISABLE_AND_GET"}, \
{H_ERROR_DATA, "H_ERROR_DATA"}, \
{H_GET_HCA_INFO, "H_GET_HCA_INFO"}, \
{H_GET_PERF_COUNT, "H_GET_PERF_COUNT"}, \
{H_MANAGE_TRACE, "H_MANAGE_TRACE"}, \
+ {H_GET_CPU_CHARACTERISTICS, "H_GET_CPU_CHARACTERISTICS"}, \
{H_FREE_LOGICAL_LAN_BUFFER, "H_FREE_LOGICAL_LAN_BUFFER"}, \
{H_QUERY_INT_STATE, "H_QUERY_INT_STATE"}, \
{H_POLL_PENDING, "H_POLL_PENDING"}, \
@@ -115,6 +116,7 @@
{H_VASI_STATE, "H_VASI_STATE"}, \
{H_ENABLE_CRQ, "H_ENABLE_CRQ"}, \
{H_GET_EM_PARMS, "H_GET_EM_PARMS"}, \
+ {H_GET_ENERGY_SCALE_INFO, "H_GET_ENERGY_SCALE_INFO"}, \
{H_SET_MPP, "H_SET_MPP"}, \
{H_GET_MPP, "H_GET_MPP"}, \
{H_HOME_NODE_ASSOCIATIVITY, "H_HOME_NODE_ASSOCIATIVITY"}, \
@@ -124,7 +126,25 @@
{H_COP, "H_COP"}, \
{H_GET_MPP_X, "H_GET_MPP_X"}, \
{H_SET_MODE, "H_SET_MODE"}, \
- {H_RTAS, "H_RTAS"}
+ {H_REGISTER_PROC_TBL, "H_REGISTER_PROC_TBL"}, \
+ {H_QUERY_VAS_CAPABILITIES, "H_QUERY_VAS_CAPABILITIES"}, \
+ {H_INT_GET_SOURCE_INFO, "H_INT_GET_SOURCE_INFO"}, \
+ {H_INT_SET_SOURCE_CONFIG, "H_INT_SET_SOURCE_CONFIG"}, \
+ {H_INT_GET_QUEUE_INFO, "H_INT_GET_QUEUE_INFO"}, \
+ {H_INT_SET_QUEUE_CONFIG, "H_INT_SET_QUEUE_CONFIG"}, \
+ {H_INT_ESB, "H_INT_ESB"}, \
+ {H_INT_RESET, "H_INT_RESET"}, \
+ {H_RPT_INVALIDATE, "H_RPT_INVALIDATE"}, \
+ {H_RTAS, "H_RTAS"}, \
+ {H_LOGICAL_MEMOP, "H_LOGICAL_MEMOP"}, \
+ {H_CAS, "H_CAS"}, \
+ {H_UPDATE_DT, "H_UPDATE_DT"}, \
+ {H_GET_PERF_COUNTER_INFO, "H_GET_PERF_COUNTER_INFO"}, \
+ {H_SET_PARTITION_TABLE, "H_SET_PARTITION_TABLE"}, \
+ {H_ENTER_NESTED, "H_ENTER_NESTED"}, \
+ {H_TLB_INVALIDATE, "H_TLB_INVALIDATE"}, \
+ {H_COPY_TOFROM_GUEST, "H_COPY_TOFROM_GUEST"}
+
#define kvm_trace_symbol_kvmret \
{RESUME_GUEST, "RESUME_GUEST"}, \
@@ -408,9 +428,9 @@ TRACE_EVENT(kvmppc_run_core,
);
TRACE_EVENT(kvmppc_vcore_blocked,
- TP_PROTO(struct kvmppc_vcore *vc, int where),
+ TP_PROTO(struct kvm_vcpu *vcpu, int where),
- TP_ARGS(vc, where),
+ TP_ARGS(vcpu, where),
TP_STRUCT__entry(
__field(int, n_runnable)
@@ -420,8 +440,8 @@ TRACE_EVENT(kvmppc_vcore_blocked,
),
TP_fast_assign(
- __entry->runner_vcpu = vc->runner->vcpu_id;
- __entry->n_runnable = vc->n_runnable;
+ __entry->runner_vcpu = vcpu->vcpu_id;
+ __entry->n_runnable = vcpu->arch.vcore->n_runnable;
__entry->where = where;
__entry->tgid = current->tgid;
),
@@ -472,9 +492,9 @@ TRACE_EVENT(kvmppc_run_vcpu_enter,
);
TRACE_EVENT(kvmppc_run_vcpu_exit,
- TP_PROTO(struct kvm_vcpu *vcpu, struct kvm_run *run),
+ TP_PROTO(struct kvm_vcpu *vcpu),
- TP_ARGS(vcpu, run),
+ TP_ARGS(vcpu),
TP_STRUCT__entry(
__field(int, vcpu_id)
@@ -484,7 +504,7 @@ TRACE_EVENT(kvmppc_run_vcpu_exit,
TP_fast_assign(
__entry->vcpu_id = vcpu->vcpu_id;
- __entry->exit = run->exit_reason;
+ __entry->exit = vcpu->run->exit_reason;
__entry->ret = vcpu->arch.ret;
),
diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile
index b8de3be10eb4..8560c912186d 100644
--- a/arch/powerpc/lib/Makefile
+++ b/arch/powerpc/lib/Makefile
@@ -5,19 +5,30 @@
ccflags-$(CONFIG_PPC64) := $(NO_MINIMAL_TOC)
+CFLAGS_code-patching.o += -fno-stack-protector
+CFLAGS_feature-fixups.o += -fno-stack-protector
+
CFLAGS_REMOVE_code-patching.o = $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_feature-fixups.o = $(CC_FLAGS_FTRACE)
KASAN_SANITIZE_code-patching.o := n
KASAN_SANITIZE_feature-fixups.o := n
+# restart_table.o contains functions called in the NMI interrupt path
+# which can be in real mode. Disable KASAN.
+KASAN_SANITIZE_restart_table.o := n
ifdef CONFIG_KASAN
CFLAGS_code-patching.o += -DDISABLE_BRANCH_PROFILING
CFLAGS_feature-fixups.o += -DDISABLE_BRANCH_PROFILING
endif
+CFLAGS_code-patching.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
+CFLAGS_feature-fixups.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
+
obj-y += alloc.o code-patching.o feature-fixups.o pmem.o
+obj-$(CONFIG_CODE_PATCHING_SELFTEST) += test-code-patching.o
+
ifndef CONFIG_KASAN
obj-y += string.o memcmp_$(BITS).o
obj-$(CONFIG_PPC32) += strlen_32.o
@@ -31,17 +42,20 @@ obj-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o
# 64-bit linker creates .sfpr on demand for final link (vmlinux),
# so it is only needed for modules, and only for older linkers which
# do not support --save-restore-funcs
-ifeq ($(call ld-ifversion, -lt, 225000000, y),y)
+ifeq ($(call ld-ifversion, -lt, 22500, y),y)
extra-$(CONFIG_PPC64) += crtsavres.o
endif
obj-$(CONFIG_PPC_BOOK3S_64) += copyuser_power7.o copypage_power7.o \
- memcpy_power7.o
+ memcpy_power7.o restart_table.o
obj64-y += copypage_64.o copyuser_64.o mem_64.o hweight_64.o \
- memcpy_64.o memcpy_mcsafe_64.o
+ memcpy_64.o copy_mc_64.o
+ifndef CONFIG_PPC_QUEUED_SPINLOCKS
obj64-$(CONFIG_SMP) += locks.o
+endif
+
obj64-$(CONFIG_ALTIVEC) += vmx-helper.o
obj64-$(CONFIG_KPROBES_SANITY_TEST) += test_emulate_step.o \
test_emulate_step_exec_instr.o
@@ -59,5 +73,7 @@ obj-$(CONFIG_FTR_FIXUP_SELFTEST) += feature-fixups-test.o
obj-$(CONFIG_ALTIVEC) += xor_vmx.o xor_vmx_glue.o
CFLAGS_xor_vmx.o += -maltivec $(call cc-option,-mabi=altivec)
+# Enable <altivec.h>
+CFLAGS_xor_vmx.o += -isystem $(shell $(CC) -print-file-name=include)
obj-$(CONFIG_PPC64) += $(obj64-y)
diff --git a/arch/powerpc/lib/checksum_32.S b/arch/powerpc/lib/checksum_32.S
index ecd150dc3ed9..4541e8e29467 100644
--- a/arch/powerpc/lib/checksum_32.S
+++ b/arch/powerpc/lib/checksum_32.S
@@ -78,12 +78,10 @@ EXPORT_SYMBOL(__csum_partial)
/*
* Computes the checksum of a memory block at src, length len,
- * and adds in "sum" (32-bit), while copying the block to dst.
- * If an access exception occurs on src or dst, it stores -EFAULT
- * to *src_err or *dst_err respectively, and (for an error on
- * src) zeroes the rest of dst.
+ * and adds in 0xffffffff, while copying the block to dst.
+ * If an access exception occurs it returns zero.
*
- * csum_partial_copy_generic(src, dst, len, sum, src_err, dst_err)
+ * csum_partial_copy_generic(src, dst, len)
*/
#define CSUM_COPY_16_BYTES_WITHEX(n) \
8 ## n ## 0: \
@@ -108,30 +106,24 @@ EXPORT_SYMBOL(__csum_partial)
adde r12,r12,r10
#define CSUM_COPY_16_BYTES_EXCODE(n) \
- EX_TABLE(8 ## n ## 0b, src_error); \
- EX_TABLE(8 ## n ## 1b, src_error); \
- EX_TABLE(8 ## n ## 2b, src_error); \
- EX_TABLE(8 ## n ## 3b, src_error); \
- EX_TABLE(8 ## n ## 4b, dst_error); \
- EX_TABLE(8 ## n ## 5b, dst_error); \
- EX_TABLE(8 ## n ## 6b, dst_error); \
- EX_TABLE(8 ## n ## 7b, dst_error);
+ EX_TABLE(8 ## n ## 0b, fault); \
+ EX_TABLE(8 ## n ## 1b, fault); \
+ EX_TABLE(8 ## n ## 2b, fault); \
+ EX_TABLE(8 ## n ## 3b, fault); \
+ EX_TABLE(8 ## n ## 4b, fault); \
+ EX_TABLE(8 ## n ## 5b, fault); \
+ EX_TABLE(8 ## n ## 6b, fault); \
+ EX_TABLE(8 ## n ## 7b, fault);
.text
- .stabs "arch/powerpc/lib/",N_SO,0,0,0f
- .stabs "checksum_32.S",N_SO,0,0,0f
-0:
CACHELINE_BYTES = L1_CACHE_BYTES
LG_CACHELINE_BYTES = L1_CACHE_SHIFT
CACHELINE_MASK = (L1_CACHE_BYTES-1)
_GLOBAL(csum_partial_copy_generic)
- stwu r1,-16(r1)
- stw r7,12(r1)
- stw r8,8(r1)
-
- addic r12,r6,0
+ li r12,-1
+ addic r0,r0,0 /* clear carry */
addi r6,r4,-4
neg r0,r4
addi r4,r3,-4
@@ -241,39 +233,23 @@ _GLOBAL(csum_partial_copy_generic)
slwi r0,r0,8
adde r12,r12,r0
66: addze r3,r12
- addi r1,r1,16
beqlr+ cr7
rlwinm r3,r3,8,0,31 /* odd destination address: rotate one byte */
blr
-/* read fault */
-src_error:
- lwz r7,12(r1)
- addi r1,r1,16
- cmpwi cr0,r7,0
- beqlr
- li r0,-EFAULT
- stw r0,0(r7)
- blr
-/* write fault */
-dst_error:
- lwz r8,8(r1)
- addi r1,r1,16
- cmpwi cr0,r8,0
- beqlr
- li r0,-EFAULT
- stw r0,0(r8)
+fault:
+ li r3,0
blr
- EX_TABLE(70b, src_error);
- EX_TABLE(71b, dst_error);
- EX_TABLE(72b, src_error);
- EX_TABLE(73b, dst_error);
- EX_TABLE(54b, dst_error);
+ EX_TABLE(70b, fault);
+ EX_TABLE(71b, fault);
+ EX_TABLE(72b, fault);
+ EX_TABLE(73b, fault);
+ EX_TABLE(54b, fault);
/*
* this stuff handles faults in the cacheline loop and branches to either
- * src_error (if in read part) or dst_error (if in write part)
+ * fault (if in read part) or fault (if in write part)
*/
CSUM_COPY_16_BYTES_EXCODE(0)
#if L1_CACHE_BYTES >= 32
@@ -290,12 +266,12 @@ dst_error:
#endif
#endif
- EX_TABLE(30b, src_error);
- EX_TABLE(31b, dst_error);
- EX_TABLE(40b, src_error);
- EX_TABLE(41b, dst_error);
- EX_TABLE(50b, src_error);
- EX_TABLE(51b, dst_error);
+ EX_TABLE(30b, fault);
+ EX_TABLE(31b, fault);
+ EX_TABLE(40b, fault);
+ EX_TABLE(41b, fault);
+ EX_TABLE(50b, fault);
+ EX_TABLE(51b, fault);
EXPORT_SYMBOL(csum_partial_copy_generic)
diff --git a/arch/powerpc/lib/checksum_64.S b/arch/powerpc/lib/checksum_64.S
index 514978f908d4..98ff51bd2f7d 100644
--- a/arch/powerpc/lib/checksum_64.S
+++ b/arch/powerpc/lib/checksum_64.S
@@ -182,34 +182,33 @@ EXPORT_SYMBOL(__csum_partial)
.macro srcnr
100:
- EX_TABLE(100b,.Lsrc_error_nr)
+ EX_TABLE(100b,.Lerror_nr)
.endm
.macro source
150:
- EX_TABLE(150b,.Lsrc_error)
+ EX_TABLE(150b,.Lerror)
.endm
.macro dstnr
200:
- EX_TABLE(200b,.Ldest_error_nr)
+ EX_TABLE(200b,.Lerror_nr)
.endm
.macro dest
250:
- EX_TABLE(250b,.Ldest_error)
+ EX_TABLE(250b,.Lerror)
.endm
/*
* Computes the checksum of a memory block at src, length len,
- * and adds in "sum" (32-bit), while copying the block to dst.
- * If an access exception occurs on src or dst, it stores -EFAULT
- * to *src_err or *dst_err respectively. The caller must take any action
- * required in this case (zeroing memory, recalculating partial checksum etc).
+ * and adds in 0xffffffff (32-bit), while copying the block to dst.
+ * If an access exception occurs, it returns 0.
*
- * csum_partial_copy_generic(r3=src, r4=dst, r5=len, r6=sum, r7=src_err, r8=dst_err)
+ * csum_partial_copy_generic(r3=src, r4=dst, r5=len)
*/
_GLOBAL(csum_partial_copy_generic)
+ li r6,-1
addic r0,r6,0 /* clear carry */
srdi. r6,r5,3 /* less than 8 bytes? */
@@ -401,29 +400,15 @@ dstnr; stb r6,0(r4)
srdi r3,r3,32
blr
-.Lsrc_error:
+.Lerror:
ld r14,STK_REG(R14)(r1)
ld r15,STK_REG(R15)(r1)
ld r16,STK_REG(R16)(r1)
addi r1,r1,STACKFRAMESIZE
-.Lsrc_error_nr:
- cmpdi 0,r7,0
- beqlr
- li r6,-EFAULT
- stw r6,0(r7)
+.Lerror_nr:
+ li r3,0
blr
-.Ldest_error:
- ld r14,STK_REG(R14)(r1)
- ld r15,STK_REG(R15)(r1)
- ld r16,STK_REG(R16)(r1)
- addi r1,r1,STACKFRAMESIZE
-.Ldest_error_nr:
- cmpdi 0,r8,0
- beqlr
- li r6,-EFAULT
- stw r6,0(r8)
- blr
EXPORT_SYMBOL(csum_partial_copy_generic)
/*
diff --git a/arch/powerpc/lib/checksum_wrappers.c b/arch/powerpc/lib/checksum_wrappers.c
index fabe4db28726..1a14c8780278 100644
--- a/arch/powerpc/lib/checksum_wrappers.c
+++ b/arch/powerpc/lib/checksum_wrappers.c
@@ -12,83 +12,28 @@
#include <linux/uaccess.h>
__wsum csum_and_copy_from_user(const void __user *src, void *dst,
- int len, __wsum sum, int *err_ptr)
+ int len)
{
- unsigned int csum;
+ __wsum csum;
- might_sleep();
- allow_read_from_user(src, len);
+ if (unlikely(!user_read_access_begin(src, len)))
+ return 0;
- *err_ptr = 0;
+ csum = csum_partial_copy_generic((void __force *)src, dst, len);
- if (!len) {
- csum = 0;
- goto out;
- }
-
- if (unlikely((len < 0) || !access_ok(src, len))) {
- *err_ptr = -EFAULT;
- csum = (__force unsigned int)sum;
- goto out;
- }
-
- csum = csum_partial_copy_generic((void __force *)src, dst,
- len, sum, err_ptr, NULL);
-
- if (unlikely(*err_ptr)) {
- int missing = __copy_from_user(dst, src, len);
-
- if (missing) {
- memset(dst + len - missing, 0, missing);
- *err_ptr = -EFAULT;
- } else {
- *err_ptr = 0;
- }
-
- csum = csum_partial(dst, len, sum);
- }
-
-out:
- prevent_read_from_user(src, len);
- return (__force __wsum)csum;
+ user_read_access_end();
+ return csum;
}
-EXPORT_SYMBOL(csum_and_copy_from_user);
-__wsum csum_and_copy_to_user(const void *src, void __user *dst, int len,
- __wsum sum, int *err_ptr)
+__wsum csum_and_copy_to_user(const void *src, void __user *dst, int len)
{
- unsigned int csum;
-
- might_sleep();
- allow_write_to_user(dst, len);
-
- *err_ptr = 0;
-
- if (!len) {
- csum = 0;
- goto out;
- }
-
- if (unlikely((len < 0) || !access_ok(dst, len))) {
- *err_ptr = -EFAULT;
- csum = -1; /* invalid checksum */
- goto out;
- }
-
- csum = csum_partial_copy_generic(src, (void __force *)dst,
- len, sum, NULL, err_ptr);
+ __wsum csum;
- if (unlikely(*err_ptr)) {
- csum = csum_partial(src, len, sum);
+ if (unlikely(!user_write_access_begin(dst, len)))
+ return 0;
- if (copy_to_user(dst, src, len)) {
- *err_ptr = -EFAULT;
- csum = -1; /* invalid checksum */
- }
- }
+ csum = csum_partial_copy_generic(src, (void __force *)dst, len);
-out:
- prevent_write_to_user(dst, len);
- return (__force __wsum)csum;
+ user_write_access_end();
+ return csum;
}
-EXPORT_SYMBOL(csum_and_copy_to_user);
diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c
index 3345f039a876..ad0cf3108dd0 100644
--- a/arch/powerpc/lib/code-patching.c
+++ b/arch/powerpc/lib/code-patching.c
@@ -3,37 +3,40 @@
* Copyright 2008 Michael Ellerman, IBM Corporation.
*/
-#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
-#include <linux/mm.h>
#include <linux/cpuhotplug.h>
-#include <linux/slab.h>
#include <linux/uaccess.h>
+#include <linux/jump_label.h>
-#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <asm/code-patching.h>
-#include <asm/setup.h>
+#include <asm/inst.h>
-static int __patch_instruction(unsigned int *exec_addr, unsigned int instr,
- unsigned int *patch_addr)
+static int __patch_instruction(u32 *exec_addr, ppc_inst_t instr, u32 *patch_addr)
{
- int err = 0;
+ if (!ppc_inst_prefixed(instr)) {
+ u32 val = ppc_inst_val(instr);
- __put_user_asm(instr, patch_addr, err, "stw");
- if (err)
- return err;
+ __put_kernel_nofault(patch_addr, &val, u32, failed);
+ } else {
+ u64 val = ppc_inst_as_ulong(instr);
+
+ __put_kernel_nofault(patch_addr, &val, u64, failed);
+ }
asm ("dcbst 0, %0; sync; icbi 0,%1; sync; isync" :: "r" (patch_addr),
"r" (exec_addr));
return 0;
+
+failed:
+ return -EPERM;
}
-int raw_patch_instruction(unsigned int *addr, unsigned int instr)
+int raw_patch_instruction(u32 *addr, ppc_inst_t instr)
{
return __patch_instruction(addr, instr, addr);
}
@@ -41,9 +44,14 @@ int raw_patch_instruction(unsigned int *addr, unsigned int instr)
#ifdef CONFIG_STRICT_KERNEL_RWX
static DEFINE_PER_CPU(struct vm_struct *, text_poke_area);
+static int map_patch_area(void *addr, unsigned long text_poke_addr);
+static void unmap_patch_area(unsigned long addr);
+
static int text_area_cpu_up(unsigned int cpu)
{
struct vm_struct *area;
+ unsigned long addr;
+ int err;
area = get_vm_area(PAGE_SIZE, VM_ALLOC);
if (!area) {
@@ -51,6 +59,15 @@ static int text_area_cpu_up(unsigned int cpu)
cpu);
return -1;
}
+
+ // Map/unmap the area to ensure all page tables are pre-allocated
+ addr = (unsigned long)area->addr;
+ err = map_patch_area(empty_zero_page, addr);
+ if (err)
+ return err;
+
+ unmap_patch_area(addr);
+
this_cpu_write(text_poke_area, area);
return 0;
@@ -62,177 +79,161 @@ static int text_area_cpu_down(unsigned int cpu)
return 0;
}
+static __ro_after_init DEFINE_STATIC_KEY_FALSE(poking_init_done);
+
/*
- * Run as a late init call. This allows all the boot time patching to be done
- * simply by patching the code, and then we're called here prior to
- * mark_rodata_ro(), which happens after all init calls are run. Although
- * BUG_ON() is rude, in this case it should only happen if ENOMEM, and we judge
- * it as being preferable to a kernel that will crash later when someone tries
- * to use patch_instruction().
+ * Although BUG_ON() is rude, in this case it should only happen if ENOMEM, and
+ * we judge it as being preferable to a kernel that will crash later when
+ * someone tries to use patch_instruction().
*/
-static int __init setup_text_poke_area(void)
+void __init poking_init(void)
{
BUG_ON(!cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
"powerpc/text_poke:online", text_area_cpu_up,
text_area_cpu_down));
+ static_branch_enable(&poking_init_done);
+}
- return 0;
+static unsigned long get_patch_pfn(void *addr)
+{
+ if (IS_ENABLED(CONFIG_MODULES) && is_vmalloc_or_module_addr(addr))
+ return vmalloc_to_pfn(addr);
+ else
+ return __pa_symbol(addr) >> PAGE_SHIFT;
}
-late_initcall(setup_text_poke_area);
/*
* This can be called for kernel text or a module.
*/
static int map_patch_area(void *addr, unsigned long text_poke_addr)
{
- unsigned long pfn;
- int err;
-
- if (is_vmalloc_addr(addr))
- pfn = vmalloc_to_pfn(addr);
- else
- pfn = __pa_symbol(addr) >> PAGE_SHIFT;
-
- err = map_kernel_page(text_poke_addr, (pfn << PAGE_SHIFT), PAGE_KERNEL);
-
- pr_devel("Mapped addr %lx with pfn %lx:%d\n", text_poke_addr, pfn, err);
- if (err)
- return -1;
+ unsigned long pfn = get_patch_pfn(addr);
- return 0;
+ return map_kernel_page(text_poke_addr, (pfn << PAGE_SHIFT), PAGE_KERNEL);
}
-static inline int unmap_patch_area(unsigned long addr)
+static void unmap_patch_area(unsigned long addr)
{
pte_t *ptep;
pmd_t *pmdp;
pud_t *pudp;
+ p4d_t *p4dp;
pgd_t *pgdp;
pgdp = pgd_offset_k(addr);
- if (unlikely(!pgdp))
- return -EINVAL;
+ if (WARN_ON(pgd_none(*pgdp)))
+ return;
+
+ p4dp = p4d_offset(pgdp, addr);
+ if (WARN_ON(p4d_none(*p4dp)))
+ return;
- pudp = pud_offset(pgdp, addr);
- if (unlikely(!pudp))
- return -EINVAL;
+ pudp = pud_offset(p4dp, addr);
+ if (WARN_ON(pud_none(*pudp)))
+ return;
pmdp = pmd_offset(pudp, addr);
- if (unlikely(!pmdp))
- return -EINVAL;
+ if (WARN_ON(pmd_none(*pmdp)))
+ return;
ptep = pte_offset_kernel(pmdp, addr);
- if (unlikely(!ptep))
- return -EINVAL;
-
- pr_devel("clearing mm %p, pte %p, addr %lx\n", &init_mm, ptep, addr);
+ if (WARN_ON(pte_none(*ptep)))
+ return;
/*
* In hash, pte_clear flushes the tlb, in radix, we have to
*/
pte_clear(&init_mm, addr, ptep);
flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
+}
- return 0;
+static int __do_patch_instruction(u32 *addr, ppc_inst_t instr)
+{
+ int err;
+ u32 *patch_addr;
+ unsigned long text_poke_addr;
+ pte_t *pte;
+ unsigned long pfn = get_patch_pfn(addr);
+
+ text_poke_addr = (unsigned long)__this_cpu_read(text_poke_area)->addr & PAGE_MASK;
+ patch_addr = (u32 *)(text_poke_addr + offset_in_page(addr));
+
+ pte = virt_to_kpte(text_poke_addr);
+ __set_pte_at(&init_mm, text_poke_addr, pte, pfn_pte(pfn, PAGE_KERNEL), 0);
+ /* See ptesync comment in radix__set_pte_at() */
+ if (radix_enabled())
+ asm volatile("ptesync": : :"memory");
+
+ err = __patch_instruction(addr, instr, patch_addr);
+
+ pte_clear(&init_mm, text_poke_addr, pte);
+ flush_tlb_kernel_range(text_poke_addr, text_poke_addr + PAGE_SIZE);
+
+ return err;
}
-static int do_patch_instruction(unsigned int *addr, unsigned int instr)
+static int do_patch_instruction(u32 *addr, ppc_inst_t instr)
{
int err;
- unsigned int *patch_addr = NULL;
unsigned long flags;
- unsigned long text_poke_addr;
- unsigned long kaddr = (unsigned long)addr;
/*
* During early early boot patch_instruction is called
* when text_poke_area is not ready, but we still need
* to allow patching. We just do the plain old patching
*/
- if (!this_cpu_read(text_poke_area))
+ if (!static_branch_likely(&poking_init_done))
return raw_patch_instruction(addr, instr);
local_irq_save(flags);
-
- text_poke_addr = (unsigned long)__this_cpu_read(text_poke_area)->addr;
- if (map_patch_area(addr, text_poke_addr)) {
- err = -1;
- goto out;
- }
-
- patch_addr = (unsigned int *)(text_poke_addr) +
- ((kaddr & ~PAGE_MASK) / sizeof(unsigned int));
-
- __patch_instruction(addr, instr, patch_addr);
-
- err = unmap_patch_area(text_poke_addr);
- if (err)
- pr_warn("failed to unmap %lx\n", text_poke_addr);
-
-out:
+ err = __do_patch_instruction(addr, instr);
local_irq_restore(flags);
return err;
}
#else /* !CONFIG_STRICT_KERNEL_RWX */
-static int do_patch_instruction(unsigned int *addr, unsigned int instr)
+static int do_patch_instruction(u32 *addr, ppc_inst_t instr)
{
return raw_patch_instruction(addr, instr);
}
#endif /* CONFIG_STRICT_KERNEL_RWX */
-int patch_instruction(unsigned int *addr, unsigned int instr)
+__ro_after_init DEFINE_STATIC_KEY_FALSE(init_mem_is_free);
+
+int patch_instruction(u32 *addr, ppc_inst_t instr)
{
/* Make sure we aren't patching a freed init section */
- if (init_mem_is_free && init_section_contains(addr, 4)) {
- pr_debug("Skipping init section patching addr: 0x%px\n", addr);
+ if (static_branch_likely(&init_mem_is_free) && init_section_contains(addr, 4))
return 0;
- }
+
return do_patch_instruction(addr, instr);
}
NOKPROBE_SYMBOL(patch_instruction);
-int patch_branch(unsigned int *addr, unsigned long target, int flags)
+int patch_branch(u32 *addr, unsigned long target, int flags)
{
- return patch_instruction(addr, create_branch(addr, target, flags));
-}
+ ppc_inst_t instr;
-bool is_offset_in_branch_range(long offset)
-{
- /*
- * Powerpc branch instruction is :
- *
- * 0 6 30 31
- * +---------+----------------+---+---+
- * | opcode | LI |AA |LK |
- * +---------+----------------+---+---+
- * Where AA = 0 and LK = 0
- *
- * LI is a signed 24 bits integer. The real branch offset is computed
- * by: imm32 = SignExtend(LI:'0b00', 32);
- *
- * So the maximum forward branch should be:
- * (0x007fffff << 2) = 0x01fffffc = 0x1fffffc
- * The maximum backward branch should be:
- * (0xff800000 << 2) = 0xfe000000 = -0x2000000
- */
- return (offset >= -0x2000000 && offset <= 0x1fffffc && !(offset & 0x3));
+ if (create_branch(&instr, addr, target, flags))
+ return -ERANGE;
+
+ return patch_instruction(addr, instr);
}
/*
* Helper to check if a given instruction is a conditional branch
* Derived from the conditional checks in analyse_instr()
*/
-bool is_conditional_branch(unsigned int instr)
+bool is_conditional_branch(ppc_inst_t instr)
{
- unsigned int opcode = instr >> 26;
+ unsigned int opcode = ppc_inst_primary_opcode(instr);
if (opcode == 16) /* bc, bca, bcl, bcla */
return true;
if (opcode == 19) {
- switch ((instr >> 1) & 0x3ff) {
+ switch ((ppc_inst_val(instr) >> 1) & 0x3ff) {
case 16: /* bclr, bclrl */
case 528: /* bcctr, bcctrl */
case 560: /* bctar, bctarl */
@@ -243,10 +244,9 @@ bool is_conditional_branch(unsigned int instr)
}
NOKPROBE_SYMBOL(is_conditional_branch);
-unsigned int create_branch(const unsigned int *addr,
- unsigned long target, int flags)
+int create_cond_branch(ppc_inst_t *instr, const u32 *addr,
+ unsigned long target, int flags)
{
- unsigned int instruction;
long offset;
offset = target;
@@ -254,433 +254,81 @@ unsigned int create_branch(const unsigned int *addr,
offset = offset - (unsigned long)addr;
/* Check we can represent the target in the instruction format */
- if (!is_offset_in_branch_range(offset))
- return 0;
+ if (!is_offset_in_cond_branch_range(offset))
+ return 1;
/* Mask out the flags and target, so they don't step on each other. */
- instruction = 0x48000000 | (flags & 0x3) | (offset & 0x03FFFFFC);
+ *instr = ppc_inst(0x40000000 | (flags & 0x3FF0003) | (offset & 0xFFFC));
- return instruction;
-}
-
-unsigned int create_cond_branch(const unsigned int *addr,
- unsigned long target, int flags)
-{
- unsigned int instruction;
- long offset;
-
- offset = target;
- if (! (flags & BRANCH_ABSOLUTE))
- offset = offset - (unsigned long)addr;
-
- /* Check we can represent the target in the instruction format */
- if (offset < -0x8000 || offset > 0x7FFF || offset & 0x3)
- return 0;
-
- /* Mask out the flags and target, so they don't step on each other. */
- instruction = 0x40000000 | (flags & 0x3FF0003) | (offset & 0xFFFC);
-
- return instruction;
-}
-
-static unsigned int branch_opcode(unsigned int instr)
-{
- return (instr >> 26) & 0x3F;
-}
-
-static int instr_is_branch_iform(unsigned int instr)
-{
- return branch_opcode(instr) == 18;
-}
-
-static int instr_is_branch_bform(unsigned int instr)
-{
- return branch_opcode(instr) == 16;
+ return 0;
}
-int instr_is_relative_branch(unsigned int instr)
+int instr_is_relative_branch(ppc_inst_t instr)
{
- if (instr & BRANCH_ABSOLUTE)
+ if (ppc_inst_val(instr) & BRANCH_ABSOLUTE)
return 0;
return instr_is_branch_iform(instr) || instr_is_branch_bform(instr);
}
-int instr_is_relative_link_branch(unsigned int instr)
+int instr_is_relative_link_branch(ppc_inst_t instr)
{
- return instr_is_relative_branch(instr) && (instr & BRANCH_SET_LINK);
+ return instr_is_relative_branch(instr) && (ppc_inst_val(instr) & BRANCH_SET_LINK);
}
-static unsigned long branch_iform_target(const unsigned int *instr)
+static unsigned long branch_iform_target(const u32 *instr)
{
signed long imm;
- imm = *instr & 0x3FFFFFC;
+ imm = ppc_inst_val(ppc_inst_read(instr)) & 0x3FFFFFC;
/* If the top bit of the immediate value is set this is negative */
if (imm & 0x2000000)
imm -= 0x4000000;
- if ((*instr & BRANCH_ABSOLUTE) == 0)
+ if ((ppc_inst_val(ppc_inst_read(instr)) & BRANCH_ABSOLUTE) == 0)
imm += (unsigned long)instr;
return (unsigned long)imm;
}
-static unsigned long branch_bform_target(const unsigned int *instr)
+static unsigned long branch_bform_target(const u32 *instr)
{
signed long imm;
- imm = *instr & 0xFFFC;
+ imm = ppc_inst_val(ppc_inst_read(instr)) & 0xFFFC;
/* If the top bit of the immediate value is set this is negative */
if (imm & 0x8000)
imm -= 0x10000;
- if ((*instr & BRANCH_ABSOLUTE) == 0)
+ if ((ppc_inst_val(ppc_inst_read(instr)) & BRANCH_ABSOLUTE) == 0)
imm += (unsigned long)instr;
return (unsigned long)imm;
}
-unsigned long branch_target(const unsigned int *instr)
+unsigned long branch_target(const u32 *instr)
{
- if (instr_is_branch_iform(*instr))
+ if (instr_is_branch_iform(ppc_inst_read(instr)))
return branch_iform_target(instr);
- else if (instr_is_branch_bform(*instr))
+ else if (instr_is_branch_bform(ppc_inst_read(instr)))
return branch_bform_target(instr);
return 0;
}
-int instr_is_branch_to_addr(const unsigned int *instr, unsigned long addr)
-{
- if (instr_is_branch_iform(*instr) || instr_is_branch_bform(*instr))
- return branch_target(instr) == addr;
-
- return 0;
-}
-
-unsigned int translate_branch(const unsigned int *dest, const unsigned int *src)
+int translate_branch(ppc_inst_t *instr, const u32 *dest, const u32 *src)
{
unsigned long target;
-
target = branch_target(src);
- if (instr_is_branch_iform(*src))
- return create_branch(dest, target, *src);
- else if (instr_is_branch_bform(*src))
- return create_cond_branch(dest, target, *src);
-
- return 0;
-}
-
-#ifdef CONFIG_PPC_BOOK3E_64
-void __patch_exception(int exc, unsigned long addr)
-{
- extern unsigned int interrupt_base_book3e;
- unsigned int *ibase = &interrupt_base_book3e;
-
- /* Our exceptions vectors start with a NOP and -then- a branch
- * to deal with single stepping from userspace which stops on
- * the second instruction. Thus we need to patch the second
- * instruction of the exception, not the first one
- */
-
- patch_branch(ibase + (exc / 4) + 1, addr, 0);
-}
-#endif
-
-#ifdef CONFIG_CODE_PATCHING_SELFTEST
-
-static void __init test_trampoline(void)
-{
- asm ("nop;\n");
-}
-
-#define check(x) \
- if (!(x)) printk("code-patching: test failed at line %d\n", __LINE__);
-
-static void __init test_branch_iform(void)
-{
- unsigned int instr;
- unsigned long addr;
-
- addr = (unsigned long)&instr;
-
- /* The simplest case, branch to self, no flags */
- check(instr_is_branch_iform(0x48000000));
- /* All bits of target set, and flags */
- check(instr_is_branch_iform(0x4bffffff));
- /* High bit of opcode set, which is wrong */
- check(!instr_is_branch_iform(0xcbffffff));
- /* Middle bits of opcode set, which is wrong */
- check(!instr_is_branch_iform(0x7bffffff));
-
- /* Simplest case, branch to self with link */
- check(instr_is_branch_iform(0x48000001));
- /* All bits of targets set */
- check(instr_is_branch_iform(0x4bfffffd));
- /* Some bits of targets set */
- check(instr_is_branch_iform(0x4bff00fd));
- /* Must be a valid branch to start with */
- check(!instr_is_branch_iform(0x7bfffffd));
-
- /* Absolute branch to 0x100 */
- instr = 0x48000103;
- check(instr_is_branch_to_addr(&instr, 0x100));
- /* Absolute branch to 0x420fc */
- instr = 0x480420ff;
- check(instr_is_branch_to_addr(&instr, 0x420fc));
- /* Maximum positive relative branch, + 20MB - 4B */
- instr = 0x49fffffc;
- check(instr_is_branch_to_addr(&instr, addr + 0x1FFFFFC));
- /* Smallest negative relative branch, - 4B */
- instr = 0x4bfffffc;
- check(instr_is_branch_to_addr(&instr, addr - 4));
- /* Largest negative relative branch, - 32 MB */
- instr = 0x4a000000;
- check(instr_is_branch_to_addr(&instr, addr - 0x2000000));
-
- /* Branch to self, with link */
- instr = create_branch(&instr, addr, BRANCH_SET_LINK);
- check(instr_is_branch_to_addr(&instr, addr));
-
- /* Branch to self - 0x100, with link */
- instr = create_branch(&instr, addr - 0x100, BRANCH_SET_LINK);
- check(instr_is_branch_to_addr(&instr, addr - 0x100));
-
- /* Branch to self + 0x100, no link */
- instr = create_branch(&instr, addr + 0x100, 0);
- check(instr_is_branch_to_addr(&instr, addr + 0x100));
-
- /* Maximum relative negative offset, - 32 MB */
- instr = create_branch(&instr, addr - 0x2000000, BRANCH_SET_LINK);
- check(instr_is_branch_to_addr(&instr, addr - 0x2000000));
-
- /* Out of range relative negative offset, - 32 MB + 4*/
- instr = create_branch(&instr, addr - 0x2000004, BRANCH_SET_LINK);
- check(instr == 0);
-
- /* Out of range relative positive offset, + 32 MB */
- instr = create_branch(&instr, addr + 0x2000000, BRANCH_SET_LINK);
- check(instr == 0);
-
- /* Unaligned target */
- instr = create_branch(&instr, addr + 3, BRANCH_SET_LINK);
- check(instr == 0);
-
- /* Check flags are masked correctly */
- instr = create_branch(&instr, addr, 0xFFFFFFFC);
- check(instr_is_branch_to_addr(&instr, addr));
- check(instr == 0x48000000);
-}
-
-static void __init test_create_function_call(void)
-{
- unsigned int *iptr;
- unsigned long dest;
-
- /* Check we can create a function call */
- iptr = (unsigned int *)ppc_function_entry(test_trampoline);
- dest = ppc_function_entry(test_create_function_call);
- patch_instruction(iptr, create_branch(iptr, dest, BRANCH_SET_LINK));
- check(instr_is_branch_to_addr(iptr, dest));
-}
+ if (instr_is_branch_iform(ppc_inst_read(src)))
+ return create_branch(instr, dest, target,
+ ppc_inst_val(ppc_inst_read(src)));
+ else if (instr_is_branch_bform(ppc_inst_read(src)))
+ return create_cond_branch(instr, dest, target,
+ ppc_inst_val(ppc_inst_read(src)));
-static void __init test_branch_bform(void)
-{
- unsigned long addr;
- unsigned int *iptr, instr, flags;
-
- iptr = &instr;
- addr = (unsigned long)iptr;
-
- /* The simplest case, branch to self, no flags */
- check(instr_is_branch_bform(0x40000000));
- /* All bits of target set, and flags */
- check(instr_is_branch_bform(0x43ffffff));
- /* High bit of opcode set, which is wrong */
- check(!instr_is_branch_bform(0xc3ffffff));
- /* Middle bits of opcode set, which is wrong */
- check(!instr_is_branch_bform(0x7bffffff));
-
- /* Absolute conditional branch to 0x100 */
- instr = 0x43ff0103;
- check(instr_is_branch_to_addr(&instr, 0x100));
- /* Absolute conditional branch to 0x20fc */
- instr = 0x43ff20ff;
- check(instr_is_branch_to_addr(&instr, 0x20fc));
- /* Maximum positive relative conditional branch, + 32 KB - 4B */
- instr = 0x43ff7ffc;
- check(instr_is_branch_to_addr(&instr, addr + 0x7FFC));
- /* Smallest negative relative conditional branch, - 4B */
- instr = 0x43fffffc;
- check(instr_is_branch_to_addr(&instr, addr - 4));
- /* Largest negative relative conditional branch, - 32 KB */
- instr = 0x43ff8000;
- check(instr_is_branch_to_addr(&instr, addr - 0x8000));
-
- /* All condition code bits set & link */
- flags = 0x3ff000 | BRANCH_SET_LINK;
-
- /* Branch to self */
- instr = create_cond_branch(iptr, addr, flags);
- check(instr_is_branch_to_addr(&instr, addr));
-
- /* Branch to self - 0x100 */
- instr = create_cond_branch(iptr, addr - 0x100, flags);
- check(instr_is_branch_to_addr(&instr, addr - 0x100));
-
- /* Branch to self + 0x100 */
- instr = create_cond_branch(iptr, addr + 0x100, flags);
- check(instr_is_branch_to_addr(&instr, addr + 0x100));
-
- /* Maximum relative negative offset, - 32 KB */
- instr = create_cond_branch(iptr, addr - 0x8000, flags);
- check(instr_is_branch_to_addr(&instr, addr - 0x8000));
-
- /* Out of range relative negative offset, - 32 KB + 4*/
- instr = create_cond_branch(iptr, addr - 0x8004, flags);
- check(instr == 0);
-
- /* Out of range relative positive offset, + 32 KB */
- instr = create_cond_branch(iptr, addr + 0x8000, flags);
- check(instr == 0);
-
- /* Unaligned target */
- instr = create_cond_branch(iptr, addr + 3, flags);
- check(instr == 0);
-
- /* Check flags are masked correctly */
- instr = create_cond_branch(iptr, addr, 0xFFFFFFFC);
- check(instr_is_branch_to_addr(&instr, addr));
- check(instr == 0x43FF0000);
-}
-
-static void __init test_translate_branch(void)
-{
- unsigned long addr;
- unsigned int *p, *q;
- void *buf;
-
- buf = vmalloc(PAGE_ALIGN(0x2000000 + 1));
- check(buf);
- if (!buf)
- return;
-
- /* Simple case, branch to self moved a little */
- p = buf;
- addr = (unsigned long)p;
- patch_branch(p, addr, 0);
- check(instr_is_branch_to_addr(p, addr));
- q = p + 1;
- patch_instruction(q, translate_branch(q, p));
- check(instr_is_branch_to_addr(q, addr));
-
- /* Maximum negative case, move b . to addr + 32 MB */
- p = buf;
- addr = (unsigned long)p;
- patch_branch(p, addr, 0);
- q = buf + 0x2000000;
- patch_instruction(q, translate_branch(q, p));
- check(instr_is_branch_to_addr(p, addr));
- check(instr_is_branch_to_addr(q, addr));
- check(*q == 0x4a000000);
-
- /* Maximum positive case, move x to x - 32 MB + 4 */
- p = buf + 0x2000000;
- addr = (unsigned long)p;
- patch_branch(p, addr, 0);
- q = buf + 4;
- patch_instruction(q, translate_branch(q, p));
- check(instr_is_branch_to_addr(p, addr));
- check(instr_is_branch_to_addr(q, addr));
- check(*q == 0x49fffffc);
-
- /* Jump to x + 16 MB moved to x + 20 MB */
- p = buf;
- addr = 0x1000000 + (unsigned long)buf;
- patch_branch(p, addr, BRANCH_SET_LINK);
- q = buf + 0x1400000;
- patch_instruction(q, translate_branch(q, p));
- check(instr_is_branch_to_addr(p, addr));
- check(instr_is_branch_to_addr(q, addr));
-
- /* Jump to x + 16 MB moved to x - 16 MB + 4 */
- p = buf + 0x1000000;
- addr = 0x2000000 + (unsigned long)buf;
- patch_branch(p, addr, 0);
- q = buf + 4;
- patch_instruction(q, translate_branch(q, p));
- check(instr_is_branch_to_addr(p, addr));
- check(instr_is_branch_to_addr(q, addr));
-
-
- /* Conditional branch tests */
-
- /* Simple case, branch to self moved a little */
- p = buf;
- addr = (unsigned long)p;
- patch_instruction(p, create_cond_branch(p, addr, 0));
- check(instr_is_branch_to_addr(p, addr));
- q = p + 1;
- patch_instruction(q, translate_branch(q, p));
- check(instr_is_branch_to_addr(q, addr));
-
- /* Maximum negative case, move b . to addr + 32 KB */
- p = buf;
- addr = (unsigned long)p;
- patch_instruction(p, create_cond_branch(p, addr, 0xFFFFFFFC));
- q = buf + 0x8000;
- patch_instruction(q, translate_branch(q, p));
- check(instr_is_branch_to_addr(p, addr));
- check(instr_is_branch_to_addr(q, addr));
- check(*q == 0x43ff8000);
-
- /* Maximum positive case, move x to x - 32 KB + 4 */
- p = buf + 0x8000;
- addr = (unsigned long)p;
- patch_instruction(p, create_cond_branch(p, addr, 0xFFFFFFFC));
- q = buf + 4;
- patch_instruction(q, translate_branch(q, p));
- check(instr_is_branch_to_addr(p, addr));
- check(instr_is_branch_to_addr(q, addr));
- check(*q == 0x43ff7ffc);
-
- /* Jump to x + 12 KB moved to x + 20 KB */
- p = buf;
- addr = 0x3000 + (unsigned long)buf;
- patch_instruction(p, create_cond_branch(p, addr, BRANCH_SET_LINK));
- q = buf + 0x5000;
- patch_instruction(q, translate_branch(q, p));
- check(instr_is_branch_to_addr(p, addr));
- check(instr_is_branch_to_addr(q, addr));
-
- /* Jump to x + 8 KB moved to x - 8 KB + 4 */
- p = buf + 0x2000;
- addr = 0x4000 + (unsigned long)buf;
- patch_instruction(p, create_cond_branch(p, addr, 0));
- q = buf + 4;
- patch_instruction(q, translate_branch(q, p));
- check(instr_is_branch_to_addr(p, addr));
- check(instr_is_branch_to_addr(q, addr));
-
- /* Free the buffer we were using */
- vfree(buf);
-}
-
-static int __init test_code_patching(void)
-{
- printk(KERN_DEBUG "Running code patching self-tests ...\n");
-
- test_branch_iform();
- test_branch_bform();
- test_create_function_call();
- test_translate_branch();
-
- return 0;
+ return 1;
}
-late_initcall(test_code_patching);
-
-#endif /* CONFIG_CODE_PATCHING_SELFTEST */
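With this change translate_branch() reports success through its return value (0 on success, non-zero if the source is not a branch or the target is unreachable) and hands the translated instruction back through the instr pointer, rather than returning the instruction word itself. A minimal sketch of a caller against the signature shown above; the helper name and error handling are illustrative, not part of the patch:

static int move_branch(u32 *dest, const u32 *src)
{
	ppc_inst_t instr;

	/* Build, for location 'dest', a branch equivalent to the one at 'src'. */
	if (translate_branch(&instr, dest, src))
		return -EINVAL;	/* not a branch, or target unreachable from 'dest' */

	/* Write the translated instruction into the text at 'dest'. */
	return patch_instruction(dest, instr);
}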
diff --git a/arch/powerpc/lib/copy_32.S b/arch/powerpc/lib/copy_32.S
index a3bcf4786e4a..3e9c27c46331 100644
--- a/arch/powerpc/lib/copy_32.S
+++ b/arch/powerpc/lib/copy_32.S
@@ -57,9 +57,6 @@
EX_TABLE(8 ## n ## 7b,9 ## n ## 1b)
.text
- .stabs "arch/powerpc/lib/",N_SO,0,0,0f
- .stabs "copy_32.S",N_SO,0,0,0f
-0:
CACHELINE_BYTES = L1_CACHE_BYTES
LG_CACHELINE_BYTES = L1_CACHE_SHIFT
diff --git a/arch/powerpc/lib/memcpy_mcsafe_64.S b/arch/powerpc/lib/copy_mc_64.S
index cb882d9a6d8a..88d46c471493 100644
--- a/arch/powerpc/lib/memcpy_mcsafe_64.S
+++ b/arch/powerpc/lib/copy_mc_64.S
@@ -50,7 +50,7 @@ err3; stb r0,0(r3)
blr
-_GLOBAL(memcpy_mcsafe)
+_GLOBAL(copy_mc_generic)
mr r7,r5
cmpldi r5,16
blt .Lshort_copy
@@ -239,4 +239,4 @@ err1; stb r0,0(r3)
15: li r3,0
blr
-EXPORT_SYMBOL_GPL(memcpy_mcsafe);
+EXPORT_SYMBOL_GPL(copy_mc_generic);
diff --git a/arch/powerpc/lib/copypage_64.S b/arch/powerpc/lib/copypage_64.S
index d1091b5ee5da..6812cb19d04a 100644
--- a/arch/powerpc/lib/copypage_64.S
+++ b/arch/powerpc/lib/copypage_64.S
@@ -9,11 +9,6 @@
#include <asm/export.h>
#include <asm/feature-fixups.h>
- .section ".toc","aw"
-PPC64_CACHES:
- .tc ppc64_caches[TC],ppc64_caches
- .section ".text"
-
_GLOBAL_TOC(copy_page)
BEGIN_FTR_SECTION
lis r5,PAGE_SIZE@h
@@ -24,7 +19,7 @@ FTR_SECTION_ELSE
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_VMX_COPY)
ori r5,r5,PAGE_SIZE@l
BEGIN_FTR_SECTION
- ld r10,PPC64_CACHES@toc(r2)
+ LOAD_REG_ADDR(r10, ppc64_caches)
lwz r11,DCACHEL1LOGBLOCKSIZE(r10) /* log2 of cache block size */
lwz r12,DCACHEL1BLOCKSIZE(r10) /* get cache block size */
li r9,0
diff --git a/arch/powerpc/lib/error-inject.c b/arch/powerpc/lib/error-inject.c
index 407b992fb02f..e834079d2b5c 100644
--- a/arch/powerpc/lib/error-inject.c
+++ b/arch/powerpc/lib/error-inject.c
@@ -11,6 +11,6 @@ void override_function_with_return(struct pt_regs *regs)
* function in the kernel/module, captured on a kprobe. We don't need
* to worry about 32-bit userspace on a 64-bit kernel.
*/
- regs->nip = regs->link;
+ regs_set_return_ip(regs, regs->link);
}
NOKPROBE_SYMBOL(override_function_with_return);
diff --git a/arch/powerpc/lib/feature-fixups-test.S b/arch/powerpc/lib/feature-fixups-test.S
index b12168c2447a..480172fbd024 100644
--- a/arch/powerpc/lib/feature-fixups-test.S
+++ b/arch/powerpc/lib/feature-fixups-test.S
@@ -7,6 +7,7 @@
#include <asm/ppc_asm.h>
#include <asm/synch.h>
#include <asm/asm-compat.h>
+#include <asm/ppc-opcode.h>
.text
@@ -791,3 +792,71 @@ globl(lwsync_fixup_test_expected_SYNC)
1: or 1,1,1
sync
+globl(ftr_fixup_prefix1)
+ or 1,1,1
+ .long OP_PREFIX << 26
+ .long 0x0000000
+ or 2,2,2
+globl(end_ftr_fixup_prefix1)
+
+globl(ftr_fixup_prefix1_orig)
+ or 1,1,1
+ .long OP_PREFIX << 26
+ .long 0x0000000
+ or 2,2,2
+
+globl(ftr_fixup_prefix1_expected)
+ or 1,1,1
+ nop
+ nop
+ or 2,2,2
+
+globl(ftr_fixup_prefix2)
+ or 1,1,1
+ .long OP_PREFIX << 26
+ .long 0x0000000
+ or 2,2,2
+globl(end_ftr_fixup_prefix2)
+
+globl(ftr_fixup_prefix2_orig)
+ or 1,1,1
+ .long OP_PREFIX << 26
+ .long 0x0000000
+ or 2,2,2
+
+globl(ftr_fixup_prefix2_alt)
+ .long OP_PREFIX << 26
+ .long 0x0000001
+
+globl(ftr_fixup_prefix2_expected)
+ or 1,1,1
+ .long OP_PREFIX << 26
+ .long 0x0000001
+ or 2,2,2
+
+globl(ftr_fixup_prefix3)
+ or 1,1,1
+ .long OP_PREFIX << 26
+ .long 0x0000000
+ or 2,2,2
+ or 3,3,3
+globl(end_ftr_fixup_prefix3)
+
+globl(ftr_fixup_prefix3_orig)
+ or 1,1,1
+ .long OP_PREFIX << 26
+ .long 0x0000000
+ or 2,2,2
+ or 3,3,3
+
+globl(ftr_fixup_prefix3_alt)
+ .long OP_PREFIX << 26
+ .long 0x0000001
+ nop
+
+globl(ftr_fixup_prefix3_expected)
+ or 1,1,1
+ .long OP_PREFIX << 26
+ .long 0x0000001
+ nop
+ or 3,3,3
diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c
index 4ba634b89ce5..31f40f544de5 100644
--- a/arch/powerpc/lib/feature-fixups.c
+++ b/arch/powerpc/lib/feature-fixups.c
@@ -14,13 +14,16 @@
#include <linux/string.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
+#include <linux/stop_machine.h>
#include <asm/cputable.h>
#include <asm/code-patching.h>
+#include <asm/interrupt.h>
#include <asm/page.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/security_features.h>
#include <asm/firmware.h>
+#include <asm/inst.h>
struct fixup_entry {
unsigned long mask;
@@ -31,30 +34,30 @@ struct fixup_entry {
long alt_end_off;
};
-static unsigned int *calc_addr(struct fixup_entry *fcur, long offset)
+static u32 *calc_addr(struct fixup_entry *fcur, long offset)
{
/*
* We store the offset to the code as a negative offset from
* the start of the alt_entry, to support the VDSO. This
* routine converts that back into an actual address.
*/
- return (unsigned int *)((unsigned long)fcur + offset);
+ return (u32 *)((unsigned long)fcur + offset);
}
-static int patch_alt_instruction(unsigned int *src, unsigned int *dest,
- unsigned int *alt_start, unsigned int *alt_end)
+static int patch_alt_instruction(u32 *src, u32 *dest, u32 *alt_start, u32 *alt_end)
{
- unsigned int instr;
+ int err;
+ ppc_inst_t instr;
- instr = *src;
+ instr = ppc_inst_read(src);
- if (instr_is_relative_branch(*src)) {
- unsigned int *target = (unsigned int *)branch_target(src);
+ if (instr_is_relative_branch(ppc_inst_read(src))) {
+ u32 *target = (u32 *)branch_target(src);
/* Branch within the section doesn't need translating */
if (target < alt_start || target > alt_end) {
- instr = translate_branch(dest, src);
- if (!instr)
+ err = translate_branch(&instr, dest, src);
+ if (err)
return 1;
}
}
@@ -66,7 +69,7 @@ static int patch_alt_instruction(unsigned int *src, unsigned int *dest,
static int patch_feature_section(unsigned long value, struct fixup_entry *fcur)
{
- unsigned int *start, *end, *alt_start, *alt_end, *src, *dest;
+ u32 *start, *end, *alt_start, *alt_end, *src, *dest;
start = calc_addr(fcur, fcur->start_off);
end = calc_addr(fcur, fcur->end_off);
@@ -82,13 +85,14 @@ static int patch_feature_section(unsigned long value, struct fixup_entry *fcur)
src = alt_start;
dest = start;
- for (; src < alt_end; src++, dest++) {
+ for (; src < alt_end; src = ppc_inst_next(src, src),
+ dest = ppc_inst_next(dest, dest)) {
if (patch_alt_instruction(src, dest, alt_start, alt_end))
return 1;
}
for (; dest < end; dest++)
- raw_patch_instruction(dest, PPC_INST_NOP);
+ raw_patch_instruction(dest, ppc_inst(PPC_RAW_NOP()));
return 0;
}
@@ -120,24 +124,24 @@ static void do_stf_entry_barrier_fixups(enum stf_barrier_type types)
long *start, *end;
int i;
- start = PTRRELOC(&__start___stf_entry_barrier_fixup),
+ start = PTRRELOC(&__start___stf_entry_barrier_fixup);
end = PTRRELOC(&__stop___stf_entry_barrier_fixup);
- instrs[0] = 0x60000000; /* nop */
- instrs[1] = 0x60000000; /* nop */
- instrs[2] = 0x60000000; /* nop */
+ instrs[0] = PPC_RAW_NOP();
+ instrs[1] = PPC_RAW_NOP();
+ instrs[2] = PPC_RAW_NOP();
i = 0;
if (types & STF_BARRIER_FALLBACK) {
- instrs[i++] = 0x7d4802a6; /* mflr r10 */
- instrs[i++] = 0x60000000; /* branch patched below */
- instrs[i++] = 0x7d4803a6; /* mtlr r10 */
+ instrs[i++] = PPC_RAW_MFLR(_R10);
+ instrs[i++] = PPC_RAW_NOP(); /* branch patched below */
+ instrs[i++] = PPC_RAW_MTLR(_R10);
} else if (types & STF_BARRIER_EIEIO) {
- instrs[i++] = 0x7e0006ac; /* eieio + bit 6 hint */
+ instrs[i++] = PPC_RAW_EIEIO() | 0x02000000; /* eieio + bit 6 hint */
} else if (types & STF_BARRIER_SYNC_ORI) {
- instrs[i++] = 0x7c0004ac; /* hwsync */
- instrs[i++] = 0xe94d0000; /* ld r10,0(r13) */
- instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */
+ instrs[i++] = PPC_RAW_SYNC();
+ instrs[i++] = PPC_RAW_LD(_R10, _R13, 0);
+ instrs[i++] = PPC_RAW_ORI(_R31, _R31, 0); /* speculation barrier */
}
for (i = 0; start < end; start++, i++) {
@@ -145,15 +149,17 @@ static void do_stf_entry_barrier_fixups(enum stf_barrier_type types)
pr_devel("patching dest %lx\n", (unsigned long)dest);
- patch_instruction(dest, instrs[0]);
-
- if (types & STF_BARRIER_FALLBACK)
- patch_branch(dest + 1, (unsigned long)&stf_barrier_fallback,
- BRANCH_SET_LINK);
- else
- patch_instruction(dest + 1, instrs[1]);
-
- patch_instruction(dest + 2, instrs[2]);
+ // See comment in do_entry_flush_fixups() RE order of patching
+ if (types & STF_BARRIER_FALLBACK) {
+ patch_instruction(dest, ppc_inst(instrs[0]));
+ patch_instruction(dest + 2, ppc_inst(instrs[2]));
+ patch_branch(dest + 1,
+ (unsigned long)&stf_barrier_fallback, BRANCH_SET_LINK);
+ } else {
+ patch_instruction(dest + 1, ppc_inst(instrs[1]));
+ patch_instruction(dest + 2, ppc_inst(instrs[2]));
+ patch_instruction(dest, ppc_inst(instrs[0]));
+ }
}
printk(KERN_DEBUG "stf-barrier: patched %d entry locations (%s barrier)\n", i,
@@ -170,35 +176,34 @@ static void do_stf_exit_barrier_fixups(enum stf_barrier_type types)
long *start, *end;
int i;
- start = PTRRELOC(&__start___stf_exit_barrier_fixup),
+ start = PTRRELOC(&__start___stf_exit_barrier_fixup);
end = PTRRELOC(&__stop___stf_exit_barrier_fixup);
- instrs[0] = 0x60000000; /* nop */
- instrs[1] = 0x60000000; /* nop */
- instrs[2] = 0x60000000; /* nop */
- instrs[3] = 0x60000000; /* nop */
- instrs[4] = 0x60000000; /* nop */
- instrs[5] = 0x60000000; /* nop */
+ instrs[0] = PPC_RAW_NOP();
+ instrs[1] = PPC_RAW_NOP();
+ instrs[2] = PPC_RAW_NOP();
+ instrs[3] = PPC_RAW_NOP();
+ instrs[4] = PPC_RAW_NOP();
+ instrs[5] = PPC_RAW_NOP();
i = 0;
if (types & STF_BARRIER_FALLBACK || types & STF_BARRIER_SYNC_ORI) {
if (cpu_has_feature(CPU_FTR_HVMODE)) {
- instrs[i++] = 0x7db14ba6; /* mtspr 0x131, r13 (HSPRG1) */
- instrs[i++] = 0x7db04aa6; /* mfspr r13, 0x130 (HSPRG0) */
+ instrs[i++] = PPC_RAW_MTSPR(SPRN_HSPRG1, _R13);
+ instrs[i++] = PPC_RAW_MFSPR(_R13, SPRN_HSPRG0);
} else {
- instrs[i++] = 0x7db243a6; /* mtsprg 2,r13 */
- instrs[i++] = 0x7db142a6; /* mfsprg r13,1 */
+ instrs[i++] = PPC_RAW_MTSPR(SPRN_SPRG2, _R13);
+ instrs[i++] = PPC_RAW_MFSPR(_R13, SPRN_SPRG1);
}
- instrs[i++] = 0x7c0004ac; /* hwsync */
- instrs[i++] = 0xe9ad0000; /* ld r13,0(r13) */
- instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */
- if (cpu_has_feature(CPU_FTR_HVMODE)) {
- instrs[i++] = 0x7db14aa6; /* mfspr r13, 0x131 (HSPRG1) */
- } else {
- instrs[i++] = 0x7db242a6; /* mfsprg r13,2 */
- }
+ instrs[i++] = PPC_RAW_SYNC();
+ instrs[i++] = PPC_RAW_LD(_R13, _R13, 0);
+ instrs[i++] = PPC_RAW_ORI(_R31, _R31, 0); /* speculation barrier */
+ if (cpu_has_feature(CPU_FTR_HVMODE))
+ instrs[i++] = PPC_RAW_MFSPR(_R13, SPRN_HSPRG1);
+ else
+ instrs[i++] = PPC_RAW_MFSPR(_R13, SPRN_SPRG2);
} else if (types & STF_BARRIER_EIEIO) {
- instrs[i++] = 0x7e0006ac; /* eieio + bit 6 hint */
+ instrs[i++] = PPC_RAW_EIEIO() | 0x02000000; /* eieio + bit 6 hint */
}
for (i = 0; start < end; start++, i++) {
@@ -206,12 +211,12 @@ static void do_stf_exit_barrier_fixups(enum stf_barrier_type types)
pr_devel("patching dest %lx\n", (unsigned long)dest);
- patch_instruction(dest, instrs[0]);
- patch_instruction(dest + 1, instrs[1]);
- patch_instruction(dest + 2, instrs[2]);
- patch_instruction(dest + 3, instrs[3]);
- patch_instruction(dest + 4, instrs[4]);
- patch_instruction(dest + 5, instrs[5]);
+ patch_instruction(dest, ppc_inst(instrs[0]));
+ patch_instruction(dest + 1, ppc_inst(instrs[1]));
+ patch_instruction(dest + 2, ppc_inst(instrs[2]));
+ patch_instruction(dest + 3, ppc_inst(instrs[3]));
+ patch_instruction(dest + 4, ppc_inst(instrs[4]));
+ patch_instruction(dest + 5, ppc_inst(instrs[5]));
}
printk(KERN_DEBUG "stf-barrier: patched %d exit locations (%s barrier)\n", i,
(types == STF_BARRIER_NONE) ? "no" :
@@ -221,47 +226,250 @@ static void do_stf_exit_barrier_fixups(enum stf_barrier_type types)
: "unknown");
}
+static bool stf_exit_reentrant = false;
+static bool rfi_exit_reentrant = false;
+static DEFINE_MUTEX(exit_flush_lock);
+
+static int __do_stf_barrier_fixups(void *data)
+{
+ enum stf_barrier_type *types = data;
+
+ do_stf_entry_barrier_fixups(*types);
+ do_stf_exit_barrier_fixups(*types);
+
+ return 0;
+}
void do_stf_barrier_fixups(enum stf_barrier_type types)
{
- do_stf_entry_barrier_fixups(types);
- do_stf_exit_barrier_fixups(types);
+ /*
+ * The call to the fallback entry flush, and the fallback/sync-ori exit
+ * flush cannot be safely patched in/out while other CPUs are
+ * executing them. So call __do_stf_barrier_fixups() on one CPU while
+ * all other CPUs spin in the stop machine core with interrupts hard
+ * disabled.
+ *
+ * The branch to mark interrupt exits non-reentrant is enabled first,
+ * then stop_machine runs which will ensure all CPUs are out of the
+ * low level interrupt exit code before patching. After the patching,
+ * if allowed, then flip the branch to allow fast exits.
+ */
+
+ // Prevent static key update races with do_rfi_flush_fixups()
+ mutex_lock(&exit_flush_lock);
+ static_branch_enable(&interrupt_exit_not_reentrant);
+
+ stop_machine(__do_stf_barrier_fixups, &types, NULL);
+
+ if ((types & STF_BARRIER_FALLBACK) || (types & STF_BARRIER_SYNC_ORI))
+ stf_exit_reentrant = false;
+ else
+ stf_exit_reentrant = true;
+
+ if (stf_exit_reentrant && rfi_exit_reentrant)
+ static_branch_disable(&interrupt_exit_not_reentrant);
+
+ mutex_unlock(&exit_flush_lock);
}
-void do_rfi_flush_fixups(enum l1d_flush_type types)
+void do_uaccess_flush_fixups(enum l1d_flush_type types)
+{
+ unsigned int instrs[4], *dest;
+ long *start, *end;
+ int i;
+
+ start = PTRRELOC(&__start___uaccess_flush_fixup);
+ end = PTRRELOC(&__stop___uaccess_flush_fixup);
+
+ instrs[0] = PPC_RAW_NOP();
+ instrs[1] = PPC_RAW_NOP();
+ instrs[2] = PPC_RAW_NOP();
+ instrs[3] = PPC_RAW_BLR();
+
+ i = 0;
+ if (types == L1D_FLUSH_FALLBACK) {
+ instrs[3] = PPC_RAW_NOP();
+ /* fallthrough to fallback flush */
+ }
+
+ if (types & L1D_FLUSH_ORI) {
+ instrs[i++] = PPC_RAW_ORI(_R31, _R31, 0); /* speculation barrier */
+ instrs[i++] = PPC_RAW_ORI(_R30, _R30, 0); /* L1d flush */
+ }
+
+ if (types & L1D_FLUSH_MTTRIG)
+ instrs[i++] = PPC_RAW_MTSPR(SPRN_TRIG2, _R0);
+
+ for (i = 0; start < end; start++, i++) {
+ dest = (void *)start + *start;
+
+ pr_devel("patching dest %lx\n", (unsigned long)dest);
+
+ patch_instruction(dest, ppc_inst(instrs[0]));
+
+ patch_instruction(dest + 1, ppc_inst(instrs[1]));
+ patch_instruction(dest + 2, ppc_inst(instrs[2]));
+ patch_instruction(dest + 3, ppc_inst(instrs[3]));
+ }
+
+ printk(KERN_DEBUG "uaccess-flush: patched %d locations (%s flush)\n", i,
+ (types == L1D_FLUSH_NONE) ? "no" :
+ (types == L1D_FLUSH_FALLBACK) ? "fallback displacement" :
+ (types & L1D_FLUSH_ORI) ? (types & L1D_FLUSH_MTTRIG)
+ ? "ori+mttrig type"
+ : "ori type" :
+ (types & L1D_FLUSH_MTTRIG) ? "mttrig type"
+ : "unknown");
+}
+
+static int __do_entry_flush_fixups(void *data)
{
+ enum l1d_flush_type types = *(enum l1d_flush_type *)data;
unsigned int instrs[3], *dest;
long *start, *end;
int i;
- start = PTRRELOC(&__start___rfi_flush_fixup),
+ instrs[0] = PPC_RAW_NOP();
+ instrs[1] = PPC_RAW_NOP();
+ instrs[2] = PPC_RAW_NOP();
+
+ i = 0;
+ if (types == L1D_FLUSH_FALLBACK) {
+ instrs[i++] = PPC_RAW_MFLR(_R10);
+ instrs[i++] = PPC_RAW_NOP(); /* branch patched below */
+ instrs[i++] = PPC_RAW_MTLR(_R10);
+ }
+
+ if (types & L1D_FLUSH_ORI) {
+ instrs[i++] = PPC_RAW_ORI(_R31, _R31, 0); /* speculation barrier */
+ instrs[i++] = PPC_RAW_ORI(_R30, _R30, 0); /* L1d flush */
+ }
+
+ if (types & L1D_FLUSH_MTTRIG)
+ instrs[i++] = PPC_RAW_MTSPR(SPRN_TRIG2, _R0);
+
+ /*
+ * If we're patching in or out the fallback flush we need to be careful about the
+ * order in which we patch instructions. That's because it's possible we could
+ * take a page fault after patching one instruction, so the sequence of
+ * instructions must be safe even in a half patched state.
+ *
+ * To make that work, when patching in the fallback flush we patch in this order:
+ * - the mflr (dest)
+ * - the mtlr (dest + 2)
+ * - the branch (dest + 1)
+ *
+ * That ensures the sequence is safe to execute at any point. In contrast if we
+ * patch the mtlr last, it's possible we could return from the branch and not
+ * restore LR, leading to a crash later.
+ *
+ * When patching out the fallback flush (either with nops or another flush type),
+ * we patch in this order:
+ * - the branch (dest + 1)
+ * - the mtlr (dest + 2)
+ * - the mflr (dest)
+ *
+ * Note we are protected by stop_machine() from other CPUs executing the code in a
+ * semi-patched state.
+ */
+
+ start = PTRRELOC(&__start___entry_flush_fixup);
+ end = PTRRELOC(&__stop___entry_flush_fixup);
+ for (i = 0; start < end; start++, i++) {
+ dest = (void *)start + *start;
+
+ pr_devel("patching dest %lx\n", (unsigned long)dest);
+
+ if (types == L1D_FLUSH_FALLBACK) {
+ patch_instruction(dest, ppc_inst(instrs[0]));
+ patch_instruction(dest + 2, ppc_inst(instrs[2]));
+ patch_branch(dest + 1,
+ (unsigned long)&entry_flush_fallback, BRANCH_SET_LINK);
+ } else {
+ patch_instruction(dest + 1, ppc_inst(instrs[1]));
+ patch_instruction(dest + 2, ppc_inst(instrs[2]));
+ patch_instruction(dest, ppc_inst(instrs[0]));
+ }
+ }
+
+ start = PTRRELOC(&__start___scv_entry_flush_fixup);
+ end = PTRRELOC(&__stop___scv_entry_flush_fixup);
+ for (; start < end; start++, i++) {
+ dest = (void *)start + *start;
+
+ pr_devel("patching dest %lx\n", (unsigned long)dest);
+
+ if (types == L1D_FLUSH_FALLBACK) {
+ patch_instruction(dest, ppc_inst(instrs[0]));
+ patch_instruction(dest + 2, ppc_inst(instrs[2]));
+ patch_branch(dest + 1,
+ (unsigned long)&scv_entry_flush_fallback, BRANCH_SET_LINK);
+ } else {
+ patch_instruction(dest + 1, ppc_inst(instrs[1]));
+ patch_instruction(dest + 2, ppc_inst(instrs[2]));
+ patch_instruction(dest, ppc_inst(instrs[0]));
+ }
+ }
+
+ printk(KERN_DEBUG "entry-flush: patched %d locations (%s flush)\n", i,
+ (types == L1D_FLUSH_NONE) ? "no" :
+ (types == L1D_FLUSH_FALLBACK) ? "fallback displacement" :
+ (types & L1D_FLUSH_ORI) ? (types & L1D_FLUSH_MTTRIG)
+ ? "ori+mttrig type"
+ : "ori type" :
+ (types & L1D_FLUSH_MTTRIG) ? "mttrig type"
+ : "unknown");
+
+ return 0;
+}
+
+void do_entry_flush_fixups(enum l1d_flush_type types)
+{
+ /*
+ * The call to the fallback flush cannot be safely patched in/out while
+ * other CPUs are executing it. So call __do_entry_flush_fixups() on one
+ * CPU while all other CPUs spin in the stop machine core with interrupts
+ * hard disabled.
+ */
+ stop_machine(__do_entry_flush_fixups, &types, NULL);
+}
+
+static int __do_rfi_flush_fixups(void *data)
+{
+ enum l1d_flush_type types = *(enum l1d_flush_type *)data;
+ unsigned int instrs[3], *dest;
+ long *start, *end;
+ int i;
+
+ start = PTRRELOC(&__start___rfi_flush_fixup);
end = PTRRELOC(&__stop___rfi_flush_fixup);
- instrs[0] = 0x60000000; /* nop */
- instrs[1] = 0x60000000; /* nop */
- instrs[2] = 0x60000000; /* nop */
+ instrs[0] = PPC_RAW_NOP();
+ instrs[1] = PPC_RAW_NOP();
+ instrs[2] = PPC_RAW_NOP();
if (types & L1D_FLUSH_FALLBACK)
/* b .+16 to fallback flush */
- instrs[0] = 0x48000010;
+ instrs[0] = PPC_RAW_BRANCH(16);
i = 0;
if (types & L1D_FLUSH_ORI) {
- instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */
- instrs[i++] = 0x63de0000; /* ori 30,30,0 L1d flush*/
+ instrs[i++] = PPC_RAW_ORI(_R31, _R31, 0); /* speculation barrier */
+ instrs[i++] = PPC_RAW_ORI(_R30, _R30, 0); /* L1d flush */
}
if (types & L1D_FLUSH_MTTRIG)
- instrs[i++] = 0x7c12dba6; /* mtspr TRIG2,r0 (SPR #882) */
+ instrs[i++] = PPC_RAW_MTSPR(SPRN_TRIG2, _R0);
for (i = 0; start < end; start++, i++) {
dest = (void *)start + *start;
pr_devel("patching dest %lx\n", (unsigned long)dest);
- patch_instruction(dest, instrs[0]);
- patch_instruction(dest + 1, instrs[1]);
- patch_instruction(dest + 2, instrs[2]);
+ patch_instruction(dest, ppc_inst(instrs[0]));
+ patch_instruction(dest + 1, ppc_inst(instrs[1]));
+ patch_instruction(dest + 2, ppc_inst(instrs[2]));
}
printk(KERN_DEBUG "rfi-flush: patched %d locations (%s flush)\n", i,
@@ -272,6 +480,34 @@ void do_rfi_flush_fixups(enum l1d_flush_type types)
: "ori type" :
(types & L1D_FLUSH_MTTRIG) ? "mttrig type"
: "unknown");
+
+ return 0;
+}
+
+void do_rfi_flush_fixups(enum l1d_flush_type types)
+{
+ /*
+ * stop_machine gets all CPUs out of the interrupt exit handler same
+ * as do_stf_barrier_fixups. do_rfi_flush_fixups patching can run
+ * without stop_machine, so this could be achieved with a broadcast
+ * IPI instead, but this matches the stf sequence.
+ */
+
+ // Prevent static key update races with do_stf_barrier_fixups()
+ mutex_lock(&exit_flush_lock);
+ static_branch_enable(&interrupt_exit_not_reentrant);
+
+ stop_machine(__do_rfi_flush_fixups, &types, NULL);
+
+ if (types & L1D_FLUSH_FALLBACK)
+ rfi_exit_reentrant = false;
+ else
+ rfi_exit_reentrant = true;
+
+ if (stf_exit_reentrant && rfi_exit_reentrant)
+ static_branch_disable(&interrupt_exit_not_reentrant);
+
+ mutex_unlock(&exit_flush_lock);
}
void do_barrier_nospec_fixups_range(bool enable, void *fixup_start, void *fixup_end)
@@ -283,18 +519,18 @@ void do_barrier_nospec_fixups_range(bool enable, void *fixup_start, void *fixup_
start = fixup_start;
end = fixup_end;
- instr = 0x60000000; /* nop */
+ instr = PPC_RAW_NOP();
if (enable) {
pr_info("barrier-nospec: using ORI speculation barrier\n");
- instr = 0x63ff0000; /* ori 31,31,0 speculation barrier */
+ instr = PPC_RAW_ORI(_R31, _R31, 0); /* speculation barrier */
}
for (i = 0; start < end; start++, i++) {
dest = (void *)start + *start;
pr_devel("patching dest %lx\n", (unsigned long)dest);
- patch_instruction(dest, instr);
+ patch_instruction(dest, ppc_inst(instr));
}
printk(KERN_DEBUG "barrier-nospec: patched %d locations\n", i);
@@ -307,14 +543,14 @@ void do_barrier_nospec_fixups(bool enable)
{
void *start, *end;
- start = PTRRELOC(&__start___barrier_nospec_fixup),
+ start = PTRRELOC(&__start___barrier_nospec_fixup);
end = PTRRELOC(&__stop___barrier_nospec_fixup);
do_barrier_nospec_fixups_range(enable, start, end);
}
#endif /* CONFIG_PPC_BARRIER_NOSPEC */
-#ifdef CONFIG_PPC_FSL_BOOK3E
+#ifdef CONFIG_PPC_E500
void do_barrier_nospec_fixups_range(bool enable, void *fixup_start, void *fixup_end)
{
unsigned int instr[2], *dest;
@@ -324,27 +560,27 @@ void do_barrier_nospec_fixups_range(bool enable, void *fixup_start, void *fixup_
start = fixup_start;
end = fixup_end;
- instr[0] = PPC_INST_NOP;
- instr[1] = PPC_INST_NOP;
+ instr[0] = PPC_RAW_NOP();
+ instr[1] = PPC_RAW_NOP();
if (enable) {
pr_info("barrier-nospec: using isync; sync as speculation barrier\n");
- instr[0] = PPC_INST_ISYNC;
- instr[1] = PPC_INST_SYNC;
+ instr[0] = PPC_RAW_ISYNC();
+ instr[1] = PPC_RAW_SYNC();
}
for (i = 0; start < end; start++, i++) {
dest = (void *)start + *start;
pr_devel("patching dest %lx\n", (unsigned long)dest);
- patch_instruction(dest, instr[0]);
- patch_instruction(dest + 1, instr[1]);
+ patch_instruction(dest, ppc_inst(instr[0]));
+ patch_instruction(dest + 1, ppc_inst(instr[1]));
}
printk(KERN_DEBUG "barrier-nospec: patched %d locations\n", i);
}
-static void patch_btb_flush_section(long *curr)
+static void __init patch_btb_flush_section(long *curr)
{
unsigned int *start, *end;
@@ -352,11 +588,11 @@ static void patch_btb_flush_section(long *curr)
end = (void *)curr + *(curr + 1);
for (; start < end; start++) {
pr_devel("patching dest %lx\n", (unsigned long)start);
- patch_instruction(start, PPC_INST_NOP);
+ patch_instruction(start, ppc_inst(PPC_RAW_NOP()));
}
}
-void do_btb_flush_fixups(void)
+void __init do_btb_flush_fixups(void)
{
long *start, *end;
@@ -366,12 +602,12 @@ void do_btb_flush_fixups(void)
for (; start < end; start += 2)
patch_btb_flush_section(start);
}
-#endif /* CONFIG_PPC_FSL_BOOK3E */
+#endif /* CONFIG_PPC_E500 */
void do_lwsync_fixups(unsigned long value, void *fixup_start, void *fixup_end)
{
long *start, *end;
- unsigned int *dest;
+ u32 *dest;
if (!(value & CPU_FTR_LWSYNC))
return ;
@@ -381,27 +617,28 @@ void do_lwsync_fixups(unsigned long value, void *fixup_start, void *fixup_end)
for (; start < end; start++) {
dest = (void *)start + *start;
- raw_patch_instruction(dest, PPC_INST_LWSYNC);
+ raw_patch_instruction(dest, ppc_inst(PPC_INST_LWSYNC));
}
}
-static void do_final_fixups(void)
+static void __init do_final_fixups(void)
{
#if defined(CONFIG_PPC64) && defined(CONFIG_RELOCATABLE)
- int *src, *dest;
- unsigned long length;
+ ppc_inst_t inst;
+ u32 *src, *dest, *end;
if (PHYSICAL_START == 0)
return;
- src = (int *)(KERNELBASE + PHYSICAL_START);
- dest = (int *)KERNELBASE;
- length = (__end_interrupts - _stext) / sizeof(int);
+ src = (u32 *)(KERNELBASE + PHYSICAL_START);
+ dest = (u32 *)KERNELBASE;
+ end = (void *)src + (__end_interrupts - _stext);
- while (length--) {
- raw_patch_instruction(dest, *src);
- src++;
- dest++;
+ while (src < end) {
+ inst = ppc_inst_read(src);
+ raw_patch_instruction(dest, inst);
+ src = ppc_inst_next(src, src);
+ dest = ppc_inst_next(dest, dest);
}
#endif
}
@@ -478,12 +715,12 @@ late_initcall(check_features);
/* This must be after the text it fixes up, vmlinux.lds.S enforces that atm */
static struct fixup_entry fixup;
-static long calc_offset(struct fixup_entry *entry, unsigned int *p)
+static long __init calc_offset(struct fixup_entry *entry, unsigned int *p)
{
return (unsigned long)p - (unsigned long)entry;
}
-static void test_basic_patching(void)
+static void __init test_basic_patching(void)
{
extern unsigned int ftr_fixup_test1[];
extern unsigned int end_ftr_fixup_test1[];
@@ -514,7 +751,7 @@ static void test_basic_patching(void)
check(memcmp(ftr_fixup_test1, ftr_fixup_test1_expected, size) == 0);
}
-static void test_alternative_patching(void)
+static void __init test_alternative_patching(void)
{
extern unsigned int ftr_fixup_test2[];
extern unsigned int end_ftr_fixup_test2[];
@@ -547,7 +784,7 @@ static void test_alternative_patching(void)
check(memcmp(ftr_fixup_test2, ftr_fixup_test2_expected, size) == 0);
}
-static void test_alternative_case_too_big(void)
+static void __init test_alternative_case_too_big(void)
{
extern unsigned int ftr_fixup_test3[];
extern unsigned int end_ftr_fixup_test3[];
@@ -573,7 +810,7 @@ static void test_alternative_case_too_big(void)
check(memcmp(ftr_fixup_test3, ftr_fixup_test3_orig, size) == 0);
}
-static void test_alternative_case_too_small(void)
+static void __init test_alternative_case_too_small(void)
{
extern unsigned int ftr_fixup_test4[];
extern unsigned int end_ftr_fixup_test4[];
@@ -619,7 +856,7 @@ static void test_alternative_case_with_branch(void)
check(memcmp(ftr_fixup_test5, ftr_fixup_test5_expected, size) == 0);
}
-static void test_alternative_case_with_external_branch(void)
+static void __init test_alternative_case_with_external_branch(void)
{
extern unsigned int ftr_fixup_test6[];
extern unsigned int end_ftr_fixup_test6[];
@@ -629,7 +866,7 @@ static void test_alternative_case_with_external_branch(void)
check(memcmp(ftr_fixup_test6, ftr_fixup_test6_expected, size) == 0);
}
-static void test_alternative_case_with_branch_to_end(void)
+static void __init test_alternative_case_with_branch_to_end(void)
{
extern unsigned int ftr_fixup_test7[];
extern unsigned int end_ftr_fixup_test7[];
@@ -639,7 +876,7 @@ static void test_alternative_case_with_branch_to_end(void)
check(memcmp(ftr_fixup_test7, ftr_fixup_test7_expected, size) == 0);
}
-static void test_cpu_macros(void)
+static void __init test_cpu_macros(void)
{
extern u8 ftr_fixup_test_FTR_macros[];
extern u8 ftr_fixup_test_FTR_macros_expected[];
@@ -651,7 +888,7 @@ static void test_cpu_macros(void)
ftr_fixup_test_FTR_macros_expected, size) == 0);
}
-static void test_fw_macros(void)
+static void __init test_fw_macros(void)
{
#ifdef CONFIG_PPC64
extern u8 ftr_fixup_test_FW_FTR_macros[];
@@ -665,7 +902,7 @@ static void test_fw_macros(void)
#endif
}
-static void test_lwsync_macros(void)
+static void __init test_lwsync_macros(void)
{
extern u8 lwsync_fixup_test[];
extern u8 end_lwsync_fixup_test[];
@@ -684,6 +921,78 @@ static void test_lwsync_macros(void)
}
}
+#ifdef CONFIG_PPC64
+static void __init test_prefix_patching(void)
+{
+ extern unsigned int ftr_fixup_prefix1[];
+ extern unsigned int end_ftr_fixup_prefix1[];
+ extern unsigned int ftr_fixup_prefix1_orig[];
+ extern unsigned int ftr_fixup_prefix1_expected[];
+ int size = sizeof(unsigned int) * (end_ftr_fixup_prefix1 - ftr_fixup_prefix1);
+
+ fixup.value = fixup.mask = 8;
+ fixup.start_off = calc_offset(&fixup, ftr_fixup_prefix1 + 1);
+ fixup.end_off = calc_offset(&fixup, ftr_fixup_prefix1 + 3);
+ fixup.alt_start_off = fixup.alt_end_off = 0;
+
+ /* Sanity check */
+ check(memcmp(ftr_fixup_prefix1, ftr_fixup_prefix1_orig, size) == 0);
+
+ patch_feature_section(0, &fixup);
+ check(memcmp(ftr_fixup_prefix1, ftr_fixup_prefix1_expected, size) == 0);
+ check(memcmp(ftr_fixup_prefix1, ftr_fixup_prefix1_orig, size) != 0);
+}
+
+static void __init test_prefix_alt_patching(void)
+{
+ extern unsigned int ftr_fixup_prefix2[];
+ extern unsigned int end_ftr_fixup_prefix2[];
+ extern unsigned int ftr_fixup_prefix2_orig[];
+ extern unsigned int ftr_fixup_prefix2_expected[];
+ extern unsigned int ftr_fixup_prefix2_alt[];
+ int size = sizeof(unsigned int) * (end_ftr_fixup_prefix2 - ftr_fixup_prefix2);
+
+ fixup.value = fixup.mask = 8;
+ fixup.start_off = calc_offset(&fixup, ftr_fixup_prefix2 + 1);
+ fixup.end_off = calc_offset(&fixup, ftr_fixup_prefix2 + 3);
+ fixup.alt_start_off = calc_offset(&fixup, ftr_fixup_prefix2_alt);
+ fixup.alt_end_off = calc_offset(&fixup, ftr_fixup_prefix2_alt + 2);
+ /* Sanity check */
+ check(memcmp(ftr_fixup_prefix2, ftr_fixup_prefix2_orig, size) == 0);
+
+ patch_feature_section(0, &fixup);
+ check(memcmp(ftr_fixup_prefix2, ftr_fixup_prefix2_expected, size) == 0);
+ check(memcmp(ftr_fixup_prefix2, ftr_fixup_prefix2_orig, size) != 0);
+}
+
+static void __init test_prefix_word_alt_patching(void)
+{
+ extern unsigned int ftr_fixup_prefix3[];
+ extern unsigned int end_ftr_fixup_prefix3[];
+ extern unsigned int ftr_fixup_prefix3_orig[];
+ extern unsigned int ftr_fixup_prefix3_expected[];
+ extern unsigned int ftr_fixup_prefix3_alt[];
+ int size = sizeof(unsigned int) * (end_ftr_fixup_prefix3 - ftr_fixup_prefix3);
+
+ fixup.value = fixup.mask = 8;
+ fixup.start_off = calc_offset(&fixup, ftr_fixup_prefix3 + 1);
+ fixup.end_off = calc_offset(&fixup, ftr_fixup_prefix3 + 4);
+ fixup.alt_start_off = calc_offset(&fixup, ftr_fixup_prefix3_alt);
+ fixup.alt_end_off = calc_offset(&fixup, ftr_fixup_prefix3_alt + 3);
+ /* Sanity check */
+ check(memcmp(ftr_fixup_prefix3, ftr_fixup_prefix3_orig, size) == 0);
+
+ patch_feature_section(0, &fixup);
+ check(memcmp(ftr_fixup_prefix3, ftr_fixup_prefix3_expected, size) == 0);
+ patch_feature_section(0, &fixup);
+ check(memcmp(ftr_fixup_prefix3, ftr_fixup_prefix3_orig, size) != 0);
+}
+#else
+static inline void test_prefix_patching(void) {}
+static inline void test_prefix_alt_patching(void) {}
+static inline void test_prefix_word_alt_patching(void) {}
+#endif /* CONFIG_PPC64 */
+
static int __init test_feature_fixups(void)
{
printk(KERN_DEBUG "Running feature fixup self-tests ...\n");
@@ -698,6 +1007,9 @@ static int __init test_feature_fixups(void)
test_cpu_macros();
test_fw_macros();
test_lwsync_macros();
+ test_prefix_patching();
+ test_prefix_alt_patching();
+ test_prefix_word_alt_patching();
return 0;
}
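The flush and barrier fixups above all share one shape: the actual patching runs in a stop_machine() callback so no other CPU can execute a half-patched sequence, and the interrupt_exit_not_reentrant static branch is flipped around the patching under exit_flush_lock. A stripped-down sketch of that shape, reusing the names visible in this file; the callback body and the reentrancy test are placeholders, not code from this patch:

static int __patch_on_one_cpu(void *data)
{
	/* Every other CPU spins with interrupts hard disabled while this runs. */
	my_do_fixups(*(enum l1d_flush_type *)data);	/* placeholder for the real fixup routine */
	return 0;
}

static void patch_safely(enum l1d_flush_type types)
{
	mutex_lock(&exit_flush_lock);
	static_branch_enable(&interrupt_exit_not_reentrant);

	stop_machine(__patch_on_one_cpu, &types, NULL);

	if (fast_exit_is_safe(types))			/* placeholder for the reentrancy check */
		static_branch_disable(&interrupt_exit_not_reentrant);

	mutex_unlock(&exit_flush_lock);
}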
diff --git a/arch/powerpc/lib/locks.c b/arch/powerpc/lib/locks.c
index 6440d5943c00..04165b7a163f 100644
--- a/arch/powerpc/lib/locks.c
+++ b/arch/powerpc/lib/locks.c
@@ -27,14 +27,14 @@ void splpar_spin_yield(arch_spinlock_t *lock)
return;
holder_cpu = lock_value & 0xffff;
BUG_ON(holder_cpu >= NR_CPUS);
- yield_count = be32_to_cpu(lppaca_of(holder_cpu).yield_count);
+
+ yield_count = yield_count_of(holder_cpu);
if ((yield_count & 1) == 0)
return; /* virtual cpu is currently running */
rmb();
if (lock->slock != lock_value)
return; /* something has changed */
- plpar_hcall_norets(H_CONFER,
- get_hard_smp_processor_id(holder_cpu), yield_count);
+ yield_to_preempted(holder_cpu, yield_count);
}
EXPORT_SYMBOL_GPL(splpar_spin_yield);
@@ -53,13 +53,13 @@ void splpar_rw_yield(arch_rwlock_t *rw)
return; /* no write lock at present */
holder_cpu = lock_value & 0xffff;
BUG_ON(holder_cpu >= NR_CPUS);
- yield_count = be32_to_cpu(lppaca_of(holder_cpu).yield_count);
+
+ yield_count = yield_count_of(holder_cpu);
if ((yield_count & 1) == 0)
return; /* virtual cpu is currently running */
rmb();
if (rw->lock != lock_value)
return; /* something has changed */
- plpar_hcall_norets(H_CONFER,
- get_hard_smp_processor_id(holder_cpu), yield_count);
+ yield_to_preempted(holder_cpu, yield_count);
}
#endif
diff --git a/arch/powerpc/lib/pmem.c b/arch/powerpc/lib/pmem.c
index 0666a8d29596..eb2919ddf9b9 100644
--- a/arch/powerpc/lib/pmem.c
+++ b/arch/powerpc/lib/pmem.c
@@ -6,23 +6,60 @@
#include <linux/string.h>
#include <linux/export.h>
#include <linux/uaccess.h>
+#include <linux/libnvdimm.h>
#include <asm/cacheflush.h>
+static inline void __clean_pmem_range(unsigned long start, unsigned long stop)
+{
+ unsigned long shift = l1_dcache_shift();
+ unsigned long bytes = l1_dcache_bytes();
+ void *addr = (void *)(start & ~(bytes - 1));
+ unsigned long size = stop - (unsigned long)addr + (bytes - 1);
+ unsigned long i;
+
+ for (i = 0; i < size >> shift; i++, addr += bytes)
+ asm volatile(PPC_DCBSTPS(%0, %1): :"i"(0), "r"(addr): "memory");
+}
+
+static inline void __flush_pmem_range(unsigned long start, unsigned long stop)
+{
+ unsigned long shift = l1_dcache_shift();
+ unsigned long bytes = l1_dcache_bytes();
+ void *addr = (void *)(start & ~(bytes - 1));
+ unsigned long size = stop - (unsigned long)addr + (bytes - 1);
+ unsigned long i;
+
+ for (i = 0; i < size >> shift; i++, addr += bytes)
+ asm volatile(PPC_DCBFPS(%0, %1): :"i"(0), "r"(addr): "memory");
+}
+
+static inline void clean_pmem_range(unsigned long start, unsigned long stop)
+{
+ if (cpu_has_feature(CPU_FTR_ARCH_207S))
+ return __clean_pmem_range(start, stop);
+}
+
+static inline void flush_pmem_range(unsigned long start, unsigned long stop)
+{
+ if (cpu_has_feature(CPU_FTR_ARCH_207S))
+ return __flush_pmem_range(start, stop);
+}
+
/*
* CONFIG_ARCH_HAS_PMEM_API symbols
*/
void arch_wb_cache_pmem(void *addr, size_t size)
{
unsigned long start = (unsigned long) addr;
- flush_dcache_range(start, start + size);
+ clean_pmem_range(start, start + size);
}
EXPORT_SYMBOL_GPL(arch_wb_cache_pmem);
void arch_invalidate_pmem(void *addr, size_t size)
{
unsigned long start = (unsigned long) addr;
- flush_dcache_range(start, start + size);
+ flush_pmem_range(start, start + size);
}
EXPORT_SYMBOL_GPL(arch_invalidate_pmem);
@@ -35,19 +72,17 @@ long __copy_from_user_flushcache(void *dest, const void __user *src,
unsigned long copied, start = (unsigned long) dest;
copied = __copy_from_user(dest, src, size);
- flush_dcache_range(start, start + size);
+ clean_pmem_range(start, start + size);
return copied;
}
-void *memcpy_flushcache(void *dest, const void *src, size_t size)
+void memcpy_flushcache(void *dest, const void *src, size_t size)
{
unsigned long start = (unsigned long) dest;
memcpy(dest, src, size);
- flush_dcache_range(start, start + size);
-
- return dest;
+ clean_pmem_range(start, start + size);
}
EXPORT_SYMBOL(memcpy_flushcache);
diff --git a/arch/powerpc/lib/restart_table.c b/arch/powerpc/lib/restart_table.c
new file mode 100644
index 000000000000..bccb662c1b7b
--- /dev/null
+++ b/arch/powerpc/lib/restart_table.c
@@ -0,0 +1,56 @@
+#include <asm/interrupt.h>
+#include <asm/kprobes.h>
+
+struct soft_mask_table_entry {
+ unsigned long start;
+ unsigned long end;
+};
+
+struct restart_table_entry {
+ unsigned long start;
+ unsigned long end;
+ unsigned long fixup;
+};
+
+extern struct soft_mask_table_entry __start___soft_mask_table[];
+extern struct soft_mask_table_entry __stop___soft_mask_table[];
+
+extern struct restart_table_entry __start___restart_table[];
+extern struct restart_table_entry __stop___restart_table[];
+
+/* Given an address, look for it in the soft mask table */
+bool search_kernel_soft_mask_table(unsigned long addr)
+{
+ struct soft_mask_table_entry *smte = __start___soft_mask_table;
+
+ while (smte < __stop___soft_mask_table) {
+ unsigned long start = smte->start;
+ unsigned long end = smte->end;
+
+ if (addr >= start && addr < end)
+ return true;
+
+ smte++;
+ }
+ return false;
+}
+NOKPROBE_SYMBOL(search_kernel_soft_mask_table);
+
+/* Given an address, look for it in the kernel exception table */
+unsigned long search_kernel_restart_table(unsigned long addr)
+{
+ struct restart_table_entry *rte = __start___restart_table;
+
+ while (rte < __stop___restart_table) {
+ unsigned long start = rte->start;
+ unsigned long end = rte->end;
+ unsigned long fixup = rte->fixup;
+
+ if (addr >= start && addr < end)
+ return fixup;
+
+ rte++;
+ }
+ return 0;
+}
+NOKPROBE_SYMBOL(search_kernel_restart_table);
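Nothing in this new file consumes the tables; they are lookup helpers, presumably called from the interrupt paths. A hedged sketch of what a lookup site could look like, reusing regs_set_return_ip() from the error-inject change earlier in this series; the wrapper itself is hypothetical:

/* Illustrative only: redirect an interrupted restart region to its fixup. */
static void restart_fixup(struct pt_regs *regs)
{
	unsigned long fixup = search_kernel_restart_table(regs->nip);

	if (fixup)
		regs_set_return_ip(regs, fixup);
}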
diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c
index c077acb983a1..398b5694aeb7 100644
--- a/arch/powerpc/lib/sstep.c
+++ b/arch/powerpc/lib/sstep.c
@@ -13,8 +13,7 @@
#include <linux/uaccess.h>
#include <asm/cpu_has_feature.h>
#include <asm/cputable.h>
-
-extern char system_call_common[];
+#include <asm/disassemble.h>
#ifdef CONFIG_PPC64
/* Bits in SRR1 that are copied from MSR */
@@ -30,6 +29,10 @@ extern char system_call_common[];
#define XER_OV32 0x00080000U
#define XER_CA32 0x00040000U
+#ifdef CONFIG_VSX
+#define VSX_REGISTER_XTP(rd) ((((rd) & 1) << 5) | ((rd) & 0xfe))
+#endif
+
#ifdef CONFIG_PPC_FPU
/*
* Functions in ldstfp.S
@@ -69,10 +72,8 @@ extern int do_stqcx(unsigned long ea, unsigned long val0, unsigned long val1,
static nokprobe_inline unsigned long truncate_if_32bit(unsigned long msr,
unsigned long val)
{
-#ifdef __powerpc64__
if ((msr & MSR_64BIT) == 0)
val &= 0xffffffffUL;
-#endif
return val;
}
@@ -106,11 +107,11 @@ static nokprobe_inline long address_ok(struct pt_regs *regs,
{
if (!user_mode(regs))
return 1;
- if (__access_ok(ea, nb, USER_DS))
+ if (access_ok((void __user *)ea, nb))
return 1;
- if (__access_ok(ea, 1, USER_DS))
+ if (access_ok((void __user *)ea, 1))
/* Access overlaps the end of the user region */
- regs->dar = USER_DS.seg;
+ regs->dar = TASK_SIZE_MAX - 1;
else
regs->dar = ea;
return 0;
@@ -188,6 +189,47 @@ static nokprobe_inline unsigned long xform_ea(unsigned int instr,
}
/*
+ * Calculate effective address for a MLS:D-form / 8LS:D-form
+ * prefixed instruction
+ */
+static nokprobe_inline unsigned long mlsd_8lsd_ea(unsigned int instr,
+ unsigned int suffix,
+ const struct pt_regs *regs)
+{
+ int ra, prefix_r;
+ unsigned int dd;
+ unsigned long ea, d0, d1, d;
+
+ prefix_r = GET_PREFIX_R(instr);
+ ra = GET_PREFIX_RA(suffix);
+
+ d0 = instr & 0x3ffff;
+ d1 = suffix & 0xffff;
+ d = (d0 << 16) | d1;
+
+ /*
+ * sign extend a 34 bit number
+ */
+ dd = (unsigned int)(d >> 2);
+ ea = (signed int)dd;
+ ea = (ea << 2) | (d & 0x3);
+
+ if (!prefix_r && ra)
+ ea += regs->gpr[ra];
+ else if (!prefix_r && !ra)
+ ; /* Leave ea as is */
+ else if (prefix_r)
+ ea += regs->nip;
+
+ /*
+ * (prefix_r && ra) is an invalid form. Should already be
+ * checked for by caller!
+ */
+
+ return ea;
+}
+
+/*
* Return the largest power of 2, not greater than sizeof(unsigned long),
* such that x is a multiple of it.
*/
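The displacement handling in mlsd_8lsd_ea() above concatenates the 18-bit prefix field and the 16-bit suffix field into a 34-bit value d, then sign-extends it by shifting the low two bits out, letting a cast to signed int extend the remaining 32 bits, and shifting the two bits back in. A self-contained sketch of just that sign-extension step, with one assumed example value:

#include <stdio.h>

/* Sign-extend a 34-bit displacement the same way mlsd_8lsd_ea() does. */
static unsigned long sext34(unsigned long d)
{
	unsigned int dd = (unsigned int)(d >> 2);	/* upper 32 of the 34 bits */
	unsigned long ea = (signed int)dd;		/* sign-extend into a long */

	return (ea << 2) | (d & 0x3);			/* restore the low two bits */
}

int main(void)
{
	/* 0x3fffffffc read as a signed 34-bit displacement is -4. */
	printf("%ld\n", (long)sext34(0x3fffffffcUL));	/* prints -4 */
	return 0;
}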
@@ -236,39 +278,70 @@ static nokprobe_inline void do_byte_reverse(void *ptr, int nb)
up[1] = tmp;
break;
}
+ case 32: {
+ unsigned long *up = (unsigned long *)ptr;
+ unsigned long tmp;
+
+ tmp = byterev_8(up[0]);
+ up[0] = byterev_8(up[3]);
+ up[3] = tmp;
+ tmp = byterev_8(up[2]);
+ up[2] = byterev_8(up[1]);
+ up[1] = tmp;
+ break;
+ }
+
#endif
default:
WARN_ON_ONCE(1);
}
}
-static nokprobe_inline int read_mem_aligned(unsigned long *dest,
- unsigned long ea, int nb,
- struct pt_regs *regs)
+static __always_inline int
+__read_mem_aligned(unsigned long *dest, unsigned long ea, int nb, struct pt_regs *regs)
{
- int err = 0;
unsigned long x = 0;
switch (nb) {
case 1:
- err = __get_user(x, (unsigned char __user *) ea);
+ unsafe_get_user(x, (unsigned char __user *)ea, Efault);
break;
case 2:
- err = __get_user(x, (unsigned short __user *) ea);
+ unsafe_get_user(x, (unsigned short __user *)ea, Efault);
break;
case 4:
- err = __get_user(x, (unsigned int __user *) ea);
+ unsafe_get_user(x, (unsigned int __user *)ea, Efault);
break;
#ifdef __powerpc64__
case 8:
- err = __get_user(x, (unsigned long __user *) ea);
+ unsafe_get_user(x, (unsigned long __user *)ea, Efault);
break;
#endif
}
- if (!err)
- *dest = x;
- else
+ *dest = x;
+ return 0;
+
+Efault:
+ regs->dar = ea;
+ return -EFAULT;
+}
+
+static nokprobe_inline int
+read_mem_aligned(unsigned long *dest, unsigned long ea, int nb, struct pt_regs *regs)
+{
+ int err;
+
+ if (is_kernel_addr(ea))
+ return __read_mem_aligned(dest, ea, nb, regs);
+
+ if (user_read_access_begin((void __user *)ea, nb)) {
+ err = __read_mem_aligned(dest, ea, nb, regs);
+ user_read_access_end();
+ } else {
+ err = -EFAULT;
regs->dar = ea;
+ }
+
return err;
}
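Every accessor converted in this file follows the same split: a __ variant that does the raw access and jumps to a local Efault label on a fault, and a wrapper that calls it directly for kernel addresses but only inside a user_read_access_begin()/user_read_access_end() (or the write equivalents) window for user addresses. A condensed sketch of the shape; probe_word()/__probe_word() are illustrative names, not functions from this patch:

static __always_inline int __probe_word(unsigned long *dest, unsigned long ea)
{
	unsafe_get_user(*dest, (unsigned long __user *)ea, Efault);
	return 0;

Efault:
	return -EFAULT;
}

static int probe_word(unsigned long *dest, unsigned long ea)
{
	int err;

	if (is_kernel_addr(ea))
		return __probe_word(dest, ea);

	if (!user_read_access_begin((void __user *)ea, sizeof(*dest)))
		return -EFAULT;
	err = __probe_word(dest, ea);
	user_read_access_end();
	return err;
}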
@@ -276,10 +349,8 @@ static nokprobe_inline int read_mem_aligned(unsigned long *dest,
* Copy from userspace to a buffer, using the largest possible
* aligned accesses, up to sizeof(long).
*/
-static nokprobe_inline int copy_mem_in(u8 *dest, unsigned long ea, int nb,
- struct pt_regs *regs)
+static __always_inline int __copy_mem_in(u8 *dest, unsigned long ea, int nb, struct pt_regs *regs)
{
- int err = 0;
int c;
for (; nb > 0; nb -= c) {
@@ -288,31 +359,46 @@ static nokprobe_inline int copy_mem_in(u8 *dest, unsigned long ea, int nb,
c = max_align(nb);
switch (c) {
case 1:
- err = __get_user(*dest, (unsigned char __user *) ea);
+ unsafe_get_user(*dest, (u8 __user *)ea, Efault);
break;
case 2:
- err = __get_user(*(u16 *)dest,
- (unsigned short __user *) ea);
+ unsafe_get_user(*(u16 *)dest, (u16 __user *)ea, Efault);
break;
case 4:
- err = __get_user(*(u32 *)dest,
- (unsigned int __user *) ea);
+ unsafe_get_user(*(u32 *)dest, (u32 __user *)ea, Efault);
break;
#ifdef __powerpc64__
case 8:
- err = __get_user(*(unsigned long *)dest,
- (unsigned long __user *) ea);
+ unsafe_get_user(*(u64 *)dest, (u64 __user *)ea, Efault);
break;
#endif
}
- if (err) {
- regs->dar = ea;
- return err;
- }
dest += c;
ea += c;
}
return 0;
+
+Efault:
+ regs->dar = ea;
+ return -EFAULT;
+}
+
+static nokprobe_inline int copy_mem_in(u8 *dest, unsigned long ea, int nb, struct pt_regs *regs)
+{
+ int err;
+
+ if (is_kernel_addr(ea))
+ return __copy_mem_in(dest, ea, nb, regs);
+
+ if (user_read_access_begin((void __user *)ea, nb)) {
+ err = __copy_mem_in(dest, ea, nb, regs);
+ user_read_access_end();
+ } else {
+ err = -EFAULT;
+ regs->dar = ea;
+ }
+
+ return err;
}
static nokprobe_inline int read_mem_unaligned(unsigned long *dest,
@@ -350,30 +436,48 @@ static int read_mem(unsigned long *dest, unsigned long ea, int nb,
}
NOKPROBE_SYMBOL(read_mem);
-static nokprobe_inline int write_mem_aligned(unsigned long val,
- unsigned long ea, int nb,
- struct pt_regs *regs)
+static __always_inline int
+__write_mem_aligned(unsigned long val, unsigned long ea, int nb, struct pt_regs *regs)
{
- int err = 0;
-
switch (nb) {
case 1:
- err = __put_user(val, (unsigned char __user *) ea);
+ unsafe_put_user(val, (unsigned char __user *)ea, Efault);
break;
case 2:
- err = __put_user(val, (unsigned short __user *) ea);
+ unsafe_put_user(val, (unsigned short __user *)ea, Efault);
break;
case 4:
- err = __put_user(val, (unsigned int __user *) ea);
+ unsafe_put_user(val, (unsigned int __user *)ea, Efault);
break;
#ifdef __powerpc64__
case 8:
- err = __put_user(val, (unsigned long __user *) ea);
+ unsafe_put_user(val, (unsigned long __user *)ea, Efault);
break;
#endif
}
- if (err)
+ return 0;
+
+Efault:
+ regs->dar = ea;
+ return -EFAULT;
+}
+
+static nokprobe_inline int
+write_mem_aligned(unsigned long val, unsigned long ea, int nb, struct pt_regs *regs)
+{
+ int err;
+
+ if (is_kernel_addr(ea))
+ return __write_mem_aligned(val, ea, nb, regs);
+
+ if (user_write_access_begin((void __user *)ea, nb)) {
+ err = __write_mem_aligned(val, ea, nb, regs);
+ user_write_access_end();
+ } else {
+ err = -EFAULT;
regs->dar = ea;
+ }
+
return err;
}
@@ -381,10 +485,8 @@ static nokprobe_inline int write_mem_aligned(unsigned long val,
* Copy from a buffer to userspace, using the largest possible
* aligned accesses, up to sizeof(long).
*/
-static nokprobe_inline int copy_mem_out(u8 *dest, unsigned long ea, int nb,
- struct pt_regs *regs)
+static nokprobe_inline int __copy_mem_out(u8 *dest, unsigned long ea, int nb, struct pt_regs *regs)
{
- int err = 0;
int c;
for (; nb > 0; nb -= c) {
@@ -393,31 +495,46 @@ static nokprobe_inline int copy_mem_out(u8 *dest, unsigned long ea, int nb,
c = max_align(nb);
switch (c) {
case 1:
- err = __put_user(*dest, (unsigned char __user *) ea);
+ unsafe_put_user(*dest, (u8 __user *)ea, Efault);
break;
case 2:
- err = __put_user(*(u16 *)dest,
- (unsigned short __user *) ea);
+ unsafe_put_user(*(u16 *)dest, (u16 __user *)ea, Efault);
break;
case 4:
- err = __put_user(*(u32 *)dest,
- (unsigned int __user *) ea);
+ unsafe_put_user(*(u32 *)dest, (u32 __user *)ea, Efault);
break;
#ifdef __powerpc64__
case 8:
- err = __put_user(*(unsigned long *)dest,
- (unsigned long __user *) ea);
+ unsafe_put_user(*(u64 *)dest, (u64 __user *)ea, Efault);
break;
#endif
}
- if (err) {
- regs->dar = ea;
- return err;
- }
dest += c;
ea += c;
}
return 0;
+
+Efault:
+ regs->dar = ea;
+ return -EFAULT;
+}
+
+static nokprobe_inline int copy_mem_out(u8 *dest, unsigned long ea, int nb, struct pt_regs *regs)
+{
+ int err;
+
+ if (is_kernel_addr(ea))
+ return __copy_mem_out(dest, ea, nb, regs);
+
+ if (user_write_access_begin((void __user *)ea, nb)) {
+ err = __copy_mem_out(dest, ea, nb, regs);
+ user_write_access_end();
+ } else {
+ err = -EFAULT;
+ regs->dar = ea;
+ }
+
+ return err;
}
static nokprobe_inline int write_mem_unaligned(unsigned long val,
@@ -666,6 +783,8 @@ void emulate_vsx_load(struct instruction_op *op, union vsx_reg *reg,
reg->d[0] = reg->d[1] = 0;
switch (op->element_size) {
+ case 32:
+ /* [p]lxvp[x] */
case 16:
/* whole vector; lxv[x] or lxvl[l] */
if (size == 0)
@@ -674,7 +793,7 @@ void emulate_vsx_load(struct instruction_op *op, union vsx_reg *reg,
if (IS_LE && (op->vsx_flags & VSX_LDLEFT))
rev = !rev;
if (rev)
- do_byte_reverse(reg, 16);
+ do_byte_reverse(reg, size);
break;
case 8:
/* scalar loads, lxvd2x, lxvdsx */
@@ -750,6 +869,22 @@ void emulate_vsx_store(struct instruction_op *op, const union vsx_reg *reg,
size = GETSIZE(op->type);
switch (op->element_size) {
+ case 32:
+ /* [p]stxvp[x] */
+ if (size == 0)
+ break;
+ if (rev) {
+ /* reverse 32 bytes */
+ union vsx_reg buf32[2];
+ buf32[0].d[0] = byterev_8(reg[1].d[1]);
+ buf32[0].d[1] = byterev_8(reg[1].d[0]);
+ buf32[1].d[0] = byterev_8(reg[0].d[1]);
+ buf32[1].d[1] = byterev_8(reg[0].d[0]);
+ memcpy(mem, buf32, size);
+ } else {
+ memcpy(mem, reg, size);
+ }
+ break;
case 16:
/* stxv, stxvx, stxvl, stxvll */
if (size == 0)
@@ -818,28 +953,43 @@ static nokprobe_inline int do_vsx_load(struct instruction_op *op,
bool cross_endian)
{
int reg = op->reg;
- u8 mem[16];
- union vsx_reg buf;
+ int i, j, nr_vsx_regs;
+ u8 mem[32];
+ union vsx_reg buf[2];
int size = GETSIZE(op->type);
if (!address_ok(regs, ea, size) || copy_mem_in(mem, ea, size, regs))
return -EFAULT;
- emulate_vsx_load(op, &buf, mem, cross_endian);
+ nr_vsx_regs = max(1ul, size / sizeof(__vector128));
+ emulate_vsx_load(op, buf, mem, cross_endian);
preempt_disable();
if (reg < 32) {
/* FP regs + extensions */
if (regs->msr & MSR_FP) {
- load_vsrn(reg, &buf);
+ for (i = 0; i < nr_vsx_regs; i++) {
+ j = IS_LE ? nr_vsx_regs - i - 1 : i;
+ load_vsrn(reg + i, &buf[j].v);
+ }
} else {
- current->thread.fp_state.fpr[reg][0] = buf.d[0];
- current->thread.fp_state.fpr[reg][1] = buf.d[1];
+ for (i = 0; i < nr_vsx_regs; i++) {
+ j = IS_LE ? nr_vsx_regs - i - 1 : i;
+ current->thread.fp_state.fpr[reg + i][0] = buf[j].d[0];
+ current->thread.fp_state.fpr[reg + i][1] = buf[j].d[1];
+ }
}
} else {
- if (regs->msr & MSR_VEC)
- load_vsrn(reg, &buf);
- else
- current->thread.vr_state.vr[reg - 32] = buf.v;
+ if (regs->msr & MSR_VEC) {
+ for (i = 0; i < nr_vsx_regs; i++) {
+ j = IS_LE ? nr_vsx_regs - i - 1 : i;
+ load_vsrn(reg + i, &buf[j].v);
+ }
+ } else {
+ for (i = 0; i < nr_vsx_regs; i++) {
+ j = IS_LE ? nr_vsx_regs - i - 1 : i;
+ current->thread.vr_state.vr[reg - 32 + i] = buf[j].v;
+ }
+ }
}
preempt_enable();
return 0;
@@ -850,63 +1000,96 @@ static nokprobe_inline int do_vsx_store(struct instruction_op *op,
bool cross_endian)
{
int reg = op->reg;
- u8 mem[16];
- union vsx_reg buf;
+ int i, j, nr_vsx_regs;
+ u8 mem[32];
+ union vsx_reg buf[2];
int size = GETSIZE(op->type);
if (!address_ok(regs, ea, size))
return -EFAULT;
+ nr_vsx_regs = max(1ul, size / sizeof(__vector128));
preempt_disable();
if (reg < 32) {
/* FP regs + extensions */
if (regs->msr & MSR_FP) {
- store_vsrn(reg, &buf);
+ for (i = 0; i < nr_vsx_regs; i++) {
+ j = IS_LE ? nr_vsx_regs - i - 1 : i;
+ store_vsrn(reg + i, &buf[j].v);
+ }
} else {
- buf.d[0] = current->thread.fp_state.fpr[reg][0];
- buf.d[1] = current->thread.fp_state.fpr[reg][1];
+ for (i = 0; i < nr_vsx_regs; i++) {
+ j = IS_LE ? nr_vsx_regs - i - 1 : i;
+ buf[j].d[0] = current->thread.fp_state.fpr[reg + i][0];
+ buf[j].d[1] = current->thread.fp_state.fpr[reg + i][1];
+ }
}
} else {
- if (regs->msr & MSR_VEC)
- store_vsrn(reg, &buf);
- else
- buf.v = current->thread.vr_state.vr[reg - 32];
+ if (regs->msr & MSR_VEC) {
+ for (i = 0; i < nr_vsx_regs; i++) {
+ j = IS_LE ? nr_vsx_regs - i - 1 : i;
+ store_vsrn(reg + i, &buf[j].v);
+ }
+ } else {
+ for (i = 0; i < nr_vsx_regs; i++) {
+ j = IS_LE ? nr_vsx_regs - i - 1 : i;
+ buf[j].v = current->thread.vr_state.vr[reg - 32 + i];
+ }
+ }
}
preempt_enable();
- emulate_vsx_store(op, &buf, mem, cross_endian);
+ emulate_vsx_store(op, buf, mem, cross_endian);
return copy_mem_out(mem, ea, size, regs);
}
#endif /* CONFIG_VSX */
+static int __emulate_dcbz(unsigned long ea)
+{
+ unsigned long i;
+ unsigned long size = l1_dcache_bytes();
+
+ for (i = 0; i < size; i += sizeof(long))
+ unsafe_put_user(0, (unsigned long __user *)(ea + i), Efault);
+
+ return 0;
+
+Efault:
+ return -EFAULT;
+}
+
int emulate_dcbz(unsigned long ea, struct pt_regs *regs)
{
int err;
- unsigned long i, size;
+ unsigned long size = l1_dcache_bytes();
-#ifdef __powerpc64__
- size = ppc64_caches.l1d.block_size;
- if (!(regs->msr & MSR_64BIT))
- ea &= 0xffffffffUL;
-#else
- size = L1_CACHE_BYTES;
-#endif
+ ea = truncate_if_32bit(regs->msr, ea);
ea &= ~(size - 1);
if (!address_ok(regs, ea, size))
return -EFAULT;
- for (i = 0; i < size; i += sizeof(long)) {
- err = __put_user(0, (unsigned long __user *) (ea + i));
- if (err) {
- regs->dar = ea;
- return err;
- }
+
+ if (is_kernel_addr(ea)) {
+ err = __emulate_dcbz(ea);
+ } else if (user_write_access_begin((void __user *)ea, size)) {
+ err = __emulate_dcbz(ea);
+ user_write_access_end();
+ } else {
+ err = -EFAULT;
}
- return 0;
+
+ if (err)
+ regs->dar = ea;
+
+ return err;
}
NOKPROBE_SYMBOL(emulate_dcbz);
#define __put_user_asmx(x, addr, err, op, cr) \
__asm__ __volatile__( \
+ ".machine push\n" \
+ ".machine power8\n" \
"1: " op " %2,0,%3\n" \
+ ".machine pop\n" \
" mfcr %1\n" \
"2:\n" \
".section .fixup,\"ax\"\n" \
@@ -919,7 +1102,10 @@ NOKPROBE_SYMBOL(emulate_dcbz);
#define __get_user_asmx(x, addr, err, op) \
__asm__ __volatile__( \
+ ".machine push\n" \
+ ".machine power8\n" \
"1: "op" %1,0,%2\n" \
+ ".machine pop\n" \
"2:\n" \
".section .fixup,\"ax\"\n" \
"3: li %0,%3\n" \
@@ -948,10 +1134,8 @@ static nokprobe_inline void set_cr0(const struct pt_regs *regs,
op->type |= SETCC;
op->ccval = (regs->ccr & 0x0fffffff) | ((regs->xer >> 3) & 0x10000000);
-#ifdef __powerpc64__
if (!(regs->msr & MSR_64BIT))
val = (int) val;
-#endif
if (val < 0)
op->ccval |= 0x80000000;
else if (val > 0)
@@ -979,15 +1163,11 @@ static nokprobe_inline void add_with_carry(const struct pt_regs *regs,
if (carry_in)
++val;
- op->type = COMPUTE + SETREG + SETXER;
+ op->type = COMPUTE | SETREG | SETXER;
op->reg = rd;
op->val = val;
-#ifdef __powerpc64__
- if (!(regs->msr & MSR_64BIT)) {
- val = (unsigned int) val;
- val1 = (unsigned int) val1;
- }
-#endif
+ val = truncate_if_32bit(regs->msr, val);
+ val1 = truncate_if_32bit(regs->msr, val1);
op->xerval = regs->xer;
if (val < val1 || (carry_in && val == val1))
op->xerval |= XER_CA;
@@ -1004,7 +1184,7 @@ static nokprobe_inline void do_cmp_signed(const struct pt_regs *regs,
{
unsigned int crval, shift;
- op->type = COMPUTE + SETCC;
+ op->type = COMPUTE | SETCC;
crval = (regs->xer >> 31) & 1; /* get SO bit */
if (v1 < v2)
crval |= 8;
@@ -1023,7 +1203,7 @@ static nokprobe_inline void do_cmp_unsigned(const struct pt_regs *regs,
{
unsigned int crval, shift;
- op->type = COMPUTE + SETCC;
+ op->type = COMPUTE | SETCC;
crval = (regs->xer >> 31) & 1; /* get SO bit */
if (v1 < v2)
crval |= 8;
@@ -1163,54 +1343,64 @@ static nokprobe_inline int trap_compare(long v1, long v2)
* otherwise.
*/
int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
- unsigned int instr)
+ ppc_inst_t instr)
{
+#ifdef CONFIG_PPC64
+ unsigned int suffixopcode, prefixtype, prefix_r;
+#endif
unsigned int opcode, ra, rb, rc, rd, spr, u;
unsigned long int imm;
unsigned long int val, val2;
unsigned int mb, me, sh;
+ unsigned int word, suffix;
long ival;
+ word = ppc_inst_val(instr);
+ suffix = ppc_inst_suffix(instr);
+
op->type = COMPUTE;
- opcode = instr >> 26;
+ opcode = ppc_inst_primary_opcode(instr);
switch (opcode) {
case 16: /* bc */
op->type = BRANCH;
- imm = (signed short)(instr & 0xfffc);
- if ((instr & 2) == 0)
+ imm = (signed short)(word & 0xfffc);
+ if ((word & 2) == 0)
imm += regs->nip;
op->val = truncate_if_32bit(regs->msr, imm);
- if (instr & 1)
+ if (word & 1)
op->type |= SETLK;
- if (branch_taken(instr, regs, op))
+ if (branch_taken(word, regs, op))
op->type |= BRTAKEN;
return 1;
-#ifdef CONFIG_PPC64
case 17: /* sc */
- if ((instr & 0xfe2) == 2)
+ if ((word & 0xfe2) == 2)
op->type = SYSCALL;
- else
+ else if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) &&
+ (word & 0xfe3) == 1) { /* scv */
+ op->type = SYSCALL_VECTORED_0;
+ if (!cpu_has_feature(CPU_FTR_ARCH_300))
+ goto unknown_opcode;
+ } else
op->type = UNKNOWN;
return 0;
-#endif
case 18: /* b */
op->type = BRANCH | BRTAKEN;
- imm = instr & 0x03fffffc;
+ imm = word & 0x03fffffc;
if (imm & 0x02000000)
imm -= 0x04000000;
- if ((instr & 2) == 0)
+ if ((word & 2) == 0)
imm += regs->nip;
op->val = truncate_if_32bit(regs->msr, imm);
- if (instr & 1)
+ if (word & 1)
op->type |= SETLK;
return 1;
case 19:
- switch ((instr >> 1) & 0x3ff) {
+ switch ((word >> 1) & 0x3ff) {
case 0: /* mcrf */
op->type = COMPUTE + SETCC;
- rd = 7 - ((instr >> 23) & 0x7);
- ra = 7 - ((instr >> 18) & 0x7);
+ rd = 7 - ((word >> 23) & 0x7);
+ ra = 7 - ((word >> 18) & 0x7);
rd *= 4;
ra *= 4;
val = (regs->ccr >> ra) & 0xf;
@@ -1220,11 +1410,11 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
case 16: /* bclr */
case 528: /* bcctr */
op->type = BRANCH;
- imm = (instr & 0x400)? regs->ctr: regs->link;
+ imm = (word & 0x400)? regs->ctr: regs->link;
op->val = truncate_if_32bit(regs->msr, imm);
- if (instr & 1)
+ if (word & 1)
op->type |= SETLK;
- if (branch_taken(instr, regs, op))
+ if (branch_taken(word, regs, op))
op->type |= BRTAKEN;
return 1;
@@ -1247,23 +1437,23 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
case 417: /* crorc */
case 449: /* cror */
op->type = COMPUTE + SETCC;
- ra = (instr >> 16) & 0x1f;
- rb = (instr >> 11) & 0x1f;
- rd = (instr >> 21) & 0x1f;
+ ra = (word >> 16) & 0x1f;
+ rb = (word >> 11) & 0x1f;
+ rd = (word >> 21) & 0x1f;
ra = (regs->ccr >> (31 - ra)) & 1;
rb = (regs->ccr >> (31 - rb)) & 1;
- val = (instr >> (6 + ra * 2 + rb)) & 1;
+ val = (word >> (6 + ra * 2 + rb)) & 1;
op->ccval = (regs->ccr & ~(1UL << (31 - rd))) |
(val << (31 - rd));
return 1;
}
break;
case 31:
- switch ((instr >> 1) & 0x3ff) {
+ switch ((word >> 1) & 0x3ff) {
case 598: /* sync */
op->type = BARRIER + BARRIER_SYNC;
#ifdef __powerpc64__
- switch ((instr >> 21) & 3) {
+ switch ((word >> 21) & 3) {
case 1: /* lwsync */
op->type = BARRIER + BARRIER_LWSYNC;
break;
@@ -1281,33 +1471,57 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
break;
}
- /* Following cases refer to regs->gpr[], so we need all regs */
- if (!FULL_REGS(regs))
- return -1;
-
- rd = (instr >> 21) & 0x1f;
- ra = (instr >> 16) & 0x1f;
- rb = (instr >> 11) & 0x1f;
- rc = (instr >> 6) & 0x1f;
+ rd = (word >> 21) & 0x1f;
+ ra = (word >> 16) & 0x1f;
+ rb = (word >> 11) & 0x1f;
+ rc = (word >> 6) & 0x1f;
switch (opcode) {
#ifdef __powerpc64__
+ case 1:
+ if (!cpu_has_feature(CPU_FTR_ARCH_31))
+ goto unknown_opcode;
+
+ prefix_r = GET_PREFIX_R(word);
+ ra = GET_PREFIX_RA(suffix);
+ rd = (suffix >> 21) & 0x1f;
+ op->reg = rd;
+ op->val = regs->gpr[rd];
+ suffixopcode = get_op(suffix);
+ prefixtype = (word >> 24) & 0x3;
+ switch (prefixtype) {
+ case 2:
+ if (prefix_r && ra)
+ return 0;
+ switch (suffixopcode) {
+ case 14: /* paddi */
+ op->type = COMPUTE | PREFIXED;
+ op->val = mlsd_8lsd_ea(word, suffix, regs);
+ goto compute_done;
+ }
+ }
+ break;
case 2: /* tdi */
- if (rd & trap_compare(regs->gpr[ra], (short) instr))
+ if (rd & trap_compare(regs->gpr[ra], (short) word))
goto trap;
return 1;
#endif
case 3: /* twi */
- if (rd & trap_compare((int)regs->gpr[ra], (short) instr))
+ if (rd & trap_compare((int)regs->gpr[ra], (short) word))
goto trap;
return 1;
#ifdef __powerpc64__
case 4:
+ /*
+ * There are very many instructions with this primary opcode
+ * introduced in the ISA as early as v2.03. However, the ones
+ * we currently emulate were all introduced with ISA 3.0
+ */
if (!cpu_has_feature(CPU_FTR_ARCH_300))
- return -1;
+ goto unknown_opcode;
- switch (instr & 0x3f) {
+ switch (word & 0x3f) {
case 48: /* maddhd */
asm volatile(PPC_MADDHD(%0, %1, %2, %3) :
"=r" (op->val) : "r" (regs->gpr[ra]),
@@ -1331,20 +1545,20 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
* There are other instructions from ISA 3.0 with the same
* primary opcode which do not have emulation support yet.
*/
- return -1;
+ goto unknown_opcode;
#endif
case 7: /* mulli */
- op->val = regs->gpr[ra] * (short) instr;
+ op->val = regs->gpr[ra] * (short) word;
goto compute_done;
case 8: /* subfic */
- imm = (short) instr;
+ imm = (short) word;
add_with_carry(regs, op, rd, ~regs->gpr[ra], imm, 1);
return 1;
case 10: /* cmpli */
- imm = (unsigned short) instr;
+ imm = (unsigned short) word;
val = regs->gpr[ra];
#ifdef __powerpc64__
if ((rd & 1) == 0)
@@ -1354,7 +1568,7 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
return 1;
case 11: /* cmpi */
- imm = (short) instr;
+ imm = (short) word;
val = regs->gpr[ra];
#ifdef __powerpc64__
if ((rd & 1) == 0)
@@ -1364,35 +1578,37 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
return 1;
case 12: /* addic */
- imm = (short) instr;
+ imm = (short) word;
add_with_carry(regs, op, rd, regs->gpr[ra], imm, 0);
return 1;
case 13: /* addic. */
- imm = (short) instr;
+ imm = (short) word;
add_with_carry(regs, op, rd, regs->gpr[ra], imm, 0);
set_cr0(regs, op);
return 1;
case 14: /* addi */
- imm = (short) instr;
+ imm = (short) word;
if (ra)
imm += regs->gpr[ra];
op->val = imm;
goto compute_done;
case 15: /* addis */
- imm = ((short) instr) << 16;
+ imm = ((short) word) << 16;
if (ra)
imm += regs->gpr[ra];
op->val = imm;
goto compute_done;
case 19:
- if (((instr >> 1) & 0x1f) == 2) {
+ if (((word >> 1) & 0x1f) == 2) {
/* addpcis */
- imm = (short) (instr & 0xffc1); /* d0 + d2 fields */
- imm |= (instr >> 15) & 0x3e; /* d1 field */
+ if (!cpu_has_feature(CPU_FTR_ARCH_300))
+ goto unknown_opcode;
+ imm = (short) (word & 0xffc1); /* d0 + d2 fields */
+ imm |= (word >> 15) & 0x3e; /* d1 field */
op->val = regs->nip + (imm << 16) + 4;
goto compute_done;
}
@@ -1400,65 +1616,65 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
return 0;
case 20: /* rlwimi */
- mb = (instr >> 6) & 0x1f;
- me = (instr >> 1) & 0x1f;
+ mb = (word >> 6) & 0x1f;
+ me = (word >> 1) & 0x1f;
val = DATA32(regs->gpr[rd]);
imm = MASK32(mb, me);
op->val = (regs->gpr[ra] & ~imm) | (ROTATE(val, rb) & imm);
goto logical_done;
case 21: /* rlwinm */
- mb = (instr >> 6) & 0x1f;
- me = (instr >> 1) & 0x1f;
+ mb = (word >> 6) & 0x1f;
+ me = (word >> 1) & 0x1f;
val = DATA32(regs->gpr[rd]);
op->val = ROTATE(val, rb) & MASK32(mb, me);
goto logical_done;
case 23: /* rlwnm */
- mb = (instr >> 6) & 0x1f;
- me = (instr >> 1) & 0x1f;
+ mb = (word >> 6) & 0x1f;
+ me = (word >> 1) & 0x1f;
rb = regs->gpr[rb] & 0x1f;
val = DATA32(regs->gpr[rd]);
op->val = ROTATE(val, rb) & MASK32(mb, me);
goto logical_done;
case 24: /* ori */
- op->val = regs->gpr[rd] | (unsigned short) instr;
+ op->val = regs->gpr[rd] | (unsigned short) word;
goto logical_done_nocc;
case 25: /* oris */
- imm = (unsigned short) instr;
+ imm = (unsigned short) word;
op->val = regs->gpr[rd] | (imm << 16);
goto logical_done_nocc;
case 26: /* xori */
- op->val = regs->gpr[rd] ^ (unsigned short) instr;
+ op->val = regs->gpr[rd] ^ (unsigned short) word;
goto logical_done_nocc;
case 27: /* xoris */
- imm = (unsigned short) instr;
+ imm = (unsigned short) word;
op->val = regs->gpr[rd] ^ (imm << 16);
goto logical_done_nocc;
case 28: /* andi. */
- op->val = regs->gpr[rd] & (unsigned short) instr;
+ op->val = regs->gpr[rd] & (unsigned short) word;
set_cr0(regs, op);
goto logical_done_nocc;
case 29: /* andis. */
- imm = (unsigned short) instr;
+ imm = (unsigned short) word;
op->val = regs->gpr[rd] & (imm << 16);
set_cr0(regs, op);
goto logical_done_nocc;
#ifdef __powerpc64__
case 30: /* rld* */
- mb = ((instr >> 6) & 0x1f) | (instr & 0x20);
+ mb = ((word >> 6) & 0x1f) | (word & 0x20);
val = regs->gpr[rd];
- if ((instr & 0x10) == 0) {
- sh = rb | ((instr & 2) << 4);
+ if ((word & 0x10) == 0) {
+ sh = rb | ((word & 2) << 4);
val = ROTATE(val, sh);
- switch ((instr >> 2) & 3) {
+ switch ((word >> 2) & 3) {
case 0: /* rldicl */
val &= MASK64_L(mb);
break;
@@ -1478,7 +1694,7 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
} else {
sh = regs->gpr[rb] & 0x3f;
val = ROTATE(val, sh);
- switch ((instr >> 1) & 7) {
+ switch ((word >> 1) & 7) {
case 0: /* rldcl */
op->val = val & MASK64_L(mb);
goto logical_done;
@@ -1493,8 +1709,8 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
case 31:
/* isel occupies 32 minor opcodes */
- if (((instr >> 1) & 0x1f) == 15) {
- mb = (instr >> 6) & 0x1f; /* bc field */
+ if (((word >> 1) & 0x1f) == 15) {
+ mb = (word >> 6) & 0x1f; /* bc field */
val = (regs->ccr >> (31 - mb)) & 1;
val2 = (ra) ? regs->gpr[ra] : 0;
@@ -1502,7 +1718,7 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
goto compute_done;
}
- switch ((instr >> 1) & 0x3ff) {
+ switch ((word >> 1) & 0x3ff) {
case 4: /* tw */
if (rd == 0x1f ||
(rd & trap_compare((int)regs->gpr[ra],
@@ -1536,17 +1752,17 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
op->reg = rd;
/* only MSR_EE and MSR_RI get changed if bit 15 set */
/* mtmsrd doesn't change MSR_HV, MSR_ME or MSR_LE */
- imm = (instr & 0x10000)? 0x8002: 0xefffffffffffeffeUL;
+ imm = (word & 0x10000)? 0x8002: 0xefffffffffffeffeUL;
op->val = imm;
return 0;
#endif
case 19: /* mfcr */
imm = 0xffffffffUL;
- if ((instr >> 20) & 1) {
+ if ((word >> 20) & 1) {
imm = 0xf0000000UL;
for (sh = 0; sh < 8; ++sh) {
- if (instr & (0x80000 >> sh))
+ if (word & (0x80000 >> sh))
break;
imm >>= 4;
}
@@ -1554,13 +1770,35 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
op->val = regs->ccr & imm;
goto compute_done;
+ case 128: /* setb */
+ if (!cpu_has_feature(CPU_FTR_ARCH_300))
+ goto unknown_opcode;
+ /*
+ * 'ra' encodes the CR field number (bfa) in the top 3 bits.
+ * Since each CR field is 4 bits,
+ * we can simply mask off the bottom two bits (bfa * 4)
+ * to yield the first bit in the CR field.
+ */
+ ra = ra & ~0x3;
+ /* 'val' stores bits of the CR field (bfa) */
+ val = regs->ccr >> (CR0_SHIFT - ra);
+ /* checks if the LT bit of CR field (bfa) is set */
+ if (val & 8)
+ op->val = -1;
+ /* checks if the GT bit of CR field (bfa) is set */
+ else if (val & 4)
+ op->val = 1;
+ else
+ op->val = 0;
+ goto compute_done;
+
case 144: /* mtcrf */
op->type = COMPUTE + SETCC;
imm = 0xf0000000UL;
val = regs->gpr[rd];
op->ccval = regs->ccr;
for (sh = 0; sh < 8; ++sh) {
- if (instr & (0x80000 >> sh))
+ if (word & (0x80000 >> sh))
op->ccval = (op->ccval & ~imm) |
(val & imm);
imm >>= 4;
@@ -1568,7 +1806,7 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
return 1;
case 339: /* mfspr */
- spr = ((instr >> 16) & 0x1f) | ((instr >> 6) & 0x3e0);
+ spr = ((word >> 16) & 0x1f) | ((word >> 6) & 0x3e0);
op->type = MFSPR;
op->reg = rd;
op->spr = spr;
@@ -1578,7 +1816,7 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
return 0;
case 467: /* mtspr */
- spr = ((instr >> 16) & 0x1f) | ((instr >> 6) & 0x3e0);
+ spr = ((word >> 16) & 0x1f) | ((word >> 6) & 0x3e0);
op->type = MTSPR;
op->val = regs->gpr[rd];
op->spr = spr;
@@ -1703,7 +1941,7 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
#ifdef __powerpc64__
case 265: /* modud */
if (!cpu_has_feature(CPU_FTR_ARCH_300))
- return -1;
+ goto unknown_opcode;
op->val = regs->gpr[ra] % regs->gpr[rb];
goto compute_done;
#endif
@@ -1713,7 +1951,7 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
case 267: /* moduw */
if (!cpu_has_feature(CPU_FTR_ARCH_300))
- return -1;
+ goto unknown_opcode;
op->val = (unsigned int) regs->gpr[ra] %
(unsigned int) regs->gpr[rb];
goto compute_done;
@@ -1736,10 +1974,21 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
op->val = (int) regs->gpr[ra] /
(int) regs->gpr[rb];
goto arith_done;
-
+#ifdef __powerpc64__
+ case 425: /* divde[.] */
+ asm volatile(PPC_DIVDE(%0, %1, %2) :
+ "=r" (op->val) : "r" (regs->gpr[ra]),
+ "r" (regs->gpr[rb]));
+ goto arith_done;
+ case 393: /* divdeu[.] */
+ asm volatile(PPC_DIVDEU(%0, %1, %2) :
+ "=r" (op->val) : "r" (regs->gpr[ra]),
+ "r" (regs->gpr[rb]));
+ goto arith_done;
+#endif
case 755: /* darn */
if (!cpu_has_feature(CPU_FTR_ARCH_300))
- return -1;
+ goto unknown_opcode;
switch (ra & 0x3) {
case 0:
/* 32-bit conditioned */
@@ -1757,18 +2006,18 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
goto compute_done;
}
- return -1;
+ goto unknown_opcode;
#ifdef __powerpc64__
case 777: /* modsd */
if (!cpu_has_feature(CPU_FTR_ARCH_300))
- return -1;
+ goto unknown_opcode;
op->val = (long int) regs->gpr[ra] %
(long int) regs->gpr[rb];
goto compute_done;
#endif
case 779: /* modsw */
if (!cpu_has_feature(CPU_FTR_ARCH_300))
- return -1;
+ goto unknown_opcode;
op->val = (int) regs->gpr[ra] %
(int) regs->gpr[rb];
goto compute_done;
@@ -1845,14 +2094,14 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
#endif
case 538: /* cnttzw */
if (!cpu_has_feature(CPU_FTR_ARCH_300))
- return -1;
+ goto unknown_opcode;
val = (unsigned int) regs->gpr[rd];
op->val = (val ? __builtin_ctz(val) : 32);
goto logical_done;
#ifdef __powerpc64__
case 570: /* cnttzd */
if (!cpu_has_feature(CPU_FTR_ARCH_300))
- return -1;
+ goto unknown_opcode;
val = regs->gpr[rd];
op->val = (val ? __builtin_ctzl(val) : 64);
goto logical_done;
@@ -1948,7 +2197,7 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
case 826: /* sradi with sh_5 = 0 */
case 827: /* sradi with sh_5 = 1 */
op->type = COMPUTE + SETREG + SETXER;
- sh = rb | ((instr & 2) << 4);
+ sh = rb | ((word & 2) << 4);
ival = (signed long int) regs->gpr[rd];
op->val = ival >> sh;
op->xerval = regs->xer;
@@ -1962,9 +2211,9 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
case 890: /* extswsli with sh_5 = 0 */
case 891: /* extswsli with sh_5 = 1 */
if (!cpu_has_feature(CPU_FTR_ARCH_300))
- return -1;
+ goto unknown_opcode;
op->type = COMPUTE + SETREG;
- sh = rb | ((instr & 2) << 4);
+ sh = rb | ((word & 2) << 4);
val = (signed int) regs->gpr[rd];
if (sh)
op->val = ROTATE(val, sh) & MASK64(0, 63 - sh);
@@ -1979,34 +2228,34 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
*/
case 54: /* dcbst */
op->type = MKOP(CACHEOP, DCBST, 0);
- op->ea = xform_ea(instr, regs);
+ op->ea = xform_ea(word, regs);
return 0;
case 86: /* dcbf */
op->type = MKOP(CACHEOP, DCBF, 0);
- op->ea = xform_ea(instr, regs);
+ op->ea = xform_ea(word, regs);
return 0;
case 246: /* dcbtst */
op->type = MKOP(CACHEOP, DCBTST, 0);
- op->ea = xform_ea(instr, regs);
+ op->ea = xform_ea(word, regs);
op->reg = rd;
return 0;
case 278: /* dcbt */
op->type = MKOP(CACHEOP, DCBTST, 0);
- op->ea = xform_ea(instr, regs);
+ op->ea = xform_ea(word, regs);
op->reg = rd;
return 0;
case 982: /* icbi */
op->type = MKOP(CACHEOP, ICBI, 0);
- op->ea = xform_ea(instr, regs);
+ op->ea = xform_ea(word, regs);
return 0;
case 1014: /* dcbz */
op->type = MKOP(CACHEOP, DCBZ, 0);
- op->ea = xform_ea(instr, regs);
+ op->ea = xform_ea(word, regs);
return 0;
}
break;
@@ -2019,14 +2268,14 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
op->update_reg = ra;
op->reg = rd;
op->val = regs->gpr[rd];
- u = (instr >> 20) & UPDATE;
+ u = (word >> 20) & UPDATE;
op->vsx_flags = 0;
switch (opcode) {
case 31:
- u = instr & UPDATE;
- op->ea = xform_ea(instr, regs);
- switch ((instr >> 1) & 0x3ff) {
+ u = word & UPDATE;
+ op->ea = xform_ea(word, regs);
+ switch ((word >> 1) & 0x3ff) {
case 20: /* lwarx */
op->type = MKOP(LARX, 0, 4);
break;
@@ -2271,25 +2520,27 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
#ifdef CONFIG_VSX
case 12: /* lxsiwzx */
- op->reg = rd | ((instr & 1) << 5);
+ op->reg = rd | ((word & 1) << 5);
op->type = MKOP(LOAD_VSX, 0, 4);
op->element_size = 8;
break;
case 76: /* lxsiwax */
- op->reg = rd | ((instr & 1) << 5);
+ op->reg = rd | ((word & 1) << 5);
op->type = MKOP(LOAD_VSX, SIGNEXT, 4);
op->element_size = 8;
break;
case 140: /* stxsiwx */
- op->reg = rd | ((instr & 1) << 5);
+ op->reg = rd | ((word & 1) << 5);
op->type = MKOP(STORE_VSX, 0, 4);
op->element_size = 8;
break;
case 268: /* lxvx */
- op->reg = rd | ((instr & 1) << 5);
+ if (!cpu_has_feature(CPU_FTR_ARCH_300))
+ goto unknown_opcode;
+ op->reg = rd | ((word & 1) << 5);
op->type = MKOP(LOAD_VSX, 0, 16);
op->element_size = 16;
op->vsx_flags = VSX_CHECK_VEC;
@@ -2298,33 +2549,47 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
case 269: /* lxvl */
case 301: { /* lxvll */
int nb;
- op->reg = rd | ((instr & 1) << 5);
+ if (!cpu_has_feature(CPU_FTR_ARCH_300))
+ goto unknown_opcode;
+ op->reg = rd | ((word & 1) << 5);
op->ea = ra ? regs->gpr[ra] : 0;
nb = regs->gpr[rb] & 0xff;
if (nb > 16)
nb = 16;
op->type = MKOP(LOAD_VSX, 0, nb);
op->element_size = 16;
- op->vsx_flags = ((instr & 0x20) ? VSX_LDLEFT : 0) |
+ op->vsx_flags = ((word & 0x20) ? VSX_LDLEFT : 0) |
VSX_CHECK_VEC;
break;
}
case 332: /* lxvdsx */
- op->reg = rd | ((instr & 1) << 5);
+ op->reg = rd | ((word & 1) << 5);
op->type = MKOP(LOAD_VSX, 0, 8);
op->element_size = 8;
op->vsx_flags = VSX_SPLAT;
break;
+ case 333: /* lxvpx */
+ if (!cpu_has_feature(CPU_FTR_ARCH_31))
+ goto unknown_opcode;
+ op->reg = VSX_REGISTER_XTP(rd);
+ op->type = MKOP(LOAD_VSX, 0, 32);
+ op->element_size = 32;
+ break;
+
case 364: /* lxvwsx */
- op->reg = rd | ((instr & 1) << 5);
+ if (!cpu_has_feature(CPU_FTR_ARCH_300))
+ goto unknown_opcode;
+ op->reg = rd | ((word & 1) << 5);
op->type = MKOP(LOAD_VSX, 0, 4);
op->element_size = 4;
op->vsx_flags = VSX_SPLAT | VSX_CHECK_VEC;
break;
case 396: /* stxvx */
- op->reg = rd | ((instr & 1) << 5);
+ if (!cpu_has_feature(CPU_FTR_ARCH_300))
+ goto unknown_opcode;
+ op->reg = rd | ((word & 1) << 5);
op->type = MKOP(STORE_VSX, 0, 16);
op->element_size = 16;
op->vsx_flags = VSX_CHECK_VEC;
@@ -2333,118 +2598,143 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
case 397: /* stxvl */
case 429: { /* stxvll */
int nb;
- op->reg = rd | ((instr & 1) << 5);
+ if (!cpu_has_feature(CPU_FTR_ARCH_300))
+ goto unknown_opcode;
+ op->reg = rd | ((word & 1) << 5);
op->ea = ra ? regs->gpr[ra] : 0;
nb = regs->gpr[rb] & 0xff;
if (nb > 16)
nb = 16;
op->type = MKOP(STORE_VSX, 0, nb);
op->element_size = 16;
- op->vsx_flags = ((instr & 0x20) ? VSX_LDLEFT : 0) |
+ op->vsx_flags = ((word & 0x20) ? VSX_LDLEFT : 0) |
VSX_CHECK_VEC;
break;
}
+ case 461: /* stxvpx */
+ if (!cpu_has_feature(CPU_FTR_ARCH_31))
+ goto unknown_opcode;
+ op->reg = VSX_REGISTER_XTP(rd);
+ op->type = MKOP(STORE_VSX, 0, 32);
+ op->element_size = 32;
+ break;
case 524: /* lxsspx */
- op->reg = rd | ((instr & 1) << 5);
+ op->reg = rd | ((word & 1) << 5);
op->type = MKOP(LOAD_VSX, 0, 4);
op->element_size = 8;
op->vsx_flags = VSX_FPCONV;
break;
case 588: /* lxsdx */
- op->reg = rd | ((instr & 1) << 5);
+ op->reg = rd | ((word & 1) << 5);
op->type = MKOP(LOAD_VSX, 0, 8);
op->element_size = 8;
break;
case 652: /* stxsspx */
- op->reg = rd | ((instr & 1) << 5);
+ op->reg = rd | ((word & 1) << 5);
op->type = MKOP(STORE_VSX, 0, 4);
op->element_size = 8;
op->vsx_flags = VSX_FPCONV;
break;
case 716: /* stxsdx */
- op->reg = rd | ((instr & 1) << 5);
+ op->reg = rd | ((word & 1) << 5);
op->type = MKOP(STORE_VSX, 0, 8);
op->element_size = 8;
break;
case 780: /* lxvw4x */
- op->reg = rd | ((instr & 1) << 5);
+ op->reg = rd | ((word & 1) << 5);
op->type = MKOP(LOAD_VSX, 0, 16);
op->element_size = 4;
break;
case 781: /* lxsibzx */
- op->reg = rd | ((instr & 1) << 5);
+ if (!cpu_has_feature(CPU_FTR_ARCH_300))
+ goto unknown_opcode;
+ op->reg = rd | ((word & 1) << 5);
op->type = MKOP(LOAD_VSX, 0, 1);
op->element_size = 8;
op->vsx_flags = VSX_CHECK_VEC;
break;
case 812: /* lxvh8x */
- op->reg = rd | ((instr & 1) << 5);
+ if (!cpu_has_feature(CPU_FTR_ARCH_300))
+ goto unknown_opcode;
+ op->reg = rd | ((word & 1) << 5);
op->type = MKOP(LOAD_VSX, 0, 16);
op->element_size = 2;
op->vsx_flags = VSX_CHECK_VEC;
break;
case 813: /* lxsihzx */
- op->reg = rd | ((instr & 1) << 5);
+ if (!cpu_has_feature(CPU_FTR_ARCH_300))
+ goto unknown_opcode;
+ op->reg = rd | ((word & 1) << 5);
op->type = MKOP(LOAD_VSX, 0, 2);
op->element_size = 8;
op->vsx_flags = VSX_CHECK_VEC;
break;
case 844: /* lxvd2x */
- op->reg = rd | ((instr & 1) << 5);
+ op->reg = rd | ((word & 1) << 5);
op->type = MKOP(LOAD_VSX, 0, 16);
op->element_size = 8;
break;
case 876: /* lxvb16x */
- op->reg = rd | ((instr & 1) << 5);
+ if (!cpu_has_feature(CPU_FTR_ARCH_300))
+ goto unknown_opcode;
+ op->reg = rd | ((word & 1) << 5);
op->type = MKOP(LOAD_VSX, 0, 16);
op->element_size = 1;
op->vsx_flags = VSX_CHECK_VEC;
break;
case 908: /* stxvw4x */
- op->reg = rd | ((instr & 1) << 5);
+ op->reg = rd | ((word & 1) << 5);
op->type = MKOP(STORE_VSX, 0, 16);
op->element_size = 4;
break;
case 909: /* stxsibx */
- op->reg = rd | ((instr & 1) << 5);
+ if (!cpu_has_feature(CPU_FTR_ARCH_300))
+ goto unknown_opcode;
+ op->reg = rd | ((word & 1) << 5);
op->type = MKOP(STORE_VSX, 0, 1);
op->element_size = 8;
op->vsx_flags = VSX_CHECK_VEC;
break;
case 940: /* stxvh8x */
- op->reg = rd | ((instr & 1) << 5);
+ if (!cpu_has_feature(CPU_FTR_ARCH_300))
+ goto unknown_opcode;
+ op->reg = rd | ((word & 1) << 5);
op->type = MKOP(STORE_VSX, 0, 16);
op->element_size = 2;
op->vsx_flags = VSX_CHECK_VEC;
break;
case 941: /* stxsihx */
- op->reg = rd | ((instr & 1) << 5);
+ if (!cpu_has_feature(CPU_FTR_ARCH_300))
+ goto unknown_opcode;
+ op->reg = rd | ((word & 1) << 5);
op->type = MKOP(STORE_VSX, 0, 2);
op->element_size = 8;
op->vsx_flags = VSX_CHECK_VEC;
break;
case 972: /* stxvd2x */
- op->reg = rd | ((instr & 1) << 5);
+ op->reg = rd | ((word & 1) << 5);
op->type = MKOP(STORE_VSX, 0, 16);
op->element_size = 8;
break;
case 1004: /* stxvb16x */
- op->reg = rd | ((instr & 1) << 5);
+ if (!cpu_has_feature(CPU_FTR_ARCH_300))
+ goto unknown_opcode;
+ op->reg = rd | ((word & 1) << 5);
op->type = MKOP(STORE_VSX, 0, 16);
op->element_size = 1;
op->vsx_flags = VSX_CHECK_VEC;
@@ -2457,80 +2747,80 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
case 32: /* lwz */
case 33: /* lwzu */
op->type = MKOP(LOAD, u, 4);
- op->ea = dform_ea(instr, regs);
+ op->ea = dform_ea(word, regs);
break;
case 34: /* lbz */
case 35: /* lbzu */
op->type = MKOP(LOAD, u, 1);
- op->ea = dform_ea(instr, regs);
+ op->ea = dform_ea(word, regs);
break;
case 36: /* stw */
case 37: /* stwu */
op->type = MKOP(STORE, u, 4);
- op->ea = dform_ea(instr, regs);
+ op->ea = dform_ea(word, regs);
break;
case 38: /* stb */
case 39: /* stbu */
op->type = MKOP(STORE, u, 1);
- op->ea = dform_ea(instr, regs);
+ op->ea = dform_ea(word, regs);
break;
case 40: /* lhz */
case 41: /* lhzu */
op->type = MKOP(LOAD, u, 2);
- op->ea = dform_ea(instr, regs);
+ op->ea = dform_ea(word, regs);
break;
case 42: /* lha */
case 43: /* lhau */
op->type = MKOP(LOAD, SIGNEXT | u, 2);
- op->ea = dform_ea(instr, regs);
+ op->ea = dform_ea(word, regs);
break;
case 44: /* sth */
case 45: /* sthu */
op->type = MKOP(STORE, u, 2);
- op->ea = dform_ea(instr, regs);
+ op->ea = dform_ea(word, regs);
break;
case 46: /* lmw */
if (ra >= rd)
break; /* invalid form, ra in range to load */
op->type = MKOP(LOAD_MULTI, 0, 4 * (32 - rd));
- op->ea = dform_ea(instr, regs);
+ op->ea = dform_ea(word, regs);
break;
case 47: /* stmw */
op->type = MKOP(STORE_MULTI, 0, 4 * (32 - rd));
- op->ea = dform_ea(instr, regs);
+ op->ea = dform_ea(word, regs);
break;
#ifdef CONFIG_PPC_FPU
case 48: /* lfs */
case 49: /* lfsu */
op->type = MKOP(LOAD_FP, u | FPCONV, 4);
- op->ea = dform_ea(instr, regs);
+ op->ea = dform_ea(word, regs);
break;
case 50: /* lfd */
case 51: /* lfdu */
op->type = MKOP(LOAD_FP, u, 8);
- op->ea = dform_ea(instr, regs);
+ op->ea = dform_ea(word, regs);
break;
case 52: /* stfs */
case 53: /* stfsu */
op->type = MKOP(STORE_FP, u | FPCONV, 4);
- op->ea = dform_ea(instr, regs);
+ op->ea = dform_ea(word, regs);
break;
case 54: /* stfd */
case 55: /* stfdu */
op->type = MKOP(STORE_FP, u, 8);
- op->ea = dform_ea(instr, regs);
+ op->ea = dform_ea(word, regs);
break;
#endif
@@ -2538,26 +2828,30 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
case 56: /* lq */
if (!((rd & 1) || (rd == ra)))
op->type = MKOP(LOAD, 0, 16);
- op->ea = dqform_ea(instr, regs);
+ op->ea = dqform_ea(word, regs);
break;
#endif
#ifdef CONFIG_VSX
case 57: /* lfdp, lxsd, lxssp */
- op->ea = dsform_ea(instr, regs);
- switch (instr & 3) {
+ op->ea = dsform_ea(word, regs);
+ switch (word & 3) {
case 0: /* lfdp */
if (rd & 1)
break; /* reg must be even */
op->type = MKOP(LOAD_FP, 0, 16);
break;
case 2: /* lxsd */
+ if (!cpu_has_feature(CPU_FTR_ARCH_300))
+ goto unknown_opcode;
op->reg = rd + 32;
op->type = MKOP(LOAD_VSX, 0, 8);
op->element_size = 8;
op->vsx_flags = VSX_CHECK_VEC;
break;
case 3: /* lxssp */
+ if (!cpu_has_feature(CPU_FTR_ARCH_300))
+ goto unknown_opcode;
op->reg = rd + 32;
op->type = MKOP(LOAD_VSX, 0, 4);
op->element_size = 8;
@@ -2569,8 +2863,8 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
#ifdef __powerpc64__
case 58: /* ld[u], lwa */
- op->ea = dsform_ea(instr, regs);
- switch (instr & 3) {
+ op->ea = dsform_ea(word, regs);
+ switch (word & 3) {
case 0: /* ld */
op->type = MKOP(LOAD, 0, 8);
break;
@@ -2585,17 +2879,35 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
#endif
#ifdef CONFIG_VSX
+ case 6:
+ if (!cpu_has_feature(CPU_FTR_ARCH_31))
+ goto unknown_opcode;
+ op->ea = dqform_ea(word, regs);
+ op->reg = VSX_REGISTER_XTP(rd);
+ op->element_size = 32;
+ switch (word & 0xf) {
+ case 0: /* lxvp */
+ op->type = MKOP(LOAD_VSX, 0, 32);
+ break;
+ case 1: /* stxvp */
+ op->type = MKOP(STORE_VSX, 0, 32);
+ break;
+ }
+ break;
+
case 61: /* stfdp, lxv, stxsd, stxssp, stxv */
- switch (instr & 7) {
+ switch (word & 7) {
case 0: /* stfdp with LSB of DS field = 0 */
case 4: /* stfdp with LSB of DS field = 1 */
- op->ea = dsform_ea(instr, regs);
+ op->ea = dsform_ea(word, regs);
op->type = MKOP(STORE_FP, 0, 16);
break;
case 1: /* lxv */
- op->ea = dqform_ea(instr, regs);
- if (instr & 8)
+ if (!cpu_has_feature(CPU_FTR_ARCH_300))
+ goto unknown_opcode;
+ op->ea = dqform_ea(word, regs);
+ if (word & 8)
op->reg = rd + 32;
op->type = MKOP(LOAD_VSX, 0, 16);
op->element_size = 16;
@@ -2604,7 +2916,9 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
case 2: /* stxsd with LSB of DS field = 0 */
case 6: /* stxsd with LSB of DS field = 1 */
- op->ea = dsform_ea(instr, regs);
+ if (!cpu_has_feature(CPU_FTR_ARCH_300))
+ goto unknown_opcode;
+ op->ea = dsform_ea(word, regs);
op->reg = rd + 32;
op->type = MKOP(STORE_VSX, 0, 8);
op->element_size = 8;
@@ -2613,7 +2927,9 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
case 3: /* stxssp with LSB of DS field = 0 */
case 7: /* stxssp with LSB of DS field = 1 */
- op->ea = dsform_ea(instr, regs);
+ if (!cpu_has_feature(CPU_FTR_ARCH_300))
+ goto unknown_opcode;
+ op->ea = dsform_ea(word, regs);
op->reg = rd + 32;
op->type = MKOP(STORE_VSX, 0, 4);
op->element_size = 8;
@@ -2621,8 +2937,10 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
break;
case 5: /* stxv */
- op->ea = dqform_ea(instr, regs);
- if (instr & 8)
+ if (!cpu_has_feature(CPU_FTR_ARCH_300))
+ goto unknown_opcode;
+ op->ea = dqform_ea(word, regs);
+ if (word & 8)
op->reg = rd + 32;
op->type = MKOP(STORE_VSX, 0, 16);
op->element_size = 16;
@@ -2634,8 +2952,8 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
#ifdef __powerpc64__
case 62: /* std[u] */
- op->ea = dsform_ea(instr, regs);
- switch (instr & 3) {
+ op->ea = dsform_ea(word, regs);
+ switch (word & 3) {
case 0: /* std */
op->type = MKOP(STORE, 0, 8);
break;
@@ -2648,10 +2966,161 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
break;
}
break;
+ case 1: /* Prefixed instructions */
+ if (!cpu_has_feature(CPU_FTR_ARCH_31))
+ goto unknown_opcode;
+
+ prefix_r = GET_PREFIX_R(word);
+ ra = GET_PREFIX_RA(suffix);
+ op->update_reg = ra;
+ rd = (suffix >> 21) & 0x1f;
+ op->reg = rd;
+ op->val = regs->gpr[rd];
+
+ suffixopcode = get_op(suffix);
+ prefixtype = (word >> 24) & 0x3;
+ switch (prefixtype) {
+ case 0: /* Type 00 Eight-Byte Load/Store */
+ if (prefix_r && ra)
+ break;
+ op->ea = mlsd_8lsd_ea(word, suffix, regs);
+ switch (suffixopcode) {
+ case 41: /* plwa */
+ op->type = MKOP(LOAD, PREFIXED | SIGNEXT, 4);
+ break;
+#ifdef CONFIG_VSX
+ case 42: /* plxsd */
+ op->reg = rd + 32;
+ op->type = MKOP(LOAD_VSX, PREFIXED, 8);
+ op->element_size = 8;
+ op->vsx_flags = VSX_CHECK_VEC;
+ break;
+ case 43: /* plxssp */
+ op->reg = rd + 32;
+ op->type = MKOP(LOAD_VSX, PREFIXED, 4);
+ op->element_size = 8;
+ op->vsx_flags = VSX_FPCONV | VSX_CHECK_VEC;
+ break;
+ case 46: /* pstxsd */
+ op->reg = rd + 32;
+ op->type = MKOP(STORE_VSX, PREFIXED, 8);
+ op->element_size = 8;
+ op->vsx_flags = VSX_CHECK_VEC;
+ break;
+ case 47: /* pstxssp */
+ op->reg = rd + 32;
+ op->type = MKOP(STORE_VSX, PREFIXED, 4);
+ op->element_size = 8;
+ op->vsx_flags = VSX_FPCONV | VSX_CHECK_VEC;
+ break;
+ case 51: /* plxv1 */
+ op->reg += 32;
+ fallthrough;
+ case 50: /* plxv0 */
+ op->type = MKOP(LOAD_VSX, PREFIXED, 16);
+ op->element_size = 16;
+ op->vsx_flags = VSX_CHECK_VEC;
+ break;
+ case 55: /* pstxv1 */
+ op->reg = rd + 32;
+ fallthrough;
+ case 54: /* pstxv0 */
+ op->type = MKOP(STORE_VSX, PREFIXED, 16);
+ op->element_size = 16;
+ op->vsx_flags = VSX_CHECK_VEC;
+ break;
+#endif /* CONFIG_VSX */
+ case 56: /* plq */
+ op->type = MKOP(LOAD, PREFIXED, 16);
+ break;
+ case 57: /* pld */
+ op->type = MKOP(LOAD, PREFIXED, 8);
+ break;
+#ifdef CONFIG_VSX
+ case 58: /* plxvp */
+ op->reg = VSX_REGISTER_XTP(rd);
+ op->type = MKOP(LOAD_VSX, PREFIXED, 32);
+ op->element_size = 32;
+ break;
+#endif /* CONFIG_VSX */
+ case 60: /* pstq */
+ op->type = MKOP(STORE, PREFIXED, 16);
+ break;
+ case 61: /* pstd */
+ op->type = MKOP(STORE, PREFIXED, 8);
+ break;
+#ifdef CONFIG_VSX
+ case 62: /* pstxvp */
+ op->reg = VSX_REGISTER_XTP(rd);
+ op->type = MKOP(STORE_VSX, PREFIXED, 32);
+ op->element_size = 32;
+ break;
+#endif /* CONFIG_VSX */
+ }
+ break;
+ case 1: /* Type 01 Eight-Byte Register-to-Register */
+ break;
+ case 2: /* Type 10 Modified Load/Store */
+ if (prefix_r && ra)
+ break;
+ op->ea = mlsd_8lsd_ea(word, suffix, regs);
+ switch (suffixopcode) {
+ case 32: /* plwz */
+ op->type = MKOP(LOAD, PREFIXED, 4);
+ break;
+ case 34: /* plbz */
+ op->type = MKOP(LOAD, PREFIXED, 1);
+ break;
+ case 36: /* pstw */
+ op->type = MKOP(STORE, PREFIXED, 4);
+ break;
+ case 38: /* pstb */
+ op->type = MKOP(STORE, PREFIXED, 1);
+ break;
+ case 40: /* plhz */
+ op->type = MKOP(LOAD, PREFIXED, 2);
+ break;
+ case 42: /* plha */
+ op->type = MKOP(LOAD, PREFIXED | SIGNEXT, 2);
+ break;
+ case 44: /* psth */
+ op->type = MKOP(STORE, PREFIXED, 2);
+ break;
+ case 48: /* plfs */
+ op->type = MKOP(LOAD_FP, PREFIXED | FPCONV, 4);
+ break;
+ case 50: /* plfd */
+ op->type = MKOP(LOAD_FP, PREFIXED, 8);
+ break;
+ case 52: /* pstfs */
+ op->type = MKOP(STORE_FP, PREFIXED | FPCONV, 4);
+ break;
+ case 54: /* pstfd */
+ op->type = MKOP(STORE_FP, PREFIXED, 8);
+ break;
+ }
+ break;
+ case 3: /* Type 11 Modified Register-to-Register */
+ break;
+ }
#endif /* __powerpc64__ */
}
+ if (OP_IS_LOAD_STORE(op->type) && (op->type & UPDATE)) {
+ switch (GETTYPE(op->type)) {
+ case LOAD:
+ if (ra == rd)
+ goto unknown_opcode;
+ fallthrough;
+ case STORE:
+ case LOAD_FP:
+ case STORE_FP:
+ if (ra == 0)
+ goto unknown_opcode;
+ }
+ }
+
#ifdef CONFIG_VSX
if ((GETTYPE(op->type) == LOAD_VSX ||
GETTYPE(op->type) == STORE_VSX) &&
@@ -2662,8 +3131,12 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
return 0;
+ unknown_opcode:
+ op->type = UNKNOWN;
+ return 0;
+
logical_done:
- if (instr & 1)
+ if (word & 1)
set_cr0(regs, op);
logical_done_nocc:
op->reg = ra;
@@ -2671,7 +3144,7 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
return 1;
arith_done:
- if (instr & 1)
+ if (word & 1)
set_cr0(regs, op);
compute_done:
op->reg = rd;
@@ -2701,15 +3174,6 @@ NOKPROBE_SYMBOL(analyse_instr);
*/
static nokprobe_inline int handle_stack_update(unsigned long ea, struct pt_regs *regs)
{
-#ifdef CONFIG_PPC32
- /*
- * Check if we will touch kernel stack overflow
- */
- if (ea - STACK_INT_FRAME_SIZE <= current->thread.ksp_limit) {
- printk(KERN_CRIT "Can't kprobe this since kernel stack would overflow.\n");
- return -EINVAL;
- }
-#endif /* CONFIG_PPC32 */
/*
* Check if we already set since that means we'll
* lose the previous value.
@@ -2756,7 +3220,7 @@ void emulate_update_regs(struct pt_regs *regs, struct instruction_op *op)
{
unsigned long next_pc;
- next_pc = truncate_if_32bit(regs->msr, regs->nip + 4);
+ next_pc = truncate_if_32bit(regs->msr, regs->nip + GETLENGTH(op->type));
switch (GETTYPE(op->type)) {
case COMPUTE:
if (op->type & SETREG)
@@ -2787,12 +3251,14 @@ void emulate_update_regs(struct pt_regs *regs, struct instruction_op *op)
case BARRIER_EIEIO:
eieio();
break;
+#ifdef CONFIG_PPC64
case BARRIER_LWSYNC:
asm volatile("lwsync" : : : "memory");
break;
case BARRIER_PTESYNC:
asm volatile("ptesync" : : : "memory");
break;
+#endif
}
break;
@@ -2831,7 +3297,7 @@ void emulate_update_regs(struct pt_regs *regs, struct instruction_op *op)
default:
WARN_ON_ONCE(1);
}
- regs->nip = next_pc;
+ regs_set_return_ip(regs, next_pc);
}
NOKPROBE_SYMBOL(emulate_update_regs);
@@ -2910,7 +3376,7 @@ int emulate_loadstore(struct pt_regs *regs, struct instruction_op *op)
__put_user_asmx(op->val, ea, err, "stbcx.", cr);
break;
case 2:
- __put_user_asmx(op->val, ea, err, "stbcx.", cr);
+ __put_user_asmx(op->val, ea, err, "sthcx.", cr);
break;
#endif
case 4:
@@ -3101,7 +3567,7 @@ NOKPROBE_SYMBOL(emulate_loadstore);
* or -1 if the instruction is one that should not be stepped,
* such as an rfid, or a mtmsrd that would clear MSR_RI.
*/
-int emulate_step(struct pt_regs *regs, unsigned int instr)
+int emulate_step(struct pt_regs *regs, ppc_inst_t instr)
{
struct instruction_op op;
int r, err, type;
@@ -3169,38 +3635,31 @@ int emulate_step(struct pt_regs *regs, unsigned int instr)
/* can't step mtmsr[d] that would clear MSR_RI */
return -1;
/* here op.val is the mask of bits to change */
- regs->msr = (regs->msr & ~op.val) | (val & op.val);
+ regs_set_return_msr(regs, (regs->msr & ~op.val) | (val & op.val));
goto instr_done;
-#ifdef CONFIG_PPC64
case SYSCALL: /* sc */
/*
- * N.B. this uses knowledge about how the syscall
- * entry code works. If that is changed, this will
- * need to be changed also.
+ * Per ISA v3.1, section 7.5.15 'Trace Interrupt', we can't
+ * single step a system call instruction:
+ *
+ * Successful completion for an instruction means that the
+ * instruction caused no other interrupt. Thus a Trace
+ * interrupt never occurs for a System Call or System Call
+ * Vectored instruction, or for a Trap instruction that
+ * traps.
*/
- if (regs->gpr[0] == 0x1ebe &&
- cpu_has_feature(CPU_FTR_REAL_LE)) {
- regs->msr ^= MSR_LE;
- goto instr_done;
- }
- regs->gpr[9] = regs->gpr[13];
- regs->gpr[10] = MSR_KERNEL;
- regs->gpr[11] = regs->nip + 4;
- regs->gpr[12] = regs->msr & MSR_MASK;
- regs->gpr[13] = (unsigned long) get_paca();
- regs->nip = (unsigned long) &system_call_common;
- regs->msr = MSR_KERNEL;
- return 1;
-
+ return -1;
+ case SYSCALL_VECTORED_0: /* scv 0 */
+ return -1;
case RFI:
return -1;
-#endif
}
return 0;
instr_done:
- regs->nip = truncate_if_32bit(regs->msr, regs->nip + 4);
+ regs_set_return_ip(regs,
+ truncate_if_32bit(regs->msr, regs->nip + GETLENGTH(op.type)));
return 1;
}
NOKPROBE_SYMBOL(emulate_step);
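As a rough illustration (not part of the patch above), a kprobe- or xmon-style caller might drive the reworked entry point along these lines; the wrapper name try_emulate_insn() is hypothetical and error handling is elided:

#include <linux/ptrace.h>
#include <asm/inst.h>
#include <asm/sstep.h>

/* Hypothetical wrapper around the ppc_inst_t-based emulate_step(). */
static int try_emulate_insn(struct pt_regs *regs, u32 *ip)
{
	ppc_inst_t insn = ppc_inst_read(ip);

	/*
	 * Returns 1 if the instruction was emulated and regs (including
	 * NIP) were advanced, 0 if it could not be emulated, and -1 for
	 * instructions that must not be single stepped, e.g. rfid, sc/scv,
	 * or an mtmsr[d] that would clear MSR_RI.
	 */
	return emulate_step(regs, insn);
}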
diff --git a/arch/powerpc/lib/string_64.S b/arch/powerpc/lib/string_64.S
index 169872bc0892..df41ce06f86b 100644
--- a/arch/powerpc/lib/string_64.S
+++ b/arch/powerpc/lib/string_64.S
@@ -11,11 +11,6 @@
#include <asm/asm-offsets.h>
#include <asm/export.h>
- .section ".toc","aw"
-PPC64_CACHES:
- .tc ppc64_caches[TC],ppc64_caches
- .section ".text"
-
/**
* __arch_clear_user: - Zero a block of memory in user space, with less checking.
* @to: Destination address, in user space.
@@ -133,7 +128,7 @@ err1; stb r0,0(r3)
blr
.Llong_clear:
- ld r5,PPC64_CACHES@toc(r2)
+ LOAD_REG_ADDR(r5, ppc64_caches)
bf cr7*4+0,11f
err2; std r0,0(r3)
diff --git a/arch/powerpc/lib/test-code-patching.c b/arch/powerpc/lib/test-code-patching.c
new file mode 100644
index 000000000000..c44823292f73
--- /dev/null
+++ b/arch/powerpc/lib/test-code-patching.c
@@ -0,0 +1,362 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright 2008 Michael Ellerman, IBM Corporation.
+ */
+
+#include <linux/vmalloc.h>
+#include <linux/init.h>
+
+#include <asm/code-patching.h>
+
+static int __init instr_is_branch_to_addr(const u32 *instr, unsigned long addr)
+{
+ if (instr_is_branch_iform(ppc_inst_read(instr)) ||
+ instr_is_branch_bform(ppc_inst_read(instr)))
+ return branch_target(instr) == addr;
+
+ return 0;
+}
+
+static void __init test_trampoline(void)
+{
+ asm ("nop;nop;\n");
+}
+
+#define check(x) do { \
+ if (!(x)) \
+ pr_err("code-patching: test failed at line %d\n", __LINE__); \
+} while (0)
+
+static void __init test_branch_iform(void)
+{
+ int err;
+ ppc_inst_t instr;
+ u32 tmp[2];
+ u32 *iptr = tmp;
+ unsigned long addr = (unsigned long)tmp;
+
+ /* The simplest case, branch to self, no flags */
+ check(instr_is_branch_iform(ppc_inst(0x48000000)));
+ /* All bits of target set, and flags */
+ check(instr_is_branch_iform(ppc_inst(0x4bffffff)));
+ /* High bit of opcode set, which is wrong */
+ check(!instr_is_branch_iform(ppc_inst(0xcbffffff)));
+ /* Middle bits of opcode set, which is wrong */
+ check(!instr_is_branch_iform(ppc_inst(0x7bffffff)));
+
+ /* Simplest case, branch to self with link */
+ check(instr_is_branch_iform(ppc_inst(0x48000001)));
+ /* All bits of targets set */
+ check(instr_is_branch_iform(ppc_inst(0x4bfffffd)));
+ /* Some bits of targets set */
+ check(instr_is_branch_iform(ppc_inst(0x4bff00fd)));
+ /* Must be a valid branch to start with */
+ check(!instr_is_branch_iform(ppc_inst(0x7bfffffd)));
+
+ /* Absolute branch to 0x100 */
+ ppc_inst_write(iptr, ppc_inst(0x48000103));
+ check(instr_is_branch_to_addr(iptr, 0x100));
+ /* Absolute branch to 0x420fc */
+ ppc_inst_write(iptr, ppc_inst(0x480420ff));
+ check(instr_is_branch_to_addr(iptr, 0x420fc));
+ /* Maximum positive relative branch, + 32 MB - 4B */
+ ppc_inst_write(iptr, ppc_inst(0x49fffffc));
+ check(instr_is_branch_to_addr(iptr, addr + 0x1FFFFFC));
+ /* Smallest negative relative branch, - 4B */
+ ppc_inst_write(iptr, ppc_inst(0x4bfffffc));
+ check(instr_is_branch_to_addr(iptr, addr - 4));
+ /* Largest negative relative branch, - 32 MB */
+ ppc_inst_write(iptr, ppc_inst(0x4a000000));
+ check(instr_is_branch_to_addr(iptr, addr - 0x2000000));
+
+ /* Branch to self, with link */
+ err = create_branch(&instr, iptr, addr, BRANCH_SET_LINK);
+ ppc_inst_write(iptr, instr);
+ check(instr_is_branch_to_addr(iptr, addr));
+
+ /* Branch to self - 0x100, with link */
+ err = create_branch(&instr, iptr, addr - 0x100, BRANCH_SET_LINK);
+ ppc_inst_write(iptr, instr);
+ check(instr_is_branch_to_addr(iptr, addr - 0x100));
+
+ /* Branch to self + 0x100, no link */
+ err = create_branch(&instr, iptr, addr + 0x100, 0);
+ ppc_inst_write(iptr, instr);
+ check(instr_is_branch_to_addr(iptr, addr + 0x100));
+
+ /* Maximum relative negative offset, - 32 MB */
+ err = create_branch(&instr, iptr, addr - 0x2000000, BRANCH_SET_LINK);
+ ppc_inst_write(iptr, instr);
+ check(instr_is_branch_to_addr(iptr, addr - 0x2000000));
+
+ /* Out of range relative negative offset, - 32 MB + 4*/
+ err = create_branch(&instr, iptr, addr - 0x2000004, BRANCH_SET_LINK);
+ check(err);
+
+ /* Out of range relative positive offset, + 32 MB */
+ err = create_branch(&instr, iptr, addr + 0x2000000, BRANCH_SET_LINK);
+ check(err);
+
+ /* Unaligned target */
+ err = create_branch(&instr, iptr, addr + 3, BRANCH_SET_LINK);
+ check(err);
+
+ /* Check flags are masked correctly */
+ err = create_branch(&instr, iptr, addr, 0xFFFFFFFC);
+ ppc_inst_write(iptr, instr);
+ check(instr_is_branch_to_addr(iptr, addr));
+ check(ppc_inst_equal(instr, ppc_inst(0x48000000)));
+}
+
+static void __init test_create_function_call(void)
+{
+ u32 *iptr;
+ unsigned long dest;
+ ppc_inst_t instr;
+
+ /* Check we can create a function call */
+ iptr = (u32 *)ppc_function_entry(test_trampoline);
+ dest = ppc_function_entry(test_create_function_call);
+ create_branch(&instr, iptr, dest, BRANCH_SET_LINK);
+ patch_instruction(iptr, instr);
+ check(instr_is_branch_to_addr(iptr, dest));
+}
+
+static void __init test_branch_bform(void)
+{
+ int err;
+ unsigned long addr;
+ ppc_inst_t instr;
+ u32 tmp[2];
+ u32 *iptr = tmp;
+ unsigned int flags;
+
+ addr = (unsigned long)iptr;
+
+ /* The simplest case, branch to self, no flags */
+ check(instr_is_branch_bform(ppc_inst(0x40000000)));
+ /* All bits of target set, and flags */
+ check(instr_is_branch_bform(ppc_inst(0x43ffffff)));
+ /* High bit of opcode set, which is wrong */
+ check(!instr_is_branch_bform(ppc_inst(0xc3ffffff)));
+ /* Middle bits of opcode set, which is wrong */
+ check(!instr_is_branch_bform(ppc_inst(0x7bffffff)));
+
+ /* Absolute conditional branch to 0x100 */
+ ppc_inst_write(iptr, ppc_inst(0x43ff0103));
+ check(instr_is_branch_to_addr(iptr, 0x100));
+ /* Absolute conditional branch to 0x20fc */
+ ppc_inst_write(iptr, ppc_inst(0x43ff20ff));
+ check(instr_is_branch_to_addr(iptr, 0x20fc));
+ /* Maximum positive relative conditional branch, + 32 KB - 4B */
+ ppc_inst_write(iptr, ppc_inst(0x43ff7ffc));
+ check(instr_is_branch_to_addr(iptr, addr + 0x7FFC));
+ /* Smallest negative relative conditional branch, - 4B */
+ ppc_inst_write(iptr, ppc_inst(0x43fffffc));
+ check(instr_is_branch_to_addr(iptr, addr - 4));
+ /* Largest negative relative conditional branch, - 32 KB */
+ ppc_inst_write(iptr, ppc_inst(0x43ff8000));
+ check(instr_is_branch_to_addr(iptr, addr - 0x8000));
+
+ /* All condition code bits set & link */
+ flags = 0x3ff000 | BRANCH_SET_LINK;
+
+ /* Branch to self */
+ err = create_cond_branch(&instr, iptr, addr, flags);
+ ppc_inst_write(iptr, instr);
+ check(instr_is_branch_to_addr(iptr, addr));
+
+ /* Branch to self - 0x100 */
+ err = create_cond_branch(&instr, iptr, addr - 0x100, flags);
+ ppc_inst_write(iptr, instr);
+ check(instr_is_branch_to_addr(iptr, addr - 0x100));
+
+ /* Branch to self + 0x100 */
+ err = create_cond_branch(&instr, iptr, addr + 0x100, flags);
+ ppc_inst_write(iptr, instr);
+ check(instr_is_branch_to_addr(iptr, addr + 0x100));
+
+ /* Maximum relative negative offset, - 32 KB */
+ err = create_cond_branch(&instr, iptr, addr - 0x8000, flags);
+ ppc_inst_write(iptr, instr);
+ check(instr_is_branch_to_addr(iptr, addr - 0x8000));
+
+ /* Out of range relative negative offset, - 32 KB + 4*/
+ err = create_cond_branch(&instr, iptr, addr - 0x8004, flags);
+ check(err);
+
+ /* Out of range relative positive offset, + 32 KB */
+ err = create_cond_branch(&instr, iptr, addr + 0x8000, flags);
+ check(err);
+
+ /* Unaligned target */
+ err = create_cond_branch(&instr, iptr, addr + 3, flags);
+ check(err);
+
+ /* Check flags are masked correctly */
+ err = create_cond_branch(&instr, iptr, addr, 0xFFFFFFFC);
+ ppc_inst_write(iptr, instr);
+ check(instr_is_branch_to_addr(iptr, addr));
+ check(ppc_inst_equal(instr, ppc_inst(0x43FF0000)));
+}
+
+static void __init test_translate_branch(void)
+{
+ unsigned long addr;
+ void *p, *q;
+ ppc_inst_t instr;
+ void *buf;
+
+ buf = vmalloc(PAGE_ALIGN(0x2000000 + 1));
+ check(buf);
+ if (!buf)
+ return;
+
+ /* Simple case, branch to self moved a little */
+ p = buf;
+ addr = (unsigned long)p;
+ create_branch(&instr, p, addr, 0);
+ ppc_inst_write(p, instr);
+ check(instr_is_branch_to_addr(p, addr));
+ q = p + 4;
+ translate_branch(&instr, q, p);
+ ppc_inst_write(q, instr);
+ check(instr_is_branch_to_addr(q, addr));
+
+ /* Maximum negative case, move b . to addr + 32 MB */
+ p = buf;
+ addr = (unsigned long)p;
+ create_branch(&instr, p, addr, 0);
+ ppc_inst_write(p, instr);
+ q = buf + 0x2000000;
+ translate_branch(&instr, q, p);
+ ppc_inst_write(q, instr);
+ check(instr_is_branch_to_addr(p, addr));
+ check(instr_is_branch_to_addr(q, addr));
+ check(ppc_inst_equal(ppc_inst_read(q), ppc_inst(0x4a000000)));
+
+ /* Maximum positive case, move x to x - 32 MB + 4 */
+ p = buf + 0x2000000;
+ addr = (unsigned long)p;
+ create_branch(&instr, p, addr, 0);
+ ppc_inst_write(p, instr);
+ q = buf + 4;
+ translate_branch(&instr, q, p);
+ ppc_inst_write(q, instr);
+ check(instr_is_branch_to_addr(p, addr));
+ check(instr_is_branch_to_addr(q, addr));
+ check(ppc_inst_equal(ppc_inst_read(q), ppc_inst(0x49fffffc)));
+
+ /* Jump to x + 16 MB moved to x + 20 MB */
+ p = buf;
+ addr = 0x1000000 + (unsigned long)buf;
+ create_branch(&instr, p, addr, BRANCH_SET_LINK);
+ ppc_inst_write(p, instr);
+ q = buf + 0x1400000;
+ translate_branch(&instr, q, p);
+ ppc_inst_write(q, instr);
+ check(instr_is_branch_to_addr(p, addr));
+ check(instr_is_branch_to_addr(q, addr));
+
+ /* Jump to x + 16 MB moved to x - 16 MB + 4 */
+ p = buf + 0x1000000;
+ addr = 0x2000000 + (unsigned long)buf;
+ create_branch(&instr, p, addr, 0);
+ ppc_inst_write(p, instr);
+ q = buf + 4;
+ translate_branch(&instr, q, p);
+ ppc_inst_write(q, instr);
+ check(instr_is_branch_to_addr(p, addr));
+ check(instr_is_branch_to_addr(q, addr));
+
+
+ /* Conditional branch tests */
+
+ /* Simple case, branch to self moved a little */
+ p = buf;
+ addr = (unsigned long)p;
+ create_cond_branch(&instr, p, addr, 0);
+ ppc_inst_write(p, instr);
+ check(instr_is_branch_to_addr(p, addr));
+ q = buf + 4;
+ translate_branch(&instr, q, p);
+ ppc_inst_write(q, instr);
+ check(instr_is_branch_to_addr(q, addr));
+
+ /* Maximum negative case, move b . to addr + 32 KB */
+ p = buf;
+ addr = (unsigned long)p;
+ create_cond_branch(&instr, p, addr, 0xFFFFFFFC);
+ ppc_inst_write(p, instr);
+ q = buf + 0x8000;
+ translate_branch(&instr, q, p);
+ ppc_inst_write(q, instr);
+ check(instr_is_branch_to_addr(p, addr));
+ check(instr_is_branch_to_addr(q, addr));
+ check(ppc_inst_equal(ppc_inst_read(q), ppc_inst(0x43ff8000)));
+
+ /* Maximum positive case, move x to x - 32 KB + 4 */
+ p = buf + 0x8000;
+ addr = (unsigned long)p;
+ create_cond_branch(&instr, p, addr, 0xFFFFFFFC);
+ ppc_inst_write(p, instr);
+ q = buf + 4;
+ translate_branch(&instr, q, p);
+ ppc_inst_write(q, instr);
+ check(instr_is_branch_to_addr(p, addr));
+ check(instr_is_branch_to_addr(q, addr));
+ check(ppc_inst_equal(ppc_inst_read(q), ppc_inst(0x43ff7ffc)));
+
+ /* Jump to x + 12 KB moved to x + 20 KB */
+ p = buf;
+ addr = 0x3000 + (unsigned long)buf;
+ create_cond_branch(&instr, p, addr, BRANCH_SET_LINK);
+ ppc_inst_write(p, instr);
+ q = buf + 0x5000;
+ translate_branch(&instr, q, p);
+ ppc_inst_write(q, instr);
+ check(instr_is_branch_to_addr(p, addr));
+ check(instr_is_branch_to_addr(q, addr));
+
+ /* Jump to x + 8 KB moved to x - 8 KB + 4 */
+ p = buf + 0x2000;
+ addr = 0x4000 + (unsigned long)buf;
+ create_cond_branch(&instr, p, addr, 0);
+ ppc_inst_write(p, instr);
+ q = buf + 4;
+ translate_branch(&instr, q, p);
+ ppc_inst_write(q, instr);
+ check(instr_is_branch_to_addr(p, addr));
+ check(instr_is_branch_to_addr(q, addr));
+
+ /* Free the buffer we were using */
+ vfree(buf);
+}
+
+static void __init test_prefixed_patching(void)
+{
+ u32 *iptr = (u32 *)ppc_function_entry(test_trampoline);
+ u32 expected[2] = {OP_PREFIX << 26, 0};
+ ppc_inst_t inst = ppc_inst_prefix(OP_PREFIX << 26, 0);
+
+ if (!IS_ENABLED(CONFIG_PPC64))
+ return;
+
+ patch_instruction(iptr, inst);
+
+ check(!memcmp(iptr, expected, sizeof(expected)));
+}
+
+static int __init test_code_patching(void)
+{
+ pr_info("Running code patching self-tests ...\n");
+
+ test_branch_iform();
+ test_branch_bform();
+ test_create_function_call();
+ test_translate_branch();
+ test_prefixed_patching();
+
+ return 0;
+}
+late_initcall(test_code_patching);
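As a usage sketch (not part of the new test file), the helpers exercised by these self-tests are typically combined roughly as follows when retargeting a branch at runtime; redirect_branch() is a made-up name and the error handling is only indicative:

#include <linux/errno.h>
#include <asm/code-patching.h>

/* Hypothetical helper: make the branch at 'site' go to 'new_target'. */
static int redirect_branch(u32 *site, unsigned long new_target)
{
	ppc_inst_t insn;

	/* create_branch() fails for unaligned or out-of-range (+/- 32 MB) targets */
	if (create_branch(&insn, site, new_target, BRANCH_SET_LINK))
		return -ERANGE;

	return patch_instruction(site, insn);
}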
diff --git a/arch/powerpc/lib/test_emulate_step.c b/arch/powerpc/lib/test_emulate_step.c
index 42347067739c..23c7805fb7b3 100644
--- a/arch/powerpc/lib/test_emulate_step.c
+++ b/arch/powerpc/lib/test_emulate_step.c
@@ -8,57 +8,50 @@
#define pr_fmt(fmt) "emulate_step_test: " fmt
#include <linux/ptrace.h>
+#include <asm/cpu_has_feature.h>
#include <asm/sstep.h>
#include <asm/ppc-opcode.h>
#include <asm/code-patching.h>
-
-#define IMM_L(i) ((uintptr_t)(i) & 0xffff)
-
-/*
- * Defined with TEST_ prefix so it does not conflict with other
- * definitions.
- */
-#define TEST_LD(r, base, i) (PPC_INST_LD | ___PPC_RT(r) | \
- ___PPC_RA(base) | IMM_L(i))
-#define TEST_LWZ(r, base, i) (PPC_INST_LWZ | ___PPC_RT(r) | \
- ___PPC_RA(base) | IMM_L(i))
-#define TEST_LWZX(t, a, b) (PPC_INST_LWZX | ___PPC_RT(t) | \
- ___PPC_RA(a) | ___PPC_RB(b))
-#define TEST_STD(r, base, i) (PPC_INST_STD | ___PPC_RS(r) | \
- ___PPC_RA(base) | ((i) & 0xfffc))
-#define TEST_LDARX(t, a, b, eh) (PPC_INST_LDARX | ___PPC_RT(t) | \
- ___PPC_RA(a) | ___PPC_RB(b) | \
- __PPC_EH(eh))
-#define TEST_STDCX(s, a, b) (PPC_INST_STDCX | ___PPC_RS(s) | \
- ___PPC_RA(a) | ___PPC_RB(b))
-#define TEST_LFSX(t, a, b) (PPC_INST_LFSX | ___PPC_RT(t) | \
- ___PPC_RA(a) | ___PPC_RB(b))
-#define TEST_STFSX(s, a, b) (PPC_INST_STFSX | ___PPC_RS(s) | \
- ___PPC_RA(a) | ___PPC_RB(b))
-#define TEST_LFDX(t, a, b) (PPC_INST_LFDX | ___PPC_RT(t) | \
- ___PPC_RA(a) | ___PPC_RB(b))
-#define TEST_STFDX(s, a, b) (PPC_INST_STFDX | ___PPC_RS(s) | \
- ___PPC_RA(a) | ___PPC_RB(b))
-#define TEST_LVX(t, a, b) (PPC_INST_LVX | ___PPC_RT(t) | \
- ___PPC_RA(a) | ___PPC_RB(b))
-#define TEST_STVX(s, a, b) (PPC_INST_STVX | ___PPC_RS(s) | \
- ___PPC_RA(a) | ___PPC_RB(b))
-#define TEST_LXVD2X(s, a, b) (PPC_INST_LXVD2X | VSX_XX1((s), R##a, R##b))
-#define TEST_STXVD2X(s, a, b) (PPC_INST_STXVD2X | VSX_XX1((s), R##a, R##b))
-#define TEST_ADD(t, a, b) (PPC_INST_ADD | ___PPC_RT(t) | \
- ___PPC_RA(a) | ___PPC_RB(b))
-#define TEST_ADD_DOT(t, a, b) (PPC_INST_ADD | ___PPC_RT(t) | \
- ___PPC_RA(a) | ___PPC_RB(b) | 0x1)
-#define TEST_ADDC(t, a, b) (PPC_INST_ADDC | ___PPC_RT(t) | \
- ___PPC_RA(a) | ___PPC_RB(b))
-#define TEST_ADDC_DOT(t, a, b) (PPC_INST_ADDC | ___PPC_RT(t) | \
- ___PPC_RA(a) | ___PPC_RB(b) | 0x1)
+#include <asm/inst.h>
#define MAX_SUBTESTS 16
#define IGNORE_GPR(n) (0x1UL << (n))
#define IGNORE_XER (0x1UL << 32)
#define IGNORE_CCR (0x1UL << 33)
+#define NEGATIVE_TEST (0x1UL << 63)
+
+#define TEST_PLD(r, base, i, pr) \
+ ppc_inst_prefix(PPC_PREFIX_8LS | __PPC_PRFX_R(pr) | IMM_H(i), \
+ PPC_INST_PLD | ___PPC_RT(r) | ___PPC_RA(base) | IMM_L(i))
+
+#define TEST_PLWZ(r, base, i, pr) \
+ ppc_inst_prefix(PPC_PREFIX_MLS | __PPC_PRFX_R(pr) | IMM_H(i), \
+ PPC_RAW_LWZ(r, base, i))
+
+#define TEST_PSTD(r, base, i, pr) \
+ ppc_inst_prefix(PPC_PREFIX_8LS | __PPC_PRFX_R(pr) | IMM_H(i), \
+ PPC_INST_PSTD | ___PPC_RT(r) | ___PPC_RA(base) | IMM_L(i))
+
+#define TEST_PLFS(r, base, i, pr) \
+ ppc_inst_prefix(PPC_PREFIX_MLS | __PPC_PRFX_R(pr) | IMM_H(i), \
+ PPC_INST_LFS | ___PPC_RT(r) | ___PPC_RA(base) | IMM_L(i))
+
+#define TEST_PSTFS(r, base, i, pr) \
+ ppc_inst_prefix(PPC_PREFIX_MLS | __PPC_PRFX_R(pr) | IMM_H(i), \
+ PPC_INST_STFS | ___PPC_RT(r) | ___PPC_RA(base) | IMM_L(i))
+
+#define TEST_PLFD(r, base, i, pr) \
+ ppc_inst_prefix(PPC_PREFIX_MLS | __PPC_PRFX_R(pr) | IMM_H(i), \
+ PPC_INST_LFD | ___PPC_RT(r) | ___PPC_RA(base) | IMM_L(i))
+
+#define TEST_PSTFD(r, base, i, pr) \
+ ppc_inst_prefix(PPC_PREFIX_MLS | __PPC_PRFX_R(pr) | IMM_H(i), \
+ PPC_INST_STFD | ___PPC_RT(r) | ___PPC_RA(base) | IMM_L(i))
+
+#define TEST_PADDI(t, a, i, pr) \
+ ppc_inst_prefix(PPC_PREFIX_MLS | __PPC_PRFX_R(pr) | IMM_H(i), \
+ PPC_RAW_ADDI(t, a, i))
static void __init init_pt_regs(struct pt_regs *regs)
{
@@ -103,7 +96,7 @@ static void __init test_ld(void)
regs.gpr[3] = (unsigned long) &a;
/* ld r5, 0(r3) */
- stepped = emulate_step(&regs, TEST_LD(5, 3, 0));
+ stepped = emulate_step(&regs, ppc_inst(PPC_RAW_LD(5, 3, 0)));
if (stepped == 1 && regs.gpr[5] == a)
show_result("ld", "PASS");
@@ -111,6 +104,29 @@ static void __init test_ld(void)
show_result("ld", "FAIL");
}
+static void __init test_pld(void)
+{
+ struct pt_regs regs;
+ unsigned long a = 0x23;
+ int stepped = -1;
+
+ if (!cpu_has_feature(CPU_FTR_ARCH_31)) {
+ show_result("pld", "SKIP (!CPU_FTR_ARCH_31)");
+ return;
+ }
+
+ init_pt_regs(&regs);
+ regs.gpr[3] = (unsigned long)&a;
+
+ /* pld r5, 0(r3), 0 */
+ stepped = emulate_step(&regs, TEST_PLD(5, 3, 0, 0));
+
+ if (stepped == 1 && regs.gpr[5] == a)
+ show_result("pld", "PASS");
+ else
+ show_result("pld", "FAIL");
+}
+
static void __init test_lwz(void)
{
struct pt_regs regs;
@@ -121,7 +137,7 @@ static void __init test_lwz(void)
regs.gpr[3] = (unsigned long) &a;
/* lwz r5, 0(r3) */
- stepped = emulate_step(&regs, TEST_LWZ(5, 3, 0));
+ stepped = emulate_step(&regs, ppc_inst(PPC_RAW_LWZ(5, 3, 0)));
if (stepped == 1 && regs.gpr[5] == a)
show_result("lwz", "PASS");
@@ -129,6 +145,30 @@ static void __init test_lwz(void)
show_result("lwz", "FAIL");
}
+static void __init test_plwz(void)
+{
+ struct pt_regs regs;
+ unsigned int a = 0x4545;
+ int stepped = -1;
+
+ if (!cpu_has_feature(CPU_FTR_ARCH_31)) {
+ show_result("plwz", "SKIP (!CPU_FTR_ARCH_31)");
+ return;
+ }
+
+ init_pt_regs(&regs);
+ regs.gpr[3] = (unsigned long)&a;
+
+ /* plwz r5, 0(r3), 0 */
+
+ stepped = emulate_step(&regs, TEST_PLWZ(5, 3, 0, 0));
+
+ if (stepped == 1 && regs.gpr[5] == a)
+ show_result("plwz", "PASS");
+ else
+ show_result("plwz", "FAIL");
+}
+
static void __init test_lwzx(void)
{
struct pt_regs regs;
@@ -141,7 +181,7 @@ static void __init test_lwzx(void)
regs.gpr[5] = 0x8765;
/* lwzx r5, r3, r4 */
- stepped = emulate_step(&regs, TEST_LWZX(5, 3, 4));
+ stepped = emulate_step(&regs, ppc_inst(PPC_RAW_LWZX(5, 3, 4)));
if (stepped == 1 && regs.gpr[5] == a[2])
show_result("lwzx", "PASS");
else
@@ -159,13 +199,36 @@ static void __init test_std(void)
regs.gpr[5] = 0x5678;
/* std r5, 0(r3) */
- stepped = emulate_step(&regs, TEST_STD(5, 3, 0));
- if (stepped == 1 || regs.gpr[5] == a)
+ stepped = emulate_step(&regs, ppc_inst(PPC_RAW_STD(5, 3, 0)));
+ if (stepped == 1 && regs.gpr[5] == a)
show_result("std", "PASS");
else
show_result("std", "FAIL");
}
+static void __init test_pstd(void)
+{
+ struct pt_regs regs;
+ unsigned long a = 0x1234;
+ int stepped = -1;
+
+ if (!cpu_has_feature(CPU_FTR_ARCH_31)) {
+ show_result("pstd", "SKIP (!CPU_FTR_ARCH_31)");
+ return;
+ }
+
+ init_pt_regs(&regs);
+ regs.gpr[3] = (unsigned long)&a;
+ regs.gpr[5] = 0x5678;
+
+ /* pstd r5, 0(r3), 0 */
+ stepped = emulate_step(&regs, TEST_PSTD(5, 3, 0, 0));
+ if (stepped == 1 && regs.gpr[5] == a)
+ show_result("pstd", "PASS");
+ else
+ show_result("pstd", "FAIL");
+}
+
static void __init test_ldarx_stdcx(void)
{
struct pt_regs regs;
@@ -184,7 +247,7 @@ static void __init test_ldarx_stdcx(void)
regs.gpr[5] = 0x5678;
/* ldarx r5, r3, r4, 0 */
- stepped = emulate_step(&regs, TEST_LDARX(5, 3, 4, 0));
+ stepped = emulate_step(&regs, ppc_inst(PPC_RAW_LDARX(5, 3, 4, 0)));
/*
* Don't touch 'a' here. Touching 'a' can do Load/store
@@ -202,7 +265,7 @@ static void __init test_ldarx_stdcx(void)
regs.gpr[5] = 0x9ABC;
/* stdcx. r5, r3, r4 */
- stepped = emulate_step(&regs, TEST_STDCX(5, 3, 4));
+ stepped = emulate_step(&regs, ppc_inst(PPC_RAW_STDCX(5, 3, 4)));
/*
* Two possible scenarios that indicates successful emulation
@@ -242,7 +305,7 @@ static void __init test_lfsx_stfsx(void)
regs.gpr[4] = 0;
/* lfsx frt10, r3, r4 */
- stepped = emulate_step(&regs, TEST_LFSX(10, 3, 4));
+ stepped = emulate_step(&regs, ppc_inst(PPC_RAW_LFSX(10, 3, 4)));
if (stepped == 1)
show_result("lfsx", "PASS");
@@ -255,7 +318,7 @@ static void __init test_lfsx_stfsx(void)
c.a = 678.91;
/* stfsx frs10, r3, r4 */
- stepped = emulate_step(&regs, TEST_STFSX(10, 3, 4));
+ stepped = emulate_step(&regs, ppc_inst(PPC_RAW_STFSX(10, 3, 4)));
if (stepped == 1 && c.b == cached_b)
show_result("stfsx", "PASS");
@@ -263,6 +326,53 @@ static void __init test_lfsx_stfsx(void)
show_result("stfsx", "FAIL");
}
+static void __init test_plfs_pstfs(void)
+{
+ struct pt_regs regs;
+ union {
+ float a;
+ int b;
+ } c;
+ int cached_b;
+ int stepped = -1;
+
+ if (!cpu_has_feature(CPU_FTR_ARCH_31)) {
+ show_result("plfs", "SKIP (!CPU_FTR_ARCH_31)");
+ show_result("pstfs", "SKIP (!CPU_FTR_ARCH_31)");
+ return;
+ }
+
+ init_pt_regs(&regs);
+
+
+ /*** plfs ***/
+
+ c.a = 123.45;
+ cached_b = c.b;
+
+ regs.gpr[3] = (unsigned long)&c.a;
+
+ /* plfs frt10, 0(r3), 0 */
+ stepped = emulate_step(&regs, TEST_PLFS(10, 3, 0, 0));
+
+ if (stepped == 1)
+ show_result("plfs", "PASS");
+ else
+ show_result("plfs", "FAIL");
+
+
+ /*** pstfs ***/
+
+ c.a = 678.91;
+
+ /* pstfs frs10, 0(r3), 0 */
+ stepped = emulate_step(&regs, TEST_PSTFS(10, 3, 0, 0));
+
+ if (stepped == 1 && c.b == cached_b)
+ show_result("pstfs", "PASS");
+ else
+ show_result("pstfs", "FAIL");
+}
+
static void __init test_lfdx_stfdx(void)
{
struct pt_regs regs;
@@ -285,7 +395,7 @@ static void __init test_lfdx_stfdx(void)
regs.gpr[4] = 0;
/* lfdx frt10, r3, r4 */
- stepped = emulate_step(&regs, TEST_LFDX(10, 3, 4));
+ stepped = emulate_step(&regs, ppc_inst(PPC_RAW_LFDX(10, 3, 4)));
if (stepped == 1)
show_result("lfdx", "PASS");
@@ -298,13 +408,60 @@ static void __init test_lfdx_stfdx(void)
c.a = 987654.32;
/* stfdx frs10, r3, r4 */
- stepped = emulate_step(&regs, TEST_STFDX(10, 3, 4));
+ stepped = emulate_step(&regs, ppc_inst(PPC_RAW_STFDX(10, 3, 4)));
if (stepped == 1 && c.b == cached_b)
show_result("stfdx", "PASS");
else
show_result("stfdx", "FAIL");
}
+
+static void __init test_plfd_pstfd(void)
+{
+ struct pt_regs regs;
+ union {
+ double a;
+ long b;
+ } c;
+ long cached_b;
+ int stepped = -1;
+
+ if (!cpu_has_feature(CPU_FTR_ARCH_31)) {
+ show_result("plfd", "SKIP (!CPU_FTR_ARCH_31)");
+ show_result("pstfd", "SKIP (!CPU_FTR_ARCH_31)");
+ return;
+ }
+
+ init_pt_regs(&regs);
+
+
+ /*** plfd ***/
+
+ c.a = 123456.78;
+ cached_b = c.b;
+
+ regs.gpr[3] = (unsigned long)&c.a;
+
+ /* plfd frt10, 0(r3), 0 */
+ stepped = emulate_step(&regs, TEST_PLFD(10, 3, 0, 0));
+
+ if (stepped == 1)
+ show_result("plfd", "PASS");
+ else
+ show_result("plfd", "FAIL");
+
+
+ /*** pstfd ***/
+
+ c.a = 987654.32;
+
+ /* pstfd frs10, 0(r3), 0 */
+ stepped = emulate_step(&regs, TEST_PSTFD(10, 3, 0, 0));
+
+ if (stepped == 1 && c.b == cached_b)
+ show_result("pstfd", "PASS");
+ else
+ show_result("pstfd", "FAIL");
+}
#else
static void __init test_lfsx_stfsx(void)
{
@@ -312,11 +469,23 @@ static void __init test_lfsx_stfsx(void)
show_result("stfsx", "SKIP (CONFIG_PPC_FPU is not set)");
}
+static void __init test_plfs_pstfs(void)
+{
+ show_result("plfs", "SKIP (CONFIG_PPC_FPU is not set)");
+ show_result("pstfs", "SKIP (CONFIG_PPC_FPU is not set)");
+}
+
static void __init test_lfdx_stfdx(void)
{
show_result("lfdx", "SKIP (CONFIG_PPC_FPU is not set)");
show_result("stfdx", "SKIP (CONFIG_PPC_FPU is not set)");
}
+
+static void __init test_plfd_pstfd(void)
+{
+ show_result("plfd", "SKIP (CONFIG_PPC_FPU is not set)");
+ show_result("pstfd", "SKIP (CONFIG_PPC_FPU is not set)");
+}
#endif /* CONFIG_PPC_FPU */
#ifdef CONFIG_ALTIVEC
@@ -344,7 +513,7 @@ static void __init test_lvx_stvx(void)
regs.gpr[4] = 0;
/* lvx vrt10, r3, r4 */
- stepped = emulate_step(&regs, TEST_LVX(10, 3, 4));
+ stepped = emulate_step(&regs, ppc_inst(PPC_RAW_LVX(10, 3, 4)));
if (stepped == 1)
show_result("lvx", "PASS");
@@ -360,7 +529,7 @@ static void __init test_lvx_stvx(void)
c.b[3] = 498532;
/* stvx vrs10, r3, r4 */
- stepped = emulate_step(&regs, TEST_STVX(10, 3, 4));
+ stepped = emulate_step(&regs, ppc_inst(PPC_RAW_STVX(10, 3, 4)));
if (stepped == 1 && cached_b[0] == c.b[0] && cached_b[1] == c.b[1] &&
cached_b[2] == c.b[2] && cached_b[3] == c.b[3])
@@ -401,7 +570,7 @@ static void __init test_lxvd2x_stxvd2x(void)
regs.gpr[4] = 0;
/* lxvd2x vsr39, r3, r4 */
- stepped = emulate_step(&regs, TEST_LXVD2X(39, 3, 4));
+ stepped = emulate_step(&regs, ppc_inst(PPC_RAW_LXVD2X(39, R3, R4)));
if (stepped == 1 && cpu_has_feature(CPU_FTR_VSX)) {
show_result("lxvd2x", "PASS");
@@ -421,7 +590,7 @@ static void __init test_lxvd2x_stxvd2x(void)
c.b[3] = 4;
/* stxvd2x vsr39, r3, r4 */
- stepped = emulate_step(&regs, TEST_STXVD2X(39, 3, 4));
+ stepped = emulate_step(&regs, ppc_inst(PPC_RAW_STXVD2X(39, R3, R4)));
if (stepped == 1 && cached_b[0] == c.b[0] && cached_b[1] == c.b[1] &&
cached_b[2] == c.b[2] && cached_b[3] == c.b[3] &&
@@ -442,36 +611,315 @@ static void __init test_lxvd2x_stxvd2x(void)
}
#endif /* CONFIG_VSX */
+#ifdef CONFIG_VSX
+static void __init test_lxvp_stxvp(void)
+{
+ struct pt_regs regs;
+ union {
+ vector128 a;
+ u32 b[4];
+ } c[2];
+ u32 cached_b[8];
+ int stepped = -1;
+
+ if (!cpu_has_feature(CPU_FTR_ARCH_31)) {
+ show_result("lxvp", "SKIP (!CPU_FTR_ARCH_31)");
+ show_result("stxvp", "SKIP (!CPU_FTR_ARCH_31)");
+ return;
+ }
+
+ init_pt_regs(&regs);
+
+ /*** lxvp ***/
+
+ cached_b[0] = c[0].b[0] = 18233;
+ cached_b[1] = c[0].b[1] = 34863571;
+ cached_b[2] = c[0].b[2] = 834;
+ cached_b[3] = c[0].b[3] = 6138911;
+ cached_b[4] = c[1].b[0] = 1234;
+ cached_b[5] = c[1].b[1] = 5678;
+ cached_b[6] = c[1].b[2] = 91011;
+ cached_b[7] = c[1].b[3] = 121314;
+
+ regs.gpr[4] = (unsigned long)&c[0].a;
+
+ /*
+ * lxvp XTp,DQ(RA)
+ * XTp = 32xTX + 2xTp
+ * let TX=1 Tp=1 RA=4 DQ=0
+ */
+ stepped = emulate_step(&regs, ppc_inst(PPC_RAW_LXVP(34, 4, 0)));
+
+ if (stepped == 1 && cpu_has_feature(CPU_FTR_VSX)) {
+ show_result("lxvp", "PASS");
+ } else {
+ if (!cpu_has_feature(CPU_FTR_VSX))
+ show_result("lxvp", "PASS (!CPU_FTR_VSX)");
+ else
+ show_result("lxvp", "FAIL");
+ }
+
+ /*** stxvp ***/
+
+ c[0].b[0] = 21379463;
+ c[0].b[1] = 87;
+ c[0].b[2] = 374234;
+ c[0].b[3] = 4;
+ c[1].b[0] = 90;
+ c[1].b[1] = 122;
+ c[1].b[2] = 555;
+ c[1].b[3] = 32144;
+
+ /*
+ * stxvp XSp,DQ(RA)
+ * XSp = 32xSX + 2xSp
+ * let SX=1 Sp=1 RA=4 DQ=0
+ */
+ stepped = emulate_step(&regs, ppc_inst(PPC_RAW_STXVP(34, 4, 0)));
+
+ if (stepped == 1 && cached_b[0] == c[0].b[0] && cached_b[1] == c[0].b[1] &&
+ cached_b[2] == c[0].b[2] && cached_b[3] == c[0].b[3] &&
+ cached_b[4] == c[1].b[0] && cached_b[5] == c[1].b[1] &&
+ cached_b[6] == c[1].b[2] && cached_b[7] == c[1].b[3] &&
+ cpu_has_feature(CPU_FTR_VSX)) {
+ show_result("stxvp", "PASS");
+ } else {
+ if (!cpu_has_feature(CPU_FTR_VSX))
+ show_result("stxvp", "PASS (!CPU_FTR_VSX)");
+ else
+ show_result("stxvp", "FAIL");
+ }
+}
+#else
+static void __init test_lxvp_stxvp(void)
+{
+ show_result("lxvp", "SKIP (CONFIG_VSX is not set)");
+ show_result("stxvp", "SKIP (CONFIG_VSX is not set)");
+}
+#endif /* CONFIG_VSX */
+
+#ifdef CONFIG_VSX
+static void __init test_lxvpx_stxvpx(void)
+{
+ struct pt_regs regs;
+ union {
+ vector128 a;
+ u32 b[4];
+ } c[2];
+ u32 cached_b[8];
+ int stepped = -1;
+
+ if (!cpu_has_feature(CPU_FTR_ARCH_31)) {
+ show_result("lxvpx", "SKIP (!CPU_FTR_ARCH_31)");
+ show_result("stxvpx", "SKIP (!CPU_FTR_ARCH_31)");
+ return;
+ }
+
+ init_pt_regs(&regs);
+
+ /*** lxvpx ***/
+
+ cached_b[0] = c[0].b[0] = 18233;
+ cached_b[1] = c[0].b[1] = 34863571;
+ cached_b[2] = c[0].b[2] = 834;
+ cached_b[3] = c[0].b[3] = 6138911;
+ cached_b[4] = c[1].b[0] = 1234;
+ cached_b[5] = c[1].b[1] = 5678;
+ cached_b[6] = c[1].b[2] = 91011;
+ cached_b[7] = c[1].b[3] = 121314;
+
+ regs.gpr[3] = (unsigned long)&c[0].a;
+ regs.gpr[4] = 0;
+
+ /*
+ * lxvpx XTp,RA,RB
+ * XTp = 32xTX + 2xTp
+ * let TX=1 Tp=1 RA=3 RB=4
+ */
+ stepped = emulate_step(&regs, ppc_inst(PPC_RAW_LXVPX(34, 3, 4)));
+
+ if (stepped == 1 && cpu_has_feature(CPU_FTR_VSX)) {
+ show_result("lxvpx", "PASS");
+ } else {
+ if (!cpu_has_feature(CPU_FTR_VSX))
+ show_result("lxvpx", "PASS (!CPU_FTR_VSX)");
+ else
+ show_result("lxvpx", "FAIL");
+ }
+
+ /*** stxvpx ***/
+
+ c[0].b[0] = 21379463;
+ c[0].b[1] = 87;
+ c[0].b[2] = 374234;
+ c[0].b[3] = 4;
+ c[1].b[0] = 90;
+ c[1].b[1] = 122;
+ c[1].b[2] = 555;
+ c[1].b[3] = 32144;
+
+ /*
+ * stxvpx XSp,RA,RB
+ * XSp = 32xSX + 2xSp
+ * let SX=1 Sp=1 RA=3 RB=4
+ */
+ stepped = emulate_step(&regs, ppc_inst(PPC_RAW_STXVPX(34, 3, 4)));
+
+ if (stepped == 1 && cached_b[0] == c[0].b[0] && cached_b[1] == c[0].b[1] &&
+ cached_b[2] == c[0].b[2] && cached_b[3] == c[0].b[3] &&
+ cached_b[4] == c[1].b[0] && cached_b[5] == c[1].b[1] &&
+ cached_b[6] == c[1].b[2] && cached_b[7] == c[1].b[3] &&
+ cpu_has_feature(CPU_FTR_VSX)) {
+ show_result("stxvpx", "PASS");
+ } else {
+ if (!cpu_has_feature(CPU_FTR_VSX))
+ show_result("stxvpx", "PASS (!CPU_FTR_VSX)");
+ else
+ show_result("stxvpx", "FAIL");
+ }
+}
+#else
+static void __init test_lxvpx_stxvpx(void)
+{
+ show_result("lxvpx", "SKIP (CONFIG_VSX is not set)");
+ show_result("stxvpx", "SKIP (CONFIG_VSX is not set)");
+}
+#endif /* CONFIG_VSX */
+
+#ifdef CONFIG_VSX
+static void __init test_plxvp_pstxvp(void)
+{
+ ppc_inst_t instr;
+ struct pt_regs regs;
+ union {
+ vector128 a;
+ u32 b[4];
+ } c[2];
+ u32 cached_b[8];
+ int stepped = -1;
+
+ if (!cpu_has_feature(CPU_FTR_ARCH_31)) {
+ show_result("plxvp", "SKIP (!CPU_FTR_ARCH_31)");
+ show_result("pstxvp", "SKIP (!CPU_FTR_ARCH_31)");
+ return;
+ }
+
+ /*** plxvp ***/
+
+ cached_b[0] = c[0].b[0] = 18233;
+ cached_b[1] = c[0].b[1] = 34863571;
+ cached_b[2] = c[0].b[2] = 834;
+ cached_b[3] = c[0].b[3] = 6138911;
+ cached_b[4] = c[1].b[0] = 1234;
+ cached_b[5] = c[1].b[1] = 5678;
+ cached_b[6] = c[1].b[2] = 91011;
+ cached_b[7] = c[1].b[3] = 121314;
+
+ init_pt_regs(&regs);
+ regs.gpr[3] = (unsigned long)&c[0].a;
+
+ /*
+ * plxvp XTp,D(RA),R
+ * XTp = 32xTX + 2xTp
+ * let RA=3 R=0 D=d0||d1=0 R=0 Tp=1 TX=1
+ */
+ instr = ppc_inst_prefix(PPC_RAW_PLXVP_P(34, 0, 3, 0), PPC_RAW_PLXVP_S(34, 0, 3, 0));
+
+ stepped = emulate_step(&regs, instr);
+ if (stepped == 1 && cpu_has_feature(CPU_FTR_VSX)) {
+ show_result("plxvp", "PASS");
+ } else {
+ if (!cpu_has_feature(CPU_FTR_VSX))
+ show_result("plxvp", "PASS (!CPU_FTR_VSX)");
+ else
+ show_result("plxvp", "FAIL");
+ }
+
+ /*** pstxvp ***/
+
+ c[0].b[0] = 21379463;
+ c[0].b[1] = 87;
+ c[0].b[2] = 374234;
+ c[0].b[3] = 4;
+ c[1].b[0] = 90;
+ c[1].b[1] = 122;
+ c[1].b[2] = 555;
+ c[1].b[3] = 32144;
+
+ /*
+ * pstxvp XSp,D(RA),R
+ * XSp = 32xSX + 2xSp
+ * let RA=3 D=d0||d1=0 R=0 Sp=1 SX=1
+ */
+ instr = ppc_inst_prefix(PPC_RAW_PSTXVP_P(34, 0, 3, 0), PPC_RAW_PSTXVP_S(34, 0, 3, 0));
+
+ stepped = emulate_step(&regs, instr);
+
+ if (stepped == 1 && cached_b[0] == c[0].b[0] && cached_b[1] == c[0].b[1] &&
+ cached_b[2] == c[0].b[2] && cached_b[3] == c[0].b[3] &&
+ cached_b[4] == c[1].b[0] && cached_b[5] == c[1].b[1] &&
+ cached_b[6] == c[1].b[2] && cached_b[7] == c[1].b[3] &&
+ cpu_has_feature(CPU_FTR_VSX)) {
+ show_result("pstxvp", "PASS");
+ } else {
+ if (!cpu_has_feature(CPU_FTR_VSX))
+ show_result("pstxvp", "PASS (!CPU_FTR_VSX)");
+ else
+ show_result("pstxvp", "FAIL");
+ }
+}
+#else
+static void __init test_plxvp_pstxvp(void)
+{
+ show_result("plxvp", "SKIP (CONFIG_VSX is not set)");
+ show_result("pstxvp", "SKIP (CONFIG_VSX is not set)");
+}
+#endif /* CONFIG_VSX */
+
static void __init run_tests_load_store(void)
{
test_ld();
+ test_pld();
test_lwz();
+ test_plwz();
test_lwzx();
test_std();
+ test_pstd();
test_ldarx_stdcx();
test_lfsx_stfsx();
+ test_plfs_pstfs();
test_lfdx_stfdx();
+ test_plfd_pstfd();
test_lvx_stvx();
test_lxvd2x_stxvd2x();
+ test_lxvp_stxvp();
+ test_lxvpx_stxvpx();
+ test_plxvp_pstxvp();
}
struct compute_test {
char *mnemonic;
+ unsigned long cpu_feature;
struct {
char *descr;
unsigned long flags;
- unsigned int instr;
+ ppc_inst_t instr;
struct pt_regs regs;
} subtests[MAX_SUBTESTS + 1];
};
+/* Extreme values for si0||si1 (the MLS:D-form 34 bit immediate field) */
+#define SI_MIN BIT(33)
+#define SI_MAX (BIT(33) - 1)
+#define SI_UMAX (BIT(34) - 1)
+
static struct compute_test compute_tests[] = {
{
.mnemonic = "nop",
.subtests = {
{
.descr = "R0 = LONG_MAX",
- .instr = PPC_INST_NOP,
+ .instr = ppc_inst(PPC_RAW_NOP()),
.regs = {
.gpr[0] = LONG_MAX,
}
@@ -479,11 +927,38 @@ static struct compute_test compute_tests[] = {
}
},
{
+ .mnemonic = "setb",
+ .cpu_feature = CPU_FTR_ARCH_300,
+ .subtests = {
+ {
+ .descr = "BFA = 1, CR = GT",
+ .instr = ppc_inst(PPC_RAW_SETB(20, 1)),
+ .regs = {
+ .ccr = 0x4000000,
+ }
+ },
+ {
+ .descr = "BFA = 4, CR = LT",
+ .instr = ppc_inst(PPC_RAW_SETB(20, 4)),
+ .regs = {
+ .ccr = 0x8000,
+ }
+ },
+ {
+ .descr = "BFA = 5, CR = EQ",
+ .instr = ppc_inst(PPC_RAW_SETB(20, 5)),
+ .regs = {
+ .ccr = 0x200,
+ }
+ }
+ }
+ },
+ {
.mnemonic = "add",
.subtests = {
{
.descr = "RA = LONG_MIN, RB = LONG_MIN",
- .instr = TEST_ADD(20, 21, 22),
+ .instr = ppc_inst(PPC_RAW_ADD(20, 21, 22)),
.regs = {
.gpr[21] = LONG_MIN,
.gpr[22] = LONG_MIN,
@@ -491,7 +966,7 @@ static struct compute_test compute_tests[] = {
},
{
.descr = "RA = LONG_MIN, RB = LONG_MAX",
- .instr = TEST_ADD(20, 21, 22),
+ .instr = ppc_inst(PPC_RAW_ADD(20, 21, 22)),
.regs = {
.gpr[21] = LONG_MIN,
.gpr[22] = LONG_MAX,
@@ -499,7 +974,7 @@ static struct compute_test compute_tests[] = {
},
{
.descr = "RA = LONG_MAX, RB = LONG_MAX",
- .instr = TEST_ADD(20, 21, 22),
+ .instr = ppc_inst(PPC_RAW_ADD(20, 21, 22)),
.regs = {
.gpr[21] = LONG_MAX,
.gpr[22] = LONG_MAX,
@@ -507,7 +982,7 @@ static struct compute_test compute_tests[] = {
},
{
.descr = "RA = ULONG_MAX, RB = ULONG_MAX",
- .instr = TEST_ADD(20, 21, 22),
+ .instr = ppc_inst(PPC_RAW_ADD(20, 21, 22)),
.regs = {
.gpr[21] = ULONG_MAX,
.gpr[22] = ULONG_MAX,
@@ -515,7 +990,7 @@ static struct compute_test compute_tests[] = {
},
{
.descr = "RA = ULONG_MAX, RB = 0x1",
- .instr = TEST_ADD(20, 21, 22),
+ .instr = ppc_inst(PPC_RAW_ADD(20, 21, 22)),
.regs = {
.gpr[21] = ULONG_MAX,
.gpr[22] = 0x1,
@@ -523,7 +998,7 @@ static struct compute_test compute_tests[] = {
},
{
.descr = "RA = INT_MIN, RB = INT_MIN",
- .instr = TEST_ADD(20, 21, 22),
+ .instr = ppc_inst(PPC_RAW_ADD(20, 21, 22)),
.regs = {
.gpr[21] = INT_MIN,
.gpr[22] = INT_MIN,
@@ -531,7 +1006,7 @@ static struct compute_test compute_tests[] = {
},
{
.descr = "RA = INT_MIN, RB = INT_MAX",
- .instr = TEST_ADD(20, 21, 22),
+ .instr = ppc_inst(PPC_RAW_ADD(20, 21, 22)),
.regs = {
.gpr[21] = INT_MIN,
.gpr[22] = INT_MAX,
@@ -539,7 +1014,7 @@ static struct compute_test compute_tests[] = {
},
{
.descr = "RA = INT_MAX, RB = INT_MAX",
- .instr = TEST_ADD(20, 21, 22),
+ .instr = ppc_inst(PPC_RAW_ADD(20, 21, 22)),
.regs = {
.gpr[21] = INT_MAX,
.gpr[22] = INT_MAX,
@@ -547,7 +1022,7 @@ static struct compute_test compute_tests[] = {
},
{
.descr = "RA = UINT_MAX, RB = UINT_MAX",
- .instr = TEST_ADD(20, 21, 22),
+ .instr = ppc_inst(PPC_RAW_ADD(20, 21, 22)),
.regs = {
.gpr[21] = UINT_MAX,
.gpr[22] = UINT_MAX,
@@ -555,7 +1030,7 @@ static struct compute_test compute_tests[] = {
},
{
.descr = "RA = UINT_MAX, RB = 0x1",
- .instr = TEST_ADD(20, 21, 22),
+ .instr = ppc_inst(PPC_RAW_ADD(20, 21, 22)),
.regs = {
.gpr[21] = UINT_MAX,
.gpr[22] = 0x1,
@@ -569,7 +1044,7 @@ static struct compute_test compute_tests[] = {
{
.descr = "RA = LONG_MIN, RB = LONG_MIN",
.flags = IGNORE_CCR,
- .instr = TEST_ADD_DOT(20, 21, 22),
+ .instr = ppc_inst(PPC_RAW_ADD_DOT(20, 21, 22)),
.regs = {
.gpr[21] = LONG_MIN,
.gpr[22] = LONG_MIN,
@@ -577,7 +1052,7 @@ static struct compute_test compute_tests[] = {
},
{
.descr = "RA = LONG_MIN, RB = LONG_MAX",
- .instr = TEST_ADD_DOT(20, 21, 22),
+ .instr = ppc_inst(PPC_RAW_ADD_DOT(20, 21, 22)),
.regs = {
.gpr[21] = LONG_MIN,
.gpr[22] = LONG_MAX,
@@ -586,7 +1061,7 @@ static struct compute_test compute_tests[] = {
{
.descr = "RA = LONG_MAX, RB = LONG_MAX",
.flags = IGNORE_CCR,
- .instr = TEST_ADD_DOT(20, 21, 22),
+ .instr = ppc_inst(PPC_RAW_ADD_DOT(20, 21, 22)),
.regs = {
.gpr[21] = LONG_MAX,
.gpr[22] = LONG_MAX,
@@ -594,7 +1069,7 @@ static struct compute_test compute_tests[] = {
},
{
.descr = "RA = ULONG_MAX, RB = ULONG_MAX",
- .instr = TEST_ADD_DOT(20, 21, 22),
+ .instr = ppc_inst(PPC_RAW_ADD_DOT(20, 21, 22)),
.regs = {
.gpr[21] = ULONG_MAX,
.gpr[22] = ULONG_MAX,
@@ -602,7 +1077,7 @@ static struct compute_test compute_tests[] = {
},
{
.descr = "RA = ULONG_MAX, RB = 0x1",
- .instr = TEST_ADD_DOT(20, 21, 22),
+ .instr = ppc_inst(PPC_RAW_ADD_DOT(20, 21, 22)),
.regs = {
.gpr[21] = ULONG_MAX,
.gpr[22] = 0x1,
@@ -610,7 +1085,7 @@ static struct compute_test compute_tests[] = {
},
{
.descr = "RA = INT_MIN, RB = INT_MIN",
- .instr = TEST_ADD_DOT(20, 21, 22),
+ .instr = ppc_inst(PPC_RAW_ADD_DOT(20, 21, 22)),
.regs = {
.gpr[21] = INT_MIN,
.gpr[22] = INT_MIN,
@@ -618,7 +1093,7 @@ static struct compute_test compute_tests[] = {
},
{
.descr = "RA = INT_MIN, RB = INT_MAX",
- .instr = TEST_ADD_DOT(20, 21, 22),
+ .instr = ppc_inst(PPC_RAW_ADD_DOT(20, 21, 22)),
.regs = {
.gpr[21] = INT_MIN,
.gpr[22] = INT_MAX,
@@ -626,7 +1101,7 @@ static struct compute_test compute_tests[] = {
},
{
.descr = "RA = INT_MAX, RB = INT_MAX",
- .instr = TEST_ADD_DOT(20, 21, 22),
+ .instr = ppc_inst(PPC_RAW_ADD_DOT(20, 21, 22)),
.regs = {
.gpr[21] = INT_MAX,
.gpr[22] = INT_MAX,
@@ -634,7 +1109,7 @@ static struct compute_test compute_tests[] = {
},
{
.descr = "RA = UINT_MAX, RB = UINT_MAX",
- .instr = TEST_ADD_DOT(20, 21, 22),
+ .instr = ppc_inst(PPC_RAW_ADD_DOT(20, 21, 22)),
.regs = {
.gpr[21] = UINT_MAX,
.gpr[22] = UINT_MAX,
@@ -642,7 +1117,7 @@ static struct compute_test compute_tests[] = {
},
{
.descr = "RA = UINT_MAX, RB = 0x1",
- .instr = TEST_ADD_DOT(20, 21, 22),
+ .instr = ppc_inst(PPC_RAW_ADD_DOT(20, 21, 22)),
.regs = {
.gpr[21] = UINT_MAX,
.gpr[22] = 0x1,
@@ -655,7 +1130,7 @@ static struct compute_test compute_tests[] = {
.subtests = {
{
.descr = "RA = LONG_MIN, RB = LONG_MIN",
- .instr = TEST_ADDC(20, 21, 22),
+ .instr = ppc_inst(PPC_RAW_ADDC(20, 21, 22)),
.regs = {
.gpr[21] = LONG_MIN,
.gpr[22] = LONG_MIN,
@@ -663,7 +1138,7 @@ static struct compute_test compute_tests[] = {
},
{
.descr = "RA = LONG_MIN, RB = LONG_MAX",
- .instr = TEST_ADDC(20, 21, 22),
+ .instr = ppc_inst(PPC_RAW_ADDC(20, 21, 22)),
.regs = {
.gpr[21] = LONG_MIN,
.gpr[22] = LONG_MAX,
@@ -671,7 +1146,7 @@ static struct compute_test compute_tests[] = {
},
{
.descr = "RA = LONG_MAX, RB = LONG_MAX",
- .instr = TEST_ADDC(20, 21, 22),
+ .instr = ppc_inst(PPC_RAW_ADDC(20, 21, 22)),
.regs = {
.gpr[21] = LONG_MAX,
.gpr[22] = LONG_MAX,
@@ -679,7 +1154,7 @@ static struct compute_test compute_tests[] = {
},
{
.descr = "RA = ULONG_MAX, RB = ULONG_MAX",
- .instr = TEST_ADDC(20, 21, 22),
+ .instr = ppc_inst(PPC_RAW_ADDC(20, 21, 22)),
.regs = {
.gpr[21] = ULONG_MAX,
.gpr[22] = ULONG_MAX,
@@ -687,7 +1162,7 @@ static struct compute_test compute_tests[] = {
},
{
.descr = "RA = ULONG_MAX, RB = 0x1",
- .instr = TEST_ADDC(20, 21, 22),
+ .instr = ppc_inst(PPC_RAW_ADDC(20, 21, 22)),
.regs = {
.gpr[21] = ULONG_MAX,
.gpr[22] = 0x1,
@@ -695,7 +1170,7 @@ static struct compute_test compute_tests[] = {
},
{
.descr = "RA = INT_MIN, RB = INT_MIN",
- .instr = TEST_ADDC(20, 21, 22),
+ .instr = ppc_inst(PPC_RAW_ADDC(20, 21, 22)),
.regs = {
.gpr[21] = INT_MIN,
.gpr[22] = INT_MIN,
@@ -703,7 +1178,7 @@ static struct compute_test compute_tests[] = {
},
{
.descr = "RA = INT_MIN, RB = INT_MAX",
- .instr = TEST_ADDC(20, 21, 22),
+ .instr = ppc_inst(PPC_RAW_ADDC(20, 21, 22)),
.regs = {
.gpr[21] = INT_MIN,
.gpr[22] = INT_MAX,
@@ -711,7 +1186,7 @@ static struct compute_test compute_tests[] = {
},
{
.descr = "RA = INT_MAX, RB = INT_MAX",
- .instr = TEST_ADDC(20, 21, 22),
+ .instr = ppc_inst(PPC_RAW_ADDC(20, 21, 22)),
.regs = {
.gpr[21] = INT_MAX,
.gpr[22] = INT_MAX,
@@ -719,7 +1194,7 @@ static struct compute_test compute_tests[] = {
},
{
.descr = "RA = UINT_MAX, RB = UINT_MAX",
- .instr = TEST_ADDC(20, 21, 22),
+ .instr = ppc_inst(PPC_RAW_ADDC(20, 21, 22)),
.regs = {
.gpr[21] = UINT_MAX,
.gpr[22] = UINT_MAX,
@@ -727,7 +1202,7 @@ static struct compute_test compute_tests[] = {
},
{
.descr = "RA = UINT_MAX, RB = 0x1",
- .instr = TEST_ADDC(20, 21, 22),
+ .instr = ppc_inst(PPC_RAW_ADDC(20, 21, 22)),
.regs = {
.gpr[21] = UINT_MAX,
.gpr[22] = 0x1,
@@ -735,7 +1210,7 @@ static struct compute_test compute_tests[] = {
},
{
.descr = "RA = LONG_MIN | INT_MIN, RB = LONG_MIN | INT_MIN",
- .instr = TEST_ADDC(20, 21, 22),
+ .instr = ppc_inst(PPC_RAW_ADDC(20, 21, 22)),
.regs = {
.gpr[21] = LONG_MIN | (uint)INT_MIN,
.gpr[22] = LONG_MIN | (uint)INT_MIN,
@@ -749,7 +1224,7 @@ static struct compute_test compute_tests[] = {
{
.descr = "RA = LONG_MIN, RB = LONG_MIN",
.flags = IGNORE_CCR,
- .instr = TEST_ADDC_DOT(20, 21, 22),
+ .instr = ppc_inst(PPC_RAW_ADDC_DOT(20, 21, 22)),
.regs = {
.gpr[21] = LONG_MIN,
.gpr[22] = LONG_MIN,
@@ -757,7 +1232,7 @@ static struct compute_test compute_tests[] = {
},
{
.descr = "RA = LONG_MIN, RB = LONG_MAX",
- .instr = TEST_ADDC_DOT(20, 21, 22),
+ .instr = ppc_inst(PPC_RAW_ADDC_DOT(20, 21, 22)),
.regs = {
.gpr[21] = LONG_MIN,
.gpr[22] = LONG_MAX,
@@ -766,7 +1241,7 @@ static struct compute_test compute_tests[] = {
{
.descr = "RA = LONG_MAX, RB = LONG_MAX",
.flags = IGNORE_CCR,
- .instr = TEST_ADDC_DOT(20, 21, 22),
+ .instr = ppc_inst(PPC_RAW_ADDC_DOT(20, 21, 22)),
.regs = {
.gpr[21] = LONG_MAX,
.gpr[22] = LONG_MAX,
@@ -774,7 +1249,7 @@ static struct compute_test compute_tests[] = {
},
{
.descr = "RA = ULONG_MAX, RB = ULONG_MAX",
- .instr = TEST_ADDC_DOT(20, 21, 22),
+ .instr = ppc_inst(PPC_RAW_ADDC_DOT(20, 21, 22)),
.regs = {
.gpr[21] = ULONG_MAX,
.gpr[22] = ULONG_MAX,
@@ -782,7 +1257,7 @@ static struct compute_test compute_tests[] = {
},
{
.descr = "RA = ULONG_MAX, RB = 0x1",
- .instr = TEST_ADDC_DOT(20, 21, 22),
+ .instr = ppc_inst(PPC_RAW_ADDC_DOT(20, 21, 22)),
.regs = {
.gpr[21] = ULONG_MAX,
.gpr[22] = 0x1,
@@ -790,7 +1265,7 @@ static struct compute_test compute_tests[] = {
},
{
.descr = "RA = INT_MIN, RB = INT_MIN",
- .instr = TEST_ADDC_DOT(20, 21, 22),
+ .instr = ppc_inst(PPC_RAW_ADDC_DOT(20, 21, 22)),
.regs = {
.gpr[21] = INT_MIN,
.gpr[22] = INT_MIN,
@@ -798,7 +1273,7 @@ static struct compute_test compute_tests[] = {
},
{
.descr = "RA = INT_MIN, RB = INT_MAX",
- .instr = TEST_ADDC_DOT(20, 21, 22),
+ .instr = ppc_inst(PPC_RAW_ADDC_DOT(20, 21, 22)),
.regs = {
.gpr[21] = INT_MIN,
.gpr[22] = INT_MAX,
@@ -806,7 +1281,7 @@ static struct compute_test compute_tests[] = {
},
{
.descr = "RA = INT_MAX, RB = INT_MAX",
- .instr = TEST_ADDC_DOT(20, 21, 22),
+ .instr = ppc_inst(PPC_RAW_ADDC_DOT(20, 21, 22)),
.regs = {
.gpr[21] = INT_MAX,
.gpr[22] = INT_MAX,
@@ -814,7 +1289,7 @@ static struct compute_test compute_tests[] = {
},
{
.descr = "RA = UINT_MAX, RB = UINT_MAX",
- .instr = TEST_ADDC_DOT(20, 21, 22),
+ .instr = ppc_inst(PPC_RAW_ADDC_DOT(20, 21, 22)),
.regs = {
.gpr[21] = UINT_MAX,
.gpr[22] = UINT_MAX,
@@ -822,7 +1297,7 @@ static struct compute_test compute_tests[] = {
},
{
.descr = "RA = UINT_MAX, RB = 0x1",
- .instr = TEST_ADDC_DOT(20, 21, 22),
+ .instr = ppc_inst(PPC_RAW_ADDC_DOT(20, 21, 22)),
.regs = {
.gpr[21] = UINT_MAX,
.gpr[22] = 0x1,
@@ -830,47 +1305,336 @@ static struct compute_test compute_tests[] = {
},
{
.descr = "RA = LONG_MIN | INT_MIN, RB = LONG_MIN | INT_MIN",
- .instr = TEST_ADDC_DOT(20, 21, 22),
+ .instr = ppc_inst(PPC_RAW_ADDC_DOT(20, 21, 22)),
.regs = {
.gpr[21] = LONG_MIN | (uint)INT_MIN,
.gpr[22] = LONG_MIN | (uint)INT_MIN,
}
}
}
+ },
+ {
+ .mnemonic = "divde",
+ .subtests = {
+ {
+ .descr = "RA = LONG_MIN, RB = LONG_MIN",
+ .instr = ppc_inst(PPC_RAW_DIVDE(20, 21, 22)),
+ .regs = {
+ .gpr[21] = LONG_MIN,
+ .gpr[22] = LONG_MIN,
+ }
+ },
+ {
+ .descr = "RA = 1L, RB = 0",
+ .instr = ppc_inst(PPC_RAW_DIVDE(20, 21, 22)),
+ .flags = IGNORE_GPR(20),
+ .regs = {
+ .gpr[21] = 1L,
+ .gpr[22] = 0,
+ }
+ },
+ {
+ .descr = "RA = LONG_MIN, RB = LONG_MAX",
+ .instr = ppc_inst(PPC_RAW_DIVDE(20, 21, 22)),
+ .regs = {
+ .gpr[21] = LONG_MIN,
+ .gpr[22] = LONG_MAX,
+ }
+ }
+ }
+ },
+ {
+ .mnemonic = "divde.",
+ .subtests = {
+ {
+ .descr = "RA = LONG_MIN, RB = LONG_MIN",
+ .instr = ppc_inst(PPC_RAW_DIVDE_DOT(20, 21, 22)),
+ .regs = {
+ .gpr[21] = LONG_MIN,
+ .gpr[22] = LONG_MIN,
+ }
+ },
+ {
+ .descr = "RA = 1L, RB = 0",
+ .instr = ppc_inst(PPC_RAW_DIVDE_DOT(20, 21, 22)),
+ .flags = IGNORE_GPR(20),
+ .regs = {
+ .gpr[21] = 1L,
+ .gpr[22] = 0,
+ }
+ },
+ {
+ .descr = "RA = LONG_MIN, RB = LONG_MAX",
+ .instr = ppc_inst(PPC_RAW_DIVDE_DOT(20, 21, 22)),
+ .regs = {
+ .gpr[21] = LONG_MIN,
+ .gpr[22] = LONG_MAX,
+ }
+ }
+ }
+ },
+ {
+ .mnemonic = "divdeu",
+ .subtests = {
+ {
+ .descr = "RA = LONG_MIN, RB = LONG_MIN",
+ .instr = ppc_inst(PPC_RAW_DIVDEU(20, 21, 22)),
+ .flags = IGNORE_GPR(20),
+ .regs = {
+ .gpr[21] = LONG_MIN,
+ .gpr[22] = LONG_MIN,
+ }
+ },
+ {
+ .descr = "RA = 1L, RB = 0",
+ .instr = ppc_inst(PPC_RAW_DIVDEU(20, 21, 22)),
+ .flags = IGNORE_GPR(20),
+ .regs = {
+ .gpr[21] = 1L,
+ .gpr[22] = 0,
+ }
+ },
+ {
+ .descr = "RA = LONG_MIN, RB = LONG_MAX",
+ .instr = ppc_inst(PPC_RAW_DIVDEU(20, 21, 22)),
+ .regs = {
+ .gpr[21] = LONG_MIN,
+ .gpr[22] = LONG_MAX,
+ }
+ },
+ {
+ .descr = "RA = LONG_MAX - 1, RB = LONG_MAX",
+ .instr = ppc_inst(PPC_RAW_DIVDEU(20, 21, 22)),
+ .regs = {
+ .gpr[21] = LONG_MAX - 1,
+ .gpr[22] = LONG_MAX,
+ }
+ },
+ {
+ .descr = "RA = LONG_MIN + 1, RB = LONG_MIN",
+ .instr = ppc_inst(PPC_RAW_DIVDEU(20, 21, 22)),
+ .flags = IGNORE_GPR(20),
+ .regs = {
+ .gpr[21] = LONG_MIN + 1,
+ .gpr[22] = LONG_MIN,
+ }
+ }
+ }
+ },
+ {
+ .mnemonic = "divdeu.",
+ .subtests = {
+ {
+ .descr = "RA = LONG_MIN, RB = LONG_MIN",
+ .instr = ppc_inst(PPC_RAW_DIVDEU_DOT(20, 21, 22)),
+ .flags = IGNORE_GPR(20),
+ .regs = {
+ .gpr[21] = LONG_MIN,
+ .gpr[22] = LONG_MIN,
+ }
+ },
+ {
+ .descr = "RA = 1L, RB = 0",
+ .instr = ppc_inst(PPC_RAW_DIVDEU_DOT(20, 21, 22)),
+ .flags = IGNORE_GPR(20),
+ .regs = {
+ .gpr[21] = 1L,
+ .gpr[22] = 0,
+ }
+ },
+ {
+ .descr = "RA = LONG_MIN, RB = LONG_MAX",
+ .instr = ppc_inst(PPC_RAW_DIVDEU_DOT(20, 21, 22)),
+ .regs = {
+ .gpr[21] = LONG_MIN,
+ .gpr[22] = LONG_MAX,
+ }
+ },
+ {
+ .descr = "RA = LONG_MAX - 1, RB = LONG_MAX",
+ .instr = ppc_inst(PPC_RAW_DIVDEU_DOT(20, 21, 22)),
+ .regs = {
+ .gpr[21] = LONG_MAX - 1,
+ .gpr[22] = LONG_MAX,
+ }
+ },
+ {
+ .descr = "RA = LONG_MIN + 1, RB = LONG_MIN",
+ .instr = ppc_inst(PPC_RAW_DIVDEU_DOT(20, 21, 22)),
+ .flags = IGNORE_GPR(20),
+ .regs = {
+ .gpr[21] = LONG_MIN + 1,
+ .gpr[22] = LONG_MIN,
+ }
+ }
+ }
+ },
+ {
+ .mnemonic = "paddi",
+ .cpu_feature = CPU_FTR_ARCH_31,
+ .subtests = {
+ {
+ .descr = "RA = LONG_MIN, SI = SI_MIN, R = 0",
+ .instr = TEST_PADDI(21, 22, SI_MIN, 0),
+ .regs = {
+ .gpr[21] = 0,
+ .gpr[22] = LONG_MIN,
+ }
+ },
+ {
+ .descr = "RA = LONG_MIN, SI = SI_MAX, R = 0",
+ .instr = TEST_PADDI(21, 22, SI_MAX, 0),
+ .regs = {
+ .gpr[21] = 0,
+ .gpr[22] = LONG_MIN,
+ }
+ },
+ {
+ .descr = "RA = LONG_MAX, SI = SI_MAX, R = 0",
+ .instr = TEST_PADDI(21, 22, SI_MAX, 0),
+ .regs = {
+ .gpr[21] = 0,
+ .gpr[22] = LONG_MAX,
+ }
+ },
+ {
+ .descr = "RA = ULONG_MAX, SI = SI_UMAX, R = 0",
+ .instr = TEST_PADDI(21, 22, SI_UMAX, 0),
+ .regs = {
+ .gpr[21] = 0,
+ .gpr[22] = ULONG_MAX,
+ }
+ },
+ {
+ .descr = "RA = ULONG_MAX, SI = 0x1, R = 0",
+ .instr = TEST_PADDI(21, 22, 0x1, 0),
+ .regs = {
+ .gpr[21] = 0,
+ .gpr[22] = ULONG_MAX,
+ }
+ },
+ {
+ .descr = "RA = INT_MIN, SI = SI_MIN, R = 0",
+ .instr = TEST_PADDI(21, 22, SI_MIN, 0),
+ .regs = {
+ .gpr[21] = 0,
+ .gpr[22] = INT_MIN,
+ }
+ },
+ {
+ .descr = "RA = INT_MIN, SI = SI_MAX, R = 0",
+ .instr = TEST_PADDI(21, 22, SI_MAX, 0),
+ .regs = {
+ .gpr[21] = 0,
+ .gpr[22] = INT_MIN,
+ }
+ },
+ {
+ .descr = "RA = INT_MAX, SI = SI_MAX, R = 0",
+ .instr = TEST_PADDI(21, 22, SI_MAX, 0),
+ .regs = {
+ .gpr[21] = 0,
+ .gpr[22] = INT_MAX,
+ }
+ },
+ {
+ .descr = "RA = UINT_MAX, SI = 0x1, R = 0",
+ .instr = TEST_PADDI(21, 22, 0x1, 0),
+ .regs = {
+ .gpr[21] = 0,
+ .gpr[22] = UINT_MAX,
+ }
+ },
+ {
+ .descr = "RA = UINT_MAX, SI = SI_MAX, R = 0",
+ .instr = TEST_PADDI(21, 22, SI_MAX, 0),
+ .regs = {
+ .gpr[21] = 0,
+ .gpr[22] = UINT_MAX,
+ }
+ },
+ {
+ .descr = "RA is r0, SI = SI_MIN, R = 0",
+ .instr = TEST_PADDI(21, 0, SI_MIN, 0),
+ .regs = {
+ .gpr[21] = 0x0,
+ }
+ },
+ {
+ .descr = "RA = 0, SI = SI_MIN, R = 0",
+ .instr = TEST_PADDI(21, 22, SI_MIN, 0),
+ .regs = {
+ .gpr[21] = 0x0,
+ .gpr[22] = 0x0,
+ }
+ },
+ {
+ .descr = "RA is r0, SI = 0, R = 1",
+ .instr = TEST_PADDI(21, 0, 0, 1),
+ .regs = {
+ .gpr[21] = 0,
+ }
+ },
+ {
+ .descr = "RA is r0, SI = SI_MIN, R = 1",
+ .instr = TEST_PADDI(21, 0, SI_MIN, 1),
+ .regs = {
+ .gpr[21] = 0,
+ }
+ },
+ /* Invalid instruction form with R = 1 and RA != 0 */
+ {
+ .descr = "RA = R22(0), SI = 0, R = 1",
+ .instr = TEST_PADDI(21, 22, 0, 1),
+ .flags = NEGATIVE_TEST,
+ .regs = {
+ .gpr[21] = 0,
+ .gpr[22] = 0,
+ }
+ }
+ }
}
};
static int __init emulate_compute_instr(struct pt_regs *regs,
- unsigned int instr)
+ ppc_inst_t instr,
+ bool negative)
{
+ int analysed;
struct instruction_op op;
- if (!regs || !instr)
+ if (!regs || !ppc_inst_val(instr))
return -EINVAL;
- if (analyse_instr(&op, regs, instr) != 1 ||
- GETTYPE(op.type) != COMPUTE) {
- pr_info("emulation failed, instruction = 0x%08x\n", instr);
+ /* regs is not a real return frame, so it is safe to repoint NIP at the test instruction */
+ regs->nip = patch_site_addr(&patch__exec_instr);
+
+ analysed = analyse_instr(&op, regs, instr);
+ if (analysed != 1 || GETTYPE(op.type) != COMPUTE) {
+ if (negative)
+ return -EFAULT;
+ pr_info("emulation failed, instruction = %08lx\n", ppc_inst_as_ulong(instr));
return -EFAULT;
}
-
- emulate_update_regs(regs, &op);
+ if (analysed == 1 && negative)
+ pr_info("negative test failed, instruction = %08lx\n", ppc_inst_as_ulong(instr));
+ if (!negative)
+ emulate_update_regs(regs, &op);
return 0;
}
static int __init execute_compute_instr(struct pt_regs *regs,
- unsigned int instr)
+ ppc_inst_t instr)
{
extern int exec_instr(struct pt_regs *regs);
- extern s32 patch__exec_instr;
- if (!regs || !instr)
+ if (!regs || !ppc_inst_val(instr))
return -EINVAL;
/* Patch the NOP with the actual instruction */
patch_instruction_site(&patch__exec_instr, instr);
if (exec_instr(regs)) {
- pr_info("execution failed, instruction = 0x%08x\n", instr);
+ pr_info("execution failed, instruction = %08lx\n", ppc_inst_as_ulong(instr));
return -EFAULT;
}
@@ -890,16 +1654,23 @@ static void __init run_tests_compute(void)
unsigned long flags;
struct compute_test *test;
struct pt_regs *regs, exp, got;
- unsigned int i, j, k, instr;
- bool ignore_gpr, ignore_xer, ignore_ccr, passed;
+ unsigned int i, j, k;
+ ppc_inst_t instr;
+ bool ignore_gpr, ignore_xer, ignore_ccr, passed, rc, negative;
for (i = 0; i < ARRAY_SIZE(compute_tests); i++) {
test = &compute_tests[i];
+ if (test->cpu_feature && !early_cpu_has_feature(test->cpu_feature)) {
+ show_result(test->mnemonic, "SKIP (!CPU_FTR)");
+ continue;
+ }
+
for (j = 0; j < MAX_SUBTESTS && test->subtests[j].descr; j++) {
instr = test->subtests[j].instr;
flags = test->subtests[j].flags;
regs = &test->subtests[j].regs;
+ negative = flags & NEGATIVE_TEST;
ignore_xer = flags & IGNORE_XER;
ignore_ccr = flags & IGNORE_CCR;
passed = true;
@@ -914,8 +1685,12 @@ static void __init run_tests_compute(void)
exp.msr = MSR_KERNEL;
got.msr = MSR_KERNEL;
- if (emulate_compute_instr(&got, instr) ||
- execute_compute_instr(&exp, instr)) {
+ rc = emulate_compute_instr(&got, instr, negative) != 0;
+ if (negative) {
+ /* skip executing instruction */
+ passed = rc;
+ goto print;
+ } else if (rc || execute_compute_instr(&exp, instr)) {
passed = false;
goto print;
}
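
Before moving to the assembly side: the TEST_PLD/TEST_PADDI-style macros and the SI_MIN/SI_MAX bounds added above encode a 34-bit displacement split across the two words of a prefixed instruction. The standalone C sketch below (not kernel code; the file name, helper name and mask constant are illustrative) shows the split that IMM_H()/IMM_L() are assumed to perform: the high 18 bits go into the prefix word, the low 16 bits into the suffix word.

/*
 * Hedged sketch: how a 34-bit signed displacement is assumed to be split
 * between the prefix word (high 18 bits) and the suffix word (low 16 bits)
 * of a prefixed MLS:D/8LS:D instruction such as paddi or pld.
 * Build with: gcc -O2 -o si_split si_split.c
 */
#include <stdio.h>
#include <stdint.h>

#define SI_MASK		((1ULL << 34) - 1)	/* 34-bit immediate field */

static void split_imm(int64_t si, uint32_t *imm_h, uint32_t *imm_l)
{
	uint64_t field = (uint64_t)si & SI_MASK;	/* truncate to 34 bits */

	*imm_h = (uint32_t)(field >> 16);		/* placed in the prefix word */
	*imm_l = (uint32_t)(field & 0xffff);		/* placed in the suffix word */
}

int main(void)
{
	int64_t samples[] = { 1, -1, -(1LL << 33), (1LL << 33) - 1 };
	uint32_t h, l;

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		split_imm(samples[i], &h, &l);
		printf("si=%lld -> prefix imm=0x%05x suffix imm=0x%04x\n",
		       (long long)samples[i], (unsigned)h, (unsigned)l);
	}
	return 0;
}

Running it for -(1 << 33) and (1 << 33) - 1 shows why BIT(33) and BIT(33) - 1 are the interesting boundary immediates that the paddi subtests exercise.
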
diff --git a/arch/powerpc/lib/test_emulate_step_exec_instr.S b/arch/powerpc/lib/test_emulate_step_exec_instr.S
index 1580f34f4f4f..5473f9d03df3 100644
--- a/arch/powerpc/lib/test_emulate_step_exec_instr.S
+++ b/arch/powerpc/lib/test_emulate_step_exec_instr.S
@@ -37,7 +37,7 @@ _GLOBAL(exec_instr)
* The stack pointer (GPR1) and the thread pointer (GPR13) are not
* saved as these should not be modified anyway.
*/
- SAVE_2GPRS(2, r1)
+ SAVE_GPRS(2, 3, r1)
SAVE_NVGPRS(r1)
/*
@@ -75,12 +75,13 @@ _GLOBAL(exec_instr)
/* Load GPRs from pt_regs */
REST_GPR(0, r31)
- REST_10GPRS(2, r31)
- REST_GPR(12, r31)
+ REST_GPRS(2, 12, r31)
REST_NVGPRS(r31)
/* Placeholder for the test instruction */
+ .balign 64
1: nop
+ nop
patch_site 1b patch__exec_instr
/*
@@ -97,8 +98,7 @@ _GLOBAL(exec_instr)
subi r3, r3, GPR0
SAVE_GPR(0, r3)
SAVE_GPR(2, r3)
- SAVE_8GPRS(4, r3)
- SAVE_GPR(12, r3)
+ SAVE_GPRS(4, 12, r3)
SAVE_NVGPRS(r3)
/* Save resulting LR to pt_regs */
diff --git a/arch/powerpc/lib/vmx-helper.c b/arch/powerpc/lib/vmx-helper.c
index 62e6c3045252..d491da8d1838 100644
--- a/arch/powerpc/lib/vmx-helper.c
+++ b/arch/powerpc/lib/vmx-helper.c
@@ -9,7 +9,6 @@
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <asm/switch_to.h>
-#include <asm/asm-prototypes.h>
int enter_vmx_usercopy(void)
{
@@ -37,7 +36,17 @@ int exit_vmx_usercopy(void)
{
disable_kernel_altivec();
pagefault_enable();
- preempt_enable();
+ preempt_enable_no_resched();
+ /*
+ * Must never explicitly call schedule (including preempt_enable())
+ * while in a kuap-unlocked user copy, because the AMR register will
+ * not be saved and restored across context switch. However preempt
+ * kernels need to be preempted as soon as possible if need_resched is
+ * set and we are preemptible. The hack here is to schedule a
+ * decrementer to fire here and reschedule for us if necessary.
+ */
+ if (IS_ENABLED(CONFIG_PREEMPT) && need_resched())
+ set_dec(1);
return 0;
}
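
The exit_vmx_usercopy() change above depends on how callers bracket a VMX user copy. The sketch below is illustrative only — the real callers are assembly copy routines, and vmx_copy_chunk() is a hypothetical stand-in — but it shows the pattern the comment relies on: nothing between the enter/exit calls may call schedule(), so rescheduling is deferred to the decrementer armed on exit.

/*
 * Kernel-context sketch, assuming enter_vmx_usercopy() returns nonzero when
 * the VMX path may be used. vmx_copy_chunk() is hypothetical; the in-tree
 * callers are assembly copy loops.
 */
static int example_vmx_copy(void __user *to, const void *from, unsigned long n)
{
	if (!enter_vmx_usercopy())
		return -EAGAIN;		/* caller falls back to the scalar copy */

	vmx_copy_chunk(to, from, n);	/* must not sleep or call schedule() */

	return exit_vmx_usercopy();	/* re-enables preemption; may arm the decrementer */
}
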
diff --git a/arch/powerpc/lib/xor_vmx.c b/arch/powerpc/lib/xor_vmx.c
index 54e61979e80e..aab49d056d18 100644
--- a/arch/powerpc/lib/xor_vmx.c
+++ b/arch/powerpc/lib/xor_vmx.c
@@ -49,8 +49,9 @@ typedef vector signed char unative_t;
V1##_3 = vec_xor(V1##_3, V2##_3); \
} while (0)
-void __xor_altivec_2(unsigned long bytes, unsigned long *v1_in,
- unsigned long *v2_in)
+void __xor_altivec_2(unsigned long bytes,
+ unsigned long * __restrict v1_in,
+ const unsigned long * __restrict v2_in)
{
DEFINE(v1);
DEFINE(v2);
@@ -67,8 +68,10 @@ void __xor_altivec_2(unsigned long bytes, unsigned long *v1_in,
} while (--lines > 0);
}
-void __xor_altivec_3(unsigned long bytes, unsigned long *v1_in,
- unsigned long *v2_in, unsigned long *v3_in)
+void __xor_altivec_3(unsigned long bytes,
+ unsigned long * __restrict v1_in,
+ const unsigned long * __restrict v2_in,
+ const unsigned long * __restrict v3_in)
{
DEFINE(v1);
DEFINE(v2);
@@ -89,9 +92,11 @@ void __xor_altivec_3(unsigned long bytes, unsigned long *v1_in,
} while (--lines > 0);
}
-void __xor_altivec_4(unsigned long bytes, unsigned long *v1_in,
- unsigned long *v2_in, unsigned long *v3_in,
- unsigned long *v4_in)
+void __xor_altivec_4(unsigned long bytes,
+ unsigned long * __restrict v1_in,
+ const unsigned long * __restrict v2_in,
+ const unsigned long * __restrict v3_in,
+ const unsigned long * __restrict v4_in)
{
DEFINE(v1);
DEFINE(v2);
@@ -116,9 +121,12 @@ void __xor_altivec_4(unsigned long bytes, unsigned long *v1_in,
} while (--lines > 0);
}
-void __xor_altivec_5(unsigned long bytes, unsigned long *v1_in,
- unsigned long *v2_in, unsigned long *v3_in,
- unsigned long *v4_in, unsigned long *v5_in)
+void __xor_altivec_5(unsigned long bytes,
+ unsigned long * __restrict v1_in,
+ const unsigned long * __restrict v2_in,
+ const unsigned long * __restrict v3_in,
+ const unsigned long * __restrict v4_in,
+ const unsigned long * __restrict v5_in)
{
DEFINE(v1);
DEFINE(v2);
diff --git a/arch/powerpc/lib/xor_vmx.h b/arch/powerpc/lib/xor_vmx.h
index 5c2b0839b179..573c41d90dac 100644
--- a/arch/powerpc/lib/xor_vmx.h
+++ b/arch/powerpc/lib/xor_vmx.h
@@ -6,16 +6,17 @@
* outside of the enable/disable altivec block.
*/
-void __xor_altivec_2(unsigned long bytes, unsigned long *v1_in,
- unsigned long *v2_in);
-
-void __xor_altivec_3(unsigned long bytes, unsigned long *v1_in,
- unsigned long *v2_in, unsigned long *v3_in);
-
-void __xor_altivec_4(unsigned long bytes, unsigned long *v1_in,
- unsigned long *v2_in, unsigned long *v3_in,
- unsigned long *v4_in);
-
-void __xor_altivec_5(unsigned long bytes, unsigned long *v1_in,
- unsigned long *v2_in, unsigned long *v3_in,
- unsigned long *v4_in, unsigned long *v5_in);
+void __xor_altivec_2(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2);
+void __xor_altivec_3(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3);
+void __xor_altivec_4(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4);
+void __xor_altivec_5(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4,
+ const unsigned long * __restrict p5);
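
A note on the const/__restrict qualifiers introduced above: they promise the compiler that the destination line never aliases the source lines, which is what allows the vector loop to be generated without runtime overlap checks. A standalone (non-kernel) illustration of the same qualifier:

#include <stddef.h>

/*
 * With 'restrict', the compiler may vectorize this loop without emitting a
 * runtime check that dst and src overlap; without it, an overlap test or a
 * conservative scalar fallback is typically generated as well.
 */
void xor_line(size_t words, unsigned long *restrict dst,
	      const unsigned long *restrict src)
{
	size_t i;

	for (i = 0; i < words; i++)
		dst[i] ^= src[i];
}
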
diff --git a/arch/powerpc/lib/xor_vmx_glue.c b/arch/powerpc/lib/xor_vmx_glue.c
index 80dba916c367..35d917ece4d1 100644
--- a/arch/powerpc/lib/xor_vmx_glue.c
+++ b/arch/powerpc/lib/xor_vmx_glue.c
@@ -12,47 +12,51 @@
#include <asm/xor_altivec.h>
#include "xor_vmx.h"
-void xor_altivec_2(unsigned long bytes, unsigned long *v1_in,
- unsigned long *v2_in)
+void xor_altivec_2(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2)
{
preempt_disable();
enable_kernel_altivec();
- __xor_altivec_2(bytes, v1_in, v2_in);
+ __xor_altivec_2(bytes, p1, p2);
disable_kernel_altivec();
preempt_enable();
}
EXPORT_SYMBOL(xor_altivec_2);
-void xor_altivec_3(unsigned long bytes, unsigned long *v1_in,
- unsigned long *v2_in, unsigned long *v3_in)
+void xor_altivec_3(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3)
{
preempt_disable();
enable_kernel_altivec();
- __xor_altivec_3(bytes, v1_in, v2_in, v3_in);
+ __xor_altivec_3(bytes, p1, p2, p3);
disable_kernel_altivec();
preempt_enable();
}
EXPORT_SYMBOL(xor_altivec_3);
-void xor_altivec_4(unsigned long bytes, unsigned long *v1_in,
- unsigned long *v2_in, unsigned long *v3_in,
- unsigned long *v4_in)
+void xor_altivec_4(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4)
{
preempt_disable();
enable_kernel_altivec();
- __xor_altivec_4(bytes, v1_in, v2_in, v3_in, v4_in);
+ __xor_altivec_4(bytes, p1, p2, p3, p4);
disable_kernel_altivec();
preempt_enable();
}
EXPORT_SYMBOL(xor_altivec_4);
-void xor_altivec_5(unsigned long bytes, unsigned long *v1_in,
- unsigned long *v2_in, unsigned long *v3_in,
- unsigned long *v4_in, unsigned long *v5_in)
+void xor_altivec_5(unsigned long bytes, unsigned long * __restrict p1,
+ const unsigned long * __restrict p2,
+ const unsigned long * __restrict p3,
+ const unsigned long * __restrict p4,
+ const unsigned long * __restrict p5)
{
preempt_disable();
enable_kernel_altivec();
- __xor_altivec_5(bytes, v1_in, v2_in, v3_in, v4_in, v5_in);
+ __xor_altivec_5(bytes, p1, p2, p3, p4, p5);
disable_kernel_altivec();
preempt_enable();
}
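
For completeness, a minimal usage sketch of the exported wrappers above (illustrative only; in the tree they are normally reached through the generic xor_blocks template rather than called directly). The buffers are assumed to be suitably sized and aligned for the vector loop.

/* XOR one PAGE_SIZE source buffer into a destination buffer. */
static void example_xor_page(unsigned long *dst, const unsigned long *src)
{
	/* The wrapper handles preempt_disable()/enable_kernel_altivec() itself. */
	xor_altivec_2(PAGE_SIZE, dst, src);
}
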
diff --git a/arch/powerpc/math-emu/Makefile b/arch/powerpc/math-emu/Makefile
index a8794032f15f..603e59c3db10 100644
--- a/arch/powerpc/math-emu/Makefile
+++ b/arch/powerpc/math-emu/Makefile
@@ -17,4 +17,9 @@ obj-$(CONFIG_SPE) += math_efp.o
CFLAGS_fabs.o = -fno-builtin-fabs
CFLAGS_math.o = -fno-builtin-fabs
-ccflags-y = -w
+ccflags-remove-y = -Wmissing-prototypes -Wmissing-declarations -Wunused-but-set-variable
+
+ifdef KBUILD_EXTRA_WARN
+CFLAGS_math.o += -Wmissing-prototypes -Wmissing-declarations -Wunused-but-set-variable
+CFLAGS_math_efp.o += -Wmissing-prototypes -Wmissing-declarations -Wunused-but-set-variable
+endif
diff --git a/arch/powerpc/math-emu/math.c b/arch/powerpc/math-emu/math.c
index 30b4b69c6941..936a9a149037 100644
--- a/arch/powerpc/math-emu/math.c
+++ b/arch/powerpc/math-emu/math.c
@@ -24,9 +24,9 @@ FLOATFUNC(mtfsf);
FLOATFUNC(mtfsfi);
#ifdef CONFIG_MATH_EMULATION_HW_UNIMPLEMENTED
-#undef FLOATFUNC(x)
+#undef FLOATFUNC
#define FLOATFUNC(x) static inline int x(void *op1, void *op2, void *op3, \
- void *op4) { }
+ void *op4) { return 0; }
#endif
FLOATFUNC(fadd);
@@ -225,7 +225,7 @@ record_exception(struct pt_regs *regs, int eflag)
int
do_mathemu(struct pt_regs *regs)
{
- void *op0 = 0, *op1 = 0, *op2 = 0, *op3 = 0;
+ void *op0 = NULL, *op1 = NULL, *op2 = NULL, *op3 = NULL;
unsigned long pc = regs->nip;
signed short sdisp;
u32 insn = 0;
@@ -234,7 +234,7 @@ do_mathemu(struct pt_regs *regs)
int type = 0;
int eflag, trap;
- if (get_user(insn, (u32 *)pc))
+ if (get_user(insn, (u32 __user *)pc))
return -EFAULT;
switch (insn >> 26) {
@@ -396,28 +396,28 @@ do_mathemu(struct pt_regs *regs)
case XCR:
op0 = (void *)&regs->ccr;
- op1 = (void *)((insn >> 23) & 0x7);
+ op1 = (void *)(long)((insn >> 23) & 0x7);
op2 = (void *)&current->thread.TS_FPR((insn >> 16) & 0x1f);
op3 = (void *)&current->thread.TS_FPR((insn >> 11) & 0x1f);
break;
case XCRL:
op0 = (void *)&regs->ccr;
- op1 = (void *)((insn >> 23) & 0x7);
- op2 = (void *)((insn >> 18) & 0x7);
+ op1 = (void *)(long)((insn >> 23) & 0x7);
+ op2 = (void *)(long)((insn >> 18) & 0x7);
break;
case XCRB:
- op0 = (void *)((insn >> 21) & 0x1f);
+ op0 = (void *)(long)((insn >> 21) & 0x1f);
break;
case XCRI:
- op0 = (void *)((insn >> 23) & 0x7);
- op1 = (void *)((insn >> 12) & 0xf);
+ op0 = (void *)(long)((insn >> 23) & 0x7);
+ op1 = (void *)(long)((insn >> 12) & 0xf);
break;
case XFLB:
- op0 = (void *)((insn >> 17) & 0xff);
+ op0 = (void *)(long)((insn >> 17) & 0xff);
op1 = (void *)&current->thread.TS_FPR((insn >> 11) & 0x1f);
break;
@@ -453,7 +453,7 @@ do_mathemu(struct pt_regs *regs)
break;
}
- regs->nip += 4;
+ regs_add_return_ip(regs, 4);
return 0;
illegal:
diff --git a/arch/powerpc/math-emu/math_efp.c b/arch/powerpc/math-emu/math_efp.c
index 0a05e51964c1..34f62aafe706 100644
--- a/arch/powerpc/math-emu/math_efp.c
+++ b/arch/powerpc/math-emu/math_efp.c
@@ -17,6 +17,7 @@
#include <linux/types.h>
#include <linux/prctl.h>
+#include <linux/module.h>
#include <linux/uaccess.h>
#include <asm/reg.h>
@@ -218,6 +219,7 @@ int do_spe_mathemu(struct pt_regs *regs)
case AB:
case XCR:
FP_UNPACK_SP(SA, va.wp + 1);
+ fallthrough;
case XB:
FP_UNPACK_SP(SB, vb.wp + 1);
break;
@@ -226,8 +228,8 @@ int do_spe_mathemu(struct pt_regs *regs)
break;
}
- pr_debug("SA: %ld %08lx %ld (%ld)\n", SA_s, SA_f, SA_e, SA_c);
- pr_debug("SB: %ld %08lx %ld (%ld)\n", SB_s, SB_f, SB_e, SB_c);
+ pr_debug("SA: %d %08x %d (%d)\n", SA_s, SA_f, SA_e, SA_c);
+ pr_debug("SB: %d %08x %d (%d)\n", SB_s, SB_f, SB_e, SB_c);
switch (func) {
case EFSABS:
@@ -278,7 +280,7 @@ int do_spe_mathemu(struct pt_regs *regs)
} else {
SB_e += (func == EFSCTSF ? 31 : 32);
FP_TO_INT_ROUND_S(vc.wp[1], SB, 32,
- (func == EFSCTSF));
+ (func == EFSCTSF) ? 1 : 0);
}
goto update_regs;
@@ -287,7 +289,7 @@ int do_spe_mathemu(struct pt_regs *regs)
FP_CLEAR_EXCEPTIONS;
FP_UNPACK_DP(DB, vb.dp);
- pr_debug("DB: %ld %08lx %08lx %ld (%ld)\n",
+ pr_debug("DB: %d %08x %08x %d (%d)\n",
DB_s, DB_f1, DB_f0, DB_e, DB_c);
FP_CONV(S, D, 1, 2, SR, DB);
@@ -301,7 +303,7 @@ int do_spe_mathemu(struct pt_regs *regs)
FP_SET_EXCEPTION(FP_EX_INVALID);
} else {
FP_TO_INT_ROUND_S(vc.wp[1], SB, 32,
- ((func & 0x3) != 0));
+ ((func & 0x3) != 0) ? 1 : 0);
}
goto update_regs;
@@ -312,7 +314,7 @@ int do_spe_mathemu(struct pt_regs *regs)
FP_SET_EXCEPTION(FP_EX_INVALID);
} else {
FP_TO_INT_S(vc.wp[1], SB, 32,
- ((func & 0x3) != 0));
+ ((func & 0x3) != 0) ? 1 : 0);
}
goto update_regs;
@@ -322,7 +324,7 @@ int do_spe_mathemu(struct pt_regs *regs)
break;
pack_s:
- pr_debug("SR: %ld %08lx %ld (%ld)\n", SR_s, SR_f, SR_e, SR_c);
+ pr_debug("SR: %d %08x %d (%d)\n", SR_s, SR_f, SR_e, SR_c);
FP_PACK_SP(vc.wp + 1, SR);
goto update_regs;
@@ -346,6 +348,7 @@ cmp_s:
case AB:
case XCR:
FP_UNPACK_DP(DA, va.dp);
+ fallthrough;
case XB:
FP_UNPACK_DP(DB, vb.dp);
break;
@@ -354,9 +357,9 @@ cmp_s:
break;
}
- pr_debug("DA: %ld %08lx %08lx %ld (%ld)\n",
+ pr_debug("DA: %d %08x %08x %d (%d)\n",
DA_s, DA_f1, DA_f0, DA_e, DA_c);
- pr_debug("DB: %ld %08lx %08lx %ld (%ld)\n",
+ pr_debug("DB: %d %08x %08x %d (%d)\n",
DB_s, DB_f1, DB_f0, DB_e, DB_c);
switch (func) {
@@ -408,7 +411,7 @@ cmp_s:
} else {
DB_e += (func == EFDCTSF ? 31 : 32);
FP_TO_INT_ROUND_D(vc.wp[1], DB, 32,
- (func == EFDCTSF));
+ (func == EFDCTSF) ? 1 : 0);
}
goto update_regs;
@@ -417,7 +420,7 @@ cmp_s:
FP_CLEAR_EXCEPTIONS;
FP_UNPACK_SP(SB, vb.wp + 1);
- pr_debug("SB: %ld %08lx %ld (%ld)\n",
+ pr_debug("SB: %d %08x %d (%d)\n",
SB_s, SB_f, SB_e, SB_c);
FP_CONV(D, S, 2, 1, DR, SB);
@@ -431,7 +434,7 @@ cmp_s:
FP_SET_EXCEPTION(FP_EX_INVALID);
} else {
FP_TO_INT_D(vc.dp[0], DB, 64,
- ((func & 0x1) == 0));
+ ((func & 0x1) == 0) ? 1 : 0);
}
goto update_regs;
@@ -442,7 +445,7 @@ cmp_s:
FP_SET_EXCEPTION(FP_EX_INVALID);
} else {
FP_TO_INT_ROUND_D(vc.wp[1], DB, 32,
- ((func & 0x3) != 0));
+ ((func & 0x3) != 0) ? 1 : 0);
}
goto update_regs;
@@ -453,7 +456,7 @@ cmp_s:
FP_SET_EXCEPTION(FP_EX_INVALID);
} else {
FP_TO_INT_D(vc.wp[1], DB, 32,
- ((func & 0x3) != 0));
+ ((func & 0x3) != 0) ? 1 : 0);
}
goto update_regs;
@@ -463,7 +466,7 @@ cmp_s:
break;
pack_d:
- pr_debug("DR: %ld %08lx %08lx %ld (%ld)\n",
+ pr_debug("DR: %d %08x %08x %d (%d)\n",
DR_s, DR_f1, DR_f0, DR_e, DR_c);
FP_PACK_DP(vc.dp, DR);
@@ -492,6 +495,7 @@ cmp_d:
case XCR:
FP_UNPACK_SP(SA0, va.wp);
FP_UNPACK_SP(SA1, va.wp + 1);
+ fallthrough;
case XB:
FP_UNPACK_SP(SB0, vb.wp);
FP_UNPACK_SP(SB1, vb.wp + 1);
@@ -502,13 +506,13 @@ cmp_d:
break;
}
- pr_debug("SA0: %ld %08lx %ld (%ld)\n",
+ pr_debug("SA0: %d %08x %d (%d)\n",
SA0_s, SA0_f, SA0_e, SA0_c);
- pr_debug("SA1: %ld %08lx %ld (%ld)\n",
+ pr_debug("SA1: %d %08x %d (%d)\n",
SA1_s, SA1_f, SA1_e, SA1_c);
- pr_debug("SB0: %ld %08lx %ld (%ld)\n",
+ pr_debug("SB0: %d %08x %d (%d)\n",
SB0_s, SB0_f, SB0_e, SB0_c);
- pr_debug("SB1: %ld %08lx %ld (%ld)\n",
+ pr_debug("SB1: %d %08x %d (%d)\n",
SB1_s, SB1_f, SB1_e, SB1_c);
switch (func) {
@@ -567,7 +571,7 @@ cmp_d:
} else {
SB0_e += (func == EVFSCTSF ? 31 : 32);
FP_TO_INT_ROUND_S(vc.wp[0], SB0, 32,
- (func == EVFSCTSF));
+ (func == EVFSCTSF) ? 1 : 0);
}
if (SB1_c == FP_CLS_NAN) {
vc.wp[1] = 0;
@@ -575,7 +579,7 @@ cmp_d:
} else {
SB1_e += (func == EVFSCTSF ? 31 : 32);
FP_TO_INT_ROUND_S(vc.wp[1], SB1, 32,
- (func == EVFSCTSF));
+ (func == EVFSCTSF) ? 1 : 0);
}
goto update_regs;
@@ -586,14 +590,14 @@ cmp_d:
FP_SET_EXCEPTION(FP_EX_INVALID);
} else {
FP_TO_INT_ROUND_S(vc.wp[0], SB0, 32,
- ((func & 0x3) != 0));
+ ((func & 0x3) != 0) ? 1 : 0);
}
if (SB1_c == FP_CLS_NAN) {
vc.wp[1] = 0;
FP_SET_EXCEPTION(FP_EX_INVALID);
} else {
FP_TO_INT_ROUND_S(vc.wp[1], SB1, 32,
- ((func & 0x3) != 0));
+ ((func & 0x3) != 0) ? 1 : 0);
}
goto update_regs;
@@ -604,14 +608,14 @@ cmp_d:
FP_SET_EXCEPTION(FP_EX_INVALID);
} else {
FP_TO_INT_S(vc.wp[0], SB0, 32,
- ((func & 0x3) != 0));
+ ((func & 0x3) != 0) ? 1 : 0);
}
if (SB1_c == FP_CLS_NAN) {
vc.wp[1] = 0;
FP_SET_EXCEPTION(FP_EX_INVALID);
} else {
FP_TO_INT_S(vc.wp[1], SB1, 32,
- ((func & 0x3) != 0));
+ ((func & 0x3) != 0) ? 1 : 0);
}
goto update_regs;
@@ -621,9 +625,9 @@ cmp_d:
break;
pack_vs:
- pr_debug("SR0: %ld %08lx %ld (%ld)\n",
+ pr_debug("SR0: %d %08x %d (%d)\n",
SR0_s, SR0_f, SR0_e, SR0_c);
- pr_debug("SR1: %ld %08lx %ld (%ld)\n",
+ pr_debug("SR1: %d %08x %d (%d)\n",
SR1_s, SR1_f, SR1_e, SR1_c);
FP_PACK_SP(vc.wp, SR0);
@@ -710,7 +714,7 @@ update_regs:
illegal:
if (have_e500_cpu_a005_erratum) {
/* according to e500 cpu a005 erratum, reissue efp inst */
- regs->nip -= 4;
+ regs_add_return_ip(regs, -4);
pr_debug("re-issue efp inst: %08lx\n", speinsn);
return 0;
}
@@ -886,7 +890,7 @@ int speround_handler(struct pt_regs *regs)
return 0;
}
-int __init spe_mathemu_init(void)
+static int __init spe_mathemu_init(void)
{
u32 pvr, maj, min;
diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile
index 5e147986400d..503a6e249940 100644
--- a/arch/powerpc/mm/Makefile
+++ b/arch/powerpc/mm/Makefile
@@ -5,18 +5,17 @@
ccflags-$(CONFIG_PPC64) := $(NO_MINIMAL_TOC)
-obj-y := fault.o mem.o pgtable.o mmap.o \
+obj-y := fault.o mem.o pgtable.o maccess.o pageattr.o \
init_$(BITS).o pgtable_$(BITS).o \
pgtable-frag.o ioremap.o ioremap_$(BITS).o \
- init-common.o mmu_context.o drmem.o
+ init-common.o mmu_context.o drmem.o \
+ cacheflush.o
obj-$(CONFIG_PPC_MMU_NOHASH) += nohash/
obj-$(CONFIG_PPC_BOOK3S_32) += book3s32/
obj-$(CONFIG_PPC_BOOK3S_64) += book3s64/
-obj-$(CONFIG_NEED_MULTIPLE_NODES) += numa.o
-obj-$(CONFIG_PPC_MM_SLICES) += slice.o
+obj-$(CONFIG_NUMA) += numa.o
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
obj-$(CONFIG_NOT_COHERENT_CACHE) += dma-noncoherent.o
-obj-$(CONFIG_HIGHMEM) += highmem.o
obj-$(CONFIG_PPC_COPRO_BASE) += copro_fault.o
-obj-$(CONFIG_PPC_PTDUMP) += ptdump/
+obj-$(CONFIG_PTDUMP_CORE) += ptdump/
obj-$(CONFIG_KASAN) += kasan/
diff --git a/arch/powerpc/mm/book3s32/Makefile b/arch/powerpc/mm/book3s32/Makefile
index 1732eaa740a9..50dd8f6bdf46 100644
--- a/arch/powerpc/mm/book3s32/Makefile
+++ b/arch/powerpc/mm/book3s32/Makefile
@@ -6,4 +6,7 @@ ifdef CONFIG_KASAN
CFLAGS_mmu.o += -DDISABLE_BRANCH_PROFILING
endif
-obj-y += mmu.o hash_low.o mmu_context.o tlb.o
+obj-y += mmu.o mmu_context.o
+obj-$(CONFIG_PPC_BOOK3S_603) += nohash_low.o
+obj-$(CONFIG_PPC_BOOK3S_604) += hash_low.o tlb.o
+obj-$(CONFIG_PPC_KUAP) += kuap.o
diff --git a/arch/powerpc/mm/book3s32/hash_low.S b/arch/powerpc/mm/book3s32/hash_low.S
index 2015c4f96238..6925ce998557 100644
--- a/arch/powerpc/mm/book3s32/hash_low.S
+++ b/arch/powerpc/mm/book3s32/hash_low.S
@@ -14,9 +14,10 @@
* hash table, so this file is not used on them.)
*/
+#include <linux/pgtable.h>
+#include <linux/init.h>
#include <asm/reg.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/cputable.h>
#include <asm/ppc_asm.h>
#include <asm/thread_info.h>
@@ -25,12 +26,13 @@
#include <asm/feature-fixups.h>
#include <asm/code-patching-asm.h>
-#ifdef CONFIG_SMP
- .section .bss
- .align 2
-mmu_hash_lock:
- .space 4
-#endif /* CONFIG_SMP */
+#ifdef CONFIG_PTE_64BIT
+#define PTE_T_SIZE 8
+#define PTE_FLAGS_OFFSET 4 /* offset of PTE flags, in bytes */
+#else
+#define PTE_T_SIZE 4
+#define PTE_FLAGS_OFFSET 0
+#endif
/*
* Load a PTE into the hash table, if possible.
@@ -62,15 +64,16 @@ _GLOBAL(hash_page)
isync
#endif
/* Get PTE (linux-style) and check access */
- lis r0,KERNELBASE@h /* check if kernel address */
+ lis r0, TASK_SIZE@h /* check if kernel address */
cmplw 0,r4,r0
+ mfspr r8,SPRN_SPRG_THREAD /* current task's THREAD (phys) */
ori r3,r3,_PAGE_USER|_PAGE_PRESENT /* test low addresses as user */
- mfspr r5, SPRN_SPRG_PGDIR /* phys page-table root */
+ lwz r5,PGDIR(r8) /* virt page-table root */
blt+ 112f /* assume user more likely */
- lis r5, (swapper_pg_dir - PAGE_OFFSET)@ha /* if kernel address, use */
- addi r5 ,r5 ,(swapper_pg_dir - PAGE_OFFSET)@l /* kernel page table */
+ lis r5,swapper_pg_dir@ha /* if kernel address, use */
+ addi r5,r5,swapper_pg_dir@l /* kernel page table */
rlwimi r3,r9,32-12,29,29 /* MSR_PR -> _PAGE_USER */
-112:
+112: tophys(r5, r5)
#ifndef CONFIG_PTE_64BIT
rlwimi r5,r4,12,20,29 /* insert top 10 bits of address */
lwz r8,0(r5) /* get pmd entry */
@@ -81,7 +84,7 @@ _GLOBAL(hash_page)
rlwinm. r8,r8,0,0,20 /* extract pt base address */
#endif
#ifdef CONFIG_SMP
- beq- hash_page_out /* return if no mapping */
+ beq- .Lhash_page_out /* return if no mapping */
#else
/* XXX it seems like the 601 will give a machine fault on the
rfi if its alignment is wrong (bottom 4 bits of address are
@@ -93,27 +96,35 @@ _GLOBAL(hash_page)
rlwimi r8,r4,22,20,29 /* insert next 10 bits of address */
#else
rlwimi r8,r4,23,20,28 /* compute pte address */
+ /*
+ * If PTE_64BIT is set, the low word is the flags word; use that
+ * word for locking since it contains all the interesting bits.
+ */
+ addi r8,r8,PTE_FLAGS_OFFSET
#endif
- rlwinm r0,r3,32-3,24,24 /* _PAGE_RW access -> _PAGE_DIRTY */
- ori r0,r0,_PAGE_ACCESSED|_PAGE_HASHPTE
/*
* Update the linux PTE atomically. We do the lwarx up-front
* because almost always, there won't be a permission violation
* and there won't already be an HPTE, and thus we will have
* to update the PTE to set _PAGE_HASHPTE. -- paulus.
- *
- * If PTE_64BIT is set, the low word is the flags word; use that
- * word for locking since it contains all the interesting bits.
*/
-#if (PTE_FLAGS_OFFSET != 0)
- addi r8,r8,PTE_FLAGS_OFFSET
-#endif
-retry:
+.Lretry:
lwarx r6,0,r8 /* get linux-style pte, flag word */
+#ifdef CONFIG_PPC_KUAP
+ mfsrin r5,r4
+ rlwinm r0,r9,28,_PAGE_RW /* MSR[PR] => _PAGE_RW */
+ rlwinm r5,r5,12,_PAGE_RW /* Ks => _PAGE_RW */
+ andc r5,r5,r0 /* Ks & ~MSR[PR] */
+ andc r5,r6,r5 /* Clear _PAGE_RW when Ks = 1 && MSR[PR] = 0 */
+ andc. r5,r3,r5 /* check access & ~permission */
+#else
andc. r5,r3,r6 /* check access & ~permission */
+#endif
+ rlwinm r0,r3,32-3,24,24 /* _PAGE_RW access -> _PAGE_DIRTY */
+ ori r0,r0,_PAGE_ACCESSED|_PAGE_HASHPTE
#ifdef CONFIG_SMP
- bne- hash_page_out /* return if access not permitted */
+ bne- .Lhash_page_out /* return if access not permitted */
#else
bnelr-
#endif
@@ -128,13 +139,9 @@ retry:
#endif /* CONFIG_SMP */
#endif /* CONFIG_PTE_64BIT */
stwcx. r5,0,r8 /* attempt to update PTE */
- bne- retry /* retry if someone got there first */
+ bne- .Lretry /* retry if someone got there first */
mfsrin r3,r4 /* get segment reg for segment */
-#ifndef CONFIG_VMAP_STACK
- mfctr r0
- stw r0,_CTR(r11)
-#endif
bl create_hpte /* add the hash table entry */
#ifdef CONFIG_SMP
@@ -143,26 +150,17 @@ retry:
li r0,0
stw r0, (mmu_hash_lock - PAGE_OFFSET)@l(r8)
#endif
-
-#ifdef CONFIG_VMAP_STACK
b fast_hash_page_return
-#else
- /* Return from the exception */
- lwz r5,_CTR(r11)
- mtctr r5
- lwz r0,GPR0(r11)
- lwz r8,GPR8(r11)
- b fast_exception_return
-#endif
#ifdef CONFIG_SMP
-hash_page_out:
+.Lhash_page_out:
eieio
lis r8, (mmu_hash_lock - PAGE_OFFSET)@ha
li r0,0
stw r0, (mmu_hash_lock - PAGE_OFFSET)@l(r8)
blr
#endif /* CONFIG_SMP */
+_ASM_NOKPROBE_SYMBOL(hash_page)
/*
* Add an entry for a particular page to the hash table.
@@ -177,12 +175,6 @@ _GLOBAL(add_hash_page)
mflr r0
stw r0,4(r1)
- /* Convert context and va to VSID */
- mulli r3,r3,897*16 /* multiply context by context skew */
- rlwinm r0,r4,4,28,31 /* get ESID (top 4 bits of va) */
- mulli r0,r0,0x111 /* multiply by ESID skew */
- add r3,r3,r0 /* note create_hpte trims to 24 bits */
-
#ifdef CONFIG_SMP
lwz r8,TASK_CPU(r2) /* to go in mmu_hash_lock */
oris r8,r8,12
@@ -198,11 +190,9 @@ _GLOBAL(add_hash_page)
* covered by a BAT). -- paulus
*/
mfmsr r9
- SYNC
rlwinm r0,r9,0,17,15 /* clear bit 16 (MSR_EE) */
rlwinm r0,r0,0,28,26 /* clear MSR_DR */
mtmsr r0
- SYNC_601
isync
#ifdef CONFIG_SMP
@@ -248,6 +238,12 @@ _GLOBAL(add_hash_page)
stwcx. r5,0,r8
bne- 1b
+ /* Convert context and va to VSID */
+ mulli r3,r3,897*16 /* multiply context by context skew */
+ rlwinm r0,r4,4,28,31 /* get ESID (top 4 bits of va) */
+ mulli r0,r0,0x111 /* multiply by ESID skew */
+ add r3,r3,r0 /* note create_hpte trims to 24 bits */
+
bl create_hpte
9:
@@ -261,12 +257,12 @@ _GLOBAL(add_hash_page)
/* reenable interrupts and DR */
mtmsr r9
- SYNC_601
isync
lwz r0,4(r1)
mtlr r0
blr
+_ASM_NOKPROBE_SYMBOL(add_hash_page)
/*
* This routine adds a hardware PTE to the hash table.
@@ -285,9 +281,9 @@ _GLOBAL(add_hash_page)
*
* For speed, 4 of the instructions get patched once the size and
* physical address of the hash table are known. These definitions
- * of Hash_base and Hash_bits below are just an example.
+ * of Hash_base and Hash_bits below are for the early hash table.
*/
-Hash_base = 0xc0180000
+Hash_base = early_hash
Hash_bits = 12 /* e.g. 256kB hash table */
Hash_msk = (((1 << Hash_bits) - 1) * 64)
@@ -308,6 +304,7 @@ Hash_msk = (((1 << Hash_bits) - 1) * 64)
#define HASH_LEFT 31-(LG_PTEG_SIZE+Hash_bits-1)
#define HASH_RIGHT 31-LG_PTEG_SIZE
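Roughly, the patched instructions in create_hpte turn a hash value into a PTEG address as sketched below (illustration only; the real code builds the address with rlwinm/rlwimi so that Hash_base and Hash_msk can be patched at boot once the table is allocated).

/*
 * hash is the low bits of the VSID xor'ed with the page index within the
 * segment; each PTEG is 64 bytes and Hash_msk keeps the index in the table.
 */
static inline unsigned long pteg_address(unsigned long hash)
{
	return Hash_base + ((hash * 64) & Hash_msk);
}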
+__REF
_GLOBAL(create_hpte)
/* Convert linux-style PTE (r5) to low word of PPC-style PTE (r8) */
rlwinm r8,r5,32-9,30,30 /* _PAGE_RW -> PP msb */
@@ -349,18 +346,13 @@ END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
beq+ 10f /* no PTE: go look for an empty slot */
tlbie r4
- lis r4, (htab_hash_searches - PAGE_OFFSET)@ha
- lwz r6, (htab_hash_searches - PAGE_OFFSET)@l(r4)
- addi r6,r6,1 /* count how many searches we do */
- stw r6, (htab_hash_searches - PAGE_OFFSET)@l(r4)
-
/* Search the primary PTEG for a PTE whose 1st (d)word matches r5 */
mtctr r0
addi r4,r3,-HPTE_SIZE
1: LDPTEu r6,HPTE_SIZE(r4) /* get next PTE */
CMPPTE 0,r6,r5
bdnzf 2,1b /* loop while ctr != 0 && !cr0.eq */
- beq+ found_slot
+ beq+ .Lfound_slot
patch_site 0f, patch__hash_page_B
/* Search the secondary PTEG for a matching PTE */
@@ -372,7 +364,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
2: LDPTEu r6,HPTE_SIZE(r4)
CMPPTE 0,r6,r5
bdnzf 2,2b
- beq+ found_slot
+ beq+ .Lfound_slot
xori r5,r5,PTE_H /* clear H bit again */
/* Search the primary PTEG for an empty slot */
@@ -381,13 +373,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
1: LDPTEu r6,HPTE_SIZE(r4) /* get next PTE */
TST_V(r6) /* test valid bit */
bdnzf 2,1b /* loop while ctr != 0 && !cr0.eq */
- beq+ found_empty
-
- /* update counter of times that the primary PTEG is full */
- lis r4, (primary_pteg_full - PAGE_OFFSET)@ha
- lwz r6, (primary_pteg_full - PAGE_OFFSET)@l(r4)
- addi r6,r6,1
- stw r6, (primary_pteg_full - PAGE_OFFSET)@l(r4)
+ beq+ .Lfound_empty
patch_site 0f, patch__hash_page_C
/* Search the secondary PTEG for an empty slot */
@@ -399,7 +385,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
2: LDPTEu r6,HPTE_SIZE(r4)
TST_V(r6)
bdnzf 2,2b
- beq+ found_empty
+ beq+ .Lfound_empty
xori r5,r5,PTE_H /* clear H bit again */
/*
@@ -410,36 +396,20 @@ END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
* and we know there is a definite (although small) speed
* advantage to putting the PTE in the primary PTEG, we always
* put the PTE in the primary PTEG.
- *
- * In addition, we skip any slot that is mapping kernel text in
- * order to avoid a deadlock when not using BAT mappings if
- * trying to hash in the kernel hash code itself after it has
- * already taken the hash table lock. This works in conjunction
- * with pre-faulting of the kernel text.
- *
- * If the hash table bucket is full of kernel text entries, we'll
- * lockup here but that shouldn't happen
*/
-1: lis r4, (next_slot - PAGE_OFFSET)@ha /* get next evict slot */
+ lis r4, (next_slot - PAGE_OFFSET)@ha /* get next evict slot */
lwz r6, (next_slot - PAGE_OFFSET)@l(r4)
addi r6,r6,HPTE_SIZE /* search for candidate */
andi. r6,r6,7*HPTE_SIZE
stw r6,next_slot@l(r4)
add r4,r3,r6
- LDPTE r0,HPTE_SIZE/2(r4) /* get PTE second word */
- clrrwi r0,r0,12
- lis r6,etext@h
- ori r6,r6,etext@l /* get etext */
- tophys(r6,r6)
- cmpl cr0,r0,r6 /* compare and try again */
- blt 1b
#ifndef CONFIG_SMP
/* Store PTE in PTEG */
-found_empty:
+.Lfound_empty:
STPTE r5,0(r4)
-found_slot:
+.Lfound_slot:
STPTE r8,HPTE_SIZE/2(r4)
#else /* CONFIG_SMP */
@@ -460,8 +430,8 @@ found_slot:
* We do however have to make sure that the PTE is never in an invalid
* state with the V bit set.
*/
-found_empty:
-found_slot:
+.Lfound_empty:
+.Lfound_slot:
CLR_V(r5,r0) /* clear V (valid) bit in PTE */
STPTE r5,0(r4)
sync
@@ -474,15 +444,13 @@ found_slot:
sync /* make sure pte updates get to memory */
blr
+ .previous
+_ASM_NOKPROBE_SYMBOL(create_hpte)
.section .bss
.align 2
next_slot:
.space 4
-primary_pteg_full:
- .space 4
-htab_hash_searches:
- .space 4
.previous
/*
@@ -493,6 +461,7 @@ htab_hash_searches:
*
* We assume that there is a hash table in use (Hash != 0).
*/
+__REF
_GLOBAL(flush_hash_pages)
/*
* We disable interrupts here, even on UP, because we want
@@ -503,11 +472,9 @@ _GLOBAL(flush_hash_pages)
* covered by a BAT). -- paulus
*/
mfmsr r10
- SYNC
rlwinm r0,r10,0,17,15 /* clear bit 16 (MSR_EE) */
rlwinm r0,r0,0,28,26 /* clear MSR_DR */
mtmsr r0
- SYNC_601
isync
/* First find a PTE in the range that has _PAGE_HASHPTE set */
@@ -515,14 +482,15 @@ _GLOBAL(flush_hash_pages)
rlwimi r5,r4,22,20,29
#else
rlwimi r5,r4,23,20,28
+ addi r5,r5,PTE_FLAGS_OFFSET
#endif
-1: lwz r0,PTE_FLAGS_OFFSET(r5)
+1: lwz r0,0(r5)
cmpwi cr1,r6,1
andi. r0,r0,_PAGE_HASHPTE
bne 2f
ble cr1,19f
addi r4,r4,0x1000
- addi r5,r5,PTE_SIZE
+ addi r5,r5,PTE_T_SIZE
addi r6,r6,-1
b 1b
@@ -560,9 +528,6 @@ _GLOBAL(flush_hash_pages)
* already clear, we're done (for this pte). If not,
* clear it (atomically) and proceed. -- paulus.
*/
-#if (PTE_FLAGS_OFFSET != 0)
- addi r5,r5,PTE_FLAGS_OFFSET
-#endif
33: lwarx r8,0,r5 /* fetch the pte flags word */
andi. r0,r8,_PAGE_HASHPTE
beq 8f /* done if HASHPTE is already clear */
@@ -610,7 +575,7 @@ _GLOBAL(flush_hash_pages)
8: ble cr1,9f /* if all ptes checked */
81: addi r6,r6,-1
- addi r5,r5,PTE_SIZE
+ addi r5,r5,PTE_T_SIZE
addi r4,r4,0x1000
lwz r0,0(r5) /* check next pte */
cmpwi cr1,r6,1
@@ -626,82 +591,8 @@ _GLOBAL(flush_hash_pages)
#endif
19: mtmsr r10
- SYNC_601
isync
blr
+ .previous
EXPORT_SYMBOL(flush_hash_pages)
-
-/*
- * Flush an entry from the TLB
- */
-_GLOBAL(_tlbie)
-#ifdef CONFIG_SMP
- lwz r8,TASK_CPU(r2)
- oris r8,r8,11
- mfmsr r10
- SYNC
- rlwinm r0,r10,0,17,15 /* clear bit 16 (MSR_EE) */
- rlwinm r0,r0,0,28,26 /* clear DR */
- mtmsr r0
- SYNC_601
- isync
- lis r9,mmu_hash_lock@h
- ori r9,r9,mmu_hash_lock@l
- tophys(r9,r9)
-10: lwarx r7,0,r9
- cmpwi 0,r7,0
- bne- 10b
- stwcx. r8,0,r9
- bne- 10b
- eieio
- tlbie r3
- sync
- TLBSYNC
- li r0,0
- stw r0,0(r9) /* clear mmu_hash_lock */
- mtmsr r10
- SYNC_601
- isync
-#else /* CONFIG_SMP */
- tlbie r3
- sync
-#endif /* CONFIG_SMP */
- blr
-
-/*
- * Flush the entire TLB. 603/603e only
- */
-_GLOBAL(_tlbia)
-#if defined(CONFIG_SMP)
- lwz r8,TASK_CPU(r2)
- oris r8,r8,10
- mfmsr r10
- SYNC
- rlwinm r0,r10,0,17,15 /* clear bit 16 (MSR_EE) */
- rlwinm r0,r0,0,28,26 /* clear DR */
- mtmsr r0
- SYNC_601
- isync
- lis r9,mmu_hash_lock@h
- ori r9,r9,mmu_hash_lock@l
- tophys(r9,r9)
-10: lwarx r7,0,r9
- cmpwi 0,r7,0
- bne- 10b
- stwcx. r8,0,r9
- bne- 10b
- sync
- tlbia
- sync
- TLBSYNC
- li r0,0
- stw r0,0(r9) /* clear mmu_hash_lock */
- mtmsr r10
- SYNC_601
- isync
-#else /* CONFIG_SMP */
- sync
- tlbia
- sync
-#endif /* CONFIG_SMP */
- blr
+_ASM_NOKPROBE_SYMBOL(flush_hash_pages)
diff --git a/arch/powerpc/mm/book3s32/kuap.c b/arch/powerpc/mm/book3s32/kuap.c
new file mode 100644
index 000000000000..28676cabb005
--- /dev/null
+++ b/arch/powerpc/mm/book3s32/kuap.c
@@ -0,0 +1,36 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <asm/kup.h>
+#include <asm/smp.h>
+
+struct static_key_false disable_kuap_key;
+EXPORT_SYMBOL(disable_kuap_key);
+
+void kuap_lock_all_ool(void)
+{
+ kuap_lock_all();
+}
+EXPORT_SYMBOL(kuap_lock_all_ool);
+
+void kuap_unlock_all_ool(void)
+{
+ kuap_unlock_all();
+}
+EXPORT_SYMBOL(kuap_unlock_all_ool);
+
+void setup_kuap(bool disabled)
+{
+ if (!disabled) {
+ kuap_lock_all_ool();
+ init_mm.context.sr0 |= SR_KS;
+ current->thread.sr0 |= SR_KS;
+ }
+
+ if (smp_processor_id() != boot_cpuid)
+ return;
+
+ if (disabled)
+ static_branch_enable(&disable_kuap_key);
+ else
+ pr_info("Activating Kernel Userspace Access Protection\n");
+}
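For reference, the static key is meant to be consumed by inline helpers such as kuap_is_disabled(), roughly as sketched below (presumably provided by the book3s/32 kup header; this is only a reminder of the intended usage, not part of this file):

static __always_inline bool kuap_is_disabled(void)
{
	return static_branch_unlikely(&disable_kuap_key);
}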
diff --git a/arch/powerpc/mm/book3s32/mmu.c b/arch/powerpc/mm/book3s32/mmu.c
index f888cbb109b9..850783cfa9c7 100644
--- a/arch/powerpc/mm/book3s32/mmu.c
+++ b/arch/powerpc/mm/book3s32/mmu.c
@@ -23,7 +23,6 @@
#include <linux/highmem.h>
#include <linux/memblock.h>
-#include <asm/prom.h>
#include <asm/mmu.h>
#include <asm/machdep.h>
#include <asm/code-patching.h>
@@ -31,19 +30,25 @@
#include <mm/mmu_decl.h>
-struct hash_pte *Hash;
-static unsigned long Hash_size, Hash_mask;
-unsigned long _SDR1;
-static unsigned int hash_mb, hash_mb2;
+u8 __initdata early_hash[SZ_256K] __aligned(SZ_256K) = {0};
+
+static struct hash_pte __initdata *Hash = (struct hash_pte *)early_hash;
+static unsigned long __initdata Hash_size, Hash_mask;
+static unsigned int __initdata hash_mb, hash_mb2;
+unsigned long __initdata _SDR1;
struct ppc_bat BATS[8][2]; /* 8 pairs of IBAT, DBAT */
-struct batrange { /* stores address ranges mapped by BATs */
+static struct batrange { /* stores address ranges mapped by BATs */
unsigned long start;
unsigned long limit;
phys_addr_t phys;
} bat_addrs[8];
+#ifdef CONFIG_SMP
+unsigned long mmu_hash_lock;
+#endif
+
/*
* Return PA for this VA if it is mapped by a BAT, or 0
*/
@@ -70,26 +75,16 @@ unsigned long p_block_mapped(phys_addr_t pa)
return 0;
}
-static int find_free_bat(void)
+int __init find_free_bat(void)
{
int b;
+ int n = mmu_has_feature(MMU_FTR_USE_HIGH_BATS) ? 8 : 4;
- if (IS_ENABLED(CONFIG_PPC_BOOK3S_601)) {
- for (b = 0; b < 4; b++) {
- struct ppc_bat *bat = BATS[b];
-
- if (!(bat[0].batl & 0x40))
- return b;
- }
- } else {
- int n = mmu_has_feature(MMU_FTR_USE_HIGH_BATS) ? 8 : 4;
-
- for (b = 0; b < n; b++) {
- struct ppc_bat *bat = BATS[b];
+ for (b = 0; b < n; b++) {
+ struct ppc_bat *bat = BATS[b];
- if (!(bat[1].batu & 3))
- return b;
- }
+ if (!(bat[1].batu & 3))
+ return b;
}
return -1;
}
@@ -97,16 +92,16 @@ static int find_free_bat(void)
/*
* This function calculates the size of the larger block usable to map the
* beginning of an area based on the start address and size of that area:
- * - max block size is 8M on 601 and 256 on other 6xx.
+ * - max block size is 256M on 6xx.
* - base address must be aligned to the block size. So the maximum block size
* is identified by the lowest bit set to 1 in the base address (for instance
* if base is 0x16000000, max size is 0x02000000).
* - block size has to be a power of two. This is calculated by finding the
* highest bit set to 1.
*/
-static unsigned int block_size(unsigned long base, unsigned long top)
+unsigned int bat_block_size(unsigned long base, unsigned long top)
{
- unsigned int max_size = IS_ENABLED(CONFIG_PPC_BOOK3S_601) ? SZ_8M : SZ_256M;
+ unsigned int max_size = SZ_256M;
unsigned int base_shift = (ffs(base) - 1) & 31;
unsigned int block_shift = (fls(top - base) - 1) & 31;
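A couple of worked calls, with made-up addresses, show how the two limits described in the comment interact (the result is the smallest of 256M, the base-alignment block and the area-size block):

	bat_block_size(0x16000000, 0x18000000);	/* -> 32M: limited by base alignment */
	bat_block_size(0x10000000, 0x10020000);	/* -> 128K: limited by the area size */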
@@ -117,7 +112,6 @@ static unsigned int block_size(unsigned long base, unsigned long top)
* Set up one of the IBAT (block address translation) register pairs.
* The parameters are not checked; in particular size must be a power
* of 2 between 128k and 256M.
- * Only for 603+ ...
*/
static void setibat(int index, unsigned long virt, phys_addr_t phys,
unsigned int size, pgprot_t prot)
@@ -150,7 +144,7 @@ static unsigned long __init __mmu_mapin_ram(unsigned long base, unsigned long to
int idx;
while ((idx = find_free_bat()) != -1 && base != top) {
- unsigned int size = block_size(base, top);
+ unsigned int size = bat_block_size(base, top);
if (size < 128 << 10)
break;
@@ -164,11 +158,18 @@ static unsigned long __init __mmu_mapin_ram(unsigned long base, unsigned long to
unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
{
unsigned long done;
- unsigned long border = (unsigned long)__init_begin - PAGE_OFFSET;
+ unsigned long border = (unsigned long)__srwx_boundary - PAGE_OFFSET;
+ unsigned long size;
- if (__map_without_bats) {
- pr_debug("RAM mapped without BATs\n");
- return base;
+ size = roundup_pow_of_two((unsigned long)_einittext - PAGE_OFFSET);
+ setibat(0, PAGE_OFFSET, 0, size, PAGE_KERNEL_X);
+
+ if (debug_pagealloc_enabled_or_kfence()) {
+ pr_debug_once("Read-Write memory mapped without BATs\n");
+ if (base >= border)
+ return base;
+ if (top >= border)
+ top = border;
}
if (!strict_kernel_rwx_enabled() || base >= border || top <= border)
@@ -181,29 +182,38 @@ unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
return __mmu_mapin_ram(border, top);
}
+static bool is_module_segment(unsigned long addr)
+{
+ if (!IS_ENABLED(CONFIG_MODULES))
+ return false;
+ if (addr < ALIGN_DOWN(MODULES_VADDR, SZ_256M))
+ return false;
+ if (addr > ALIGN(MODULES_END, SZ_256M) - 1)
+ return false;
+ return true;
+}
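To make the 256M rounding concrete (addresses below are made up, not taken from the patch): with MODULES_VADDR = 0xb0004000 and MODULES_END = 0xc0000000, the check treats the whole 0xb0000000-0xbfffffff range as module space, because execute permission can only be granted or removed per 256M segment, so any segment that overlaps the module area must stay executable.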
+
void mmu_mark_initmem_nx(void)
{
int nb = mmu_has_feature(MMU_FTR_USE_HIGH_BATS) ? 8 : 4;
int i;
unsigned long base = (unsigned long)_stext - PAGE_OFFSET;
- unsigned long top = (unsigned long)_etext - PAGE_OFFSET;
+ unsigned long top = ALIGN((unsigned long)_etext - PAGE_OFFSET, SZ_128K);
+ unsigned long border = (unsigned long)__init_begin - PAGE_OFFSET;
unsigned long size;
- if (IS_ENABLED(CONFIG_PPC_BOOK3S_601))
- return;
-
- for (i = 0; i < nb - 1 && base < top && top - base > (128 << 10);) {
- size = block_size(base, top);
+ for (i = 0; i < nb - 1 && base < top;) {
+ size = bat_block_size(base, top);
setibat(i++, PAGE_OFFSET + base, base, size, PAGE_KERNEL_TEXT);
base += size;
}
if (base < top) {
- size = block_size(base, top);
- size = max(size, 128UL << 10);
+ size = bat_block_size(base, top);
if ((top - base) > size) {
- if (strict_kernel_rwx_enabled())
- pr_warn("Kernel _etext not properly aligned\n");
size <<= 1;
+ if (strict_kernel_rwx_enabled() && base + size > border)
+ pr_warn("Some RW data is getting mapped X. "
+ "Adjust CONFIG_DATA_SHIFT to avoid that.\n");
}
setibat(i++, PAGE_OFFSET + base, base, size, PAGE_KERNEL_TEXT);
base += size;
@@ -215,10 +225,10 @@ void mmu_mark_initmem_nx(void)
for (i = TASK_SIZE >> 28; i < 16; i++) {
/* Do not set NX on VM space for modules */
- if (IS_ENABLED(CONFIG_MODULES) &&
- (VMALLOC_START & 0xf0000000) == i << 28)
- break;
- mtsrin(mfsrin(i << 28) | 0x10000000, i << 28);
+ if (is_module_segment(i << 28))
+ continue;
+
+ mtsr(mfsr(i << 28) | 0x10000000, i << 28);
}
}
@@ -227,13 +237,10 @@ void mmu_mark_rodata_ro(void)
int nb = mmu_has_feature(MMU_FTR_USE_HIGH_BATS) ? 8 : 4;
int i;
- if (IS_ENABLED(CONFIG_PPC_BOOK3S_601))
- return;
-
for (i = 0; i < nb; i++) {
struct ppc_bat *bat = BATS[i];
- if (bat_addrs[i].start < (unsigned long)__init_begin)
+ if (bat_addrs[i].start < (unsigned long)__end_rodata)
bat[1].batl = (bat[1].batl & ~BPP_RW) | BPP_RX;
}
@@ -241,10 +248,9 @@ void mmu_mark_rodata_ro(void)
}
/*
- * Set up one of the I/D BAT (block address translation) register pairs.
+ * Set up one of the D BAT (block address translation) register pairs.
* The parameters are not checked; in particular size must be a power
* of 2 between 128k and 256M.
- * On 603+, only set IBAT when _PAGE_EXEC is set
*/
void __init setbat(int index, unsigned long virt, phys_addr_t phys,
unsigned int size, pgprot_t prot)
@@ -268,34 +274,17 @@ void __init setbat(int index, unsigned long virt, phys_addr_t phys,
flags &= ~_PAGE_COHERENT;
bl = (size >> 17) - 1;
- if (!IS_ENABLED(CONFIG_PPC_BOOK3S_601)) {
- /* 603, 604, etc. */
- /* Do DBAT first */
- wimgxpp = flags & (_PAGE_WRITETHRU | _PAGE_NO_CACHE
- | _PAGE_COHERENT | _PAGE_GUARDED);
- wimgxpp |= (flags & _PAGE_RW)? BPP_RW: BPP_RX;
- bat[1].batu = virt | (bl << 2) | 2; /* Vs=1, Vp=0 */
- bat[1].batl = BAT_PHYS_ADDR(phys) | wimgxpp;
- if (flags & _PAGE_USER)
- bat[1].batu |= 1; /* Vp = 1 */
- if (flags & _PAGE_GUARDED) {
- /* G bit must be zero in IBATs */
- flags &= ~_PAGE_EXEC;
- }
- if (flags & _PAGE_EXEC)
- bat[0] = bat[1];
- else
- bat[0].batu = bat[0].batl = 0;
- } else {
- /* 601 cpu */
- if (bl > BL_8M)
- bl = BL_8M;
- wimgxpp = flags & (_PAGE_WRITETHRU | _PAGE_NO_CACHE
- | _PAGE_COHERENT);
- wimgxpp |= (flags & _PAGE_RW)?
- ((flags & _PAGE_USER)? PP_RWRW: PP_RWXX): PP_RXRX;
- bat->batu = virt | wimgxpp | 4; /* Ks=0, Ku=1 */
- bat->batl = phys | bl | 0x40; /* V=1 */
+ /* Do DBAT first */
+ wimgxpp = flags & (_PAGE_WRITETHRU | _PAGE_NO_CACHE
+ | _PAGE_COHERENT | _PAGE_GUARDED);
+ wimgxpp |= (flags & _PAGE_RW)? BPP_RW: BPP_RX;
+ bat[1].batu = virt | (bl << 2) | 2; /* Vs=1, Vp=0 */
+ bat[1].batl = BAT_PHYS_ADDR(phys) | wimgxpp;
+ if (flags & _PAGE_USER)
+ bat[1].batu |= 1; /* Vp = 1 */
+ if (flags & _PAGE_GUARDED) {
+ /* G bit must be zero in IBATs */
+ flags &= ~_PAGE_EXEC;
}
bat_addrs[index].start = virt;
@@ -306,13 +295,13 @@ void __init setbat(int index, unsigned long virt, phys_addr_t phys,
/*
* Preload a translation in the hash table
*/
-void hash_preload(struct mm_struct *mm, unsigned long ea)
+static void hash_preload(struct mm_struct *mm, unsigned long ea)
{
pmd_t *pmd;
- if (!Hash)
+ if (!mmu_has_feature(MMU_FTR_HPTE_TABLE))
return;
- pmd = pmd_offset(pud_offset(pgd_offset(mm, ea), ea), ea);
+ pmd = pmd_off(mm, ea);
if (!pmd_none(*pmd))
add_hash_page(mm->context.id, ea, pmd_val(*pmd));
}
@@ -325,11 +314,9 @@ void hash_preload(struct mm_struct *mm, unsigned long ea)
*
* This must always be called with the pte lock held.
*/
-void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
+void __update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
pte_t *ptep)
{
- if (!mmu_has_feature(MMU_FTR_HPTE_TABLE))
- return;
/*
* We don't need to worry about _PAGE_PRESENT here because we are
* called with either mm->page_table_lock held or ptl lock held
@@ -399,15 +386,6 @@ void __init MMU_init_hw(void)
hash_mb2 = hash_mb = 32 - LG_HPTEG_SIZE - lg_n_hpteg;
if (lg_n_hpteg > 16)
hash_mb2 = 16 - LG_HPTEG_SIZE;
-
- /*
- * When KASAN is selected, there is already an early temporary hash
- * table and the switch to the final hash table is done later.
- */
- if (IS_ENABLED(CONFIG_KASAN))
- return;
-
- MMU_init_hw_patch();
}
void __init MMU_init_hw_patch(void)
@@ -415,6 +393,9 @@ void __init MMU_init_hw_patch(void)
unsigned int hmask = Hash_mask >> (16 - LG_HPTEG_SIZE);
unsigned int hash = (unsigned int)Hash - PAGE_OFFSET;
+ if (!mmu_has_feature(MMU_FTR_HPTE_TABLE))
+ return;
+
if (ppc_md.progress)
ppc_md.progress("hash:patch", 0x345);
if (ppc_md.progress)
@@ -448,11 +429,7 @@ void setup_initial_memory_limit(phys_addr_t first_memblock_base,
*/
BUG_ON(first_memblock_base != 0);
- /* 601 can only access 16MB at the moment */
- if (IS_ENABLED(CONFIG_PPC_BOOK3S_601))
- memblock_set_current_limit(min_t(u64, first_memblock_size, 0x01000000));
- else /* Anything else has 256M mapped */
- memblock_set_current_limit(min_t(u64, first_memblock_size, 0x10000000));
+ memblock_set_current_limit(min_t(u64, first_memblock_size, SZ_256M));
}
void __init print_system_hash_info(void)
@@ -462,22 +439,6 @@ void __init print_system_hash_info(void)
pr_info("Hash_mask = 0x%lx\n", Hash_mask);
}
-#ifdef CONFIG_PPC_KUEP
-void __init setup_kuep(bool disabled)
-{
- pr_info("Activating Kernel Userspace Execution Prevention\n");
-
- if (disabled)
- pr_warn("KUEP cannot be disabled yet on 6xx when compiled in\n");
-}
-#endif
-
-#ifdef CONFIG_PPC_KUAP
-void __init setup_kuap(bool disabled)
+void __init early_init_mmu(void)
{
- pr_info("Activating Kernel Userspace Access Protection\n");
-
- if (disabled)
- pr_warn("KUAP cannot be disabled yet on 6xx when compiled in\n");
}
-#endif
diff --git a/arch/powerpc/mm/book3s32/mmu_context.c b/arch/powerpc/mm/book3s32/mmu_context.c
index 218996e40a8e..269a3eb25a73 100644
--- a/arch/powerpc/mm/book3s32/mmu_context.c
+++ b/arch/powerpc/mm/book3s32/mmu_context.c
@@ -24,6 +24,12 @@
#include <asm/mmu_context.h>
/*
+ * Room for two PTE pointers, usually the kernel and current user pointers
+ * to their respective root page table.
+ */
+void *abatron_pteptrs[2];
+
+/*
* On 32-bit PowerPC 6xx/7xx/7xxx CPUs, we use a set of 16 VSIDs
* (virtual segment identifiers) for each context. Although the
* hardware supports 24-bit VSIDs, and thus >1 million contexts,
@@ -39,19 +45,6 @@
#define LAST_CONTEXT 32767
#define FIRST_CONTEXT 1
-/*
- * This function defines the mapping from contexts to VSIDs (virtual
- * segment IDs). We use a skew on both the context and the high 4 bits
- * of the 32-bit virtual address (the "effective segment ID") in order
- * to spread out the entries in the MMU hash table. Note, if this
- * function is changed then arch/ppc/mm/hashtable.S will have to be
- * changed to correspond.
- *
- *
- * CTX_TO_VSID(ctx, va) (((ctx) * (897 * 16) + ((va) >> 28) * 0x111) \
- * & 0xffffff)
- */
-
static unsigned long next_mmu_context;
static unsigned long context_map[LAST_CONTEXT / BITS_PER_LONG + 1];
@@ -76,6 +69,12 @@ EXPORT_SYMBOL_GPL(__init_new_context);
int init_new_context(struct task_struct *t, struct mm_struct *mm)
{
mm->context.id = __init_new_context();
+ mm->context.sr0 = CTX_TO_VSID(mm->context.id, 0);
+
+ if (!kuep_is_disabled())
+ mm->context.sr0 |= SR_NX;
+ if (!kuap_is_disabled())
+ mm->context.sr0 |= SR_KS;
return 0;
}
@@ -111,3 +110,25 @@ void __init mmu_context_init(void)
context_map[0] = (1 << FIRST_CONTEXT) - 1;
next_mmu_context = FIRST_CONTEXT;
}
+
+void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk)
+{
+ long id = next->context.id;
+
+ if (id < 0)
+ panic("mm_struct %p has no context ID", next);
+
+ isync();
+
+ update_user_segments(next->context.sr0);
+
+ if (IS_ENABLED(CONFIG_BDI_SWITCH))
+ abatron_pteptrs[1] = next->pgd;
+
+ if (!mmu_has_feature(MMU_FTR_HPTE_TABLE))
+ mtspr(SPRN_SDR1, rol32(__pa(next->pgd), 4) & 0xffff01ff);
+
+ mb(); /* sync */
+ isync();
+}
+EXPORT_SYMBOL(switch_mmu_context);
diff --git a/arch/powerpc/mm/book3s32/nohash_low.S b/arch/powerpc/mm/book3s32/nohash_low.S
new file mode 100644
index 000000000000..19f418b0ed2d
--- /dev/null
+++ b/arch/powerpc/mm/book3s32/nohash_low.S
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * This file contains low-level assembler routines for managing
+ * the PowerPC 603 tlb invalidation.
+ */
+
+#include <asm/page.h>
+#include <asm/ppc_asm.h>
+#include <asm/asm-offsets.h>
+
+/*
+ * Flush an entry from the TLB
+ */
+#ifdef CONFIG_SMP
+_GLOBAL(_tlbie)
+ lwz r8,TASK_CPU(r2)
+ oris r8,r8,11
+ mfmsr r10
+ rlwinm r0,r10,0,17,15 /* clear bit 16 (MSR_EE) */
+ rlwinm r0,r0,0,28,26 /* clear DR */
+ mtmsr r0
+ isync
+ lis r9,mmu_hash_lock@h
+ ori r9,r9,mmu_hash_lock@l
+ tophys(r9,r9)
+10: lwarx r7,0,r9
+ cmpwi 0,r7,0
+ bne- 10b
+ stwcx. r8,0,r9
+ bne- 10b
+ eieio
+ tlbie r3
+ sync
+ TLBSYNC
+ li r0,0
+ stw r0,0(r9) /* clear mmu_hash_lock */
+ mtmsr r10
+ isync
+ blr
+_ASM_NOKPROBE_SYMBOL(_tlbie)
+#endif /* CONFIG_SMP */
+
+/*
+ * Flush the entire TLB. 603/603e only
+ */
+_GLOBAL(_tlbia)
+#if defined(CONFIG_SMP)
+ lwz r8,TASK_CPU(r2)
+ oris r8,r8,10
+ mfmsr r10
+ rlwinm r0,r10,0,17,15 /* clear bit 16 (MSR_EE) */
+ rlwinm r0,r0,0,28,26 /* clear DR */
+ mtmsr r0
+ isync
+ lis r9,mmu_hash_lock@h
+ ori r9,r9,mmu_hash_lock@l
+ tophys(r9,r9)
+10: lwarx r7,0,r9
+ cmpwi 0,r7,0
+ bne- 10b
+ stwcx. r8,0,r9
+ bne- 10b
+#endif /* CONFIG_SMP */
+ li r5, 32
+ lis r4, KERNELBASE@h
+ mtctr r5
+ sync
+0: tlbie r4
+ addi r4, r4, 0x1000
+ bdnz 0b
+ sync
+#ifdef CONFIG_SMP
+ TLBSYNC
+ li r0,0
+ stw r0,0(r9) /* clear mmu_hash_lock */
+ mtmsr r10
+ isync
+#endif /* CONFIG_SMP */
+ blr
+_ASM_NOKPROBE_SYMBOL(_tlbia)
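The 32-iteration loop stands in for tlbia: on the 603 the I- and D-TLBs are 2-way, 32-set, and tlbie invalidates every way of the set selected by the effective address, so walking 32 consecutive pages clears the whole TLB. In C-like pseudo form (illustrative only):

	for (i = 0; i < 32; i++)
		tlbie(KERNELBASE + i * PAGE_SIZE);	/* one tlbie per TLB set */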
diff --git a/arch/powerpc/mm/book3s32/tlb.c b/arch/powerpc/mm/book3s32/tlb.c
index 2fcd321040ff..9ad6b56bfec9 100644
--- a/arch/powerpc/mm/book3s32/tlb.c
+++ b/arch/powerpc/mm/book3s32/tlb.c
@@ -30,35 +30,6 @@
#include <mm/mmu_decl.h>
/*
- * Called when unmapping pages to flush entries from the TLB/hash table.
- */
-void flush_hash_entry(struct mm_struct *mm, pte_t *ptep, unsigned long addr)
-{
- unsigned long ptephys;
-
- if (Hash) {
- ptephys = __pa(ptep) & PAGE_MASK;
- flush_hash_pages(mm->context.id, addr, ptephys, 1);
- }
-}
-EXPORT_SYMBOL(flush_hash_entry);
-
-/*
- * Called at the end of a mmu_gather operation to make sure the
- * TLB flush is completely done.
- */
-void tlb_flush(struct mmu_gather *tlb)
-{
- if (!Hash) {
- /*
- * 603 needs to flush the whole TLB here since
- * it doesn't use a hash table.
- */
- _tlbia();
- }
-}
-
-/*
* TLB flushing:
*
* - flush_tlb_mm(mm) flushes the specified mm context TLB's
@@ -71,23 +42,23 @@ void tlb_flush(struct mmu_gather *tlb)
* -- Cort
*/
-static void flush_range(struct mm_struct *mm, unsigned long start,
- unsigned long end)
+/*
+ * For each address in the range, find the pte for the address
+ * and check _PAGE_HASHPTE bit; if it is set, find and destroy
+ * the corresponding HPTE.
+ */
+void hash__flush_range(struct mm_struct *mm, unsigned long start, unsigned long end)
{
pmd_t *pmd;
unsigned long pmd_end;
int count;
unsigned int ctx = mm->context.id;
- if (!Hash) {
- _tlbia();
- return;
- }
start &= PAGE_MASK;
if (start >= end)
return;
end = (end - 1) | ~PAGE_MASK;
- pmd = pmd_offset(pud_offset(pgd_offset(mm, start), start), start);
+ pmd = pmd_off(mm, start);
for (;;) {
pmd_end = ((start + PGDIR_SIZE) & PGDIR_MASK) - 1;
if (pmd_end > end)
@@ -102,67 +73,35 @@ static void flush_range(struct mm_struct *mm, unsigned long start,
++pmd;
}
}
-
-/*
- * Flush kernel TLB entries in the given range
- */
-void flush_tlb_kernel_range(unsigned long start, unsigned long end)
-{
- flush_range(&init_mm, start, end);
-}
-EXPORT_SYMBOL(flush_tlb_kernel_range);
+EXPORT_SYMBOL(hash__flush_range);
/*
* Flush all the (user) entries for the address space described by mm.
*/
-void flush_tlb_mm(struct mm_struct *mm)
+void hash__flush_tlb_mm(struct mm_struct *mm)
{
struct vm_area_struct *mp;
-
- if (!Hash) {
- _tlbia();
- return;
- }
+ VMA_ITERATOR(vmi, mm, 0);
/*
- * It is safe to go down the mm's list of vmas when called
- * from dup_mmap, holding mmap_sem. It would also be safe from
- * unmap_region or exit_mmap, but not from vmtruncate on SMP -
- * but it seems dup_mmap is the only SMP case which gets here.
+ * It is safe to iterate the vmas when called from dup_mmap,
+ * holding mmap_lock. It would also be safe from unmap_region
+ * or exit_mmap, but not from vmtruncate on SMP - but it seems
+ * dup_mmap is the only SMP case which gets here.
*/
- for (mp = mm->mmap; mp != NULL; mp = mp->vm_next)
- flush_range(mp->vm_mm, mp->vm_start, mp->vm_end);
+ for_each_vma(vmi, mp)
+ hash__flush_range(mp->vm_mm, mp->vm_start, mp->vm_end);
}
-EXPORT_SYMBOL(flush_tlb_mm);
+EXPORT_SYMBOL(hash__flush_tlb_mm);
-void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
+void hash__flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
struct mm_struct *mm;
pmd_t *pmd;
- if (!Hash) {
- _tlbie(vmaddr);
- return;
- }
mm = (vmaddr < TASK_SIZE)? vma->vm_mm: &init_mm;
- pmd = pmd_offset(pud_offset(pgd_offset(mm, vmaddr), vmaddr), vmaddr);
+ pmd = pmd_off(mm, vmaddr);
if (!pmd_none(*pmd))
flush_hash_pages(mm->context.id, vmaddr, pmd_val(*pmd), 1);
}
-EXPORT_SYMBOL(flush_tlb_page);
-
-/*
- * For each address in the range, find the pte for the address
- * and check _PAGE_HASHPTE bit; if it is set, find and destroy
- * the corresponding HPTE.
- */
-void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
- unsigned long end)
-{
- flush_range(vma->vm_mm, start, end);
-}
-EXPORT_SYMBOL(flush_tlb_range);
-
-void __init early_init_mmu(void)
-{
-}
+EXPORT_SYMBOL(hash__flush_tlb_page);
diff --git a/arch/powerpc/mm/book3s64/Makefile b/arch/powerpc/mm/book3s64/Makefile
index fd393b8be14f..cad2abc1730f 100644
--- a/arch/powerpc/mm/book3s64/Makefile
+++ b/arch/powerpc/mm/book3s64/Makefile
@@ -2,22 +2,34 @@
ccflags-y := $(NO_MINIMAL_TOC)
+obj-y += mmu_context.o pgtable.o trace.o
+ifdef CONFIG_PPC_64S_HASH_MMU
CFLAGS_REMOVE_slb.o = $(CC_FLAGS_FTRACE)
-
-obj-y += hash_pgtable.o hash_utils.o slb.o \
- mmu_context.o pgtable.o hash_tlb.o
-obj-$(CONFIG_PPC_NATIVE) += hash_native.o
-obj-$(CONFIG_PPC_RADIX_MMU) += radix_pgtable.o radix_tlb.o
+obj-y += hash_pgtable.o hash_utils.o hash_tlb.o slb.o slice.o
+obj-$(CONFIG_PPC_HASH_MMU_NATIVE) += hash_native.o
obj-$(CONFIG_PPC_4K_PAGES) += hash_4k.o
obj-$(CONFIG_PPC_64K_PAGES) += hash_64k.o
-obj-$(CONFIG_HUGETLB_PAGE) += hash_hugetlbpage.o
+obj-$(CONFIG_TRANSPARENT_HUGEPAGE) += hash_hugepage.o
+obj-$(CONFIG_PPC_SUBPAGE_PROT) += subpage_prot.o
+endif
+
+obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
+
+obj-$(CONFIG_PPC_RADIX_MMU) += radix_pgtable.o radix_tlb.o
ifdef CONFIG_HUGETLB_PAGE
obj-$(CONFIG_PPC_RADIX_MMU) += radix_hugetlbpage.o
endif
-obj-$(CONFIG_TRANSPARENT_HUGEPAGE) += hash_hugepage.o
-obj-$(CONFIG_PPC_SUBPAGE_PROT) += subpage_prot.o
obj-$(CONFIG_SPAPR_TCE_IOMMU) += iommu_api.o
-obj-$(CONFIG_PPC_MEM_KEYS) += pkeys.o
+obj-$(CONFIG_PPC_PKEY) += pkeys.o
# Instrumenting the SLB fault path can lead to duplicate SLB entries
KCOV_INSTRUMENT_slb.o := n
+
+# Parts of these can run in real mode and therefore are
+# not safe with the current outline KASAN implementation
+KASAN_SANITIZE_mmu_context.o := n
+KASAN_SANITIZE_pgtable.o := n
+KASAN_SANITIZE_radix_pgtable.o := n
+KASAN_SANITIZE_radix_tlb.o := n
+KASAN_SANITIZE_slb.o := n
+KASAN_SANITIZE_pkeys.o := n
diff --git a/arch/powerpc/mm/book3s64/hash_4k.c b/arch/powerpc/mm/book3s64/hash_4k.c
index 22e787123cdf..7de1a8a0c62a 100644
--- a/arch/powerpc/mm/book3s64/hash_4k.c
+++ b/arch/powerpc/mm/book3s64/hash_4k.c
@@ -54,7 +54,7 @@ int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
* PP bits. _PAGE_USER is already PP bit 0x2, so we only
* need to add in 0x1 if it's a read-only user page
*/
- rflags = htab_convert_pte_flags(new_pte);
+ rflags = htab_convert_pte_flags(new_pte, flags);
rpte = __real_pte(__pte(old_pte), ptep, PTRS_PER_PTE);
if (cpu_has_feature(CPU_FTR_NOEXECUTE) &&
diff --git a/arch/powerpc/mm/book3s64/hash_64k.c b/arch/powerpc/mm/book3s64/hash_64k.c
index 7084ce2951e6..998c6817ed47 100644
--- a/arch/powerpc/mm/book3s64/hash_64k.c
+++ b/arch/powerpc/mm/book3s64/hash_64k.c
@@ -72,7 +72,7 @@ int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
* Handle the subpage protection bits
*/
subpg_pte = new_pte & ~subpg_prot;
- rflags = htab_convert_pte_flags(subpg_pte);
+ rflags = htab_convert_pte_flags(subpg_pte, flags);
if (cpu_has_feature(CPU_FTR_NOEXECUTE) &&
!cpu_has_feature(CPU_FTR_COHERENT_ICACHE)) {
@@ -260,7 +260,7 @@ int __hash_page_64K(unsigned long ea, unsigned long access,
new_pte |= _PAGE_DIRTY;
} while (!pte_xchg(ptep, __pte(old_pte), __pte(new_pte)));
- rflags = htab_convert_pte_flags(new_pte);
+ rflags = htab_convert_pte_flags(new_pte, flags);
rpte = __real_pte(__pte(old_pte), ptep, PTRS_PER_PTE);
if (cpu_has_feature(CPU_FTR_NOEXECUTE) &&
diff --git a/arch/powerpc/mm/book3s64/hash_hugepage.c b/arch/powerpc/mm/book3s64/hash_hugepage.c
index 440823797de7..c0fabe6c5a12 100644
--- a/arch/powerpc/mm/book3s64/hash_hugepage.c
+++ b/arch/powerpc/mm/book3s64/hash_hugepage.c
@@ -57,7 +57,7 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
if (!(old_pmd & (H_PAGE_THP_HUGE | _PAGE_DEVMAP)))
return 0;
- rflags = htab_convert_pte_flags(new_pmd);
+ rflags = htab_convert_pte_flags(new_pmd, flags);
#if 0
if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE)) {
diff --git a/arch/powerpc/mm/book3s64/hash_native.c b/arch/powerpc/mm/book3s64/hash_native.c
index d2d8237ea9d5..9342e79870df 100644
--- a/arch/powerpc/mm/book3s64/hash_native.c
+++ b/arch/powerpc/mm/book3s64/hash_native.c
@@ -14,11 +14,11 @@
#include <linux/processor.h>
#include <linux/threads.h>
#include <linux/smp.h>
+#include <linux/pgtable.h>
#include <asm/machdep.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
-#include <asm/pgtable.h>
#include <asm/trace.h>
#include <asm/tlb.h>
#include <asm/cputable.h>
@@ -43,102 +43,28 @@
static DEFINE_RAW_SPINLOCK(native_tlbie_lock);
-static inline void tlbiel_hash_set_isa206(unsigned int set, unsigned int is)
-{
- unsigned long rb;
-
- rb = (set << PPC_BITLSHIFT(51)) | (is << PPC_BITLSHIFT(53));
-
- asm volatile("tlbiel %0" : : "r" (rb));
-}
+#ifdef CONFIG_LOCKDEP
+static struct lockdep_map hpte_lock_map =
+ STATIC_LOCKDEP_MAP_INIT("hpte_lock", &hpte_lock_map);
-/*
- * tlbiel instruction for hash, set invalidation
- * i.e., r=1 and is=01 or is=10 or is=11
- */
-static __always_inline void tlbiel_hash_set_isa300(unsigned int set, unsigned int is,
- unsigned int pid,
- unsigned int ric, unsigned int prs)
+static void acquire_hpte_lock(void)
{
- unsigned long rb;
- unsigned long rs;
- unsigned int r = 0; /* hash format */
-
- rb = (set << PPC_BITLSHIFT(51)) | (is << PPC_BITLSHIFT(53));
- rs = ((unsigned long)pid << PPC_BITLSHIFT(31));
-
- asm volatile(PPC_TLBIEL(%0, %1, %2, %3, %4)
- : : "r"(rb), "r"(rs), "i"(ric), "i"(prs), "r"(r)
- : "memory");
+ lock_map_acquire(&hpte_lock_map);
}
-
-static void tlbiel_all_isa206(unsigned int num_sets, unsigned int is)
+static void release_hpte_lock(void)
{
- unsigned int set;
-
- asm volatile("ptesync": : :"memory");
-
- for (set = 0; set < num_sets; set++)
- tlbiel_hash_set_isa206(set, is);
-
- asm volatile("ptesync": : :"memory");
+ lock_map_release(&hpte_lock_map);
}
-
-static void tlbiel_all_isa300(unsigned int num_sets, unsigned int is)
+#else
+static void acquire_hpte_lock(void)
{
- unsigned int set;
-
- asm volatile("ptesync": : :"memory");
-
- /*
- * Flush the first set of the TLB, and any caching of partition table
- * entries. Then flush the remaining sets of the TLB. Hash mode uses
- * partition scoped TLB translations.
- */
- tlbiel_hash_set_isa300(0, is, 0, 2, 0);
- for (set = 1; set < num_sets; set++)
- tlbiel_hash_set_isa300(set, is, 0, 0, 0);
-
- /*
- * Now invalidate the process table cache.
- *
- * From ISA v3.0B p. 1078:
- * The following forms are invalid.
- * * PRS=1, R=0, and RIC!=2 (The only process-scoped
- * HPT caching is of the Process Table.)
- */
- tlbiel_hash_set_isa300(0, is, 0, 2, 1);
-
- asm volatile("ptesync": : :"memory");
-
- asm volatile(PPC_ISA_3_0_INVALIDATE_ERAT "; isync" : : :"memory");
}
-void hash__tlbiel_all(unsigned int action)
+static void release_hpte_lock(void)
{
- unsigned int is;
-
- switch (action) {
- case TLB_INVAL_SCOPE_GLOBAL:
- is = 3;
- break;
- case TLB_INVAL_SCOPE_LPID:
- is = 2;
- break;
- default:
- BUG();
- }
-
- if (early_cpu_has_feature(CPU_FTR_ARCH_300))
- tlbiel_all_isa300(POWER9_TLB_SETS_HASH, is);
- else if (early_cpu_has_feature(CPU_FTR_ARCH_207S))
- tlbiel_all_isa206(POWER8_TLB_SETS, is);
- else if (early_cpu_has_feature(CPU_FTR_ARCH_206))
- tlbiel_all_isa206(POWER7_TLB_SETS, is);
- else
- WARN(1, "%s called on pre-POWER7 CPU\n", __func__);
}
+#endif
static inline unsigned long ___tlbie(unsigned long vpn, int psize,
int apsize, int ssize)
@@ -260,7 +186,7 @@ static inline void __tlbiel(unsigned long vpn, int psize, int apsize, int ssize)
va |= ssize << 8;
sllp = get_sllp_encoding(apsize);
va |= sllp << 5;
- asm volatile(ASM_FTR_IFSET("tlbiel %0", "tlbiel %0,0", %1)
+ asm volatile(ASM_FTR_IFSET("tlbiel %0", PPC_TLBIEL_v205(%0, 0), %1)
: : "r" (va), "i" (CPU_FTR_ARCH_206)
: "memory");
break;
@@ -279,7 +205,7 @@ static inline void __tlbiel(unsigned long vpn, int psize, int apsize, int ssize)
*/
va |= (vpn & 0xfe);
va |= 1; /* L */
- asm volatile(ASM_FTR_IFSET("tlbiel %0", "tlbiel %0,1", %1)
+ asm volatile(ASM_FTR_IFSET("tlbiel %0", PPC_TLBIEL_v205(%0, 1), %1)
: : "r" (va), "i" (CPU_FTR_ARCH_206)
: "memory");
break;
@@ -303,7 +229,7 @@ static inline void tlbie(unsigned long vpn, int psize, int apsize,
asm volatile("ptesync": : :"memory");
if (use_local) {
__tlbiel(vpn, psize, apsize, ssize);
- asm volatile("ptesync": : :"memory");
+ ppc_after_tlbiel_barrier();
} else {
__tlbie(vpn, psize, apsize, ssize);
fixup_tlbie_vpn(vpn, psize, apsize, ssize);
@@ -317,6 +243,7 @@ static inline void native_lock_hpte(struct hash_pte *hptep)
{
unsigned long *word = (unsigned long *)&hptep->v;
+ acquire_hpte_lock();
while (1) {
if (!test_and_set_bit_lock(HPTE_LOCK_BIT, word))
break;
@@ -331,6 +258,7 @@ static inline void native_unlock_hpte(struct hash_pte *hptep)
{
unsigned long *word = (unsigned long *)&hptep->v;
+ release_hpte_lock();
clear_bit_unlock(HPTE_LOCK_BIT, word);
}
@@ -340,8 +268,11 @@ static long native_hpte_insert(unsigned long hpte_group, unsigned long vpn,
{
struct hash_pte *hptep = htab_address + hpte_group;
unsigned long hpte_v, hpte_r;
+ unsigned long flags;
int i;
+ local_irq_save(flags);
+
if (!(vflags & HPTE_V_BOLTED)) {
DBG_LOW(" insert(group=%lx, vpn=%016lx, pa=%016lx,"
" rflags=%lx, vflags=%lx, psize=%d)\n",
@@ -360,8 +291,10 @@ static long native_hpte_insert(unsigned long hpte_group, unsigned long vpn,
hptep++;
}
- if (i == HPTES_PER_GROUP)
+ if (i == HPTES_PER_GROUP) {
+ local_irq_restore(flags);
return -1;
+ }
hpte_v = hpte_encode_v(vpn, psize, apsize, ssize) | vflags | HPTE_V_VALID;
hpte_r = hpte_encode_r(pa, psize, apsize) | rflags;
@@ -383,10 +316,13 @@ static long native_hpte_insert(unsigned long hpte_group, unsigned long vpn,
* Now set the first dword including the valid bit
* NOTE: this also unlocks the hpte
*/
+ release_hpte_lock();
hptep->v = cpu_to_be64(hpte_v);
__asm__ __volatile__ ("ptesync" : : : "memory");
+ local_irq_restore(flags);
+
return i | (!!(vflags & HPTE_V_SECONDARY) << 3);
}
@@ -424,6 +360,7 @@ static long native_hpte_remove(unsigned long hpte_group)
return -1;
/* Invalidate the hpte. NOTE: this also unlocks it */
+ release_hpte_lock();
hptep->v = 0;
return i;
@@ -436,6 +373,9 @@ static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
struct hash_pte *hptep = htab_address + slot;
unsigned long hpte_v, want_v;
int ret = 0, local = 0;
+ unsigned long irqflags;
+
+ local_irq_save(irqflags);
want_v = hpte_encode_avpn(vpn, bpsize, ssize);
@@ -479,6 +419,8 @@ static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
if (!(flags & HPTE_NOHPTE_UPDATE))
tlbie(vpn, bpsize, apsize, ssize, local);
+ local_irq_restore(irqflags);
+
return ret;
}
@@ -542,6 +484,9 @@ static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
unsigned long vsid;
long slot;
struct hash_pte *hptep;
+ unsigned long flags;
+
+ local_irq_save(flags);
vsid = get_kernel_vsid(ea, ssize);
vpn = hpt_vpn(ea, vsid, ssize);
@@ -560,6 +505,8 @@ static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
* actual page size will be same.
*/
tlbie(vpn, psize, psize, ssize, 0);
+
+ local_irq_restore(flags);
}
/*
@@ -573,6 +520,9 @@ static int native_hpte_removebolted(unsigned long ea, int psize, int ssize)
unsigned long vsid;
long slot;
struct hash_pte *hptep;
+ unsigned long flags;
+
+ local_irq_save(flags);
vsid = get_kernel_vsid(ea, ssize);
vpn = hpt_vpn(ea, vsid, ssize);
@@ -590,6 +540,9 @@ static int native_hpte_removebolted(unsigned long ea, int psize, int ssize)
/* Invalidate the TLB */
tlbie(vpn, psize, psize, ssize, 0);
+
+ local_irq_restore(flags);
+
return 0;
}
@@ -614,10 +567,11 @@ static void native_hpte_invalidate(unsigned long slot, unsigned long vpn,
/* recheck with locks held */
hpte_v = hpte_get_old_v(hptep);
- if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID))
+ if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID)) {
/* Invalidate the hpte. NOTE: this also unlocks it */
+ release_hpte_lock();
hptep->v = 0;
- else
+ } else
native_unlock_hpte(hptep);
}
/*
@@ -677,10 +631,8 @@ static void native_hugepage_invalidate(unsigned long vsid,
hpte_v = hpte_get_old_v(hptep);
if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID)) {
- /*
- * Invalidate the hpte. NOTE: this also unlocks it
- */
-
+ /* Invalidate the hpte. NOTE: this also unlocks it */
+ release_hpte_lock();
hptep->v = 0;
} else
native_unlock_hpte(hptep);
@@ -780,7 +732,7 @@ static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
* TODO: add batching support when enabled. remember, no dynamic memory here,
* although there is the control page available...
*/
-static void native_hpte_clear(void)
+static notrace void native_hpte_clear(void)
{
unsigned long vpn = 0;
unsigned long slot, slots;
@@ -862,8 +814,10 @@ static void native_flush_hash_range(unsigned long number, int local)
if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID))
native_unlock_hpte(hptep);
- else
+ else {
+ release_hpte_lock();
hptep->v = 0;
+ }
} pte_iterate_hashed_end();
}
@@ -879,7 +833,7 @@ static void native_flush_hash_range(unsigned long number, int local)
__tlbiel(vpn, psize, psize, ssize);
} pte_iterate_hashed_end();
}
- asm volatile("ptesync":::"memory");
+ ppc_after_tlbiel_barrier();
} else {
int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
diff --git a/arch/powerpc/mm/book3s64/hash_pgtable.c b/arch/powerpc/mm/book3s64/hash_pgtable.c
index 64733b9cb20a..51f48984abca 100644
--- a/arch/powerpc/mm/book3s64/hash_pgtable.c
+++ b/arch/powerpc/mm/book3s64/hash_pgtable.c
@@ -8,16 +8,15 @@
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/mm.h>
+#include <linux/stop_machine.h>
-#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
#include <asm/sections.h>
#include <asm/mmu.h>
#include <asm/tlb.h>
+#include <asm/firmware.h>
#include <mm/mmu_decl.h>
-#define CREATE_TRACE_POINTS
#include <trace/events/thp.h>
#if H_PGTABLE_RANGE > (USER_VSID_RANGE * (TASK_SIZE_USER64 / TASK_CONTEXT_SIZE))
@@ -148,6 +147,7 @@ void hash__vmemmap_remove_mapping(unsigned long start,
int hash__map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot)
{
pgd_t *pgdp;
+ p4d_t *p4dp;
pud_t *pudp;
pmd_t *pmdp;
pte_t *ptep;
@@ -155,7 +155,8 @@ int hash__map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot)
BUILD_BUG_ON(TASK_SIZE_USER64 > H_PGTABLE_RANGE);
if (slab_is_available()) {
pgdp = pgd_offset_k(ea);
- pudp = pud_alloc(&init_mm, pgdp, ea);
+ p4dp = p4d_offset(pgdp, ea);
+ pudp = pud_alloc(&init_mm, p4dp, ea);
if (!pudp)
return -ENOMEM;
pmdp = pmd_alloc(&init_mm, pudp, ea);
@@ -236,7 +237,7 @@ pmd_t hash__pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long addres
* to hugepage, we first clear the pmd, then invalidate all
* the PTE entries. The assumption here is that any low level
* page fault will see a none pmd and take the slow path that
- * will wait on mmap_sem. But we could very well be in a
+ * will wait on mmap_lock. But we could very well be in a
* hash_page with local ptep pointer value. Such a hash page
* can result in adding new HPTE entries for normal subpages.
* That means we could be modifying the page content as we
@@ -250,12 +251,12 @@ pmd_t hash__pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long addres
* Now invalidate the hpte entries in the range
* covered by pmd. This make sure we take a
* fault and will find the pmd as none, which will
- * result in a major fault which takes mmap_sem and
+ * result in a major fault which takes mmap_lock and
* hence wait for collapse to complete. Without this
* the __collapse_huge_page_copy can result in copying
* the old content.
*/
- flush_tlb_pmd_range(vma->vm_mm, &pmd, address);
+ flush_hash_table_pmd_range(vma->vm_mm, &pmd, address);
return pmd;
}
@@ -363,17 +364,6 @@ pmd_t hash__pmdp_huge_get_and_clear(struct mm_struct *mm,
* hash fault look at them.
*/
memset(pgtable, 0, PTE_FRAG_SIZE);
- /*
- * Serialize against find_current_mm_pte variants which does lock-less
- * lookup in page tables with local interrupts disabled. For huge pages
- * it casts pmd_t to pte_t. Since format of pte_t is different from
- * pmd_t we want to prevent transit from pmd pointing to page table
- * to pmd pointing to huge page (and back) while interrupts are disabled.
- * We clear pmd to possibly replace it with page table pointer in
- * different code paths. So make sure we wait for the parallel
- * find_curren_mm_pte to finish.
- */
- serialize_against_pte_lookup(mm);
return old_pmd;
}
@@ -388,7 +378,7 @@ int hash__has_transparent_hugepage(void)
if (mmu_psize_defs[MMU_PAGE_16M].shift != PMD_SHIFT)
return 0;
/*
- * We need to make sure that we support 16MB hugepage in a segement
+ * We need to make sure that we support 16MB hugepage in a segment
* with base page size 64K or 4K. We only enable THP with a PAGE_SIZE
* of 64K.
*/
@@ -411,10 +401,105 @@ EXPORT_SYMBOL_GPL(hash__has_transparent_hugepage);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#ifdef CONFIG_STRICT_KERNEL_RWX
+
+struct change_memory_parms {
+ unsigned long start, end, newpp;
+ unsigned int step, nr_cpus;
+ atomic_t master_cpu;
+ atomic_t cpu_counter;
+};
+
+// We'd rather this was on the stack but it has to be in the RMO
+static struct change_memory_parms chmem_parms;
+
+// And therefore we need a lock to protect it from concurrent use
+static DEFINE_MUTEX(chmem_lock);
+
+static void change_memory_range(unsigned long start, unsigned long end,
+ unsigned int step, unsigned long newpp)
+{
+ unsigned long idx;
+
+ pr_debug("Changing page protection on range 0x%lx-0x%lx, to 0x%lx, step 0x%x\n",
+ start, end, newpp, step);
+
+ for (idx = start; idx < end; idx += step)
+ /* Not sure if we can do much with the return value */
+ mmu_hash_ops.hpte_updateboltedpp(newpp, idx, mmu_linear_psize,
+ mmu_kernel_ssize);
+}
+
+static int notrace chmem_secondary_loop(struct change_memory_parms *parms)
+{
+ unsigned long msr, tmp, flags;
+ int *p;
+
+ p = &parms->cpu_counter.counter;
+
+ local_irq_save(flags);
+ hard_irq_disable();
+
+ asm volatile (
+ // Switch to real mode and leave interrupts off
+ "mfmsr %[msr] ;"
+ "li %[tmp], %[MSR_IR_DR] ;"
+ "andc %[tmp], %[msr], %[tmp] ;"
+ "mtmsrd %[tmp] ;"
+
+ // Tell the master we are in real mode
+ "1: "
+ "lwarx %[tmp], 0, %[p] ;"
+ "addic %[tmp], %[tmp], -1 ;"
+ "stwcx. %[tmp], 0, %[p] ;"
+ "bne- 1b ;"
+
+ // Spin until the counter goes to zero
+ "2: ;"
+ "lwz %[tmp], 0(%[p]) ;"
+ "cmpwi %[tmp], 0 ;"
+ "bne- 2b ;"
+
+ // Switch back to virtual mode
+ "mtmsrd %[msr] ;"
+
+ : // outputs
+ [msr] "=&r" (msr), [tmp] "=&b" (tmp), "+m" (*p)
+ : // inputs
+ [p] "b" (p), [MSR_IR_DR] "i" (MSR_IR | MSR_DR)
+ : // clobbers
+ "cc", "xer"
+ );
+
+ local_irq_restore(flags);
+
+ return 0;
+}
+
+static int change_memory_range_fn(void *data)
+{
+ struct change_memory_parms *parms = data;
+
+ // First CPU goes through, all others wait.
+ if (atomic_xchg(&parms->master_cpu, 1) == 1)
+ return chmem_secondary_loop(parms);
+
+ // Wait for all but one CPU (this one) to call in
+ while (atomic_read(&parms->cpu_counter) > 1)
+ barrier();
+
+ change_memory_range(parms->start, parms->end, parms->step, parms->newpp);
+
+ mb();
+
+ // Signal the other CPUs that we're done
+ atomic_dec(&parms->cpu_counter);
+
+ return 0;
+}
+
static bool hash__change_memory_range(unsigned long start, unsigned long end,
unsigned long newpp)
{
- unsigned long idx;
unsigned int step, shift;
shift = mmu_psize_defs[mmu_linear_psize].shift;
@@ -426,25 +511,43 @@ static bool hash__change_memory_range(unsigned long start, unsigned long end,
if (start >= end)
return false;
- pr_debug("Changing page protection on range 0x%lx-0x%lx, to 0x%lx, step 0x%x\n",
- start, end, newpp, step);
+ if (firmware_has_feature(FW_FEATURE_LPAR)) {
+ mutex_lock(&chmem_lock);
- for (idx = start; idx < end; idx += step)
- /* Not sure if we can do much with the return value */
- mmu_hash_ops.hpte_updateboltedpp(newpp, idx, mmu_linear_psize,
- mmu_kernel_ssize);
+ chmem_parms.start = start;
+ chmem_parms.end = end;
+ chmem_parms.step = step;
+ chmem_parms.newpp = newpp;
+ atomic_set(&chmem_parms.master_cpu, 0);
+
+ cpus_read_lock();
+
+ atomic_set(&chmem_parms.cpu_counter, num_online_cpus());
+
+ // Ensure state is consistent before we call the other CPUs
+ mb();
+
+ stop_machine_cpuslocked(change_memory_range_fn, &chmem_parms,
+ cpu_online_mask);
+
+ cpus_read_unlock();
+ mutex_unlock(&chmem_lock);
+ } else
+ change_memory_range(start, end, step, newpp);
return true;
}
void hash__mark_rodata_ro(void)
{
- unsigned long start, end;
+ unsigned long start, end, pp;
start = (unsigned long)_stext;
- end = (unsigned long)__init_begin;
+ end = (unsigned long)__end_rodata;
+
+ pp = htab_convert_pte_flags(pgprot_val(PAGE_KERNEL_ROX), HPTE_USE_KERNEL_KEY);
- WARN_ON(!hash__change_memory_range(start, end, PP_RXXX));
+ WARN_ON(!hash__change_memory_range(start, end, pp));
}
void hash__mark_initmem_nx(void)
@@ -454,7 +557,7 @@ void hash__mark_initmem_nx(void)
start = (unsigned long)__init_begin;
end = (unsigned long)__init_end;
- pp = htab_convert_pte_flags(pgprot_val(PAGE_KERNEL));
+ pp = htab_convert_pte_flags(pgprot_val(PAGE_KERNEL), HPTE_USE_KERNEL_KEY);
WARN_ON(!hash__change_memory_range(start, end, pp));
}
diff --git a/arch/powerpc/mm/book3s64/hash_tlb.c b/arch/powerpc/mm/book3s64/hash_tlb.c
index 4a70d8dd39cd..a64ea0a7ef96 100644
--- a/arch/powerpc/mm/book3s64/hash_tlb.c
+++ b/arch/powerpc/mm/book3s64/hash_tlb.c
@@ -21,7 +21,6 @@
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
-#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/bug.h>
@@ -176,7 +175,6 @@ void hash__tlb_flush(struct mmu_gather *tlb)
* from the hash table (and the TLB). But keeps
* the linux PTEs intact.
*
- * @mm : mm_struct of the target address space (generally init_mm)
* @start : starting address
* @end : ending address (not included in the flush)
*
@@ -189,17 +187,14 @@ void hash__tlb_flush(struct mmu_gather *tlb)
* Because of that usage pattern, it is implemented for small size rather
* than speed.
*/
-void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
- unsigned long end)
+void __flush_hash_table_range(unsigned long start, unsigned long end)
{
- bool is_thp;
int hugepage_shift;
unsigned long flags;
- start = _ALIGN_DOWN(start, PAGE_SIZE);
- end = _ALIGN_UP(end, PAGE_SIZE);
+ start = ALIGN_DOWN(start, PAGE_SIZE);
+ end = ALIGN(end, PAGE_SIZE);
- BUG_ON(!mm->pgd);
/*
* Note: Normally, we should only ever use a batch within a
@@ -212,33 +207,27 @@ void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
local_irq_save(flags);
arch_enter_lazy_mmu_mode();
for (; start < end; start += PAGE_SIZE) {
- pte_t *ptep = find_current_mm_pte(mm->pgd, start, &is_thp,
- &hugepage_shift);
+ pte_t *ptep = find_init_mm_pte(start, &hugepage_shift);
unsigned long pte;
if (ptep == NULL)
continue;
pte = pte_val(*ptep);
- if (is_thp)
- trace_hugepage_invalidate(start, pte);
if (!(pte & H_PAGE_HASHPTE))
continue;
- if (unlikely(is_thp))
- hpte_do_hugepage_flush(mm, start, (pmd_t *)ptep, pte);
- else
- hpte_need_flush(mm, start, ptep, pte, hugepage_shift);
+ hpte_need_flush(&init_mm, start, ptep, pte, hugepage_shift);
}
arch_leave_lazy_mmu_mode();
local_irq_restore(flags);
}
-void flush_tlb_pmd_range(struct mm_struct *mm, pmd_t *pmd, unsigned long addr)
+void flush_hash_table_pmd_range(struct mm_struct *mm, pmd_t *pmd, unsigned long addr)
{
pte_t *pte;
pte_t *start_pte;
unsigned long flags;
- addr = _ALIGN_DOWN(addr, PMD_SIZE);
+ addr = ALIGN_DOWN(addr, PMD_SIZE);
/*
* Note: Normally, we should only ever use a batch within a
* PTE locked section. This violates the rule, but will work
diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c
index 523d4d39d11e..6df4c6d38b66 100644
--- a/arch/powerpc/mm/book3s64/hash_utils.c
+++ b/arch/powerpc/mm/book3s64/hash_utils.c
@@ -7,7 +7,7 @@
*
* SMP scalability work:
* Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
- *
+ *
* Module name: htab.c
*
* Description:
@@ -35,17 +35,20 @@
#include <linux/pkeys.h>
#include <linux/hugetlb.h>
#include <linux/cpu.h>
+#include <linux/pgtable.h>
+#include <linux/debugfs.h>
+#include <linux/random.h>
+#include <linux/elf-randomize.h>
+#include <linux/of_fdt.h>
-#include <asm/debugfs.h>
+#include <asm/interrupt.h>
#include <asm/processor.h>
-#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/page.h>
#include <asm/types.h>
#include <linux/uaccess.h>
#include <asm/machdep.h>
-#include <asm/prom.h>
#include <asm/io.h>
#include <asm/eeh.h>
#include <asm/tlb.h>
@@ -66,6 +69,9 @@
#include <mm/mmu_decl.h>
+#include "internal.h"
+
+
#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
#else
@@ -95,8 +101,6 @@
*/
static unsigned long _SDR1;
-struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
-EXPORT_SYMBOL_GPL(mmu_psize_defs);
u8 hpte_page_sizes[1 << LP_BITS];
EXPORT_SYMBOL_GPL(hpte_page_sizes);
@@ -109,9 +113,7 @@ int mmu_linear_psize = MMU_PAGE_4K;
EXPORT_SYMBOL_GPL(mmu_linear_psize);
int mmu_virtual_psize = MMU_PAGE_4K;
int mmu_vmalloc_psize = MMU_PAGE_4K;
-#ifdef CONFIG_SPARSEMEM_VMEMMAP
-int mmu_vmemmap_psize = MMU_PAGE_4K;
-#endif
+EXPORT_SYMBOL_GPL(mmu_vmalloc_psize);
int mmu_io_psize = MMU_PAGE_4K;
int mmu_kernel_ssize = MMU_SEGSIZE_256M;
EXPORT_SYMBOL_GPL(mmu_kernel_ssize);
@@ -121,11 +123,8 @@ EXPORT_SYMBOL_GPL(mmu_slb_size);
#ifdef CONFIG_PPC_64K_PAGES
int mmu_ci_restrictions;
#endif
-#ifdef CONFIG_DEBUG_PAGEALLOC
static u8 *linear_map_hash_slots;
static unsigned long linear_map_hash_count;
-static DEFINE_SPINLOCK(linear_map_hash_lock);
-#endif /* CONFIG_DEBUG_PAGEALLOC */
struct mmu_hash_ops mmu_hash_ops;
EXPORT_SYMBOL(mmu_hash_ops);
@@ -170,6 +169,110 @@ static struct mmu_psize_def mmu_psize_defaults_gp[] = {
},
};
+static inline void tlbiel_hash_set_isa206(unsigned int set, unsigned int is)
+{
+ unsigned long rb;
+
+ rb = (set << PPC_BITLSHIFT(51)) | (is << PPC_BITLSHIFT(53));
+
+ asm volatile("tlbiel %0" : : "r" (rb));
+}
+
+/*
+ * tlbiel instruction for hash, set invalidation
+ * i.e., r=1 and is=01 or is=10 or is=11
+ */
+static __always_inline void tlbiel_hash_set_isa300(unsigned int set, unsigned int is,
+ unsigned int pid,
+ unsigned int ric, unsigned int prs)
+{
+ unsigned long rb;
+ unsigned long rs;
+ unsigned int r = 0; /* hash format */
+
+ rb = (set << PPC_BITLSHIFT(51)) | (is << PPC_BITLSHIFT(53));
+ rs = ((unsigned long)pid << PPC_BITLSHIFT(31));
+
+ asm volatile(PPC_TLBIEL(%0, %1, %2, %3, %4)
+ : : "r"(rb), "r"(rs), "i"(ric), "i"(prs), "i"(r)
+ : "memory");
+}
+
+
+static void tlbiel_all_isa206(unsigned int num_sets, unsigned int is)
+{
+ unsigned int set;
+
+ asm volatile("ptesync": : :"memory");
+
+ for (set = 0; set < num_sets; set++)
+ tlbiel_hash_set_isa206(set, is);
+
+ ppc_after_tlbiel_barrier();
+}
+
+static void tlbiel_all_isa300(unsigned int num_sets, unsigned int is)
+{
+ unsigned int set;
+
+ asm volatile("ptesync": : :"memory");
+
+ /*
+ * Flush the partition table cache if this is HV mode.
+ */
+ if (early_cpu_has_feature(CPU_FTR_HVMODE))
+ tlbiel_hash_set_isa300(0, is, 0, 2, 0);
+
+ /*
+ * Now invalidate the process table cache. UPRT=0 HPT modes (what
+ * current hardware implements) do not use the process table, but
+ * add the flushes anyway.
+ *
+ * From ISA v3.0B p. 1078:
+ * The following forms are invalid.
+ * * PRS=1, R=0, and RIC!=2 (The only process-scoped
+ * HPT caching is of the Process Table.)
+ */
+ tlbiel_hash_set_isa300(0, is, 0, 2, 1);
+
+ /*
+ * Then flush the sets of the TLB proper. Hash mode uses
+ * partition scoped TLB translations, which may be flushed
+ * in !HV mode.
+ */
+ for (set = 0; set < num_sets; set++)
+ tlbiel_hash_set_isa300(set, is, 0, 0, 0);
+
+ ppc_after_tlbiel_barrier();
+
+ asm volatile(PPC_ISA_3_0_INVALIDATE_ERAT "; isync" : : :"memory");
+}
+
+void hash__tlbiel_all(unsigned int action)
+{
+ unsigned int is;
+
+ switch (action) {
+ case TLB_INVAL_SCOPE_GLOBAL:
+ is = 3;
+ break;
+ case TLB_INVAL_SCOPE_LPID:
+ is = 2;
+ break;
+ default:
+ BUG();
+ }
+
+ if (early_cpu_has_feature(CPU_FTR_ARCH_300))
+ tlbiel_all_isa300(POWER9_TLB_SETS_HASH, is);
+ else if (early_cpu_has_feature(CPU_FTR_ARCH_207S))
+ tlbiel_all_isa206(POWER8_TLB_SETS, is);
+ else if (early_cpu_has_feature(CPU_FTR_ARCH_206))
+ tlbiel_all_isa206(POWER7_TLB_SETS, is);
+ else
+ WARN(1, "%s called on pre-POWER7 CPU\n", __func__);
+}
+
/*
* 'R' and 'C' update notes:
* - Under pHyp or KVM, the updatepp path will not set C, thus it *will*
@@ -183,7 +286,7 @@ static struct mmu_psize_def mmu_psize_defaults_gp[] = {
* - We make sure R is always set and never lost
* - C is _PAGE_DIRTY, and *should* always be set for a writeable mapping
*/
-unsigned long htab_convert_pte_flags(unsigned long pteflags)
+unsigned long htab_convert_pte_flags(unsigned long pteflags, unsigned long flags)
{
unsigned long rflags = 0;
@@ -237,7 +340,7 @@ unsigned long htab_convert_pte_flags(unsigned long pteflags)
*/
rflags |= HPTE_R_M;
- rflags |= pte_to_hpte_pkey_bits(pteflags);
+ rflags |= pte_to_hpte_pkey_bits(pteflags, flags);
return rflags;
}
@@ -252,13 +355,17 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
shift = mmu_psize_defs[psize].shift;
step = 1 << shift;
- prot = htab_convert_pte_flags(prot);
+ prot = htab_convert_pte_flags(prot, HPTE_USE_KERNEL_KEY);
DBG("htab_bolt_mapping(%lx..%lx -> %lx (%lx,%d,%d)\n",
vstart, vend, pstart, prot, psize, ssize);
- for (vaddr = vstart, paddr = pstart; vaddr < vend;
- vaddr += step, paddr += step) {
+ /* Carefully map only the possible range */
+ vaddr = ALIGN(vstart, step);
+ paddr = ALIGN(pstart, step);
+ vend = ALIGN_DOWN(vend, step);
+
+ for (; vaddr < vend; vaddr += step, paddr += step) {
unsigned long hash, hpteg;
unsigned long vsid = get_kernel_vsid(vaddr, ssize);
unsigned long vpn = hpt_vpn(vaddr, vsid, ssize);
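With made-up numbers, the new alignment means a request that is not step-aligned only bolts the fully covered part: for a 16M step, vstart = 0x1100000 and vend = 0x5200000 become vaddr = 0x2000000 and vend = 0x5000000, so the loop maps 0x2000000-0x4ffffff and leaves the ragged ends to the caller.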
@@ -298,7 +405,7 @@ repeat:
ssize);
if (ret == -1) {
/*
- * Try to to keep bolted entries in primary.
+ * Try to keep bolted entries in primary.
* Remove non bolted entries and try insert again
*/
ret = mmu_hash_ops.hpte_remove(hpteg);
@@ -317,11 +424,9 @@ repeat:
break;
cond_resched();
-#ifdef CONFIG_DEBUG_PAGEALLOC
- if (debug_pagealloc_enabled() &&
+ if (debug_pagealloc_enabled_or_kfence() &&
(paddr >> PAGE_SHIFT) < linear_map_hash_count)
linear_map_hash_slots[paddr >> PAGE_SHIFT] = ret | 0x80;
-#endif /* CONFIG_DEBUG_PAGEALLOC */
}
return ret < 0 ? ret : 0;
}
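
/*
 * Editorial worked example (hypothetical addresses, not from the patch):
 * with a 16M step, the new clamping above bolts only whole steps that
 * lie inside [vstart, vend):
 *
 *	vstart = 0x1080_0000  ->  ALIGN(vstart, 16M)     = 0x1100_0000
 *	vend   = 0x2380_0000  ->  ALIGN_DOWN(vend, 16M)  = 0x2300_0000
 *
 * so the loop covers 0x1100_0000..0x2300_0000 and never creates an HPTE
 * that spills outside the requested range.
 */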
@@ -329,7 +434,7 @@ repeat:
int htab_remove_mapping(unsigned long vstart, unsigned long vend,
int psize, int ssize)
{
- unsigned long vaddr;
+ unsigned long vaddr, time_limit;
unsigned int step, shift;
int rc;
int ret = 0;
@@ -340,8 +445,21 @@ int htab_remove_mapping(unsigned long vstart, unsigned long vend,
if (!mmu_hash_ops.hpte_removebolted)
return -ENODEV;
- for (vaddr = vstart; vaddr < vend; vaddr += step) {
+ /* Unmap the full range specified */
+ vaddr = ALIGN_DOWN(vstart, step);
+ time_limit = jiffies + HZ;
+
+ for (; vaddr < vend; vaddr += step) {
rc = mmu_hash_ops.hpte_removebolted(vaddr, psize, ssize);
+
+ /*
+ * For large number of mappings introduce a cond_resched()
+ * to prevent softlockup warnings.
+ */
+ if (time_after(jiffies, time_limit)) {
+ cond_resched();
+ time_limit = jiffies + HZ;
+ }
if (rc == -ENOENT) {
ret = -ENOENT;
continue;
@@ -541,7 +659,7 @@ static int __init htab_dt_scan_hugepage_blocks(unsigned long node,
}
#endif /* CONFIG_HUGETLB_PAGE */
-static void mmu_psize_set_default_penc(void)
+static void __init mmu_psize_set_default_penc(void)
{
int bpsize, apsize;
for (bpsize = 0; bpsize < MMU_PAGE_COUNT; bpsize++)
@@ -551,7 +669,7 @@ static void mmu_psize_set_default_penc(void)
#ifdef CONFIG_PPC_64K_PAGES
-static bool might_have_hea(void)
+static bool __init might_have_hea(void)
{
/*
* The HEA ethernet adapter requires awareness of the
@@ -593,7 +711,7 @@ static void __init htab_scan_page_sizes(void)
}
#ifdef CONFIG_HUGETLB_PAGE
- if (!hugetlb_disabled) {
+ if (!hugetlb_disabled && !early_radix_enabled()) {
/* Reserve 16G huge page memory sections for huge pages */
of_scan_flat_dt(htab_dt_scan_hugepage_blocks, NULL);
}
@@ -622,7 +740,7 @@ static void __init htab_scan_page_sizes(void)
* low-order N bits as the encoding for the 2^(12+N) byte page size
* (if it exists).
*/
-static void init_hpte_page_sizes(void)
+static void __init init_hpte_page_sizes(void)
{
long int ap, bp;
long int shift, penc;
@@ -655,16 +773,15 @@ static void __init htab_init_page_sizes(void)
bool aligned = true;
init_hpte_page_sizes();
- if (!debug_pagealloc_enabled()) {
+ if (!debug_pagealloc_enabled_or_kfence()) {
/*
* Pick a size for the linear mapping. Currently, we only
* support 16M, 1M and 4K which is the default
*/
- if (IS_ENABLED(STRICT_KERNEL_RWX) &&
+ if (IS_ENABLED(CONFIG_STRICT_KERNEL_RWX) &&
(unsigned long)_stext % 0x1000000) {
if (mmu_psize_defs[MMU_PAGE_16M].shift)
- pr_warn("Kernel not 16M aligned, "
- "disabling 16M linear map alignment");
+ pr_warn("Kernel not 16M aligned, disabling 16M linear map alignment\n");
aligned = false;
}
@@ -785,7 +902,7 @@ static unsigned long __init htab_get_table_size(void)
}
#ifdef CONFIG_MEMORY_HOTPLUG
-int resize_hpt_for_hotplug(unsigned long new_mem_size)
+static int resize_hpt_for_hotplug(unsigned long new_mem_size)
{
unsigned target_hpt_shift;
@@ -809,7 +926,8 @@ int resize_hpt_for_hotplug(unsigned long new_mem_size)
return 0;
}
-int hash__create_section_mapping(unsigned long start, unsigned long end, int nid)
+int hash__create_section_mapping(unsigned long start, unsigned long end,
+ int nid, pgprot_t prot)
{
int rc;
@@ -818,8 +936,10 @@ int hash__create_section_mapping(unsigned long start, unsigned long end, int nid
return -1;
}
+ resize_hpt_for_hotplug(memblock_phys_mem_size());
+
rc = htab_bolt_mapping(start, end, __pa(start),
- pgprot_val(PAGE_KERNEL), mmu_linear_psize,
+ pgprot_val(prot), mmu_linear_psize,
mmu_kernel_ssize);
if (rc < 0) {
@@ -834,7 +954,10 @@ int hash__remove_section_mapping(unsigned long start, unsigned long end)
{
int rc = htab_remove_mapping(start, end, mmu_linear_psize,
mmu_kernel_ssize);
- WARN_ON(rc < 0);
+
+ if (resize_hpt_for_hotplug(memblock_phys_mem_size()) == -ENOSPC)
+ pr_warn("Hash collision while resizing HPT\n");
+
return rc;
}
#endif /* CONFIG_MEMORY_HOTPLUG */
@@ -858,8 +981,8 @@ static void __init htab_initialize(void)
unsigned long table;
unsigned long pteg_count;
unsigned long prot;
- unsigned long base = 0, size = 0;
- struct memblock_region *reg;
+ phys_addr_t base = 0, size = 0, end;
+ u64 i;
DBG(" -> htab_initialize()\n");
@@ -869,10 +992,13 @@ static void __init htab_initialize(void)
printk(KERN_INFO "Using 1TB segments\n");
}
+ if (stress_slb_enabled)
+ static_branch_enable(&stress_slb_key);
+
/*
* Calculate the required size of the htab. We want the number of
* PTEGs to equal one half the number of real pages.
- */
+ */
htab_size_bytes = htab_get_table_size();
pteg_count = htab_size_bytes >> 7;
@@ -882,7 +1008,7 @@ static void __init htab_initialize(void)
firmware_has_feature(FW_FEATURE_PS3_LV1)) {
/* Using a hypervisor which owns the htab */
htab_address = NULL;
- _SDR1 = 0;
+ _SDR1 = 0;
#ifdef CONFIG_FA_DUMP
/*
* If firmware assisted dump is active firmware preserves
@@ -935,8 +1061,7 @@ static void __init htab_initialize(void)
prot = pgprot_val(PAGE_KERNEL);
-#ifdef CONFIG_DEBUG_PAGEALLOC
- if (debug_pagealloc_enabled()) {
+ if (debug_pagealloc_enabled_or_kfence()) {
linear_map_hash_count = memblock_end_of_DRAM() >> PAGE_SHIFT;
linear_map_hash_slots = memblock_alloc_try_nid(
linear_map_hash_count, 1, MEMBLOCK_LOW_LIMIT,
@@ -945,12 +1070,11 @@ static void __init htab_initialize(void)
panic("%s: Failed to allocate %lu bytes max_addr=%pa\n",
__func__, linear_map_hash_count, &ppc64_rma_size);
}
-#endif /* CONFIG_DEBUG_PAGEALLOC */
/* create bolted the linear mapping in the hash table */
- for_each_memblock(memory, reg) {
- base = (unsigned long)__va(reg->base);
- size = reg->size;
+ for_each_mem_range(i, &base, &end) {
+ size = end - base;
+ base = (unsigned long)__va(base);
DBG("creating mapping for region: %lx..%lx (prot: %lx)\n",
base, size, prot);
@@ -1061,7 +1185,7 @@ void __init hash__early_init_mmu(void)
ps3_early_mm_init();
else if (firmware_has_feature(FW_FEATURE_LPAR))
hpte_init_pseries();
- else if (IS_ENABLED(CONFIG_PPC_NATIVE))
+ else if (IS_ENABLED(CONFIG_PPC_HASH_MMU_NATIVE))
hpte_init_native();
if (!mmu_hash_ops.hpte_insert)
@@ -1104,6 +1228,11 @@ void hash__early_init_mmu_secondary(void)
if (cpu_has_feature(CPU_FTR_ARCH_206)
&& cpu_has_feature(CPU_FTR_HVMODE))
tlbiel_all();
+
+#ifdef CONFIG_PPC_MEM_KEYS
+ if (mmu_has_feature(MMU_FTR_PKEY))
+ mtspr(SPRN_UAMOR, default_uamor);
+#endif
}
#endif /* CONFIG_SMP */
@@ -1120,17 +1249,16 @@ unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap)
page = pte_page(pte);
/* page is dirty */
- if (!test_bit(PG_arch_1, &page->flags) && !PageReserved(page)) {
- if (trap == 0x400) {
+ if (!test_bit(PG_dcache_clean, &page->flags) && !PageReserved(page)) {
+ if (trap == INTERRUPT_INST_STORAGE) {
flush_dcache_icache_page(page);
- set_bit(PG_arch_1, &page->flags);
+ set_bit(PG_dcache_clean, &page->flags);
} else
pp |= HPTE_R_N;
}
return pp;
}
-#ifdef CONFIG_PPC_MM_SLICES
static unsigned int get_paca_psize(unsigned long addr)
{
unsigned char *psizes;
@@ -1147,12 +1275,6 @@ static unsigned int get_paca_psize(unsigned long addr)
return (psizes[index >> 1] >> (mask_index * 4)) & 0xF;
}
-#else
-unsigned int get_paca_psize(unsigned long addr)
-{
- return get_paca()->mm_ctx_user_psize;
-}
-#endif
/*
* Demote a segment to using 4k pages.
@@ -1209,7 +1331,7 @@ static int subpage_protection(struct mm_struct *mm, unsigned long ea)
spp >>= 30 - 2 * ((ea >> 12) & 0xf);
/*
- * 0 -> full premission
+ * 0 -> full permission
* 1 -> Read only
* 2 -> no access.
* We return the flag that need to be cleared.
@@ -1265,7 +1387,6 @@ int hash_page_mm(struct mm_struct *mm, unsigned long ea,
unsigned long flags)
{
bool is_thp;
- enum ctx_state prev_state = exception_enter();
pgd_t *pgdir;
unsigned long vsid;
pte_t *ptep;
@@ -1294,12 +1415,14 @@ int hash_page_mm(struct mm_struct *mm, unsigned long ea,
vsid = get_kernel_vsid(ea, mmu_kernel_ssize);
psize = mmu_vmalloc_psize;
ssize = mmu_kernel_ssize;
+ flags |= HPTE_USE_KERNEL_KEY;
break;
case IO_REGION_ID:
vsid = get_kernel_vsid(ea, mmu_kernel_ssize);
psize = mmu_io_psize;
ssize = mmu_kernel_ssize;
+ flags |= HPTE_USE_KERNEL_KEY;
break;
default:
/*
@@ -1349,8 +1472,15 @@ int hash_page_mm(struct mm_struct *mm, unsigned long ea,
goto bail;
}
- /* Add _PAGE_PRESENT to the required access perm */
- access |= _PAGE_PRESENT;
+ /*
+ * Add _PAGE_PRESENT to the required access perm. If there are parallel
+ * updates to the pte that can possibly clear _PAGE_PTE, catch that too.
+ *
+ * We can safely use the returned pte address in the rest of the function
+ * because we set H_PAGE_BUSY, which prevents further updates to the pte
+ * from generic code.
+ */
+ access |= _PAGE_PRESENT | _PAGE_PTE;
/*
* Pre-check access permissions (will be re-checked atomically
@@ -1458,7 +1588,6 @@ int hash_page_mm(struct mm_struct *mm, unsigned long ea,
DBG_LOW(" -> rc=%d\n", rc);
bail:
- exception_exit(prev_state);
return rc;
}
EXPORT_SYMBOL_GPL(hash_page_mm);
@@ -1480,16 +1609,26 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap,
}
EXPORT_SYMBOL_GPL(hash_page);
-int __hash_page(unsigned long trap, unsigned long ea, unsigned long dsisr,
- unsigned long msr)
+DEFINE_INTERRUPT_HANDLER(do_hash_fault)
{
+ unsigned long ea = regs->dar;
+ unsigned long dsisr = regs->dsisr;
unsigned long access = _PAGE_PRESENT | _PAGE_READ;
unsigned long flags = 0;
- struct mm_struct *mm = current->mm;
- unsigned int region_id = get_region_id(ea);
+ struct mm_struct *mm;
+ unsigned int region_id;
+ long err;
+
+ if (unlikely(dsisr & (DSISR_BAD_FAULT_64S | DSISR_KEYFAULT))) {
+ hash__do_page_fault(regs);
+ return;
+ }
+ region_id = get_region_id(ea);
if ((region_id == VMALLOC_REGION_ID) || (region_id == IO_REGION_ID))
mm = &init_mm;
+ else
+ mm = current->mm;
if (dsisr & DSISR_NOHPTE)
flags |= HPTE_NOHPTE_UPDATE;
@@ -1505,16 +1644,30 @@ int __hash_page(unsigned long trap, unsigned long ea, unsigned long dsisr,
* 2) user space access kernel space.
*/
access |= _PAGE_PRIVILEGED;
- if ((msr & MSR_PR) || (region_id == USER_REGION_ID))
+ if (user_mode(regs) || (region_id == USER_REGION_ID))
access &= ~_PAGE_PRIVILEGED;
- if (trap == 0x400)
+ if (TRAP(regs) == INTERRUPT_INST_STORAGE)
access |= _PAGE_EXEC;
- return hash_page_mm(mm, ea, access, trap, flags);
+ err = hash_page_mm(mm, ea, access, TRAP(regs), flags);
+ if (unlikely(err < 0)) {
+ // failed to insert a hash PTE due to a hypervisor error
+ if (user_mode(regs)) {
+ if (IS_ENABLED(CONFIG_PPC_SUBPAGE_PROT) && err == -2)
+ _exception(SIGSEGV, regs, SEGV_ACCERR, ea);
+ else
+ _exception(SIGBUS, regs, BUS_ADRERR, ea);
+ } else {
+ bad_page_fault(regs, SIGBUS);
+ }
+ err = 0;
+
+ } else if (err) {
+ hash__do_page_fault(regs);
+ }
}
-#ifdef CONFIG_PPC_MM_SLICES
static bool should_hash_preload(struct mm_struct *mm, unsigned long ea)
{
int psize = get_slice_psize(mm, ea);
@@ -1531,23 +1684,15 @@ static bool should_hash_preload(struct mm_struct *mm, unsigned long ea)
return true;
}
-#else
-static bool should_hash_preload(struct mm_struct *mm, unsigned long ea)
-{
- return true;
-}
-#endif
-static void hash_preload(struct mm_struct *mm, unsigned long ea,
+static void hash_preload(struct mm_struct *mm, pte_t *ptep, unsigned long ea,
bool is_exec, unsigned long trap)
{
- int hugepage_shift;
unsigned long vsid;
pgd_t *pgdir;
- pte_t *ptep;
- unsigned long flags;
int rc, ssize, update_flags = 0;
unsigned long access = _PAGE_PRESENT | _PAGE_READ | (is_exec ? _PAGE_EXEC : 0);
+ unsigned long flags;
BUG_ON(get_region_id(ea) != USER_REGION_ID);
@@ -1567,32 +1712,34 @@ static void hash_preload(struct mm_struct *mm, unsigned long ea,
vsid = get_user_vsid(&mm->context, ea, ssize);
if (!vsid)
return;
- /*
- * Hash doesn't like irqs. Walking linux page table with irq disabled
- * saves us from holding multiple locks.
- */
- local_irq_save(flags);
- /*
- * THP pages use update_mmu_cache_pmd. We don't do
- * hash preload there. Hence can ignore THP here
- */
- ptep = find_current_mm_pte(pgdir, ea, NULL, &hugepage_shift);
- if (!ptep)
- goto out_exit;
-
- WARN_ON(hugepage_shift);
#ifdef CONFIG_PPC_64K_PAGES
/* If either H_PAGE_4K_PFN or cache inhibited is set (and we are on
* a 64K kernel), then we don't preload, hash_page() will take
* care of it once we actually try to access the page.
* That way we don't have to duplicate all of the logic for segment
* page size demotion here
+ * Called with the PTL held, hence we can be sure the value won't
+ * change in between.
*/
if ((pte_val(*ptep) & H_PAGE_4K_PFN) || pte_ci(*ptep))
- goto out_exit;
+ return;
#endif /* CONFIG_PPC_64K_PAGES */
+ /*
+ * __hash_page_* must run with interrupts off, including PMI interrupts
+ * off, as it sets the H_PAGE_BUSY bit.
+ *
+ * It's otherwise possible for perf interrupts to hit at any time and
+ * may take a hash fault reading the user stack, which could take a
+ * hash miss and deadlock on the same H_PAGE_BUSY bit.
+ *
+ * Interrupts must also be off for the duration of the
+ * mm_is_thread_local test and update, to prevent preempt running the
+ * mm on another CPU (XXX: this may be racy vs kthread_use_mm).
+ */
+ powerpc_local_irq_pmu_save(flags);
+
/* Is that local to this CPU ? */
if (mm_is_thread_local(mm))
update_flags |= HPTE_LOCAL_UPDATE;
@@ -1615,8 +1762,8 @@ static void hash_preload(struct mm_struct *mm, unsigned long ea,
mm_ctx_user_psize(&mm->context),
mm_ctx_user_psize(&mm->context),
pte_val(*ptep));
-out_exit:
- local_irq_restore(flags);
+
+ powerpc_local_irq_pmu_restore(flags);
}
/*
@@ -1627,7 +1774,7 @@ out_exit:
*
* This must always be called with the pte lock held.
*/
-void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
+void __update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
pte_t *ptep)
{
/*
@@ -1637,11 +1784,6 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
unsigned long trap;
bool is_exec;
- if (radix_enabled()) {
- prefetch((void *)address);
- return;
- }
-
/* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
if (!pte_young(*ptep) || address >= TASK_SIZE)
return;
@@ -1667,33 +1809,9 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
return;
}
- hash_preload(vma->vm_mm, address, is_exec, trap);
+ hash_preload(vma->vm_mm, ptep, address, is_exec, trap);
}
-#ifdef CONFIG_PPC_MEM_KEYS
-/*
- * Return the protection key associated with the given address and the
- * mm_struct.
- */
-u16 get_mm_addr_key(struct mm_struct *mm, unsigned long address)
-{
- pte_t *ptep;
- u16 pkey = 0;
- unsigned long flags;
-
- if (!mm || !mm->pgd)
- return 0;
-
- local_irq_save(flags);
- ptep = find_linux_pte(mm->pgd, address, NULL, NULL);
- if (ptep)
- pkey = pte_to_pkey_bits(pte_val(READ_ONCE(*ptep)));
- local_irq_restore(flags);
-
- return pkey;
-}
-#endif /* CONFIG_PPC_MEM_KEYS */
-
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
static inline void tm_flush_hash_page(int local)
{
@@ -1735,10 +1853,6 @@ unsigned long pte_get_hash_gslot(unsigned long vpn, unsigned long shift,
return gslot;
}
-/*
- * WARNING: This is called from hash_low_64.S, if you change this prototype,
- * do not forget to update the assembly call site !
- */
void flush_hash_page(unsigned long vpn, real_pte_t pte, int psize, int ssize,
unsigned long flags)
{
@@ -1833,27 +1947,6 @@ void flush_hash_range(unsigned long number, int local)
}
}
-/*
- * low_hash_fault is called when we the low level hash code failed
- * to instert a PTE due to an hypervisor error
- */
-void low_hash_fault(struct pt_regs *regs, unsigned long address, int rc)
-{
- enum ctx_state prev_state = exception_enter();
-
- if (user_mode(regs)) {
-#ifdef CONFIG_PPC_SUBPAGE_PROT
- if (rc == -2)
- _exception(SIGSEGV, regs, SEGV_ACCERR, address);
- else
-#endif
- _exception(SIGBUS, regs, BUS_ADRERR, address);
- } else
- bad_page_fault(regs, address, SIGBUS);
-
- exception_exit(prev_state);
-}
-
long hpte_insert_repeating(unsigned long hash, unsigned long vpn,
unsigned long pa, unsigned long rflags,
unsigned long vflags, int psize, int ssize)
@@ -1887,13 +1980,15 @@ repeat:
return slot;
}
-#ifdef CONFIG_DEBUG_PAGEALLOC
+#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KFENCE)
+static DEFINE_RAW_SPINLOCK(linear_map_hash_lock);
+
static void kernel_map_linear_page(unsigned long vaddr, unsigned long lmi)
{
unsigned long hash;
unsigned long vsid = get_kernel_vsid(vaddr, mmu_kernel_ssize);
unsigned long vpn = hpt_vpn(vaddr, vsid, mmu_kernel_ssize);
- unsigned long mode = htab_convert_pte_flags(pgprot_val(PAGE_KERNEL));
+ unsigned long mode = htab_convert_pte_flags(pgprot_val(PAGE_KERNEL), HPTE_USE_KERNEL_KEY);
long ret;
hash = hpt_hash(vpn, PAGE_SHIFT, mmu_kernel_ssize);
@@ -1902,15 +1997,18 @@ static void kernel_map_linear_page(unsigned long vaddr, unsigned long lmi)
if (!vsid)
return;
+ if (linear_map_hash_slots[lmi] & 0x80)
+ return;
+
ret = hpte_insert_repeating(hash, vpn, __pa(vaddr), mode,
HPTE_V_BOLTED,
mmu_linear_psize, mmu_kernel_ssize);
BUG_ON (ret < 0);
- spin_lock(&linear_map_hash_lock);
+ raw_spin_lock(&linear_map_hash_lock);
BUG_ON(linear_map_hash_slots[lmi] & 0x80);
linear_map_hash_slots[lmi] = ret | 0x80;
- spin_unlock(&linear_map_hash_lock);
+ raw_spin_unlock(&linear_map_hash_lock);
}
static void kernel_unmap_linear_page(unsigned long vaddr, unsigned long lmi)
@@ -1920,11 +2018,14 @@ static void kernel_unmap_linear_page(unsigned long vaddr, unsigned long lmi)
unsigned long vpn = hpt_vpn(vaddr, vsid, mmu_kernel_ssize);
hash = hpt_hash(vpn, PAGE_SHIFT, mmu_kernel_ssize);
- spin_lock(&linear_map_hash_lock);
- BUG_ON(!(linear_map_hash_slots[lmi] & 0x80));
+ raw_spin_lock(&linear_map_hash_lock);
+ if (!(linear_map_hash_slots[lmi] & 0x80)) {
+ raw_spin_unlock(&linear_map_hash_lock);
+ return;
+ }
hidx = linear_map_hash_slots[lmi] & 0x7f;
linear_map_hash_slots[lmi] = 0;
- spin_unlock(&linear_map_hash_lock);
+ raw_spin_unlock(&linear_map_hash_lock);
if (hidx & _PTEIDX_SECONDARY)
hash = ~hash;
slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
@@ -1934,7 +2035,7 @@ static void kernel_unmap_linear_page(unsigned long vaddr, unsigned long lmi)
mmu_kernel_ssize, 0);
}
-void __kernel_map_pages(struct page *page, int numpages, int enable)
+void hash__kernel_map_pages(struct page *page, int numpages, int enable)
{
unsigned long flags, vaddr, lmi;
int i;
@@ -1952,7 +2053,7 @@ void __kernel_map_pages(struct page *page, int numpages, int enable)
}
local_irq_restore(flags);
}
-#endif /* CONFIG_DEBUG_PAGEALLOC */
+#endif /* CONFIG_DEBUG_PAGEALLOC || CONFIG_KFENCE */
void hash__setup_initial_memory_limit(phys_addr_t first_memblock_base,
phys_addr_t first_memblock_size)
@@ -2018,11 +2119,8 @@ DEFINE_DEBUGFS_ATTRIBUTE(fops_hpt_order, hpt_order_get, hpt_order_set, "%llu\n")
static int __init hash64_debugfs(void)
{
- if (!debugfs_create_file_unsafe("hpt_order", 0600, powerpc_debugfs_root,
- NULL, &fops_hpt_order)) {
- pr_err("lpar: unable to create hpt_order debugsfs file\n");
- }
-
+ debugfs_create_file("hpt_order", 0600, arch_debugfs_dir, NULL,
+ &fops_hpt_order);
return 0;
}
machine_device_initcall(pseries, hash64_debugfs);
@@ -2035,3 +2133,20 @@ void __init print_system_hash_info(void)
if (htab_hash_mask)
pr_info("htab_hash_mask = 0x%lx\n", htab_hash_mask);
}
+
+unsigned long arch_randomize_brk(struct mm_struct *mm)
+{
+ /*
+ * If we are using 1TB segments and we are allowed to randomise
+ * the heap, we can put it above 1TB so it is backed by a 1TB
+ * segment. Otherwise the heap will be in the bottom 1TB
+ * which always uses 256MB segments and this may result in a
+ * performance penalty.
+ */
+ if (is_32bit_task())
+ return randomize_page(mm->brk, SZ_32M);
+ else if (!radix_enabled() && mmu_highuser_ssize == MMU_SEGSIZE_1T)
+ return randomize_page(max_t(unsigned long, mm->brk, SZ_1T), SZ_1G);
+ else
+ return randomize_page(mm->brk, SZ_1G);
+}
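
/*
 * Editorial sketch, not from this diff: the generic ELF loader is the
 * expected consumer of arch_randomize_brk(); with ASLR enabled it
 * replaces the heap start with the randomised value, roughly:
 */
if (current->flags & PF_RANDOMIZE)
	mm->brk = arch_randomize_brk(mm);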
diff --git a/arch/powerpc/mm/book3s64/hash_hugetlbpage.c b/arch/powerpc/mm/book3s64/hugetlbpage.c
index eefa89c6117b..3bc0eb21b2a0 100644
--- a/arch/powerpc/mm/book3s64/hash_hugetlbpage.c
+++ b/arch/powerpc/mm/book3s64/hugetlbpage.c
@@ -10,18 +10,13 @@
#include <linux/mm.h>
#include <linux/hugetlb.h>
-#include <asm/pgtable.h>
-#include <asm/pgalloc.h>
#include <asm/cacheflush.h>
#include <asm/machdep.h>
unsigned int hpage_shift;
EXPORT_SYMBOL(hpage_shift);
-extern long hpte_insert_repeating(unsigned long hash, unsigned long vpn,
- unsigned long pa, unsigned long rlags,
- unsigned long vflags, int psize, int ssize);
-
+#ifdef CONFIG_PPC_64S_HASH_MMU
int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
pte_t *ptep, unsigned long trap, unsigned long flags,
int ssize, unsigned int shift, unsigned int mmu_psize)
@@ -72,7 +67,7 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
if (old_pte & (H_PAGE_THP_HUGE | _PAGE_DEVMAP))
return 0;
- rflags = htab_convert_pte_flags(new_pte);
+ rflags = htab_convert_pte_flags(new_pte, flags);
if (unlikely(mmu_psize == MMU_PAGE_16G))
offset = PTRS_PER_PUD;
else
@@ -128,6 +123,7 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
*ptep = __pte(new_pte & ~H_PAGE_BUSY);
return 0;
}
+#endif
pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep)
@@ -154,7 +150,7 @@ void huge_ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr
set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
}
-void hugetlbpage_init_default(void)
+void __init hugetlbpage_init_defaultsize(void)
{
/* Set default large page size. Currently, we pick 16M or 1M
* depending on what is available
diff --git a/arch/powerpc/mm/book3s64/internal.h b/arch/powerpc/mm/book3s64/internal.h
new file mode 100644
index 000000000000..5045048ce244
--- /dev/null
+++ b/arch/powerpc/mm/book3s64/internal.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef ARCH_POWERPC_MM_BOOK3S64_INTERNAL_H
+#define ARCH_POWERPC_MM_BOOK3S64_INTERNAL_H
+
+#include <linux/jump_label.h>
+
+extern bool stress_slb_enabled;
+
+DECLARE_STATIC_KEY_FALSE(stress_slb_key);
+
+static inline bool stress_slb(void)
+{
+ return static_branch_unlikely(&stress_slb_key);
+}
+
+void slb_setup_new_exec(void);
+
+void exit_lazy_flush_tlb(struct mm_struct *mm, bool always_flush);
+
+#endif /* ARCH_POWERPC_MM_BOOK3S64_INTERNAL_H */
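
/*
 * Editorial sketch, not part of the new header: the declarations above
 * pair with definitions on the .c side, conceptually:
 *
 *	bool stress_slb_enabled;                 // set from an early boot parameter
 *	DEFINE_STATIC_KEY_FALSE(stress_slb_key); // enabled once during MMU init
 *
 * after which stress_slb() costs a single patched branch in SLB hot paths.
 */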
diff --git a/arch/powerpc/mm/book3s64/iommu_api.c b/arch/powerpc/mm/book3s64/iommu_api.c
index eba73ebd8ae5..7fcfba162e0d 100644
--- a/arch/powerpc/mm/book3s64/iommu_api.c
+++ b/arch/powerpc/mm/book3s64/iommu_api.c
@@ -96,7 +96,7 @@ static long mm_iommu_do_alloc(struct mm_struct *mm, unsigned long ua,
goto unlock_exit;
}
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
chunk = (1UL << (PAGE_SHIFT + MAX_ORDER - 1)) /
sizeof(struct vm_area_struct *);
chunk = min(chunk, entries);
@@ -114,31 +114,13 @@ static long mm_iommu_do_alloc(struct mm_struct *mm, unsigned long ua,
pinned += ret;
break;
}
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
if (pinned != entries) {
if (!ret)
ret = -EFAULT;
goto free_exit;
}
- pageshift = PAGE_SHIFT;
- for (i = 0; i < entries; ++i) {
- struct page *page = mem->hpages[i];
-
- /*
- * Allow to use larger than 64k IOMMU pages. Only do that
- * if we are backed by hugetlb.
- */
- if ((mem->pageshift > PAGE_SHIFT) && PageHuge(page))
- pageshift = page_shift(compound_head(page));
- mem->pageshift = min(mem->pageshift, pageshift);
- /*
- * We don't need struct page reference any more, switch
- * to physical address.
- */
- mem->hpas[i] = page_to_pfn(page) << PAGE_SHIFT;
- }
-
good_exit:
atomic64_set(&mem->mapped, 1);
mem->used = 1;
@@ -147,7 +129,8 @@ good_exit:
mutex_lock(&mem_list_mutex);
- list_for_each_entry_rcu(mem2, &mm->context.iommu_group_mem_list, next) {
+ list_for_each_entry_rcu(mem2, &mm->context.iommu_group_mem_list, next,
+ lockdep_is_held(&mem_list_mutex)) {
/* Overlap? */
if ((mem2->ua < (ua + (entries << PAGE_SHIFT))) &&
(ua < (mem2->ua +
@@ -158,6 +141,27 @@ good_exit:
}
}
+ if (mem->dev_hpa == MM_IOMMU_TABLE_INVALID_HPA) {
+ /*
+ * Allow using IOMMU pages larger than 64K, but only when we are
+ * backed by hugetlb. Skip device memory, which is not backed by
+ * page structs.
+ */
+ pageshift = PAGE_SHIFT;
+ for (i = 0; i < entries; ++i) {
+ struct page *page = mem->hpages[i];
+
+ if ((mem->pageshift > PAGE_SHIFT) && PageHuge(page))
+ pageshift = page_shift(compound_head(page));
+ mem->pageshift = min(mem->pageshift, pageshift);
+ /*
+ * We don't need struct page reference any more, switch
+ * to physical address.
+ */
+ mem->hpas[i] = page_to_pfn(page) << PAGE_SHIFT;
+ }
+ }
+
list_add_rcu(&mem->next, &mm->context.iommu_group_mem_list);
mutex_unlock(&mem_list_mutex);
@@ -260,7 +264,7 @@ long mm_iommu_put(struct mm_struct *mm, struct mm_iommu_table_group_mem_t *mem)
goto unlock_exit;
/* Are there still mappings? */
- if (atomic_cmpxchg(&mem->mapped, 1, 0) != 1) {
+ if (atomic64_cmpxchg(&mem->mapped, 1, 0) != 1) {
++mem->used;
ret = -EBUSY;
goto unlock_exit;
@@ -286,6 +290,7 @@ struct mm_iommu_table_group_mem_t *mm_iommu_lookup(struct mm_struct *mm,
{
struct mm_iommu_table_group_mem_t *mem, *ret = NULL;
+ rcu_read_lock();
list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list, next) {
if ((mem->ua <= ua) &&
(ua + size <= mem->ua +
@@ -294,29 +299,12 @@ struct mm_iommu_table_group_mem_t *mm_iommu_lookup(struct mm_struct *mm,
break;
}
}
+ rcu_read_unlock();
return ret;
}
EXPORT_SYMBOL_GPL(mm_iommu_lookup);
-struct mm_iommu_table_group_mem_t *mm_iommu_lookup_rm(struct mm_struct *mm,
- unsigned long ua, unsigned long size)
-{
- struct mm_iommu_table_group_mem_t *mem, *ret = NULL;
-
- list_for_each_entry_lockless(mem, &mm->context.iommu_group_mem_list,
- next) {
- if ((mem->ua <= ua) &&
- (ua + size <= mem->ua +
- (mem->entries << PAGE_SHIFT))) {
- ret = mem;
- break;
- }
- }
-
- return ret;
-}
-
struct mm_iommu_table_group_mem_t *mm_iommu_get(struct mm_struct *mm,
unsigned long ua, unsigned long entries)
{
@@ -324,7 +312,8 @@ struct mm_iommu_table_group_mem_t *mm_iommu_get(struct mm_struct *mm,
mutex_lock(&mem_list_mutex);
- list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list, next) {
+ list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list, next,
+ lockdep_is_held(&mem_list_mutex)) {
if ((mem->ua == ua) && (mem->entries == entries)) {
ret = mem;
++mem->used;
@@ -362,62 +351,13 @@ long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
}
EXPORT_SYMBOL_GPL(mm_iommu_ua_to_hpa);
-long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem,
- unsigned long ua, unsigned int pageshift, unsigned long *hpa)
-{
- const long entry = (ua - mem->ua) >> PAGE_SHIFT;
- unsigned long *pa;
-
- if (entry >= mem->entries)
- return -EFAULT;
-
- if (pageshift > mem->pageshift)
- return -EFAULT;
-
- if (!mem->hpas) {
- *hpa = mem->dev_hpa + (ua - mem->ua);
- return 0;
- }
-
- pa = (void *) vmalloc_to_phys(&mem->hpas[entry]);
- if (!pa)
- return -EFAULT;
-
- *hpa = (*pa & MM_IOMMU_TABLE_GROUP_PAGE_MASK) | (ua & ~PAGE_MASK);
-
- return 0;
-}
-
-extern void mm_iommu_ua_mark_dirty_rm(struct mm_struct *mm, unsigned long ua)
-{
- struct mm_iommu_table_group_mem_t *mem;
- long entry;
- void *va;
- unsigned long *pa;
-
- mem = mm_iommu_lookup_rm(mm, ua, PAGE_SIZE);
- if (!mem)
- return;
-
- if (mem->dev_hpa != MM_IOMMU_TABLE_INVALID_HPA)
- return;
-
- entry = (ua - mem->ua) >> PAGE_SHIFT;
- va = &mem->hpas[entry];
-
- pa = (void *) vmalloc_to_phys(va);
- if (!pa)
- return;
-
- *pa |= MM_IOMMU_TABLE_GROUP_PAGE_DIRTY;
-}
-
bool mm_iommu_is_devmem(struct mm_struct *mm, unsigned long hpa,
unsigned int pageshift, unsigned long *size)
{
struct mm_iommu_table_group_mem_t *mem;
unsigned long end;
+ rcu_read_lock();
list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list, next) {
if (mem->dev_hpa == MM_IOMMU_TABLE_INVALID_HPA)
continue;
@@ -434,6 +374,7 @@ bool mm_iommu_is_devmem(struct mm_struct *mm, unsigned long hpa,
return true;
}
}
+ rcu_read_unlock();
return false;
}
diff --git a/arch/powerpc/mm/book3s64/mmu_context.c b/arch/powerpc/mm/book3s64/mmu_context.c
index 0ba30b8b935b..c766e4c26e42 100644
--- a/arch/powerpc/mm/book3s64/mmu_context.c
+++ b/arch/powerpc/mm/book3s64/mmu_context.c
@@ -17,10 +17,13 @@
#include <linux/export.h>
#include <linux/gfp.h>
#include <linux/slab.h>
+#include <linux/cpu.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
+#include "internal.h"
+
static DEFINE_IDA(mmu_context_ida);
static int alloc_context_id(int min_id, int max_id)
@@ -28,7 +31,8 @@ static int alloc_context_id(int min_id, int max_id)
return ida_alloc_range(&mmu_context_ida, min_id, max_id, GFP_KERNEL);
}
-void hash__reserve_context_id(int id)
+#ifdef CONFIG_PPC_64S_HASH_MMU
+void __init hash__reserve_context_id(int id)
{
int result = ida_alloc_range(&mmu_context_ida, id, id, GFP_KERNEL);
@@ -47,9 +51,9 @@ int hash__alloc_context_id(void)
return alloc_context_id(MIN_USER_CONTEXT, max);
}
EXPORT_SYMBOL_GPL(hash__alloc_context_id);
+#endif
-void slb_setup_new_exec(void);
-
+#ifdef CONFIG_PPC_64S_HASH_MMU
static int realloc_context_ids(mm_context_t *ctx)
{
int i, id;
@@ -118,7 +122,7 @@ static int hash__init_new_context(struct mm_struct *mm)
/* This is fork. Copy hash_context details from current->mm */
memcpy(mm->context.hash_context, current->mm->context.hash_context, sizeof(struct hash_mm_context));
#ifdef CONFIG_PPC_SUBPAGE_PROT
- /* inherit subpage prot detalis if we have one. */
+ /* inherit subpage prot details if we have one. */
if (current->mm->context.hash_context->spt) {
mm->context.hash_context->spt = kmalloc(sizeof(struct subpage_prot_table),
GFP_KERNEL);
@@ -149,6 +153,13 @@ void hash__setup_new_exec(void)
slb_setup_new_exec();
}
+#else
+static inline int hash__init_new_context(struct mm_struct *mm)
+{
+ BUILD_BUG();
+ return 0;
+}
+#endif
static int radix__init_new_context(struct mm_struct *mm)
{
@@ -174,7 +185,9 @@ static int radix__init_new_context(struct mm_struct *mm)
*/
asm volatile("ptesync;isync" : : : "memory");
+#ifdef CONFIG_PPC_64S_HASH_MMU
mm->context.hash_context = NULL;
+#endif
return index;
}
@@ -212,14 +225,22 @@ EXPORT_SYMBOL_GPL(__destroy_context);
static void destroy_contexts(mm_context_t *ctx)
{
- int index, context_id;
+ if (radix_enabled()) {
+ ida_free(&mmu_context_ida, ctx->id);
+ } else {
+#ifdef CONFIG_PPC_64S_HASH_MMU
+ int index, context_id;
- for (index = 0; index < ARRAY_SIZE(ctx->extended_id); index++) {
- context_id = ctx->extended_id[index];
- if (context_id)
- ida_free(&mmu_context_ida, context_id);
+ for (index = 0; index < ARRAY_SIZE(ctx->extended_id); index++) {
+ context_id = ctx->extended_id[index];
+ if (context_id)
+ ida_free(&mmu_context_ida, context_id);
+ }
+ kfree(ctx->hash_context);
+#else
+ BUILD_BUG(); // radix_enabled() should be constant true
+#endif
}
- kfree(ctx->hash_context);
}
static void pmd_frag_destroy(void *pmd_frag)
@@ -307,3 +328,22 @@ void radix__switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
isync();
}
#endif
+
+/**
+ * cleanup_cpu_mmu_context - Clean up MMU details for this CPU (newly offlined)
+ *
+ * This clears the CPU from mm_cpumask for all processes, and then flushes the
+ * local TLB to ensure TLB coherency in case the CPU is onlined again.
+ *
+ * KVM guest translations are not necessarily flushed here. If KVM started
+ * using mm_cpumask or the Linux APIs which do, this would have to be resolved.
+ */
+#ifdef CONFIG_HOTPLUG_CPU
+void cleanup_cpu_mmu_context(void)
+{
+ int cpu = smp_processor_id();
+
+ clear_tasks_mm_cpumask(cpu);
+ tlbiel_all();
+}
+#endif
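
/*
 * Editorial sketch (assumption, not shown in this hunk): a platform's
 * CPU-offline path is expected to call the new helper once the CPU has
 * stopped running tasks, along these lines:
 */
static int example_cpu_disable(void)
{
	set_cpu_online(smp_processor_id(), false);
	cleanup_cpu_mmu_context();	/* clear mm_cpumask bits + local TLB flush */
	return 0;
}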
diff --git a/arch/powerpc/mm/book3s64/pgtable.c b/arch/powerpc/mm/book3s64/pgtable.c
index 2bf7e1b4fd82..f6151a589298 100644
--- a/arch/powerpc/mm/book3s64/pgtable.c
+++ b/arch/powerpc/mm/book3s64/pgtable.c
@@ -6,19 +6,31 @@
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/memblock.h>
+#include <linux/memremap.h>
+#include <linux/pkeys.h>
+#include <linux/debugfs.h>
#include <misc/cxl-base.h>
-#include <asm/debugfs.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/trace.h>
#include <asm/powernv.h>
#include <asm/firmware.h>
#include <asm/ultravisor.h>
+#include <asm/kexec.h>
#include <mm/mmu_decl.h>
#include <trace/events/thp.h>
+#include "internal.h"
+
+struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
+EXPORT_SYMBOL_GPL(mmu_psize_defs);
+
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+int mmu_vmemmap_psize = MMU_PAGE_4K;
+#endif
+
unsigned long __pmd_frag_nr;
EXPORT_SYMBOL(__pmd_frag_nr);
unsigned long __pmd_frag_size_shift;
@@ -78,10 +90,15 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
return set_pte_at(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd));
}
-static void do_nothing(void *unused)
+static void do_serialize(void *arg)
{
-
+ /* We've taken the IPI, so try to trim the mask while here */
+ if (radix_enabled()) {
+ struct mm_struct *mm = arg;
+ exit_lazy_flush_tlb(mm, false);
+ }
}
+
/*
* Serialize against find_current_mm_pte which does lock-less
* lookup in page tables with local interrupts disabled. For huge pages
@@ -95,7 +112,7 @@ static void do_nothing(void *unused)
void serialize_against_pte_lookup(struct mm_struct *mm)
{
smp_mb();
- smp_call_function_many(mm_cpumask(mm), do_nothing, NULL, 1);
+ smp_call_function_many(mm_cpumask(mm), do_serialize, mm, 1);
}
/*
@@ -109,15 +126,25 @@ pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
old_pmd = pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, _PAGE_INVALID);
flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
+ return __pmd(old_pmd);
+}
+
+pmd_t pmdp_huge_get_and_clear_full(struct vm_area_struct *vma,
+ unsigned long addr, pmd_t *pmdp, int full)
+{
+ pmd_t pmd;
+ VM_BUG_ON(addr & ~HPAGE_PMD_MASK);
+ VM_BUG_ON((pmd_present(*pmdp) && !pmd_trans_huge(*pmdp) &&
+ !pmd_devmap(*pmdp)) || !pmd_present(*pmdp));
+ pmd = pmdp_huge_get_and_clear(vma->vm_mm, addr, pmdp);
/*
- * This ensures that generic code that rely on IRQ disabling
- * to prevent a parallel THP split work as expected.
- *
- * Marking the entry with _PAGE_INVALID && ~_PAGE_PRESENT requires
- * a special case check in pmd_access_permitted.
+ * If it is not a fullmm flush, then we can possibly end up converting
+ * this PMD pte entry to a regular level 0 PTE by a parallel page fault.
+ * Make sure we flush the tlb in this case.
*/
- serialize_against_pte_lookup(vma->vm_mm);
- return __pmd(old_pmd);
+ if (!full)
+ flush_pmd_tlb_range(vma, addr, addr + HPAGE_PMD_SIZE);
+ return pmd;
}
static pmd_t pmd_set_protbits(pmd_t pmd, pgprot_t pgprot)
@@ -125,12 +152,18 @@ static pmd_t pmd_set_protbits(pmd_t pmd, pgprot_t pgprot)
return __pmd(pmd_val(pmd) | pgprot_val(pgprot));
}
+/*
+ * At some point we should be able to get rid of
+ * pmd_mkhuge() and mk_huge_pmd() when we update all the
+ * other archs to mark the pmd huge in pfn_pmd()
+ */
pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot)
{
unsigned long pmdv;
pmdv = (pfn << PAGE_SHIFT) & PTE_RPN_MASK;
- return pmd_set_protbits(__pmd(pmdv), pgprot);
+
+ return __pmd_mkhuge(pmd_set_protbits(__pmd(pmdv), pgprot));
}
pmd_t mk_pmd(struct page *page, pgprot_t pgprot)
@@ -146,37 +179,27 @@ pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
pmdv &= _HPAGE_CHG_MASK;
return pmd_set_protbits(__pmd(pmdv), newprot);
}
-
-/*
- * This is called at the end of handling a user page fault, when the
- * fault has been handled by updating a HUGE PMD entry in the linux page tables.
- * We use it to preload an HPTE into the hash table corresponding to
- * the updated linux HUGE PMD entry.
- */
-void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
- pmd_t *pmd)
-{
- if (radix_enabled())
- prefetch((void *)addr);
-}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
-/* For use by kexec */
-void mmu_cleanup_all(void)
+/* For use by kexec, called with MMU off */
+notrace void mmu_cleanup_all(void)
{
if (radix_enabled())
radix__mmu_cleanup_all();
else if (mmu_hash_ops.hpte_clear_all)
mmu_hash_ops.hpte_clear_all();
+
+ reset_sprs();
}
#ifdef CONFIG_MEMORY_HOTPLUG
-int __meminit create_section_mapping(unsigned long start, unsigned long end, int nid)
+int __meminit create_section_mapping(unsigned long start, unsigned long end,
+ int nid, pgprot_t prot)
{
if (radix_enabled())
- return radix__create_section_mapping(start, end, nid);
+ return radix__create_section_mapping(start, end, nid, prot);
- return hash__create_section_mapping(start, end, nid);
+ return hash__create_section_mapping(start, end, nid, prot);
}
int __meminit remove_section_mapping(unsigned long start, unsigned long end)
@@ -193,17 +216,12 @@ void __init mmu_partition_table_init(void)
unsigned long patb_size = 1UL << PATB_SIZE_SHIFT;
unsigned long ptcr;
- BUILD_BUG_ON_MSG((PATB_SIZE_SHIFT > 36), "Partition table size too large.");
/* Initialize the Partition Table with no entries */
partition_tb = memblock_alloc(patb_size, patb_size);
if (!partition_tb)
panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
__func__, patb_size, patb_size);
- /*
- * update partition table control register,
- * 64 K size.
- */
ptcr = __pa(partition_tb) | (PATB_SIZE_SHIFT - 12);
set_ptcr_when_no_uv(ptcr);
powernv_set_nmmu_ptcr(ptcr);
@@ -314,7 +332,7 @@ static pmd_t *__alloc_for_pmdcache(struct mm_struct *mm)
spin_lock(&mm->page_table_lock);
/*
* If we find pgtable_page set, we return
- * the allocated page with single fragement
+ * the allocated page with single fragment
* count.
*/
if (likely(!mm->context.pmd_frag)) {
@@ -341,6 +359,9 @@ void pmd_fragment_free(unsigned long *pmd)
{
struct page *page = virt_to_page(pmd);
+ if (PageReserved(page))
+ return free_reserved_page(page);
+
BUG_ON(atomic_read(&page->pt_frag_refcount) <= 0);
if (atomic_dec_and_test(&page->pt_frag_refcount)) {
pgtable_pmd_page_dtor(page);
@@ -358,7 +379,7 @@ static inline void pgtable_free(void *table, int index)
pmd_fragment_free(table);
break;
case PUD_INDEX:
- kmem_cache_free(PGT_CACHE(PUD_CACHE_INDEX), table);
+ __pud_free(table);
break;
#if defined(CONFIG_PPC_4K_PAGES) && defined(CONFIG_HUGETLB_PAGE)
/* 16M hugepd directory at pud level */
@@ -503,9 +524,52 @@ static int __init pgtable_debugfs_setup(void)
* invalidated as expected.
*/
debugfs_create_bool("tlbie_enabled", 0600,
- powerpc_debugfs_root,
+ arch_debugfs_dir,
&tlbie_enabled);
return 0;
}
arch_initcall(pgtable_debugfs_setup);
+
+#if defined(CONFIG_ZONE_DEVICE) && defined(CONFIG_ARCH_HAS_MEMREMAP_COMPAT_ALIGN)
+/*
+ * Override the generic version in mm/memremap.c.
+ *
+ * With hash translation, the direct-map range is mapped with just one
+ * page size selected by htab_init_page_sizes(). Consult
+ * mmu_psize_defs[] to determine the minimum page size alignment.
+ */
+unsigned long memremap_compat_align(void)
+{
+ if (!radix_enabled()) {
+ unsigned int shift = mmu_psize_defs[mmu_linear_psize].shift;
+ return max(SUBSECTION_SIZE, 1UL << shift);
+ }
+
+ return SUBSECTION_SIZE;
+}
+EXPORT_SYMBOL_GPL(memremap_compat_align);
+#endif
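
/*
 * Editorial sketch, not from the patch: callers such as the pmem/nvdimm
 * namespace code use this value as an alignment floor, conceptually:
 *
 *	if (!IS_ALIGNED(start, memremap_compat_align()))
 *		return -EINVAL;
 *
 * Under hash with a 16M linear page size (shift = 24) the helper returns
 * 16M; under radix it falls back to SUBSECTION_SIZE.
 */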
+
+pgprot_t vm_get_page_prot(unsigned long vm_flags)
+{
+ unsigned long prot;
+
+ /* Radix supports execute-only, but protection_map maps X -> RX */
+ if (radix_enabled() && ((vm_flags & VM_ACCESS_FLAGS) == VM_EXEC)) {
+ prot = pgprot_val(PAGE_EXECONLY);
+ } else {
+ prot = pgprot_val(protection_map[vm_flags &
+ (VM_ACCESS_FLAGS | VM_SHARED)]);
+ }
+
+ if (vm_flags & VM_SAO)
+ prot |= _PAGE_SAO;
+
+#ifdef CONFIG_PPC_MEM_KEYS
+ prot |= vmflag_to_pte_pkey_bits(vm_flags);
+#endif
+
+ return __pgprot(prot);
+}
+EXPORT_SYMBOL(vm_get_page_prot);
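
/*
 * Editorial sketch, not part of the patch: mmap(PROT_EXEC) produces a VMA
 * with VM_EXEC but neither VM_READ nor VM_WRITE, so on radix the branch
 * above returns PAGE_EXECONLY instead of the protection_map entry:
 */
static pgprot_t example_exec_only_prot(void)
{
	/* execute-only on radix, read+execute under hash */
	return vm_get_page_prot(VM_EXEC);
}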
diff --git a/arch/powerpc/mm/book3s64/pkeys.c b/arch/powerpc/mm/book3s64/pkeys.c
index 59e0ebbd8036..1d2675ab6711 100644
--- a/arch/powerpc/mm/book3s64/pkeys.c
+++ b/arch/powerpc/mm/book3s64/pkeys.c
@@ -9,60 +9,107 @@
#include <asm/mmu_context.h>
#include <asm/mmu.h>
#include <asm/setup.h>
+#include <asm/smp.h>
+#include <asm/firmware.h>
+
#include <linux/pkeys.h>
-#include <linux/of_device.h>
+#include <linux/of_fdt.h>
-DEFINE_STATIC_KEY_TRUE(pkey_disabled);
-int pkeys_total; /* Total pkeys as per device tree */
-u32 initial_allocation_mask; /* Bits set for the initially allocated keys */
-u32 reserved_allocation_mask; /* Bits set for reserved keys */
-static bool pkey_execute_disable_supported;
-static bool pkeys_devtree_defined; /* property exported by device tree */
-static u64 pkey_amr_mask; /* Bits in AMR not to be touched */
-static u64 pkey_iamr_mask; /* Bits in AMR not to be touched */
-static u64 pkey_uamor_mask; /* Bits in UMOR not to be touched */
+
+int num_pkey; /* Max number of pkeys supported */
+/*
+ * Keys marked in the reservation list cannot be allocated by userspace
+ */
+u32 reserved_allocation_mask __ro_after_init;
+
+/* Bits set for the initially allocated keys */
+static u32 initial_allocation_mask __ro_after_init;
+
+/*
+ * Even if we allocate keys with sys_pkey_alloc(), we need to make sure
+ * other threads still find access denied when using the same keys.
+ */
+u64 default_amr __ro_after_init = ~0x0UL;
+u64 default_iamr __ro_after_init = 0x5555555555555555UL;
+u64 default_uamor __ro_after_init;
+EXPORT_SYMBOL(default_amr);
+/*
+ * Key used to implement PROT_EXEC mmap. Denies READ/WRITE.
+ * We pick key 2 because key 0 is special and key 1 is reserved as per the ISA.
+ */
static int execute_only_key = 2;
+static bool pkey_execute_disable_supported;
+
#define AMR_BITS_PER_PKEY 2
#define AMR_RD_BIT 0x1UL
#define AMR_WR_BIT 0x2UL
#define IAMR_EX_BIT 0x1UL
-#define PKEY_REG_BITS (sizeof(u64)*8)
+#define PKEY_REG_BITS (sizeof(u64) * 8)
#define pkeyshift(pkey) (PKEY_REG_BITS - ((pkey+1) * AMR_BITS_PER_PKEY))
-static void scan_pkey_feature(void)
+static int __init dt_scan_storage_keys(unsigned long node,
+ const char *uname, int depth,
+ void *data)
{
- u32 vals[2];
- struct device_node *cpu;
+ const char *type = of_get_flat_dt_prop(node, "device_type", NULL);
+ const __be32 *prop;
+ int *pkeys_total = (int *) data;
- cpu = of_find_node_by_type(NULL, "cpu");
- if (!cpu)
- return;
+ /* We are scanning "cpu" nodes only */
+ if (type == NULL || strcmp(type, "cpu") != 0)
+ return 0;
- if (of_property_read_u32_array(cpu,
- "ibm,processor-storage-keys", vals, 2))
- return;
+ prop = of_get_flat_dt_prop(node, "ibm,processor-storage-keys", NULL);
+ if (!prop)
+ return 0;
+ *pkeys_total = be32_to_cpu(prop[0]);
+ return 1;
+}
+
+static int __init scan_pkey_feature(void)
+{
+ int ret;
+ int pkeys_total = 0;
/*
- * Since any pkey can be used for data or execute, we will just treat
- * all keys as equal and track them as one entity.
+ * Pkey is not supported with Radix translation.
*/
- pkeys_total = vals[0];
- pkeys_devtree_defined = true;
-}
+ if (early_radix_enabled())
+ return 0;
-static inline bool pkey_mmu_enabled(void)
-{
- if (firmware_has_feature(FW_FEATURE_LPAR))
- return pkeys_total;
- else
- return cpu_has_feature(CPU_FTR_PKEY);
+ ret = of_scan_flat_dt(dt_scan_storage_keys, &pkeys_total);
+ if (ret == 0) {
+ /*
+ * Let's assume 32 pkeys on P8/P9 bare metal if it's not defined by the
+ * device tree. We make this exception since some versions of skiboot
+ * forgot to expose this property on POWER8/9.
+ */
+ if (!firmware_has_feature(FW_FEATURE_LPAR)) {
+ unsigned long pvr = mfspr(SPRN_PVR);
+
+ if (PVR_VER(pvr) == PVR_POWER8 || PVR_VER(pvr) == PVR_POWER8E ||
+ PVR_VER(pvr) == PVR_POWER8NVL || PVR_VER(pvr) == PVR_POWER9)
+ pkeys_total = 32;
+ }
+ }
+
+#ifdef CONFIG_PPC_MEM_KEYS
+ /*
+ * Adjust the upper limit, based on the number of bits supported by
+ * arch-neutral code.
+ */
+ pkeys_total = min_t(int, pkeys_total,
+ ((ARCH_VM_PKEY_FLAGS >> VM_PKEY_SHIFT) + 1));
+#endif
+ return pkeys_total;
}
-static int pkey_initialize(void)
+void __init pkey_early_init_devtree(void)
{
- int os_reserved, i;
+ int pkeys_total, i;
+#ifdef CONFIG_PPC_MEM_KEYS
/*
* We define PKEY_DISABLE_EXECUTE in addition to the arch-neutral
* generic defines for PKEY_DISABLE_ACCESS and PKEY_DISABLE_WRITE.
@@ -78,33 +125,22 @@ static int pkey_initialize(void)
BUILD_BUG_ON(__builtin_clzl(ARCH_VM_PKEY_FLAGS >> VM_PKEY_SHIFT) +
__builtin_popcountl(ARCH_VM_PKEY_FLAGS >> VM_PKEY_SHIFT)
!= (sizeof(u64) * BITS_PER_BYTE));
-
- /* scan the device tree for pkey feature */
- scan_pkey_feature();
-
+#endif
/*
- * Let's assume 32 pkeys on P8 bare metal, if its not defined by device
- * tree. We make this exception since skiboot forgot to expose this
- * property on power8.
+ * Only P7 and above support SPRN_AMR updates with MSR[PR] = 1
*/
- if (!pkeys_devtree_defined && !firmware_has_feature(FW_FEATURE_LPAR) &&
- cpu_has_feature(CPU_FTRS_POWER8))
- pkeys_total = 32;
+ if (!early_cpu_has_feature(CPU_FTR_ARCH_206))
+ return;
- /*
- * Adjust the upper limit, based on the number of bits supported by
- * arch-neutral code.
- */
- pkeys_total = min_t(int, pkeys_total,
- ((ARCH_VM_PKEY_FLAGS >> VM_PKEY_SHIFT)+1));
+ /* scan the device tree for pkey feature */
+ pkeys_total = scan_pkey_feature();
+ if (!pkeys_total)
+ goto out;
- if (!pkey_mmu_enabled() || radix_enabled() || !pkeys_total)
- static_branch_enable(&pkey_disabled);
- else
- static_branch_disable(&pkey_disabled);
+ /* Allow all keys to be modified by default */
+ default_uamor = ~0x0UL;
- if (static_branch_likely(&pkey_disabled))
- return 0;
+ cur_cpu_spec->mmu_features |= MMU_FTR_PKEY;
/*
* The device tree cannot be relied to indicate support for
@@ -118,122 +154,180 @@ static int pkey_initialize(void)
#ifdef CONFIG_PPC_4K_PAGES
/*
* The OS can manage only 8 pkeys due to its inability to represent them
- * in the Linux 4K PTE.
+ * in the Linux 4K PTE. Mark all other keys reserved.
*/
- os_reserved = pkeys_total - 8;
+ num_pkey = min(8, pkeys_total);
#else
- os_reserved = 0;
+ num_pkey = pkeys_total;
#endif
- /* Bits are in LE format. */
- reserved_allocation_mask = (0x1 << 1) | (0x1 << execute_only_key);
- /* register mask is in BE format */
- pkey_amr_mask = ~0x0ul;
- pkey_amr_mask &= ~(0x3ul << pkeyshift(0));
-
- pkey_iamr_mask = ~0x0ul;
- pkey_iamr_mask &= ~(0x3ul << pkeyshift(0));
- pkey_iamr_mask &= ~(0x3ul << pkeyshift(execute_only_key));
+ if (unlikely(num_pkey <= execute_only_key) || !pkey_execute_disable_supported) {
+ /*
+ * Insufficient number of keys to support
+ * execute only key. Mark it unavailable.
+ */
+ execute_only_key = -1;
+ } else {
+ /*
+ * Mark the execute_only_pkey as not available for
+ * user allocation via pkey_alloc.
+ */
+ reserved_allocation_mask |= (0x1 << execute_only_key);
- pkey_uamor_mask = ~0x0ul;
- pkey_uamor_mask &= ~(0x3ul << pkeyshift(0));
- pkey_uamor_mask &= ~(0x3ul << pkeyshift(execute_only_key));
+ /*
+ * Deny READ/WRITE for execute_only_key.
+ * Allow execute in IAMR.
+ */
+ default_amr |= (0x3ul << pkeyshift(execute_only_key));
+ default_iamr &= ~(0x1ul << pkeyshift(execute_only_key));
- /* mark the rest of the keys as reserved and hence unavailable */
- for (i = (pkeys_total - os_reserved); i < pkeys_total; i++) {
- reserved_allocation_mask |= (0x1 << i);
- pkey_uamor_mask &= ~(0x3ul << pkeyshift(i));
+ /*
+ * Clear the uamor bits for this key.
+ */
+ default_uamor &= ~(0x3ul << pkeyshift(execute_only_key));
}
- initial_allocation_mask = reserved_allocation_mask | (0x1 << 0);
- if (unlikely((pkeys_total - os_reserved) <= execute_only_key)) {
+ if (unlikely(num_pkey <= 3)) {
/*
* Insufficient number of keys to support
- * execute only key. Mark it unavailable.
- * Any AMR, UAMOR, IAMR bit set for
- * this key is irrelevant since this key
- * can never be allocated.
+ * KUAP/KUEP feature.
*/
- execute_only_key = -1;
+ disable_kuep = true;
+ disable_kuap = true;
+ WARN(1, "Disabling kernel user protection due to low (%d) max supported keys\n", num_pkey);
+ } else {
+ /* Handle the key (key 3) used by the kernel for KUAP */
+ reserved_allocation_mask |= (0x1 << 3);
+ /*
+ * Mark access for the KUAP key in the default AMR so that
+ * we continue to operate with that AMR in
+ * copy_to/from_user().
+ */
+ default_amr &= ~(0x3ul << pkeyshift(3));
+ default_iamr &= ~(0x1ul << pkeyshift(3));
+ default_uamor &= ~(0x3ul << pkeyshift(3));
}
- return 0;
-}
-
-arch_initcall(pkey_initialize);
-
-void pkey_mm_init(struct mm_struct *mm)
-{
- if (static_branch_likely(&pkey_disabled))
- return;
- mm_pkey_allocation_map(mm) = initial_allocation_mask;
- mm->context.execute_only_pkey = execute_only_key;
-}
+ /*
+ * Allow access only for key 0 and prevent any other modification.
+ */
+ default_amr &= ~(0x3ul << pkeyshift(0));
+ default_iamr &= ~(0x1ul << pkeyshift(0));
+ default_uamor &= ~(0x3ul << pkeyshift(0));
+ /*
+ * Key 0 is special: we want to treat it as a preallocated, always
+ * allocated key. We don't allow changing AMR bits w.r.t. key 0,
+ * but one can still pkey_free(key0).
+ */
+ initial_allocation_mask |= (0x1 << 0);
-static inline u64 read_amr(void)
-{
- return mfspr(SPRN_AMR);
-}
+ /*
+ * Key 1 is recommended not to be used; see the Power ISA v3.0
+ * programming note on page 1015.
+ */
+ reserved_allocation_mask |= (0x1 << 1);
+ default_uamor &= ~(0x3ul << pkeyshift(1));
-static inline void write_amr(u64 value)
-{
- mtspr(SPRN_AMR, value);
-}
+ /*
+ * Prevent the usage of OS reserved keys. Update UAMOR
+ * for those keys. Also mark the rest of the bits in the
+ * 32 bit mask as reserved.
+ */
+ for (i = num_pkey; i < 32 ; i++) {
+ reserved_allocation_mask |= (0x1 << i);
+ default_uamor &= ~(0x3ul << pkeyshift(i));
+ }
+ /*
+ * Prevent the allocation of reserved keys too.
+ */
+ initial_allocation_mask |= reserved_allocation_mask;
-static inline u64 read_iamr(void)
-{
- if (!likely(pkey_execute_disable_supported))
- return 0x0UL;
+ pr_info("Enabling pkeys with max key count %d\n", num_pkey);
+out:
+ /*
+ * Set up UAMOR on the boot CPU
+ */
+ mtspr(SPRN_UAMOR, default_uamor);
- return mfspr(SPRN_IAMR);
+ return;
}
-static inline void write_iamr(u64 value)
+#ifdef CONFIG_PPC_KUEP
+void setup_kuep(bool disabled)
{
- if (!likely(pkey_execute_disable_supported))
+ if (disabled)
+ return;
+ /*
+ * On hash, if the PKEY feature is not enabled, disable KUEP too.
+ */
+ if (!early_radix_enabled() && !early_mmu_has_feature(MMU_FTR_PKEY))
return;
- mtspr(SPRN_IAMR, value);
-}
+ if (smp_processor_id() == boot_cpuid) {
+ pr_info("Activating Kernel Userspace Execution Prevention\n");
+ cur_cpu_spec->mmu_features |= MMU_FTR_BOOK3S_KUEP;
+ }
-static inline u64 read_uamor(void)
-{
- return mfspr(SPRN_UAMOR);
+ /*
+ * Radix always uses key0 of the IAMR to determine if an access is
+ * allowed. We set bit 0 (IBM bit 1) of key0, to prevent instruction
+ * fetch.
+ */
+ mtspr(SPRN_IAMR, AMR_KUEP_BLOCKED);
+ isync();
}
+#endif
-static inline void write_uamor(u64 value)
+#ifdef CONFIG_PPC_KUAP
+void setup_kuap(bool disabled)
{
- mtspr(SPRN_UAMOR, value);
-}
+ if (disabled)
+ return;
+ /*
+ * On hash, if the PKEY feature is not enabled, disable KUAP too.
+ */
+ if (!early_radix_enabled() && !early_mmu_has_feature(MMU_FTR_PKEY))
+ return;
-static bool is_pkey_enabled(int pkey)
-{
- u64 uamor = read_uamor();
- u64 pkey_bits = 0x3ul << pkeyshift(pkey);
- u64 uamor_pkey_bits = (uamor & pkey_bits);
+ if (smp_processor_id() == boot_cpuid) {
+ pr_info("Activating Kernel Userspace Access Prevention\n");
+ cur_cpu_spec->mmu_features |= MMU_FTR_BOOK3S_KUAP;
+ }
/*
- * Both the bits in UAMOR corresponding to the key should be set or
- * reset.
+ * Set the default kernel AMR values on all cpus.
*/
- WARN_ON(uamor_pkey_bits && (uamor_pkey_bits != pkey_bits));
- return !!(uamor_pkey_bits);
+ mtspr(SPRN_AMR, AMR_KUAP_BLOCKED);
+ isync();
+}
+#endif
+
+#ifdef CONFIG_PPC_MEM_KEYS
+void pkey_mm_init(struct mm_struct *mm)
+{
+ if (!mmu_has_feature(MMU_FTR_PKEY))
+ return;
+ mm_pkey_allocation_map(mm) = initial_allocation_mask;
+ mm->context.execute_only_pkey = execute_only_key;
}
static inline void init_amr(int pkey, u8 init_bits)
{
u64 new_amr_bits = (((u64)init_bits & 0x3UL) << pkeyshift(pkey));
- u64 old_amr = read_amr() & ~((u64)(0x3ul) << pkeyshift(pkey));
+ u64 old_amr = current_thread_amr() & ~((u64)(0x3ul) << pkeyshift(pkey));
- write_amr(old_amr | new_amr_bits);
+ current->thread.regs->amr = old_amr | new_amr_bits;
}
static inline void init_iamr(int pkey, u8 init_bits)
{
u64 new_iamr_bits = (((u64)init_bits & 0x1UL) << pkeyshift(pkey));
- u64 old_iamr = read_iamr() & ~((u64)(0x1ul) << pkeyshift(pkey));
+ u64 old_iamr = current_thread_iamr() & ~((u64)(0x1ul) << pkeyshift(pkey));
- write_iamr(old_iamr | new_iamr_bits);
+ if (!likely(pkey_execute_disable_supported))
+ return;
+
+ current->thread.regs->iamr = old_iamr | new_iamr_bits;
}
/*
@@ -245,8 +339,18 @@ int __arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
{
u64 new_amr_bits = 0x0ul;
u64 new_iamr_bits = 0x0ul;
+ u64 pkey_bits, uamor_pkey_bits;
+
+ /*
+ * Check whether the key is disabled by UAMOR.
+ */
+ pkey_bits = 0x3ul << pkeyshift(pkey);
+ uamor_pkey_bits = (default_uamor & pkey_bits);
- if (!is_pkey_enabled(pkey))
+ /*
+ * Both the bits in UAMOR corresponding to the key should be set
+ */
+ if (uamor_pkey_bits != pkey_bits)
return -EINVAL;
if (init_val & PKEY_DISABLE_EXECUTE) {
@@ -266,48 +370,7 @@ int __arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
return 0;
}
-void thread_pkey_regs_save(struct thread_struct *thread)
-{
- if (static_branch_likely(&pkey_disabled))
- return;
-
- /*
- * TODO: Skip saving registers if @thread hasn't used any keys yet.
- */
- thread->amr = read_amr();
- thread->iamr = read_iamr();
- thread->uamor = read_uamor();
-}
-
-void thread_pkey_regs_restore(struct thread_struct *new_thread,
- struct thread_struct *old_thread)
-{
- if (static_branch_likely(&pkey_disabled))
- return;
-
- if (old_thread->amr != new_thread->amr)
- write_amr(new_thread->amr);
- if (old_thread->iamr != new_thread->iamr)
- write_iamr(new_thread->iamr);
- if (old_thread->uamor != new_thread->uamor)
- write_uamor(new_thread->uamor);
-}
-
-void thread_pkey_regs_init(struct thread_struct *thread)
-{
- if (static_branch_likely(&pkey_disabled))
- return;
-
- thread->amr = pkey_amr_mask;
- thread->iamr = pkey_iamr_mask;
- thread->uamor = pkey_uamor_mask;
-
- write_uamor(pkey_uamor_mask);
- write_amr(pkey_amr_mask);
- write_iamr(pkey_iamr_mask);
-}
-
-int __execute_only_pkey(struct mm_struct *mm)
+int execute_only_pkey(struct mm_struct *mm)
{
return mm->context.execute_only_pkey;
}
@@ -315,7 +378,7 @@ int __execute_only_pkey(struct mm_struct *mm)
static inline bool vma_is_pkey_exec_only(struct vm_area_struct *vma)
{
/* Do this check first since the vm_flags should be hot */
- if ((vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) != VM_EXEC)
+ if ((vma->vm_flags & VM_ACCESS_FLAGS) != VM_EXEC)
return false;
return (vma_pkey(vma) == vma->vm_mm->context.execute_only_pkey);
@@ -353,21 +416,20 @@ static bool pkey_access_permitted(int pkey, bool write, bool execute)
int pkey_shift;
u64 amr;
- if (!is_pkey_enabled(pkey))
- return true;
-
pkey_shift = pkeyshift(pkey);
- if (execute && !(read_iamr() & (IAMR_EX_BIT << pkey_shift)))
- return true;
+ if (execute)
+ return !(current_thread_iamr() & (IAMR_EX_BIT << pkey_shift));
- amr = read_amr(); /* Delay reading amr until absolutely needed */
- return ((!write && !(amr & (AMR_RD_BIT << pkey_shift))) ||
- (write && !(amr & (AMR_WR_BIT << pkey_shift))));
+ amr = current_thread_amr();
+ if (write)
+ return !(amr & (AMR_WR_BIT << pkey_shift));
+
+ return !(amr & (AMR_RD_BIT << pkey_shift));
}
bool arch_pte_access_permitted(u64 pte, bool write, bool execute)
{
- if (static_branch_likely(&pkey_disabled))
+ if (!mmu_has_feature(MMU_FTR_PKEY))
return true;
return pkey_access_permitted(pte_to_pkey_bits(pte), write, execute);
@@ -381,22 +443,10 @@ bool arch_pte_access_permitted(u64 pte, bool write, bool execute)
* So do not enforce things if the VMA is not from the current mm, or if we are
* in a kernel thread.
*/
-static inline bool vma_is_foreign(struct vm_area_struct *vma)
-{
- if (!current->mm)
- return true;
-
- /* if it is not our ->mm, it has to be foreign */
- if (current->mm != vma->vm_mm)
- return true;
-
- return false;
-}
-
bool arch_vma_access_permitted(struct vm_area_struct *vma, bool write,
bool execute, bool foreign)
{
- if (static_branch_likely(&pkey_disabled))
+ if (!mmu_has_feature(MMU_FTR_PKEY))
return true;
/*
* Do not enforce our key-permissions on a foreign vma.
@@ -409,10 +459,12 @@ bool arch_vma_access_permitted(struct vm_area_struct *vma, bool write,
void arch_dup_pkeys(struct mm_struct *oldmm, struct mm_struct *mm)
{
- if (static_branch_likely(&pkey_disabled))
+ if (!mmu_has_feature(MMU_FTR_PKEY))
return;
/* Duplicate the oldmm pkey state in mm: */
mm_pkey_allocation_map(mm) = mm_pkey_allocation_map(oldmm);
mm->context.execute_only_pkey = oldmm->context.execute_only_pkey;
}
+
+#endif /* CONFIG_PPC_MEM_KEYS */
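
/*
 * Editorial worked example (derived from the #defines in this file):
 * keys are packed two bits per key from the top of the 64-bit AMR, so
 *
 *	pkeyshift(pkey) = PKEY_REG_BITS - (pkey + 1) * AMR_BITS_PER_PKEY
 *	pkeyshift(2)    = 64 - 3 * 2 = 58
 *
 * and pkey_early_init_devtree() denies READ/WRITE for the execute-only
 * key (key 2) by ORing 0x3ul << 58 into default_amr, as shown above.
 */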
diff --git a/arch/powerpc/mm/book3s64/radix_hugetlbpage.c b/arch/powerpc/mm/book3s64/radix_hugetlbpage.c
index cab06331c0c0..5e3195568525 100644
--- a/arch/powerpc/mm/book3s64/radix_hugetlbpage.c
+++ b/arch/powerpc/mm/book3s64/radix_hugetlbpage.c
@@ -2,8 +2,6 @@
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/security.h>
-#include <asm/pgtable.h>
-#include <asm/pgalloc.h>
#include <asm/cacheflush.h>
#include <asm/machdep.h>
#include <asm/mman.h>
@@ -34,62 +32,13 @@ void radix__flush_hugetlb_tlb_range(struct vm_area_struct *vma, unsigned long st
struct hstate *hstate = hstate_file(vma->vm_file);
psize = hstate_get_psize(hstate);
- radix__flush_tlb_range_psize(vma->vm_mm, start, end, psize);
-}
-
-/*
- * A vairant of hugetlb_get_unmapped_area doing topdown search
- * FIXME!! should we do as x86 does or non hugetlb area does ?
- * ie, use topdown or not based on mmap_is_legacy check ?
- */
-unsigned long
-radix__hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
- unsigned long len, unsigned long pgoff,
- unsigned long flags)
-{
- struct mm_struct *mm = current->mm;
- struct vm_area_struct *vma;
- struct hstate *h = hstate_file(file);
- int fixed = (flags & MAP_FIXED);
- unsigned long high_limit;
- struct vm_unmapped_area_info info;
-
- high_limit = DEFAULT_MAP_WINDOW;
- if (addr >= high_limit || (fixed && (addr + len > high_limit)))
- high_limit = TASK_SIZE;
-
- if (len & ~huge_page_mask(h))
- return -EINVAL;
- if (len > high_limit)
- return -ENOMEM;
-
- if (fixed) {
- if (addr > high_limit - len)
- return -ENOMEM;
- if (prepare_hugepage_range(file, addr, len))
- return -EINVAL;
- return addr;
- }
-
- if (addr) {
- addr = ALIGN(addr, huge_page_size(h));
- vma = find_vma(mm, addr);
- if (high_limit - len >= addr && addr >= mmap_min_addr &&
- (!vma || addr + len <= vm_start_gap(vma)))
- return addr;
- }
/*
- * We are always doing an topdown search here. Slice code
- * does that too.
+	 * Flush the PWC even for a PUD_SIZE hugetlb invalidate, to keep this simpler.
*/
- info.flags = VM_UNMAPPED_AREA_TOPDOWN;
- info.length = len;
- info.low_limit = max(PAGE_SIZE, mmap_min_addr);
- info.high_limit = mm->mmap_base + (high_limit - DEFAULT_MAP_WINDOW);
- info.align_mask = PAGE_MASK & ~huge_page_mask(h);
- info.align_offset = 0;
-
- return vm_unmapped_area(&info);
+ if (end - start >= PUD_SIZE)
+ radix__flush_tlb_pwc_range_psize(vma->vm_mm, start, end, psize);
+ else
+ radix__flush_tlb_range_psize(vma->vm_mm, start, end, psize);
}
void radix__huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
@@ -99,11 +48,13 @@ void radix__huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
struct mm_struct *mm = vma->vm_mm;
/*
- * To avoid NMMU hang while relaxing access we need to flush the tlb before
- * we set the new value.
+ * POWER9 NMMU must flush the TLB after clearing the PTE before
+ * installing a PTE with more relaxed access permissions, see
+ * radix__ptep_set_access_flags.
*/
- if (is_pte_rw_upgrade(pte_val(old_pte), pte_val(pte)) &&
- (atomic_read(&mm->context.copros) > 0))
+ if (!cpu_has_feature(CPU_FTR_ARCH_31) &&
+ is_pte_rw_upgrade(pte_val(old_pte), pte_val(pte)) &&
+ atomic_read(&mm->context.copros) > 0)
radix__flush_hugetlb_page(vma, addr);
set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
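
Both this hunk and the matching radix_pgtable.c change further below encode the same POWER9 nest-MMU rule: when PTE access permissions are being relaxed and a coprocessor is attached, the old PTE must be invalidated and the TLB flushed before the relaxed PTE is installed; ISA v3.1 parts (POWER10, P9P) do not need this. Below is a minimal stand-alone sketch of that ordering, with the PTE modelled as a plain word and the flush as a callback; the flag bits and helper names here are hypothetical stand-ins, not kernel interfaces.

#include <stdbool.h>
#include <stdint.h>

#define PTE_PRESENT	(1UL << 0)
#define PTE_INVALID	(1UL << 1)

typedef void (*flush_fn)(unsigned long addr);

/* Install new_pte (with more relaxed permissions) over *ptep. */
static void relax_pte(uint64_t *ptep, uint64_t new_pte, unsigned long addr,
		      bool p9_nmmu_attached, flush_fn flush)
{
	if (p9_nmmu_attached) {
		/* Break-before-make: invalidate, flush, then install. */
		*ptep = (*ptep & ~PTE_PRESENT) | PTE_INVALID;
		flush(addr);
		*ptep = new_pte & ~PTE_INVALID;
	} else {
		/*
		 * No nest MMU (or ISA v3.1): install directly, the MMU
		 * reloads the PTE after taking an access fault.
		 */
		*ptep = new_pte;
	}
}
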
diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c
index dd1bea45325c..cac727b01799 100644
--- a/arch/powerpc/mm/book3s64/radix_pgtable.c
+++ b/arch/powerpc/mm/book3s64/radix_pgtable.c
@@ -11,13 +11,13 @@
#include <linux/kernel.h>
#include <linux/sched/mm.h>
#include <linux/memblock.h>
+#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/string_helpers.h>
-#include <linux/stop_machine.h>
+#include <linux/memory.h>
-#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <asm/dma.h>
@@ -26,14 +26,18 @@
#include <asm/firmware.h>
#include <asm/powernv.h>
#include <asm/sections.h>
+#include <asm/smp.h>
#include <asm/trace.h>
#include <asm/uaccess.h>
#include <asm/ultravisor.h>
+#include <asm/set_memory.h>
#include <trace/events/thp.h>
-unsigned int mmu_pid_bits;
+#include <mm/mmu_decl.h>
+
unsigned int mmu_base_pid;
+unsigned long radix_mem_block_size __ro_after_init;
static __ref void *early_alloc_pgtable(unsigned long size, int nid,
unsigned long region_start, unsigned long region_end)
@@ -56,6 +60,13 @@ static __ref void *early_alloc_pgtable(unsigned long size, int nid,
return ptr;
}
+/*
+ * When allocating pud or pmd pointers, we allocate a complete page
+ * of PAGE_SIZE rather than PUD_TABLE_SIZE or PMD_TABLE_SIZE. This
+ * is to ensure that the page obtained from the memblock allocator
+ * can be completely used as page table page and can be freed
+ * correctly when the page table entries are removed.
+ */
static int early_map_kernel_page(unsigned long ea, unsigned long pa,
pgprot_t flags,
unsigned int map_page_size,
@@ -64,24 +75,26 @@ static int early_map_kernel_page(unsigned long ea, unsigned long pa,
{
unsigned long pfn = pa >> PAGE_SHIFT;
pgd_t *pgdp;
+ p4d_t *p4dp;
pud_t *pudp;
pmd_t *pmdp;
pte_t *ptep;
pgdp = pgd_offset_k(ea);
- if (pgd_none(*pgdp)) {
- pudp = early_alloc_pgtable(PUD_TABLE_SIZE, nid,
- region_start, region_end);
- pgd_populate(&init_mm, pgdp, pudp);
+ p4dp = p4d_offset(pgdp, ea);
+ if (p4d_none(*p4dp)) {
+ pudp = early_alloc_pgtable(PAGE_SIZE, nid,
+ region_start, region_end);
+ p4d_populate(&init_mm, p4dp, pudp);
}
- pudp = pud_offset(pgdp, ea);
+ pudp = pud_offset(p4dp, ea);
if (map_page_size == PUD_SIZE) {
ptep = (pte_t *)pudp;
goto set_the_pte;
}
if (pud_none(*pudp)) {
- pmdp = early_alloc_pgtable(PMD_TABLE_SIZE, nid,
- region_start, region_end);
+ pmdp = early_alloc_pgtable(PAGE_SIZE, nid, region_start,
+ region_end);
pud_populate(&init_mm, pudp, pmdp);
}
pmdp = pmd_offset(pudp, ea);
@@ -98,7 +111,7 @@ static int early_map_kernel_page(unsigned long ea, unsigned long pa,
set_the_pte:
set_pte_at(&init_mm, ea, ptep, pfn_pte(pfn, flags));
- smp_wmb();
+ asm volatile("ptesync": : :"memory");
return 0;
}
@@ -114,6 +127,7 @@ static int __map_kernel_page(unsigned long ea, unsigned long pa,
{
unsigned long pfn = pa >> PAGE_SHIFT;
pgd_t *pgdp;
+ p4d_t *p4dp;
pud_t *pudp;
pmd_t *pmdp;
pte_t *ptep;
@@ -136,7 +150,8 @@ static int __map_kernel_page(unsigned long ea, unsigned long pa,
* boot.
*/
pgdp = pgd_offset_k(ea);
- pudp = pud_alloc(&init_mm, pgdp, ea);
+ p4dp = p4d_offset(pgdp, ea);
+ pudp = pud_alloc(&init_mm, p4dp, ea);
if (!pudp)
return -ENOMEM;
if (map_page_size == PUD_SIZE) {
@@ -156,7 +171,7 @@ static int __map_kernel_page(unsigned long ea, unsigned long pa,
set_the_pte:
set_pte_at(&init_mm, ea, ptep, pfn_pte(pfn, flags));
- smp_wmb();
+ asm volatile("ptesync": : :"memory");
return 0;
}
@@ -168,11 +183,12 @@ int radix__map_kernel_page(unsigned long ea, unsigned long pa,
}
#ifdef CONFIG_STRICT_KERNEL_RWX
-void radix__change_memory_range(unsigned long start, unsigned long end,
- unsigned long clear)
+static void radix__change_memory_range(unsigned long start, unsigned long end,
+ unsigned long clear)
{
unsigned long idx;
pgd_t *pgdp;
+ p4d_t *p4dp;
pud_t *pudp;
pmd_t *pmdp;
pte_t *ptep;
@@ -185,7 +201,8 @@ void radix__change_memory_range(unsigned long start, unsigned long end,
for (idx = start; idx < end; idx += PAGE_SIZE) {
pgdp = pgd_offset_k(idx);
- pudp = pud_alloc(&init_mm, pgdp, idx);
+ p4dp = p4d_offset(pgdp, idx);
+ pudp = pud_alloc(&init_mm, p4dp, idx);
if (!pudp)
continue;
if (pud_is_leaf(*pudp)) {
@@ -214,7 +231,7 @@ void radix__mark_rodata_ro(void)
unsigned long start, end;
start = (unsigned long)_stext;
- end = (unsigned long)__init_begin;
+ end = (unsigned long)__end_rodata;
radix__change_memory_range(start, end, _PAGE_WRITE);
}
@@ -245,27 +262,34 @@ print_mapping(unsigned long start, unsigned long end, unsigned long size, bool e
static unsigned long next_boundary(unsigned long addr, unsigned long end)
{
#ifdef CONFIG_STRICT_KERNEL_RWX
- if (addr < __pa_symbol(__init_begin))
- return __pa_symbol(__init_begin);
+ if (addr < __pa_symbol(__srwx_boundary))
+ return __pa_symbol(__srwx_boundary);
#endif
return end;
}
static int __meminit create_physical_mapping(unsigned long start,
unsigned long end,
- int nid)
+ int nid, pgprot_t _prot)
{
unsigned long vaddr, addr, mapping_size = 0;
bool prev_exec, exec = false;
pgprot_t prot;
int psize;
+ unsigned long max_mapping_size = radix_mem_block_size;
+
+ if (debug_pagealloc_enabled_or_kfence())
+ max_mapping_size = PAGE_SIZE;
- start = _ALIGN_UP(start, PAGE_SIZE);
+ start = ALIGN(start, PAGE_SIZE);
+ end = ALIGN_DOWN(end, PAGE_SIZE);
for (addr = start; addr < end; addr += mapping_size) {
unsigned long gap, previous_size;
int rc;
gap = next_boundary(addr, end) - addr;
+ if (gap > max_mapping_size)
+ gap = max_mapping_size;
previous_size = mapping_size;
prev_exec = exec;
@@ -289,7 +313,7 @@ static int __meminit create_physical_mapping(unsigned long start,
prot = PAGE_KERNEL_X;
exec = true;
} else {
- prot = PAGE_KERNEL;
+ prot = _prot;
exec = false;
}
@@ -312,55 +336,40 @@ static int __meminit create_physical_mapping(unsigned long start,
static void __init radix_init_pgtable(void)
{
unsigned long rts_field;
- struct memblock_region *reg;
+ phys_addr_t start, end;
+ u64 i;
/* We don't support slb for radix */
- mmu_slb_size = 0;
+ slb_set_size(0);
+
/*
- * Create the linear mapping, using standard page size for now
+ * Create the linear mapping
*/
- for_each_memblock(memory, reg) {
+ for_each_mem_range(i, &start, &end) {
/*
* The memblock allocator is up at this point, so the
* page tables will be allocated within the range. No
* need for a node (which we don't have yet).
*/
- if ((reg->base + reg->size) >= RADIX_VMALLOC_START) {
+ if (end >= RADIX_VMALLOC_START) {
pr_warn("Outside the supported range\n");
continue;
}
- WARN_ON(create_physical_mapping(reg->base,
- reg->base + reg->size,
- -1));
+ WARN_ON(create_physical_mapping(start, end,
+ -1, PAGE_KERNEL));
}
- /* Find out how many PID bits are supported */
- if (!cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG)) {
- if (!mmu_pid_bits)
- mmu_pid_bits = 20;
- mmu_base_pid = 1;
- } else if (cpu_has_feature(CPU_FTR_HVMODE)) {
- if (!mmu_pid_bits)
- mmu_pid_bits = 20;
-#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+ if (!cpu_has_feature(CPU_FTR_HVMODE) &&
+ cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG)) {
/*
- * When KVM is possible, we only use the top half of the
- * PID space to avoid collisions between host and guest PIDs
- * which can cause problems due to prefetch when exiting the
- * guest with AIL=3
+ * Older versions of KVM on these machines prefer if the
+ * guest only uses the low 19 PID bits.
*/
- mmu_base_pid = 1 << (mmu_pid_bits - 1);
-#else
- mmu_base_pid = 1;
-#endif
- } else {
- /* The guest uses the bottom half of the PID space */
- if (!mmu_pid_bits)
- mmu_pid_bits = 19;
- mmu_base_pid = 1;
+ mmu_pid_bits = 19;
}
+ mmu_base_pid = 1;
/*
* Allocate Partition table and process table for the
@@ -439,11 +448,6 @@ static int __init radix_dt_scan_page_sizes(unsigned long node,
if (type == NULL || strcmp(type, "cpu") != 0)
return 0;
- /* Find MMU PID size */
- prop = of_get_flat_dt_prop(node, "ibm,mmu-pid-bits", &size);
- if (prop && size == 4)
- mmu_pid_bits = be32_to_cpup(prop);
-
/* Grab page size encodings */
prop = of_get_flat_dt_prop(node, "ibm,processor-radix-AP-encodings", &size);
if (!prop)
@@ -466,6 +470,7 @@ static int __init radix_dt_scan_page_sizes(unsigned long node,
def = &mmu_psize_defs[idx];
def->shift = shift;
def->ap = ap;
+ def->h_rpt_pgsize = psize_to_rpti_pgsize(idx);
}
/* needed ? */
@@ -473,80 +478,99 @@ static int __init radix_dt_scan_page_sizes(unsigned long node,
return 1;
}
-void __init radix__early_init_devtree(void)
+#ifdef CONFIG_MEMORY_HOTPLUG
+static int __init probe_memory_block_size(unsigned long node, const char *uname, int
+ depth, void *data)
{
- int rc;
+ unsigned long *mem_block_size = (unsigned long *)data;
+ const __be32 *prop;
+ int len;
- /*
- * Try to find the available page sizes in the device-tree
- */
- rc = of_scan_flat_dt(radix_dt_scan_page_sizes, NULL);
- if (rc != 0) /* Found */
- goto found;
- /*
- * let's assume we have page 4k and 64k support
- */
- mmu_psize_defs[MMU_PAGE_4K].shift = 12;
- mmu_psize_defs[MMU_PAGE_4K].ap = 0x0;
+ if (depth != 1)
+ return 0;
- mmu_psize_defs[MMU_PAGE_64K].shift = 16;
- mmu_psize_defs[MMU_PAGE_64K].ap = 0x5;
-found:
- return;
+ if (strcmp(uname, "ibm,dynamic-reconfiguration-memory"))
+ return 0;
+
+ prop = of_get_flat_dt_prop(node, "ibm,lmb-size", &len);
+
+ if (!prop || len < dt_root_size_cells * sizeof(__be32))
+ /*
+ * Nothing in the device tree
+ */
+ *mem_block_size = MIN_MEMORY_BLOCK_SIZE;
+ else
+ *mem_block_size = of_read_number(prop, dt_root_size_cells);
+ return 1;
}
-static void radix_init_amor(void)
+static unsigned long __init radix_memory_block_size(void)
{
+ unsigned long mem_block_size = MIN_MEMORY_BLOCK_SIZE;
+
/*
- * In HV mode, we init AMOR (Authority Mask Override Register) so that
- * the hypervisor and guest can setup IAMR (Instruction Authority Mask
- * Register), enable key 0 and set it to 1.
- *
- * AMOR = 0b1100 .... 0000 (Mask for key 0 is 11)
- */
- mtspr(SPRN_AMOR, (3ul << 62));
+ * The firmware feature flags are initialised by now, so it is
+ * safe to test for FW_FEATURE_OPAL here.
+ */
+ if (firmware_has_feature(FW_FEATURE_OPAL))
+ mem_block_size = 1UL * 1024 * 1024 * 1024;
+ else
+ of_scan_flat_dt(probe_memory_block_size, &mem_block_size);
+
+ return mem_block_size;
}
-#ifdef CONFIG_PPC_KUEP
-void setup_kuep(bool disabled)
+#else /* CONFIG_MEMORY_HOTPLUG */
+
+static unsigned long __init radix_memory_block_size(void)
{
- if (disabled || !early_radix_enabled())
- return;
+ return 1UL * 1024 * 1024 * 1024;
+}
- if (smp_processor_id() == boot_cpuid)
- pr_info("Activating Kernel Userspace Execution Prevention\n");
+#endif /* CONFIG_MEMORY_HOTPLUG */
- /*
- * Radix always uses key0 of the IAMR to determine if an access is
- * allowed. We set bit 0 (IBM bit 1) of key0, to prevent instruction
- * fetch.
- */
- mtspr(SPRN_IAMR, (1ul << 62));
-}
-#endif
-#ifdef CONFIG_PPC_KUAP
-void setup_kuap(bool disabled)
+void __init radix__early_init_devtree(void)
{
- if (disabled || !early_radix_enabled())
- return;
+ int rc;
- if (smp_processor_id() == boot_cpuid) {
- pr_info("Activating Kernel Userspace Access Prevention\n");
- cur_cpu_spec->mmu_features |= MMU_FTR_RADIX_KUAP;
+ /*
+ * Try to find the available page sizes in the device-tree
+ */
+ rc = of_scan_flat_dt(radix_dt_scan_page_sizes, NULL);
+ if (!rc) {
+ /*
+ * No page size details found in device tree.
+ * Let's assume we have page 4k and 64k support
+ */
+ mmu_psize_defs[MMU_PAGE_4K].shift = 12;
+ mmu_psize_defs[MMU_PAGE_4K].ap = 0x0;
+ mmu_psize_defs[MMU_PAGE_4K].h_rpt_pgsize =
+ psize_to_rpti_pgsize(MMU_PAGE_4K);
+
+ mmu_psize_defs[MMU_PAGE_64K].shift = 16;
+ mmu_psize_defs[MMU_PAGE_64K].ap = 0x5;
+ mmu_psize_defs[MMU_PAGE_64K].h_rpt_pgsize =
+ psize_to_rpti_pgsize(MMU_PAGE_64K);
}
- /* Make sure userspace can't change the AMR */
- mtspr(SPRN_UAMOR, 0);
- mtspr(SPRN_AMR, AMR_KUAP_BLOCKED);
- isync();
+ /*
+ * Max mapping size used when mapping pages. We don't use
+ * ppc_md.memory_block_size() here because this gets called
+ * early, before the machine has been probed. Also, the
+ * pseries implementation only checks for ibm,lmb-size.
+ * All hypervisors supporting radix do expose that device
+ * tree node.
+ */
+ radix_mem_block_size = radix_memory_block_size();
+ return;
}
-#endif
void __init radix__early_init_mmu(void)
{
unsigned long lpcr;
+#ifdef CONFIG_PPC_64S_HASH_MMU
#ifdef CONFIG_PPC_64K_PAGES
/* PAGE_SIZE mappings */
mmu_virtual_psize = MMU_PAGE_64K;
@@ -564,6 +588,7 @@ void __init radix__early_init_mmu(void)
} else
mmu_vmemmap_psize = mmu_virtual_psize;
#endif
+#endif
/*
* initialize page table size
*/
@@ -603,7 +628,6 @@ void __init radix__early_init_mmu(void)
lpcr = mfspr(SPRN_LPCR);
mtspr(SPRN_LPCR, lpcr | LPCR_UPRT | LPCR_HR);
radix_init_partition_table();
- radix_init_amor();
} else {
radix_init_pseries();
}
@@ -627,15 +651,17 @@ void radix__early_init_mmu_secondary(void)
set_ptcr_when_no_uv(__pa(partition_tb) |
(PATB_SIZE_SHIFT - 12));
-
- radix_init_amor();
}
radix__switch_mmu_context(NULL, &init_mm);
tlbiel_all();
+
+ /* Make sure userspace can't change the AMR */
+ mtspr(SPRN_UAMOR, 0);
}
-void radix__mmu_cleanup_all(void)
+/* Called during kexec sequence with MMU off */
+notrace void radix__mmu_cleanup_all(void)
{
unsigned long lpcr;
@@ -648,21 +674,6 @@ void radix__mmu_cleanup_all(void)
}
}
-void radix__setup_initial_memory_limit(phys_addr_t first_memblock_base,
- phys_addr_t first_memblock_size)
-{
- /*
- * We don't currently support the first MEMBLOCK not mapping 0
- * physical on those processors
- */
- BUG_ON(first_memblock_base != 0);
-
- /*
- * Radix mode is not limited by RMA / VRMA addressing.
- */
- ppc64_rma_size = ULONG_MAX;
-}
-
#ifdef CONFIG_MEMORY_HOTPLUG
static void free_pte_table(pte_t *pte_start, pmd_t *pmd)
{
@@ -694,28 +705,19 @@ static void free_pmd_table(pmd_t *pmd_start, pud_t *pud)
pud_clear(pud);
}
-struct change_mapping_params {
- pte_t *pte;
- unsigned long start;
- unsigned long end;
- unsigned long aligned_start;
- unsigned long aligned_end;
-};
-
-static int __meminit stop_machine_change_mapping(void *data)
+static void free_pud_table(pud_t *pud_start, p4d_t *p4d)
{
- struct change_mapping_params *params =
- (struct change_mapping_params *)data;
+ pud_t *pud;
+ int i;
- if (!data)
- return -1;
+ for (i = 0; i < PTRS_PER_PUD; i++) {
+ pud = pud_start + i;
+ if (!pud_none(*pud))
+ return;
+ }
- spin_unlock(&init_mm.page_table_lock);
- pte_clear(&init_mm, params->aligned_start, params->pte);
- create_physical_mapping(__pa(params->aligned_start), __pa(params->start), -1);
- create_physical_mapping(__pa(params->end), __pa(params->aligned_end), -1);
- spin_lock(&init_mm.page_table_lock);
- return 0;
+ pud_free(&init_mm, pud_start);
+ p4d_clear(p4d);
}
static void remove_pte_table(pte_t *pte_start, unsigned long addr,
@@ -746,53 +748,7 @@ static void remove_pte_table(pte_t *pte_start, unsigned long addr,
}
}
-/*
- * clear the pte and potentially split the mapping helper
- */
-static void __meminit split_kernel_mapping(unsigned long addr, unsigned long end,
- unsigned long size, pte_t *pte)
-{
- unsigned long mask = ~(size - 1);
- unsigned long aligned_start = addr & mask;
- unsigned long aligned_end = addr + size;
- struct change_mapping_params params;
- bool split_region = false;
-
- if ((end - addr) < size) {
- /*
- * We're going to clear the PTE, but not flushed
- * the mapping, time to remap and flush. The
- * effects if visible outside the processor or
- * if we are running in code close to the
- * mapping we cleared, we are in trouble.
- */
- if (overlaps_kernel_text(aligned_start, addr) ||
- overlaps_kernel_text(end, aligned_end)) {
- /*
- * Hack, just return, don't pte_clear
- */
- WARN_ONCE(1, "Linear mapping %lx->%lx overlaps kernel "
- "text, not splitting\n", addr, end);
- return;
- }
- split_region = true;
- }
-
- if (split_region) {
- params.pte = pte;
- params.start = addr;
- params.end = end;
- params.aligned_start = addr & ~(size - 1);
- params.aligned_end = min_t(unsigned long, aligned_end,
- (unsigned long)__va(memblock_end_of_DRAM()));
- stop_machine(stop_machine_change_mapping, &params, NULL);
- return;
- }
-
- pte_clear(&init_mm, addr, pte);
-}
-
-static void remove_pmd_table(pmd_t *pmd_start, unsigned long addr,
+static void __meminit remove_pmd_table(pmd_t *pmd_start, unsigned long addr,
unsigned long end)
{
unsigned long next;
@@ -807,7 +763,12 @@ static void remove_pmd_table(pmd_t *pmd_start, unsigned long addr,
continue;
if (pmd_is_leaf(*pmd)) {
- split_kernel_mapping(addr, end, PMD_SIZE, (pte_t *)pmd);
+ if (!IS_ALIGNED(addr, PMD_SIZE) ||
+ !IS_ALIGNED(next, PMD_SIZE)) {
+ WARN_ONCE(1, "%s: unaligned range\n", __func__);
+ continue;
+ }
+ pte_clear(&init_mm, addr, (pte_t *)pmd);
continue;
}
@@ -817,7 +778,7 @@ static void remove_pmd_table(pmd_t *pmd_start, unsigned long addr,
}
}
-static void remove_pud_table(pud_t *pud_start, unsigned long addr,
+static void __meminit remove_pud_table(pud_t *pud_start, unsigned long addr,
unsigned long end)
{
unsigned long next;
@@ -832,11 +793,16 @@ static void remove_pud_table(pud_t *pud_start, unsigned long addr,
continue;
if (pud_is_leaf(*pud)) {
- split_kernel_mapping(addr, end, PUD_SIZE, (pte_t *)pud);
+ if (!IS_ALIGNED(addr, PUD_SIZE) ||
+ !IS_ALIGNED(next, PUD_SIZE)) {
+ WARN_ONCE(1, "%s: unaligned range\n", __func__);
+ continue;
+ }
+ pte_clear(&init_mm, addr, (pte_t *)pud);
continue;
}
- pmd_base = (pmd_t *)pud_page_vaddr(*pud);
+ pmd_base = pud_pgtable(*pud);
remove_pmd_table(pmd_base, addr, next);
free_pmd_table(pmd_base, pud);
}
@@ -847,6 +813,7 @@ static void __meminit remove_pagetable(unsigned long start, unsigned long end)
unsigned long addr, next;
pud_t *pud_base;
pgd_t *pgd;
+ p4d_t *p4d;
spin_lock(&init_mm.page_table_lock);
@@ -854,30 +821,41 @@ static void __meminit remove_pagetable(unsigned long start, unsigned long end)
next = pgd_addr_end(addr, end);
pgd = pgd_offset_k(addr);
- if (!pgd_present(*pgd))
+ p4d = p4d_offset(pgd, addr);
+ if (!p4d_present(*p4d))
continue;
- if (pgd_is_leaf(*pgd)) {
- split_kernel_mapping(addr, end, PGDIR_SIZE, (pte_t *)pgd);
+ if (p4d_is_leaf(*p4d)) {
+ if (!IS_ALIGNED(addr, P4D_SIZE) ||
+ !IS_ALIGNED(next, P4D_SIZE)) {
+ WARN_ONCE(1, "%s: unaligned range\n", __func__);
+ continue;
+ }
+
+ pte_clear(&init_mm, addr, (pte_t *)pgd);
continue;
}
- pud_base = (pud_t *)pgd_page_vaddr(*pgd);
+ pud_base = p4d_pgtable(*p4d);
remove_pud_table(pud_base, addr, next);
+ free_pud_table(pud_base, p4d);
}
spin_unlock(&init_mm.page_table_lock);
radix__flush_tlb_kernel_range(start, end);
}
-int __meminit radix__create_section_mapping(unsigned long start, unsigned long end, int nid)
+int __meminit radix__create_section_mapping(unsigned long start,
+ unsigned long end, int nid,
+ pgprot_t prot)
{
if (end >= RADIX_VMALLOC_START) {
pr_warn("Outside the supported range\n");
return -1;
}
- return create_physical_mapping(__pa(start), __pa(end), nid);
+ return create_physical_mapping(__pa(start), __pa(end),
+ nid, prot);
}
int __meminit radix__remove_section_mapping(unsigned long start, unsigned long end)
@@ -923,6 +901,20 @@ void __meminit radix__vmemmap_remove_mapping(unsigned long start, unsigned long
#endif
#endif
+#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KFENCE)
+void radix__kernel_map_pages(struct page *page, int numpages, int enable)
+{
+ unsigned long addr;
+
+ addr = (unsigned long)page_address(page);
+
+ if (enable)
+ set_memory_p(addr, numpages);
+ else
+ set_memory_np(addr, numpages);
+}
+#endif
+
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
unsigned long radix__pmd_hugepage_update(struct mm_struct *mm, unsigned long addr,
@@ -957,9 +949,6 @@ pmd_t radix__pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long addre
pmd = *pmdp;
pmd_clear(pmdp);
- /*FIXME!! Verify whether we need this kick below */
- serialize_against_pte_lookup(vma->vm_mm);
-
radix__flush_tlb_collapsed_pmd(vma->vm_mm, address);
return pmd;
@@ -1018,17 +1007,6 @@ pmd_t radix__pmdp_huge_get_and_clear(struct mm_struct *mm,
old = radix__pmd_hugepage_update(mm, addr, pmdp, ~0UL, 0);
old_pmd = __pmd(old);
- /*
- * Serialize against find_current_mm_pte which does lock-less
- * lookup in page tables with local interrupts disabled. For huge pages
- * it casts pmd_t to pte_t. Since format of pte_t is different from
- * pmd_t we want to prevent transit from pmd pointing to page table
- * to pmd pointing to huge page (and back) while interrupts are disabled.
- * We clear pmd to possibly replace it with page table pointer in
- * different code paths. So make sure we wait for the parallel
- * find_current_mm_pte to finish.
- */
- serialize_against_pte_lookup(mm);
return old_pmd;
}
@@ -1043,16 +1021,21 @@ void radix__ptep_set_access_flags(struct vm_area_struct *vma, pte_t *ptep,
unsigned long change = pte_val(entry) ^ pte_val(*ptep);
/*
- * To avoid NMMU hang while relaxing access, we need mark
- * the pte invalid in between.
+ * On POWER9, the NMMU is not able to relax PTE access permissions
+ * for a translation with a TLB. The PTE must be invalidated, TLB
+ * flushed before the new PTE is installed.
+ *
+ * This only needs to be done for radix, because hash translation does
+ * flush when updating the linux pte (and we don't support NMMU
+ * accelerators on HPT on POWER9 anyway XXX: do we?).
+ *
+ * POWER10 (and P9P) NMMU does behave as per ISA.
*/
- if ((change & _PAGE_RW) && atomic_read(&mm->context.copros) > 0) {
+ if (!cpu_has_feature(CPU_FTR_ARCH_31) && (change & _PAGE_RW) &&
+ atomic_read(&mm->context.copros) > 0) {
unsigned long old_pte, new_pte;
old_pte = __radix_pte_update(ptep, _PAGE_PRESENT, _PAGE_INVALID);
- /*
- * new value of pte
- */
new_pte = old_pte | set;
radix__flush_tlb_page_psize(mm, address, psize);
__radix_pte_update(ptep, _PAGE_INVALID, new_pte);
@@ -1060,9 +1043,12 @@ void radix__ptep_set_access_flags(struct vm_area_struct *vma, pte_t *ptep,
__radix_pte_update(ptep, 0, set);
/*
* Book3S does not require a TLB flush when relaxing access
- * restrictions when the address space is not attached to a
- * NMMU, because the core MMU will reload the pte after taking
- * an access fault, which is defined by the architectue.
+ * restrictions when the address space (modulo the POWER9 nest
+ * MMU issue above) because the MMU will reload the PTE after
+ * taking an access fault, as defined by the architecture. See
+ * "Setting a Reference or Change Bit or Upgrading Access
+ * Authority (PTE Subject to Atomic Hardware Updates)" in
+ * Power ISA Version 3.1B.
*/
}
/* See ptesync comment in radix__set_pte_at */
@@ -1075,33 +1061,18 @@ void radix__ptep_modify_prot_commit(struct vm_area_struct *vma,
struct mm_struct *mm = vma->vm_mm;
/*
- * To avoid NMMU hang while relaxing access we need to flush the tlb before
- * we set the new value. We need to do this only for radix, because hash
- * translation does flush when updating the linux pte.
+ * POWER9 NMMU must flush the TLB after clearing the PTE before
+ * installing a PTE with more relaxed access permissions, see
+ * radix__ptep_set_access_flags.
*/
- if (is_pte_rw_upgrade(pte_val(old_pte), pte_val(pte)) &&
+ if (!cpu_has_feature(CPU_FTR_ARCH_31) &&
+ is_pte_rw_upgrade(pte_val(old_pte), pte_val(pte)) &&
(atomic_read(&mm->context.copros) > 0))
radix__flush_tlb_page(vma, addr);
set_pte_at(mm, addr, ptep, pte);
}
-int __init arch_ioremap_pud_supported(void)
-{
- /* HPT does not cope with large pages in the vmalloc area */
- return radix_enabled();
-}
-
-int __init arch_ioremap_pmd_supported(void)
-{
- return radix_enabled();
-}
-
-int p4d_free_pud_page(p4d_t *p4d, unsigned long addr)
-{
- return 0;
-}
-
int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
{
pte_t *ptep = (pte_t *)pud;
@@ -1117,7 +1088,7 @@ int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
int pud_clear_huge(pud_t *pud)
{
- if (pud_huge(*pud)) {
+ if (pud_is_leaf(*pud)) {
pud_clear(pud);
return 1;
}
@@ -1130,7 +1101,7 @@ int pud_free_pmd_page(pud_t *pud, unsigned long addr)
pmd_t *pmd;
int i;
- pmd = (pmd_t *)pud_page_vaddr(*pud);
+ pmd = pud_pgtable(*pud);
pud_clear(pud);
flush_tlb_kernel_range(addr, addr + PUD_SIZE);
@@ -1164,7 +1135,7 @@ int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
int pmd_clear_huge(pmd_t *pmd)
{
- if (pmd_huge(*pmd)) {
+ if (pmd_is_leaf(*pmd)) {
pmd_clear(pmd);
return 1;
}
@@ -1185,8 +1156,3 @@ int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
return 1;
}
-
-int __init arch_ioremap_p4d_supported(void)
-{
- return 0;
-}
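
A practical effect of the radix_pgtable.c changes above: create_physical_mapping() now caps each mapping step at radix_mem_block_size (and at PAGE_SIZE when DEBUG_PAGEALLOC or KFENCE is active) in addition to stopping at the strict-RWX boundary. The compilable sketch below shows one way such a cap interacts with mapping-size selection; the 1G/2M/64K choices and alignment rules are simplified stand-ins for the kernel's psize logic.

#include <stdio.h>

#define SZ_64K	(64UL << 10)
#define SZ_2M	(2UL << 20)
#define SZ_1G	(1UL << 30)

/* Pick the largest naturally aligned size that fits in the (capped) gap. */
static unsigned long pick_mapping_size(unsigned long addr, unsigned long gap,
				       unsigned long max_mapping_size)
{
	if (gap > max_mapping_size)
		gap = max_mapping_size;

	if (gap >= SZ_1G && !(addr & (SZ_1G - 1)))
		return SZ_1G;
	if (gap >= SZ_2M && !(addr & (SZ_2M - 1)))
		return SZ_2M;
	return SZ_64K;
}

int main(void)
{
	/* Example: map 3GiB of RAM with a 1GiB memory-block cap. */
	unsigned long addr = 0, end = 3UL << 30, cap = SZ_1G;

	while (addr < end) {
		unsigned long size = pick_mapping_size(addr, end - addr, cap);

		printf("map 0x%lx..0x%lx\n", addr, addr + size);
		addr += size;
	}
	return 0;
}
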
diff --git a/arch/powerpc/mm/book3s64/radix_tlb.c b/arch/powerpc/mm/book3s64/radix_tlb.c
index 03f43c924e00..4e29b619578c 100644
--- a/arch/powerpc/mm/book3s64/radix_tlb.c
+++ b/arch/powerpc/mm/book3s64/radix_tlb.c
@@ -10,16 +10,16 @@
#include <linux/memblock.h>
#include <linux/mmu_context.h>
#include <linux/sched/mm.h>
+#include <linux/debugfs.h>
#include <asm/ppc-opcode.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/trace.h>
#include <asm/cputhreads.h>
+#include <asm/plpar_wrappers.h>
-#define RIC_FLUSH_TLB 0
-#define RIC_FLUSH_PWC 1
-#define RIC_FLUSH_ALL 2
+#include "internal.h"
/*
* tlbiel instruction for radix, set invalidation
@@ -55,16 +55,23 @@ static void tlbiel_all_isa300(unsigned int num_sets, unsigned int is)
if (early_cpu_has_feature(CPU_FTR_HVMODE)) {
/* MSR[HV] should flush partition scope translations first. */
tlbiel_radix_set_isa300(0, is, 0, RIC_FLUSH_ALL, 0);
- for (set = 1; set < num_sets; set++)
- tlbiel_radix_set_isa300(set, is, 0, RIC_FLUSH_TLB, 0);
+
+ if (!early_cpu_has_feature(CPU_FTR_ARCH_31)) {
+ for (set = 1; set < num_sets; set++)
+ tlbiel_radix_set_isa300(set, is, 0,
+ RIC_FLUSH_TLB, 0);
+ }
}
/* Flush process scoped entries. */
tlbiel_radix_set_isa300(0, is, 0, RIC_FLUSH_ALL, 1);
- for (set = 1; set < num_sets; set++)
- tlbiel_radix_set_isa300(set, is, 0, RIC_FLUSH_TLB, 1);
- asm volatile("ptesync": : :"memory");
+ if (!early_cpu_has_feature(CPU_FTR_ARCH_31)) {
+ for (set = 1; set < num_sets; set++)
+ tlbiel_radix_set_isa300(set, is, 0, RIC_FLUSH_TLB, 1);
+ }
+
+ ppc_after_tlbiel_barrier();
}
void radix__tlbiel_all(unsigned int action)
@@ -120,6 +127,21 @@ static __always_inline void __tlbie_pid(unsigned long pid, unsigned long ric)
trace_tlbie(0, 0, rb, rs, ric, prs, r);
}
+static __always_inline void __tlbie_pid_lpid(unsigned long pid,
+ unsigned long lpid,
+ unsigned long ric)
+{
+ unsigned long rb, rs, prs, r;
+
+ rb = PPC_BIT(53); /* IS = 1 */
+ rs = (pid << PPC_BITLSHIFT(31)) | (lpid & ~(PPC_BITMASK(0, 31)));
+ prs = 1; /* process scoped */
+ r = 1; /* radix format */
+
+ asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
+ : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
+ trace_tlbie(0, 0, rb, rs, ric, prs, r);
+}
static __always_inline void __tlbie_lpid(unsigned long lpid, unsigned long ric)
{
unsigned long rb,rs,prs,r;
@@ -180,6 +202,23 @@ static __always_inline void __tlbie_va(unsigned long va, unsigned long pid,
trace_tlbie(0, 0, rb, rs, ric, prs, r);
}
+static __always_inline void __tlbie_va_lpid(unsigned long va, unsigned long pid,
+ unsigned long lpid,
+ unsigned long ap, unsigned long ric)
+{
+ unsigned long rb, rs, prs, r;
+
+ rb = va & ~(PPC_BITMASK(52, 63));
+ rb |= ap << PPC_BITLSHIFT(58);
+ rs = (pid << PPC_BITLSHIFT(31)) | (lpid & ~(PPC_BITMASK(0, 31)));
+ prs = 1; /* process scoped */
+ r = 1; /* radix format */
+
+ asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
+ : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
+ trace_tlbie(0, 0, rb, rs, ric, prs, r);
+}
+
static __always_inline void __tlbie_lpid_va(unsigned long va, unsigned long lpid,
unsigned long ap, unsigned long ric)
{
@@ -225,6 +264,22 @@ static inline void fixup_tlbie_va_range(unsigned long va, unsigned long pid,
}
}
+static inline void fixup_tlbie_va_range_lpid(unsigned long va,
+ unsigned long pid,
+ unsigned long lpid,
+ unsigned long ap)
+{
+ if (cpu_has_feature(CPU_FTR_P9_TLBIE_ERAT_BUG)) {
+ asm volatile("ptesync" : : : "memory");
+ __tlbie_pid_lpid(0, lpid, RIC_FLUSH_TLB);
+ }
+
+ if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) {
+ asm volatile("ptesync" : : : "memory");
+ __tlbie_va_lpid(va, pid, lpid, ap, RIC_FLUSH_TLB);
+ }
+}
+
static inline void fixup_tlbie_pid(unsigned long pid)
{
/*
@@ -244,6 +299,25 @@ static inline void fixup_tlbie_pid(unsigned long pid)
}
}
+static inline void fixup_tlbie_pid_lpid(unsigned long pid, unsigned long lpid)
+{
+ /*
+ * We can use any address for the invalidation, pick one which is
+ * probably unused as an optimisation.
+ */
+ unsigned long va = ((1UL << 52) - 1);
+
+ if (cpu_has_feature(CPU_FTR_P9_TLBIE_ERAT_BUG)) {
+ asm volatile("ptesync" : : : "memory");
+ __tlbie_pid_lpid(0, lpid, RIC_FLUSH_TLB);
+ }
+
+ if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) {
+ asm volatile("ptesync" : : : "memory");
+ __tlbie_va_lpid(va, pid, lpid, mmu_get_ap(MMU_PAGE_64K),
+ RIC_FLUSH_TLB);
+ }
+}
static inline void fixup_tlbie_lpid_va(unsigned long va, unsigned long lpid,
unsigned long ap)
@@ -281,29 +355,39 @@ static inline void fixup_tlbie_lpid(unsigned long lpid)
/*
* We use 128 set in radix mode and 256 set in hpt mode.
*/
-static __always_inline void _tlbiel_pid(unsigned long pid, unsigned long ric)
+static inline void _tlbiel_pid(unsigned long pid, unsigned long ric)
{
int set;
asm volatile("ptesync": : :"memory");
- /*
- * Flush the first set of the TLB, and if we're doing a RIC_FLUSH_ALL,
- * also flush the entire Page Walk Cache.
- */
- __tlbiel_pid(pid, 0, ric);
+ switch (ric) {
+ case RIC_FLUSH_PWC:
- /* For PWC, only one flush is needed */
- if (ric == RIC_FLUSH_PWC) {
- asm volatile("ptesync": : :"memory");
+ /* For PWC, only one flush is needed */
+ __tlbiel_pid(pid, 0, RIC_FLUSH_PWC);
+ ppc_after_tlbiel_barrier();
return;
+ case RIC_FLUSH_TLB:
+ __tlbiel_pid(pid, 0, RIC_FLUSH_TLB);
+ break;
+ case RIC_FLUSH_ALL:
+ default:
+ /*
+ * Flush the first set of the TLB, and if
+ * we're doing a RIC_FLUSH_ALL, also flush
+ * the entire Page Walk Cache.
+ */
+ __tlbiel_pid(pid, 0, RIC_FLUSH_ALL);
}
- /* For the remaining sets, just flush the TLB */
- for (set = 1; set < POWER9_TLB_SETS_RADIX ; set++)
- __tlbiel_pid(pid, set, RIC_FLUSH_TLB);
+ if (!cpu_has_feature(CPU_FTR_ARCH_31)) {
+ /* For the remaining sets, just flush the TLB */
+ for (set = 1; set < POWER9_TLB_SETS_RADIX ; set++)
+ __tlbiel_pid(pid, set, RIC_FLUSH_TLB);
+ }
- asm volatile("ptesync": : :"memory");
+ ppc_after_tlbiel_barrier();
asm volatile(PPC_RADIX_INVALIDATE_ERAT_USER "; isync" : : :"memory");
}
@@ -313,7 +397,7 @@ static inline void _tlbie_pid(unsigned long pid, unsigned long ric)
/*
* Workaround the fact that the "ric" argument to __tlbie_pid
- * must be a compile-time contraint to match the "i" constraint
+ * must be a compile-time constraint to match the "i" constraint
* in the asm statement.
*/
switch (ric) {
@@ -332,6 +416,31 @@ static inline void _tlbie_pid(unsigned long pid, unsigned long ric)
asm volatile("eieio; tlbsync; ptesync": : :"memory");
}
+static inline void _tlbie_pid_lpid(unsigned long pid, unsigned long lpid,
+ unsigned long ric)
+{
+ asm volatile("ptesync" : : : "memory");
+
+ /*
+ * Workaround the fact that the "ric" argument to __tlbie_pid_lpid
+ * must be a compile-time constraint to match the "i" constraint
+ * in the asm statement.
+ */
+ switch (ric) {
+ case RIC_FLUSH_TLB:
+ __tlbie_pid_lpid(pid, lpid, RIC_FLUSH_TLB);
+ fixup_tlbie_pid_lpid(pid, lpid);
+ break;
+ case RIC_FLUSH_PWC:
+ __tlbie_pid_lpid(pid, lpid, RIC_FLUSH_PWC);
+ break;
+ case RIC_FLUSH_ALL:
+ default:
+ __tlbie_pid_lpid(pid, lpid, RIC_FLUSH_ALL);
+ fixup_tlbie_pid_lpid(pid, lpid);
+ }
+ asm volatile("eieio; tlbsync; ptesync" : : : "memory");
+}
struct tlbiel_pid {
unsigned long pid;
unsigned long ric;
@@ -430,7 +539,7 @@ static __always_inline void _tlbiel_va(unsigned long va, unsigned long pid,
asm volatile("ptesync": : :"memory");
__tlbiel_va(va, pid, ap, ric);
- asm volatile("ptesync": : :"memory");
+ ppc_after_tlbiel_barrier();
}
static inline void _tlbiel_va_range(unsigned long start, unsigned long end,
@@ -441,7 +550,7 @@ static inline void _tlbiel_va_range(unsigned long start, unsigned long end,
if (also_pwc)
__tlbiel_pid(pid, 0, RIC_FLUSH_PWC);
__tlbiel_va_range(start, end, pid, page_size, psize);
- asm volatile("ptesync": : :"memory");
+ ppc_after_tlbiel_barrier();
}
static inline void __tlbie_va_range(unsigned long start, unsigned long end,
@@ -457,6 +566,20 @@ static inline void __tlbie_va_range(unsigned long start, unsigned long end,
fixup_tlbie_va_range(addr - page_size, pid, ap);
}
+static inline void __tlbie_va_range_lpid(unsigned long start, unsigned long end,
+ unsigned long pid, unsigned long lpid,
+ unsigned long page_size,
+ unsigned long psize)
+{
+ unsigned long addr;
+ unsigned long ap = mmu_get_ap(psize);
+
+ for (addr = start; addr < end; addr += page_size)
+ __tlbie_va_lpid(addr, pid, lpid, ap, RIC_FLUSH_TLB);
+
+ fixup_tlbie_va_range_lpid(addr - page_size, pid, lpid, ap);
+}
+
static __always_inline void _tlbie_va(unsigned long va, unsigned long pid,
unsigned long psize, unsigned long ric)
{
@@ -537,6 +660,18 @@ static inline void _tlbie_va_range(unsigned long start, unsigned long end,
asm volatile("eieio; tlbsync; ptesync": : :"memory");
}
+static inline void _tlbie_va_range_lpid(unsigned long start, unsigned long end,
+ unsigned long pid, unsigned long lpid,
+ unsigned long page_size,
+ unsigned long psize, bool also_pwc)
+{
+ asm volatile("ptesync" : : : "memory");
+ if (also_pwc)
+ __tlbie_pid_lpid(pid, lpid, RIC_FLUSH_PWC);
+ __tlbie_va_range_lpid(start, end, pid, lpid, page_size, psize);
+ asm volatile("eieio; tlbsync; ptesync" : : : "memory");
+}
+
static inline void _tlbiel_va_range_multicast(struct mm_struct *mm,
unsigned long start, unsigned long end,
unsigned long pid, unsigned long page_size,
@@ -587,6 +722,11 @@ void radix__local_flush_all_mm(struct mm_struct *mm)
preempt_enable();
}
EXPORT_SYMBOL(radix__local_flush_all_mm);
+
+static void __flush_all_mm(struct mm_struct *mm, bool fullmm)
+{
+ radix__local_flush_all_mm(mm);
+}
#endif /* CONFIG_SMP */
void radix__local_flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
@@ -612,47 +752,78 @@ void radix__local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmadd
}
EXPORT_SYMBOL(radix__local_flush_tlb_page);
-static bool mm_is_singlethreaded(struct mm_struct *mm)
-{
- if (atomic_read(&mm->context.copros) > 0)
- return false;
- if (atomic_read(&mm->mm_users) <= 1 && current->mm == mm)
- return true;
- return false;
-}
-
static bool mm_needs_flush_escalation(struct mm_struct *mm)
{
/*
- * P9 nest MMU has issues with the page walk cache
- * caching PTEs and not flushing them properly when
- * RIC = 0 for a PID/LPID invalidate
+ * The P9 nest MMU has issues with the page walk cache caching PTEs
+ * and not flushing them when RIC = 0 for a PID/LPID invalidate.
+ *
+ * This may have been fixed in shipping firmware (by disabling PWC
+ * or preventing it from caching PTEs), but until that is confirmed,
+ * this workaround is required - escalate all RIC=0 IS=1/2/3 flushes
+ * to RIC=2.
+ *
+ * POWER10 (and P9P) does not have this problem.
*/
+ if (cpu_has_feature(CPU_FTR_ARCH_31))
+ return false;
if (atomic_read(&mm->context.copros) > 0)
return true;
return false;
}
-#ifdef CONFIG_SMP
-static void do_exit_flush_lazy_tlb(void *arg)
+/*
+ * If always_flush is true, then flush even if this CPU can't be removed
+ * from mm_cpumask.
+ */
+void exit_lazy_flush_tlb(struct mm_struct *mm, bool always_flush)
{
- struct mm_struct *mm = arg;
unsigned long pid = mm->context.id;
+ int cpu = smp_processor_id();
+ /*
+ * A kthread could have done a mmget_not_zero() after the flushing CPU
+ * checked mm_cpumask, and be in the process of kthread_use_mm when
+ * interrupted here. In that case, current->mm will be set to mm,
+ * because kthread_use_mm() setting ->mm and switching to the mm is
+ * done with interrupts off.
+ */
if (current->mm == mm)
- return; /* Local CPU */
+ goto out;
if (current->active_mm == mm) {
- /*
- * Must be a kernel thread because sender is single-threaded.
- */
- BUG_ON(current->mm);
+ WARN_ON_ONCE(current->mm != NULL);
+ /* Is a kernel thread and is using mm as the lazy tlb */
mmgrab(&init_mm);
- switch_mm(mm, &init_mm, current);
current->active_mm = &init_mm;
+ switch_mm_irqs_off(mm, &init_mm, current);
mmdrop(mm);
}
- _tlbiel_pid(pid, RIC_FLUSH_ALL);
+
+ /*
+ * This IPI may be initiated from any source including those not
+ * running the mm, so there may be a racing IPI that comes after
+ * this one which finds the cpumask already clear. Check and avoid
+ * underflowing the active_cpus count in that case. The race should
+ * not otherwise be a problem, but the TLB must be flushed because
+ * that's what the caller expects.
+ */
+ if (cpumask_test_cpu(cpu, mm_cpumask(mm))) {
+ atomic_dec(&mm->context.active_cpus);
+ cpumask_clear_cpu(cpu, mm_cpumask(mm));
+ always_flush = true;
+ }
+
+out:
+ if (always_flush)
+ _tlbiel_pid(pid, RIC_FLUSH_ALL);
+}
+
+#ifdef CONFIG_SMP
+static void do_exit_flush_lazy_tlb(void *arg)
+{
+ struct mm_struct *mm = arg;
+ exit_lazy_flush_tlb(mm, true);
}
static void exit_flush_lazy_tlbs(struct mm_struct *mm)
@@ -666,12 +837,112 @@ static void exit_flush_lazy_tlbs(struct mm_struct *mm)
*/
smp_call_function_many(mm_cpumask(mm), do_exit_flush_lazy_tlb,
(void *)mm, 1);
- mm_reset_thread_local(mm);
}
+#else /* CONFIG_SMP */
+static inline void exit_flush_lazy_tlbs(struct mm_struct *mm) { }
+#endif /* CONFIG_SMP */
+
+static DEFINE_PER_CPU(unsigned int, mm_cpumask_trim_clock);
+
+/*
+ * Interval between flushes at which we send out IPIs to check whether the
+ * mm_cpumask can be trimmed for the case where it's not a single-threaded
+ * process flushing its own mm. The intent is to reduce the cost of later
+ * flushes. Don't want this to be so low that it adds noticeable cost to TLB
+ * flushing, or so high that it doesn't help reduce global TLBIEs.
+ */
+static unsigned long tlb_mm_cpumask_trim_timer = 1073;
+
+static bool tick_and_test_trim_clock(void)
+{
+ if (__this_cpu_inc_return(mm_cpumask_trim_clock) ==
+ tlb_mm_cpumask_trim_timer) {
+ __this_cpu_write(mm_cpumask_trim_clock, 0);
+ return true;
+ }
+ return false;
+}
+
+enum tlb_flush_type {
+ FLUSH_TYPE_NONE,
+ FLUSH_TYPE_LOCAL,
+ FLUSH_TYPE_GLOBAL,
+};
+
+static enum tlb_flush_type flush_type_needed(struct mm_struct *mm, bool fullmm)
+{
+ int active_cpus = atomic_read(&mm->context.active_cpus);
+ int cpu = smp_processor_id();
+
+ if (active_cpus == 0)
+ return FLUSH_TYPE_NONE;
+ if (active_cpus == 1 && cpumask_test_cpu(cpu, mm_cpumask(mm))) {
+ if (current->mm != mm) {
+ /*
+ * Asynchronous flush sources may trim down to nothing
+ * if the process is not running, so occasionally try
+ * to trim.
+ */
+ if (tick_and_test_trim_clock()) {
+ exit_lazy_flush_tlb(mm, true);
+ return FLUSH_TYPE_NONE;
+ }
+ }
+ return FLUSH_TYPE_LOCAL;
+ }
+
+ /* Coprocessors require TLBIE to invalidate nMMU. */
+ if (atomic_read(&mm->context.copros) > 0)
+ return FLUSH_TYPE_GLOBAL;
+
+ /*
+ * In the fullmm case there's no point doing the exit_flush_lazy_tlbs
+ * because the mm is being taken down anyway, and a TLBIE tends to
+ * be faster than an IPI+TLBIEL.
+ */
+ if (fullmm)
+ return FLUSH_TYPE_GLOBAL;
+
+ /*
+ * If we are running the only thread of a single-threaded process,
+ * then we should almost always be able to trim off the rest of the
+ * CPU mask (except in the case of use_mm() races), so always try
+ * trimming the mask.
+ */
+ if (atomic_read(&mm->mm_users) <= 1 && current->mm == mm) {
+ exit_flush_lazy_tlbs(mm);
+ /*
+ * use_mm() race could prevent IPIs from being able to clear
+ * the cpumask here, however those users are established
+ * after our first check (and so after the PTEs are removed),
+ * and the TLB still gets flushed by the IPI, so this CPU
+ * will only require a local flush.
+ */
+ return FLUSH_TYPE_LOCAL;
+ }
+
+ /*
+ * Occasionally try to trim down the cpumask. It's possible this can
+ * bring the mask to zero, which results in no flush.
+ */
+ if (tick_and_test_trim_clock()) {
+ exit_flush_lazy_tlbs(mm);
+ if (current->mm == mm)
+ return FLUSH_TYPE_LOCAL;
+ if (cpumask_test_cpu(cpu, mm_cpumask(mm)))
+ exit_lazy_flush_tlb(mm, true);
+ return FLUSH_TYPE_NONE;
+ }
+
+ return FLUSH_TYPE_GLOBAL;
+}
+
+#ifdef CONFIG_SMP
void radix__flush_tlb_mm(struct mm_struct *mm)
{
unsigned long pid;
+ enum tlb_flush_type type;
pid = mm->context.id;
if (unlikely(pid == MMU_NO_CONTEXT))
@@ -679,17 +950,23 @@ void radix__flush_tlb_mm(struct mm_struct *mm)
preempt_disable();
/*
- * Order loads of mm_cpumask vs previous stores to clear ptes before
- * the invalidate. See barrier in switch_mm_irqs_off
+ * Order loads of mm_cpumask (in flush_type_needed) vs previous
+ * stores to clear ptes before the invalidate. See barrier in
+ * switch_mm_irqs_off
*/
smp_mb();
- if (!mm_is_thread_local(mm)) {
- if (unlikely(mm_is_singlethreaded(mm))) {
- exit_flush_lazy_tlbs(mm);
- goto local;
- }
-
- if (cputlb_use_tlbie()) {
+ type = flush_type_needed(mm, false);
+ if (type == FLUSH_TYPE_LOCAL) {
+ _tlbiel_pid(pid, RIC_FLUSH_TLB);
+ } else if (type == FLUSH_TYPE_GLOBAL) {
+ if (!mmu_has_feature(MMU_FTR_GTSE)) {
+ unsigned long tgt = H_RPTI_TARGET_CMMU;
+
+ if (atomic_read(&mm->context.copros) > 0)
+ tgt |= H_RPTI_TARGET_NMMU;
+ pseries_rpt_invalidate(pid, tgt, H_RPTI_TYPE_TLB,
+ H_RPTI_PAGE_ALL, 0, -1UL);
+ } else if (cputlb_use_tlbie()) {
if (mm_needs_flush_escalation(mm))
_tlbie_pid(pid, RIC_FLUSH_ALL);
else
@@ -697,9 +974,6 @@ void radix__flush_tlb_mm(struct mm_struct *mm)
} else {
_tlbiel_pid_multicast(mm, pid, RIC_FLUSH_TLB);
}
- } else {
-local:
- _tlbiel_pid(pid, RIC_FLUSH_TLB);
}
preempt_enable();
}
@@ -708,6 +982,7 @@ EXPORT_SYMBOL(radix__flush_tlb_mm);
static void __flush_all_mm(struct mm_struct *mm, bool fullmm)
{
unsigned long pid;
+ enum tlb_flush_type type;
pid = mm->context.id;
if (unlikely(pid == MMU_NO_CONTEXT))
@@ -715,20 +990,23 @@ static void __flush_all_mm(struct mm_struct *mm, bool fullmm)
preempt_disable();
smp_mb(); /* see radix__flush_tlb_mm */
- if (!mm_is_thread_local(mm)) {
- if (unlikely(mm_is_singlethreaded(mm))) {
- if (!fullmm) {
- exit_flush_lazy_tlbs(mm);
- goto local;
- }
- }
- if (cputlb_use_tlbie())
+ type = flush_type_needed(mm, fullmm);
+ if (type == FLUSH_TYPE_LOCAL) {
+ _tlbiel_pid(pid, RIC_FLUSH_ALL);
+ } else if (type == FLUSH_TYPE_GLOBAL) {
+ if (!mmu_has_feature(MMU_FTR_GTSE)) {
+ unsigned long tgt = H_RPTI_TARGET_CMMU;
+ unsigned long type = H_RPTI_TYPE_TLB | H_RPTI_TYPE_PWC |
+ H_RPTI_TYPE_PRT;
+
+ if (atomic_read(&mm->context.copros) > 0)
+ tgt |= H_RPTI_TARGET_NMMU;
+ pseries_rpt_invalidate(pid, tgt, type,
+ H_RPTI_PAGE_ALL, 0, -1UL);
+ } else if (cputlb_use_tlbie())
_tlbie_pid(pid, RIC_FLUSH_ALL);
else
_tlbiel_pid_multicast(mm, pid, RIC_FLUSH_ALL);
- } else {
-local:
- _tlbiel_pid(pid, RIC_FLUSH_ALL);
}
preempt_enable();
}
@@ -743,6 +1021,7 @@ void radix__flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
int psize)
{
unsigned long pid;
+ enum tlb_flush_type type;
pid = mm->context.id;
if (unlikely(pid == MMU_NO_CONTEXT))
@@ -750,18 +1029,26 @@ void radix__flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
preempt_disable();
smp_mb(); /* see radix__flush_tlb_mm */
- if (!mm_is_thread_local(mm)) {
- if (unlikely(mm_is_singlethreaded(mm))) {
- exit_flush_lazy_tlbs(mm);
- goto local;
- }
- if (cputlb_use_tlbie())
+ type = flush_type_needed(mm, false);
+ if (type == FLUSH_TYPE_LOCAL) {
+ _tlbiel_va(vmaddr, pid, psize, RIC_FLUSH_TLB);
+ } else if (type == FLUSH_TYPE_GLOBAL) {
+ if (!mmu_has_feature(MMU_FTR_GTSE)) {
+ unsigned long tgt, pg_sizes, size;
+
+ tgt = H_RPTI_TARGET_CMMU;
+ pg_sizes = psize_to_rpti_pgsize(psize);
+ size = 1UL << mmu_psize_to_shift(psize);
+
+ if (atomic_read(&mm->context.copros) > 0)
+ tgt |= H_RPTI_TARGET_NMMU;
+ pseries_rpt_invalidate(pid, tgt, H_RPTI_TYPE_TLB,
+ pg_sizes, vmaddr,
+ vmaddr + size);
+ } else if (cputlb_use_tlbie())
_tlbie_va(vmaddr, pid, psize, RIC_FLUSH_TLB);
else
_tlbiel_va_multicast(mm, vmaddr, pid, psize, RIC_FLUSH_TLB);
- } else {
-local:
- _tlbiel_va(vmaddr, pid, psize, RIC_FLUSH_TLB);
}
preempt_enable();
}
@@ -776,8 +1063,6 @@ void radix__flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
}
EXPORT_SYMBOL(radix__flush_tlb_page);
-#else /* CONFIG_SMP */
-#define radix__flush_all_mm radix__local_flush_all_mm
#endif /* CONFIG_SMP */
static void do_tlbiel_kernel(void *info)
@@ -805,7 +1090,14 @@ static inline void _tlbiel_kernel_broadcast(void)
*/
void radix__flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
- if (cputlb_use_tlbie())
+ if (!mmu_has_feature(MMU_FTR_GTSE)) {
+ unsigned long tgt = H_RPTI_TARGET_CMMU | H_RPTI_TARGET_NMMU;
+ unsigned long type = H_RPTI_TYPE_TLB | H_RPTI_TYPE_PWC |
+ H_RPTI_TYPE_PRT;
+
+ pseries_rpt_invalidate(0, tgt, type, H_RPTI_PAGE_ALL,
+ start, end);
+ } else if (cputlb_use_tlbie())
_tlbie_pid(0, RIC_FLUSH_ALL);
else
_tlbiel_kernel_broadcast();
@@ -823,18 +1115,19 @@ EXPORT_SYMBOL(radix__flush_tlb_kernel_range);
* invalidating a full PID, so it has a far lower threshold to change from
* individual page flushes to full-pid flushes.
*/
-static unsigned long tlb_single_page_flush_ceiling __read_mostly = 33;
-static unsigned long tlb_local_single_page_flush_ceiling __read_mostly = POWER9_TLB_SETS_RADIX * 2;
+static u32 tlb_single_page_flush_ceiling __read_mostly = 33;
+static u32 tlb_local_single_page_flush_ceiling __read_mostly = POWER9_TLB_SETS_RADIX * 2;
static inline void __radix__flush_tlb_range(struct mm_struct *mm,
unsigned long start, unsigned long end)
-
{
unsigned long pid;
unsigned int page_shift = mmu_psize_defs[mmu_virtual_psize].shift;
unsigned long page_size = 1UL << page_shift;
unsigned long nr_pages = (end - start) >> page_shift;
- bool local, full;
+ bool fullmm = (end == TLB_FLUSH_ALL);
+ bool flush_pid, flush_pwc = false;
+ enum tlb_flush_type type;
pid = mm->context.id;
if (unlikely(pid == MMU_NO_CONTEXT))
@@ -842,34 +1135,47 @@ static inline void __radix__flush_tlb_range(struct mm_struct *mm,
preempt_disable();
smp_mb(); /* see radix__flush_tlb_mm */
- if (!mm_is_thread_local(mm)) {
- if (unlikely(mm_is_singlethreaded(mm))) {
- if (end != TLB_FLUSH_ALL) {
- exit_flush_lazy_tlbs(mm);
- goto is_local;
- }
- }
- local = false;
- full = (end == TLB_FLUSH_ALL ||
- nr_pages > tlb_single_page_flush_ceiling);
- } else {
-is_local:
- local = true;
- full = (end == TLB_FLUSH_ALL ||
- nr_pages > tlb_local_single_page_flush_ceiling);
- }
-
- if (full) {
- if (local) {
- _tlbiel_pid(pid, RIC_FLUSH_TLB);
+ type = flush_type_needed(mm, fullmm);
+ if (type == FLUSH_TYPE_NONE)
+ goto out;
+
+ if (fullmm)
+ flush_pid = true;
+ else if (type == FLUSH_TYPE_GLOBAL)
+ flush_pid = nr_pages > tlb_single_page_flush_ceiling;
+ else
+ flush_pid = nr_pages > tlb_local_single_page_flush_ceiling;
+ /*
+ * A full PID flush already does the PWC flush. If it is not a full PID
+ * flush, check whether the range spans more than a PMD and force a PWC
+ * flush; mremap() depends on this behaviour.
+ */
+ if (!flush_pid && (end - start) >= PMD_SIZE)
+ flush_pwc = true;
+
+ if (!mmu_has_feature(MMU_FTR_GTSE) && type == FLUSH_TYPE_GLOBAL) {
+ unsigned long type = H_RPTI_TYPE_TLB;
+ unsigned long tgt = H_RPTI_TARGET_CMMU;
+ unsigned long pg_sizes = psize_to_rpti_pgsize(mmu_virtual_psize);
+
+ if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
+ pg_sizes |= psize_to_rpti_pgsize(MMU_PAGE_2M);
+ if (atomic_read(&mm->context.copros) > 0)
+ tgt |= H_RPTI_TARGET_NMMU;
+ if (flush_pwc)
+ type |= H_RPTI_TYPE_PWC;
+ pseries_rpt_invalidate(pid, tgt, type, pg_sizes, start, end);
+ } else if (flush_pid) {
+ /*
+ * We are now flushing a range larger than PMD size, so force a RIC_FLUSH_ALL
+ */
+ if (type == FLUSH_TYPE_LOCAL) {
+ _tlbiel_pid(pid, RIC_FLUSH_ALL);
} else {
if (cputlb_use_tlbie()) {
- if (mm_needs_flush_escalation(mm))
- _tlbie_pid(pid, RIC_FLUSH_ALL);
- else
- _tlbie_pid(pid, RIC_FLUSH_TLB);
+ _tlbie_pid(pid, RIC_FLUSH_ALL);
} else {
- _tlbiel_pid_multicast(mm, pid, RIC_FLUSH_TLB);
+ _tlbiel_pid_multicast(mm, pid, RIC_FLUSH_ALL);
}
}
} else {
@@ -879,21 +1185,24 @@ is_local:
if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
hstart = (start + PMD_SIZE - 1) & PMD_MASK;
hend = end & PMD_MASK;
- if (hstart == hend)
- hflush = false;
- else
+ if (hstart < hend)
hflush = true;
}
- if (local) {
+ if (type == FLUSH_TYPE_LOCAL) {
asm volatile("ptesync": : :"memory");
+ if (flush_pwc)
+ /* For PWC, only one flush is needed */
+ __tlbiel_pid(pid, 0, RIC_FLUSH_PWC);
__tlbiel_va_range(start, end, pid, page_size, mmu_virtual_psize);
if (hflush)
__tlbiel_va_range(hstart, hend, pid,
PMD_SIZE, MMU_PAGE_2M);
- asm volatile("ptesync": : :"memory");
+ ppc_after_tlbiel_barrier();
} else if (cputlb_use_tlbie()) {
asm volatile("ptesync": : :"memory");
+ if (flush_pwc)
+ __tlbie_pid(pid, RIC_FLUSH_PWC);
__tlbie_va_range(start, end, pid, page_size, mmu_virtual_psize);
if (hflush)
__tlbie_va_range(hstart, hend, pid,
@@ -901,12 +1210,13 @@ is_local:
asm volatile("eieio; tlbsync; ptesync": : :"memory");
} else {
_tlbiel_va_range_multicast(mm,
- start, end, pid, page_size, mmu_virtual_psize, false);
+ start, end, pid, page_size, mmu_virtual_psize, flush_pwc);
if (hflush)
_tlbiel_va_range_multicast(mm,
- hstart, hend, pid, PMD_SIZE, MMU_PAGE_2M, false);
+ hstart, hend, pid, PMD_SIZE, MMU_PAGE_2M, flush_pwc);
}
}
+out:
preempt_enable();
}
@@ -977,9 +1287,6 @@ void radix__flush_all_lpid_guest(unsigned int lpid)
_tlbie_lpid_guest(lpid, RIC_FLUSH_ALL);
}
-static void radix__flush_tlb_pwc_range_psize(struct mm_struct *mm, unsigned long start,
- unsigned long end, int psize);
-
void radix__tlb_flush(struct mmu_gather *tlb)
{
int psize = 0;
@@ -1010,7 +1317,7 @@ void radix__tlb_flush(struct mmu_gather *tlb)
}
}
-static __always_inline void __radix__flush_tlb_range_psize(struct mm_struct *mm,
+static void __radix__flush_tlb_range_psize(struct mm_struct *mm,
unsigned long start, unsigned long end,
int psize, bool also_pwc)
{
@@ -1018,33 +1325,41 @@ static __always_inline void __radix__flush_tlb_range_psize(struct mm_struct *mm,
unsigned int page_shift = mmu_psize_defs[psize].shift;
unsigned long page_size = 1UL << page_shift;
unsigned long nr_pages = (end - start) >> page_shift;
- bool local, full;
+ bool fullmm = (end == TLB_FLUSH_ALL);
+ bool flush_pid;
+ enum tlb_flush_type type;
pid = mm->context.id;
if (unlikely(pid == MMU_NO_CONTEXT))
return;
+ fullmm = (end == TLB_FLUSH_ALL);
+
preempt_disable();
smp_mb(); /* see radix__flush_tlb_mm */
- if (!mm_is_thread_local(mm)) {
- if (unlikely(mm_is_singlethreaded(mm))) {
- if (end != TLB_FLUSH_ALL) {
- exit_flush_lazy_tlbs(mm);
- goto is_local;
- }
- }
- local = false;
- full = (end == TLB_FLUSH_ALL ||
- nr_pages > tlb_single_page_flush_ceiling);
- } else {
-is_local:
- local = true;
- full = (end == TLB_FLUSH_ALL ||
- nr_pages > tlb_local_single_page_flush_ceiling);
- }
-
- if (full) {
- if (local) {
+ type = flush_type_needed(mm, fullmm);
+ if (type == FLUSH_TYPE_NONE)
+ goto out;
+
+ if (fullmm)
+ flush_pid = true;
+ else if (type == FLUSH_TYPE_GLOBAL)
+ flush_pid = nr_pages > tlb_single_page_flush_ceiling;
+ else
+ flush_pid = nr_pages > tlb_local_single_page_flush_ceiling;
+
+ if (!mmu_has_feature(MMU_FTR_GTSE) && type == FLUSH_TYPE_GLOBAL) {
+ unsigned long tgt = H_RPTI_TARGET_CMMU;
+ unsigned long type = H_RPTI_TYPE_TLB;
+ unsigned long pg_sizes = psize_to_rpti_pgsize(psize);
+
+ if (also_pwc)
+ type |= H_RPTI_TYPE_PWC;
+ if (atomic_read(&mm->context.copros) > 0)
+ tgt |= H_RPTI_TARGET_NMMU;
+ pseries_rpt_invalidate(pid, tgt, type, pg_sizes, start, end);
+ } else if (flush_pid) {
+ if (type == FLUSH_TYPE_LOCAL) {
_tlbiel_pid(pid, also_pwc ? RIC_FLUSH_ALL : RIC_FLUSH_TLB);
} else {
if (cputlb_use_tlbie()) {
@@ -1060,7 +1375,7 @@ is_local:
}
} else {
- if (local)
+ if (type == FLUSH_TYPE_LOCAL)
_tlbiel_va_range(start, end, pid, page_size, psize, also_pwc);
else if (cputlb_use_tlbie())
_tlbie_va_range(start, end, pid, page_size, psize, also_pwc);
@@ -1068,6 +1383,7 @@ is_local:
_tlbiel_va_range_multicast(mm,
start, end, pid, page_size, psize, also_pwc);
}
+out:
preempt_enable();
}
@@ -1077,8 +1393,8 @@ void radix__flush_tlb_range_psize(struct mm_struct *mm, unsigned long start,
return __radix__flush_tlb_range_psize(mm, start, end, psize, false);
}
-static void radix__flush_tlb_pwc_range_psize(struct mm_struct *mm, unsigned long start,
- unsigned long end, int psize)
+void radix__flush_tlb_pwc_range_psize(struct mm_struct *mm, unsigned long start,
+ unsigned long end, int psize)
{
__radix__flush_tlb_range_psize(mm, start, end, psize, true);
}
@@ -1087,6 +1403,7 @@ static void radix__flush_tlb_pwc_range_psize(struct mm_struct *mm, unsigned long
void radix__flush_tlb_collapsed_pmd(struct mm_struct *mm, unsigned long addr)
{
unsigned long pid, end;
+ enum tlb_flush_type type;
pid = mm->context.id;
if (unlikely(pid == MMU_NO_CONTEXT))
@@ -1103,19 +1420,27 @@ void radix__flush_tlb_collapsed_pmd(struct mm_struct *mm, unsigned long addr)
/* Otherwise first do the PWC, then iterate the pages. */
preempt_disable();
smp_mb(); /* see radix__flush_tlb_mm */
- if (!mm_is_thread_local(mm)) {
- if (unlikely(mm_is_singlethreaded(mm))) {
- exit_flush_lazy_tlbs(mm);
- goto local;
- }
- if (cputlb_use_tlbie())
+ type = flush_type_needed(mm, false);
+ if (type == FLUSH_TYPE_LOCAL) {
+ _tlbiel_va_range(addr, end, pid, PAGE_SIZE, mmu_virtual_psize, true);
+ } else if (type == FLUSH_TYPE_GLOBAL) {
+ if (!mmu_has_feature(MMU_FTR_GTSE)) {
+ unsigned long tgt, type, pg_sizes;
+
+ tgt = H_RPTI_TARGET_CMMU;
+ type = H_RPTI_TYPE_TLB | H_RPTI_TYPE_PWC |
+ H_RPTI_TYPE_PRT;
+ pg_sizes = psize_to_rpti_pgsize(mmu_virtual_psize);
+
+ if (atomic_read(&mm->context.copros) > 0)
+ tgt |= H_RPTI_TARGET_NMMU;
+ pseries_rpt_invalidate(pid, tgt, type, pg_sizes,
+ addr, end);
+ } else if (cputlb_use_tlbie())
_tlbie_va_range(addr, end, pid, PAGE_SIZE, mmu_virtual_psize, true);
else
_tlbiel_va_range_multicast(mm,
addr, end, pid, PAGE_SIZE, mmu_virtual_psize, true);
- } else {
-local:
- _tlbiel_va_range(addr, end, pid, PAGE_SIZE, mmu_virtual_psize, true);
}
preempt_enable();
@@ -1154,47 +1479,68 @@ void radix__flush_tlb_all(void)
}
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
-extern void radix_kvm_prefetch_workaround(struct mm_struct *mm)
+/*
+ * Performs process-scoped invalidations for a given LPID
+ * as part of H_RPT_INVALIDATE hcall.
+ */
+void do_h_rpt_invalidate_prt(unsigned long pid, unsigned long lpid,
+ unsigned long type, unsigned long pg_sizes,
+ unsigned long start, unsigned long end)
{
- unsigned long pid = mm->context.id;
+ unsigned long psize, nr_pages;
+ struct mmu_psize_def *def;
+ bool flush_pid;
- if (unlikely(pid == MMU_NO_CONTEXT))
+ /*
+ * A H_RPTI_TYPE_ALL request implies RIC=3, hence
+ * do a single IS=1 based flush.
+ */
+ if ((type & H_RPTI_TYPE_ALL) == H_RPTI_TYPE_ALL) {
+ _tlbie_pid_lpid(pid, lpid, RIC_FLUSH_ALL);
return;
+ }
- if (!cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG))
- return;
+ if (type & H_RPTI_TYPE_PWC)
+ _tlbie_pid_lpid(pid, lpid, RIC_FLUSH_PWC);
- /*
- * If this context hasn't run on that CPU before and KVM is
- * around, there's a slim chance that the guest on another
- * CPU just brought in obsolete translation into the TLB of
- * this CPU due to a bad prefetch using the guest PID on
- * the way into the hypervisor.
- *
- * We work around this here. If KVM is possible, we check if
- * any sibling thread is in KVM. If it is, the window may exist
- * and thus we flush that PID from the core.
- *
- * A potential future improvement would be to mark which PIDs
- * have never been used on the system and avoid it if the PID
- * is new and the process has no other cpumask bit set.
- */
- if (cpu_has_feature(CPU_FTR_HVMODE) && radix_enabled()) {
- int cpu = smp_processor_id();
- int sib = cpu_first_thread_sibling(cpu);
- bool flush = false;
-
- for (; sib <= cpu_last_thread_sibling(cpu) && !flush; sib++) {
- if (sib == cpu)
- continue;
- if (!cpu_possible(sib))
- continue;
- if (paca_ptrs[sib]->kvm_hstate.kvm_vcpu)
- flush = true;
+ /* Full PID flush */
+ if (start == 0 && end == -1)
+ return _tlbie_pid_lpid(pid, lpid, RIC_FLUSH_TLB);
+
+ /* Do range invalidation for all the valid page sizes */
+ for (psize = 0; psize < MMU_PAGE_COUNT; psize++) {
+ def = &mmu_psize_defs[psize];
+ if (!(pg_sizes & def->h_rpt_pgsize))
+ continue;
+
+ nr_pages = (end - start) >> def->shift;
+ flush_pid = nr_pages > tlb_single_page_flush_ceiling;
+
+ /*
+ * If the number of pages spanning the range is above
+ * the ceiling, convert the request into a full PID flush.
+		 * Since a PID flush takes out all the page sizes, there
+ * is no need to consider remaining page sizes.
+ */
+ if (flush_pid) {
+ _tlbie_pid_lpid(pid, lpid, RIC_FLUSH_TLB);
+ return;
}
- if (flush)
- _tlbiel_pid(pid, RIC_FLUSH_ALL);
+ _tlbie_va_range_lpid(start, end, pid, lpid,
+ (1UL << def->shift), psize, false);
}
}
-EXPORT_SYMBOL_GPL(radix_kvm_prefetch_workaround);
+EXPORT_SYMBOL_GPL(do_h_rpt_invalidate_prt);
+
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
+
+static int __init create_tlb_single_page_flush_ceiling(void)
+{
+ debugfs_create_u32("tlb_single_page_flush_ceiling", 0600,
+ arch_debugfs_dir, &tlb_single_page_flush_ceiling);
+ debugfs_create_u32("tlb_local_single_page_flush_ceiling", 0600,
+ arch_debugfs_dir, &tlb_local_single_page_flush_ceiling);
+ return 0;
+}
+late_initcall(create_tlb_single_page_flush_ceiling);
+
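Editorial note (not part of the patch): the range-flush hunks above decide between one
PID-wide flush and per-page invalidations by comparing the page count of the request
against a ceiling, now exported through the two debugfs files created by
create_tlb_single_page_flush_ceiling(). A minimal sketch of that decision follows; the
helper name and standalone form are assumptions made for illustration only.

#include <stdbool.h>

/*
 * Sketch of the inline decision in __radix__flush_tlb_range_psize(): above the
 * (tunable) ceiling, a single PID-wide tlbie/tlbiel is cheaper than issuing one
 * invalidation per page.
 */
static bool want_full_pid_flush(unsigned long start, unsigned long end,
				unsigned int page_shift, unsigned long ceiling)
{
	unsigned long nr_pages = (end - start) >> page_shift;

	return nr_pages > ceiling;
}

/*
 * A caller would pass tlb_single_page_flush_ceiling for global flushes and
 * tlb_local_single_page_flush_ceiling for core-local ones, matching the debugfs
 * knobs added above.
 */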
diff --git a/arch/powerpc/mm/book3s64/slb.c b/arch/powerpc/mm/book3s64/slb.c
index 716204aee3da..6956f637a38c 100644
--- a/arch/powerpc/mm/book3s64/slb.c
+++ b/arch/powerpc/mm/book3s64/slb.c
@@ -9,8 +9,7 @@
* Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
*/
-#include <asm/asm-prototypes.h>
-#include <asm/pgtable.h>
+#include <asm/interrupt.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/paca.h>
@@ -21,38 +20,26 @@
#include <linux/compiler.h>
#include <linux/context_tracking.h>
#include <linux/mm_types.h>
+#include <linux/pgtable.h>
#include <asm/udbg.h>
#include <asm/code-patching.h>
-enum slb_index {
- LINEAR_INDEX = 0, /* Kernel linear map (0xc000000000000000) */
- KSTACK_INDEX = 1, /* Kernel stack map */
-};
+#include "internal.h"
-static long slb_allocate_user(struct mm_struct *mm, unsigned long ea);
-#define slb_esid_mask(ssize) \
- (((ssize) == MMU_SEGSIZE_256M)? ESID_MASK: ESID_MASK_1T)
+static long slb_allocate_user(struct mm_struct *mm, unsigned long ea);
-static inline unsigned long mk_esid_data(unsigned long ea, int ssize,
- enum slb_index index)
-{
- return (ea & slb_esid_mask(ssize)) | SLB_ESID_V | index;
-}
+bool stress_slb_enabled __initdata;
-static inline unsigned long __mk_vsid_data(unsigned long vsid, int ssize,
- unsigned long flags)
+static int __init parse_stress_slb(char *p)
{
- return (vsid << slb_vsid_shift(ssize)) | flags |
- ((unsigned long) ssize << SLB_VSID_SSIZE_SHIFT);
+ stress_slb_enabled = true;
+ return 0;
}
+early_param("stress_slb", parse_stress_slb);
-static inline unsigned long mk_vsid_data(unsigned long ea, int ssize,
- unsigned long flags)
-{
- return __mk_vsid_data(get_kernel_vsid(ea, ssize), ssize, flags);
-}
+__ro_after_init DEFINE_STATIC_KEY_FALSE(stress_slb_key);
static void assert_slb_presence(bool present, unsigned long ea)
{
@@ -68,7 +55,7 @@ static void assert_slb_presence(bool present, unsigned long ea)
* slbfee. requires bit 24 (PPC bit 39) be clear in RB. Hardware
* ignores all other bits from 0-27, so just clear them all.
*/
- ea &= ~((1UL << 28) - 1);
+ ea &= ~((1UL << SID_SHIFT) - 1);
asm volatile(__PPC_SLBFEE_DOT(%0, %1) : "=r"(tmp) : "r"(ea) : "cr0");
WARN_ON(present == (tmp == 0));
@@ -153,14 +140,42 @@ void slb_flush_all_realmode(void)
asm volatile("slbmte %0,%0; slbia" : : "r" (0));
}
+static __always_inline void __slb_flush_and_restore_bolted(bool preserve_kernel_lookaside)
+{
+ struct slb_shadow *p = get_slb_shadow();
+ unsigned long ksp_esid_data, ksp_vsid_data;
+ u32 ih;
+
+ /*
+ * SLBIA IH=1 on ISA v2.05 and newer processors may preserve lookaside
+ * information created with Class=0 entries, which we use for kernel
+ * SLB entries (the SLB entries themselves are still invalidated).
+ *
+ * Older processors will ignore this optimisation. Over-invalidation
+ * is fine because we never rely on lookaside information existing.
+ */
+ if (preserve_kernel_lookaside)
+ ih = 1;
+ else
+ ih = 0;
+
+ ksp_esid_data = be64_to_cpu(p->save_area[KSTACK_INDEX].esid);
+ ksp_vsid_data = be64_to_cpu(p->save_area[KSTACK_INDEX].vsid);
+
+ asm volatile(PPC_SLBIA(%0)" \n"
+ "slbmte %1, %2 \n"
+ :: "i" (ih),
+ "r" (ksp_vsid_data),
+ "r" (ksp_esid_data)
+ : "memory");
+}
+
/*
* This flushes non-bolted entries, it can be run in virtual mode. Must
* be called with interrupts disabled.
*/
void slb_flush_and_restore_bolted(void)
{
- struct slb_shadow *p = get_slb_shadow();
-
BUILD_BUG_ON(SLB_NUM_BOLTED != 2);
WARN_ON(!irqs_disabled());
@@ -171,13 +186,10 @@ void slb_flush_and_restore_bolted(void)
*/
hard_irq_disable();
- asm volatile("isync\n"
- "slbia\n"
- "slbmte %0, %1\n"
- "isync\n"
- :: "r" (be64_to_cpu(p->save_area[KSTACK_INDEX].vsid)),
- "r" (be64_to_cpu(p->save_area[KSTACK_INDEX].esid))
- : "memory");
+ isync();
+ __slb_flush_and_restore_bolted(false);
+ isync();
+
assert_slb_presence(true, get_paca()->kstack);
get_paca()->slb_cache_ptr = 0;
@@ -216,7 +228,6 @@ void slb_dump_contents(struct slb_entry *slb_ptr)
return;
pr_err("SLB contents of cpu 0x%x\n", smp_processor_id());
- pr_err("Last SLB entry inserted at slot %d\n", get_paca()->stab_rr);
for (i = 0; i < mmu_slb_size; i++) {
e = slb_ptr->esid;
@@ -226,34 +237,38 @@ void slb_dump_contents(struct slb_entry *slb_ptr)
if (!e && !v)
continue;
- pr_err("%02d %016lx %016lx\n", i, e, v);
+ pr_err("%02d %016lx %016lx %s\n", i, e, v,
+ (e & SLB_ESID_V) ? "VALID" : "NOT VALID");
- if (!(e & SLB_ESID_V)) {
- pr_err("\n");
+ if (!(e & SLB_ESID_V))
continue;
- }
+
llp = v & SLB_VSID_LLP;
if (v & SLB_VSID_B_1T) {
- pr_err(" 1T ESID=%9lx VSID=%13lx LLP:%3lx\n",
+ pr_err(" 1T ESID=%9lx VSID=%13lx LLP:%3lx\n",
GET_ESID_1T(e),
(v & ~SLB_VSID_B) >> SLB_VSID_SHIFT_1T, llp);
} else {
- pr_err(" 256M ESID=%9lx VSID=%13lx LLP:%3lx\n",
+ pr_err(" 256M ESID=%9lx VSID=%13lx LLP:%3lx\n",
GET_ESID(e),
(v & ~SLB_VSID_B) >> SLB_VSID_SHIFT, llp);
}
}
- pr_err("----------------------------------\n");
-
- /* Dump slb cache entires as well. */
- pr_err("SLB cache ptr value = %d\n", get_paca()->slb_save_cache_ptr);
- pr_err("Valid SLB cache entries:\n");
- n = min_t(int, get_paca()->slb_save_cache_ptr, SLB_CACHE_ENTRIES);
- for (i = 0; i < n; i++)
- pr_err("%02d EA[0-35]=%9x\n", i, get_paca()->slb_cache[i]);
- pr_err("Rest of SLB cache entries:\n");
- for (i = n; i < SLB_CACHE_ENTRIES; i++)
- pr_err("%02d EA[0-35]=%9x\n", i, get_paca()->slb_cache[i]);
+
+ if (!early_cpu_has_feature(CPU_FTR_ARCH_300)) {
+ /* RR is not so useful as it's often not used for allocation */
+ pr_err("SLB RR allocator index %d\n", get_paca()->stab_rr);
+
+		/* Dump slb cache entries as well. */
+ pr_err("SLB cache ptr value = %d\n", get_paca()->slb_save_cache_ptr);
+ pr_err("Valid SLB cache entries:\n");
+ n = min_t(int, get_paca()->slb_save_cache_ptr, SLB_CACHE_ENTRIES);
+ for (i = 0; i < n; i++)
+ pr_err("%02d EA[0-35]=%9x\n", i, get_paca()->slb_cache[i]);
+ pr_err("Rest of SLB cache entries:\n");
+ for (i = n; i < SLB_CACHE_ENTRIES; i++)
+ pr_err("%02d EA[0-35]=%9x\n", i, get_paca()->slb_cache[i]);
+ }
}
void slb_vmalloc_update(void)
@@ -332,7 +347,7 @@ void slb_setup_new_exec(void)
/*
* We have no good place to clear the slb preload cache on exec,
* flush_thread is about the earliest arch hook but that happens
- * after we switch to the mm and have aleady preloaded the SLBEs.
+ * after we switch to the mm and have already preloaded the SLBEs.
*
* For the most part that's probably okay to use entries from the
* previous exec, they will age out if unused. It may turn out to
@@ -400,6 +415,30 @@ void preload_new_slb_context(unsigned long start, unsigned long sp)
local_irq_enable();
}
+static void slb_cache_slbie_kernel(unsigned int index)
+{
+ unsigned long slbie_data = get_paca()->slb_cache[index];
+ unsigned long ksp = get_paca()->kstack;
+
+ slbie_data <<= SID_SHIFT;
+ slbie_data |= 0xc000000000000000ULL;
+ if ((ksp & slb_esid_mask(mmu_kernel_ssize)) == slbie_data)
+ return;
+ slbie_data |= mmu_kernel_ssize << SLBIE_SSIZE_SHIFT;
+
+ asm volatile("slbie %0" : : "r" (slbie_data));
+}
+
+static void slb_cache_slbie_user(unsigned int index)
+{
+ unsigned long slbie_data = get_paca()->slb_cache[index];
+
+ slbie_data <<= SID_SHIFT;
+ slbie_data |= user_segment_size(slbie_data) << SLBIE_SSIZE_SHIFT;
+ slbie_data |= SLBIE_C; /* user slbs have C=1 */
+
+ asm volatile("slbie %0" : : "r" (slbie_data));
+}
/* Flush all user entries from the segment table of the current processor. */
void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
@@ -414,8 +453,14 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
* which would update the slb_cache/slb_cache_ptr fields in the PACA.
*/
hard_irq_disable();
- asm volatile("isync" : : : "memory");
- if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+ isync();
+ if (stress_slb()) {
+ __slb_flush_and_restore_bolted(false);
+ isync();
+ get_paca()->slb_cache_ptr = 0;
+ get_paca()->slb_kern_bitmap = (1U << SLB_NUM_BOLTED) - 1;
+
+ } else if (cpu_has_feature(CPU_FTR_ARCH_300)) {
/*
* SLBIA IH=3 invalidates all Class=1 SLBEs and their
* associated lookaside structures, which matches what
@@ -423,47 +468,29 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
* cache.
*/
asm volatile(PPC_SLBIA(3));
+
} else {
unsigned long offset = get_paca()->slb_cache_ptr;
if (!mmu_has_feature(MMU_FTR_NO_SLBIE_B) &&
offset <= SLB_CACHE_ENTRIES) {
- unsigned long slbie_data = 0;
-
- for (i = 0; i < offset; i++) {
- unsigned long ea;
-
- ea = (unsigned long)
- get_paca()->slb_cache[i] << SID_SHIFT;
- /*
- * Could assert_slb_presence(true) here, but
- * hypervisor or machine check could have come
- * in and removed the entry at this point.
- */
-
- slbie_data = ea;
- slbie_data |= user_segment_size(slbie_data)
- << SLBIE_SSIZE_SHIFT;
- slbie_data |= SLBIE_C; /* user slbs have C=1 */
- asm volatile("slbie %0" : : "r" (slbie_data));
- }
+ /*
+ * Could assert_slb_presence(true) here, but
+ * hypervisor or machine check could have come
+ * in and removed the entry at this point.
+ */
+
+ for (i = 0; i < offset; i++)
+ slb_cache_slbie_user(i);
/* Workaround POWER5 < DD2.1 issue */
if (!cpu_has_feature(CPU_FTR_ARCH_207S) && offset == 1)
- asm volatile("slbie %0" : : "r" (slbie_data));
+ slb_cache_slbie_user(0);
} else {
- struct slb_shadow *p = get_slb_shadow();
- unsigned long ksp_esid_data =
- be64_to_cpu(p->save_area[KSTACK_INDEX].esid);
- unsigned long ksp_vsid_data =
- be64_to_cpu(p->save_area[KSTACK_INDEX].vsid);
-
- asm volatile(PPC_SLBIA(1) "\n"
- "slbmte %0,%1\n"
- "isync"
- :: "r"(ksp_vsid_data),
- "r"(ksp_esid_data));
+ /* Flush but retain kernel lookaside information */
+ __slb_flush_and_restore_bolted(true);
+ isync();
get_paca()->slb_kern_bitmap = (1U << SLB_NUM_BOLTED) - 1;
}
@@ -503,7 +530,7 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
* address accesses by the kernel (user mode won't happen until
* rfid, which is safe).
*/
- asm volatile("isync" : : : "memory");
+ isync();
}
void slb_set_size(u16 size)
@@ -571,6 +598,9 @@ static void slb_cache_update(unsigned long esid_data)
if (cpu_has_feature(CPU_FTR_ARCH_300))
return; /* ISAv3.0B and later does not use slb_cache */
+ if (stress_slb())
+ return;
+
/*
* Now update slb cache entries
*/
@@ -580,12 +610,12 @@ static void slb_cache_update(unsigned long esid_data)
* We have space in slb cache for optimized switch_slb().
* Top 36 bits from esid_data as per ISA
*/
- local_paca->slb_cache[slb_cache_index++] = esid_data >> 28;
+ local_paca->slb_cache[slb_cache_index++] = esid_data >> SID_SHIFT;
local_paca->slb_cache_ptr++;
} else {
/*
* Our cache is full and the current cache content strictly
- * doesn't indicate the active SLB conents. Bump the ptr
+ * doesn't indicate the active SLB contents. Bump the ptr
* so that switch_slb() will ignore the cache.
*/
local_paca->slb_cache_ptr = SLB_CACHE_ENTRIES + 1;
@@ -671,6 +701,28 @@ static long slb_insert_entry(unsigned long ea, unsigned long context,
* accesses user memory before it returns to userspace with rfid.
*/
assert_slb_presence(false, ea);
+ if (stress_slb()) {
+ int slb_cache_index = local_paca->slb_cache_ptr;
+
+ /*
+	 * stress_slb() does not use the slb cache, so repurpose it as a
+ * cache of inserted (non-bolted) kernel SLB entries. All
+ * non-bolted kernel entries are flushed on any user fault,
+	 * or if there are already 3 non-bolted kernel entries.
+ */
+ BUILD_BUG_ON(SLB_CACHE_ENTRIES < 3);
+ if (!kernel || slb_cache_index == 3) {
+ int i;
+
+ for (i = 0; i < slb_cache_index; i++)
+ slb_cache_slbie_kernel(i);
+ slb_cache_index = 0;
+ }
+
+ if (kernel)
+ local_paca->slb_cache[slb_cache_index++] = esid_data >> SID_SHIFT;
+ local_paca->slb_cache_ptr = slb_cache_index;
+ }
asm volatile("slbmte %0, %1" : : "r" (vsid_data), "r" (esid_data));
barrier();
@@ -689,8 +741,8 @@ static long slb_allocate_kernel(unsigned long ea, unsigned long id)
if (id == LINEAR_MAP_REGION_ID) {
- /* We only support upto MAX_PHYSMEM_BITS */
- if ((ea & EA_MASK) > (1UL << MAX_PHYSMEM_BITS))
+ /* We only support upto H_MAX_PHYSMEM_BITS */
+		/* We only support up to H_MAX_PHYSMEM_BITS */
return -EFAULT;
flags = SLB_VSID_KERNEL | mmu_psize_defs[mmu_linear_psize].sllp;
@@ -761,30 +813,33 @@ static long slb_allocate_user(struct mm_struct *mm, unsigned long ea)
return slb_insert_entry(ea, context, flags, ssize, false);
}
-long do_slb_fault(struct pt_regs *regs, unsigned long ea)
+DEFINE_INTERRUPT_HANDLER_RAW(do_slb_fault)
{
+ unsigned long ea = regs->dar;
unsigned long id = get_region_id(ea);
/* IRQs are not reconciled here, so can't check irqs_disabled */
VM_WARN_ON(mfmsr() & MSR_EE);
- if (unlikely(!(regs->msr & MSR_RI)))
+ if (regs_is_unrecoverable(regs))
return -EINVAL;
/*
- * SLB kernel faults must be very careful not to touch anything
- * that is not bolted. E.g., PACA and global variables are okay,
- * mm->context stuff is not.
- *
- * SLB user faults can access all of kernel memory, but must be
- * careful not to touch things like IRQ state because it is not
- * "reconciled" here. The difficulty is that we must use
- * fast_exception_return to return from kernel SLB faults without
- * looking at possible non-bolted memory. We could test user vs
- * kernel faults in the interrupt handler asm and do a full fault,
- * reconcile, ret_from_except for user faults which would make them
- * first class kernel code. But for performance it's probably nicer
- * if they go via fast_exception_return too.
+ * SLB kernel faults must be very careful not to touch anything that is
+ * not bolted. E.g., PACA and global variables are okay, mm->context
+ * stuff is not. SLB user faults may access all of memory (and induce
+ * one recursive SLB kernel fault), so the kernel fault must not
+ * trample on the user fault state at those points.
+ */
+
+ /*
+ * This is a raw interrupt handler, for performance, so that
+ * fast_interrupt_return can be used. The handler must not touch local
+ * irq state, or schedule. We could test for usermode and upgrade to a
+ * normal process context (synchronous) interrupt for those, which
+ * would make them first-class kernel code and able to be traced and
+	 * instrumented; although performance would suffer a bit, it would
+ * probably be a good tradeoff.
*/
if (id >= LINEAR_MAP_REGION_ID) {
long err;
@@ -812,17 +867,3 @@ long do_slb_fault(struct pt_regs *regs, unsigned long ea)
return err;
}
}
-
-void do_bad_slb_fault(struct pt_regs *regs, unsigned long ea, long err)
-{
- if (err == -EFAULT) {
- if (user_mode(regs))
- _exception(SIGSEGV, regs, SEGV_BNDERR, ea);
- else
- bad_page_fault(regs, ea, SIGSEGV);
- } else if (err == -EINVAL) {
- unrecoverable_exception(regs);
- } else {
- BUG();
- }
-}
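Editorial note (not part of the patch): the slb_cache_slbie_user() and
slb_cache_slbie_kernel() helpers factored out above rebuild the RB operand of the
slbie instruction from the 36-bit ESID cached in the PACA. The sketch below mirrors
that construction; the EX_* constants are illustrative placeholders for the kernel's
SID_SHIFT, SLBIE_SSIZE_SHIFT and SLBIE_C.

#include <stdint.h>

#define EX_SID_SHIFT	28		/* placeholder for SID_SHIFT (256MB segments) */
#define EX_SSIZE_SHIFT	25		/* placeholder for SLBIE_SSIZE_SHIFT */
#define EX_CLASS_C	(1UL << 27)	/* placeholder for SLBIE_C (user entries are Class=1) */

/* Rebuild the operand that "slbie RB" expects from a cached user ESID. */
static uint64_t build_user_slbie_rb(uint32_t cached_esid, uint64_t ssize)
{
	uint64_t rb = (uint64_t)cached_esid << EX_SID_SHIFT;	/* effective segment address */

	rb |= ssize << EX_SSIZE_SHIFT;	/* 256MB vs 1T segment size */
	rb |= EX_CLASS_C;		/* user SLB entries carry C=1 */
	return rb;
}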
diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/book3s64/slice.c
index dffe1a45b6ed..c0b58afb9a47 100644
--- a/arch/powerpc/mm/slice.c
+++ b/arch/powerpc/mm/book3s64/slice.c
@@ -276,20 +276,18 @@ static bool slice_scan_available(unsigned long addr,
}
static unsigned long slice_find_area_bottomup(struct mm_struct *mm,
- unsigned long len,
+ unsigned long addr, unsigned long len,
const struct slice_mask *available,
int psize, unsigned long high_limit)
{
int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
- unsigned long addr, found, next_end;
+ unsigned long found, next_end;
struct vm_unmapped_area_info info;
info.flags = 0;
info.length = len;
info.align_mask = PAGE_MASK & ((1ul << pshift) - 1);
info.align_offset = 0;
-
- addr = TASK_UNMAPPED_BASE;
/*
* Check till the allow max value for this mmap request
*/
@@ -322,12 +320,12 @@ static unsigned long slice_find_area_bottomup(struct mm_struct *mm,
}
static unsigned long slice_find_area_topdown(struct mm_struct *mm,
- unsigned long len,
+ unsigned long addr, unsigned long len,
const struct slice_mask *available,
int psize, unsigned long high_limit)
{
int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
- unsigned long addr, found, prev;
+ unsigned long found, prev;
struct vm_unmapped_area_info info;
unsigned long min_addr = max(PAGE_SIZE, mmap_min_addr);
@@ -335,8 +333,6 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
info.length = len;
info.align_mask = PAGE_MASK & ((1ul << pshift) - 1);
info.align_offset = 0;
-
- addr = mm->mmap_base;
/*
* If we are trying to allocate above DEFAULT_MAP_WINDOW
* Add the different to the mmap_base.
@@ -377,7 +373,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
* can happen with large stack limits and large mmap()
* allocations.
*/
- return slice_find_area_bottomup(mm, len, available, psize, high_limit);
+ return slice_find_area_bottomup(mm, TASK_UNMAPPED_BASE, len, available, psize, high_limit);
}
@@ -386,9 +382,9 @@ static unsigned long slice_find_area(struct mm_struct *mm, unsigned long len,
int topdown, unsigned long high_limit)
{
if (topdown)
- return slice_find_area_topdown(mm, len, mask, psize, high_limit);
+ return slice_find_area_topdown(mm, mm->mmap_base, len, mask, psize, high_limit);
else
- return slice_find_area_bottomup(mm, len, mask, psize, high_limit);
+ return slice_find_area_bottomup(mm, mm->mmap_base, len, mask, psize, high_limit);
}
static inline void slice_copy_mask(struct slice_mask *dst,
@@ -478,7 +474,7 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
/* If hint, make sure it matches our alignment restrictions */
if (!fixed && addr) {
- addr = _ALIGN_UP(addr, page_size);
+ addr = ALIGN(addr, page_size);
slice_dbg(" aligned addr=%lx\n", addr);
/* Ignore hint if it's too large or overlaps a VMA */
if (addr > high_limit - len || addr < mmap_min_addr ||
@@ -645,6 +641,9 @@ unsigned long arch_get_unmapped_area(struct file *filp,
unsigned long pgoff,
unsigned long flags)
{
+ if (radix_enabled())
+ return generic_get_unmapped_area(filp, addr, len, pgoff, flags);
+
return slice_get_unmapped_area(addr, len, flags,
mm_ctx_user_psize(&current->mm->context), 0);
}
@@ -655,6 +654,9 @@ unsigned long arch_get_unmapped_area_topdown(struct file *filp,
const unsigned long pgoff,
const unsigned long flags)
{
+ if (radix_enabled())
+ return generic_get_unmapped_area_topdown(filp, addr0, len, pgoff, flags);
+
return slice_get_unmapped_area(addr0, len, flags,
mm_ctx_user_psize(&current->mm->context), 1);
}
@@ -712,7 +714,6 @@ void slice_init_new_context_exec(struct mm_struct *mm)
bitmap_fill(mask->high_slices, SLICE_NUM_HIGH);
}
-#ifdef CONFIG_PPC_BOOK3S_64
void slice_setup_new_exec(void)
{
struct mm_struct *mm = current->mm;
@@ -724,7 +725,6 @@ void slice_setup_new_exec(void)
mm_ctx_set_slb_addr_limit(&mm->context, DEFAULT_MAP_WINDOW);
}
-#endif
void slice_set_range_psize(struct mm_struct *mm, unsigned long start,
unsigned long len, unsigned int psize)
@@ -779,4 +779,29 @@ int slice_is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
return !slice_check_range_fits(mm, maskp, addr, len);
}
+
+unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
+{
+	/* With radix we don't use slice, so derive it from vma */
+ if (radix_enabled())
+ return vma_kernel_pagesize(vma);
+
+ return 1UL << mmu_psize_to_shift(get_slice_psize(vma->vm_mm, vma->vm_start));
+}
+
+static int file_to_psize(struct file *file)
+{
+ struct hstate *hstate = hstate_file(file);
+ return shift_to_mmu_psize(huge_page_shift(hstate));
+}
+
+unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
+ unsigned long len, unsigned long pgoff,
+ unsigned long flags)
+{
+ if (radix_enabled())
+ return generic_hugetlb_get_unmapped_area(file, addr, len, pgoff, flags);
+
+ return slice_get_unmapped_area(addr, len, flags, file_to_psize(file), 1);
+}
#endif
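Editorial note (not part of the patch): slice_find_area_bottomup/topdown above
constrain vm_unmapped_area() with an alignment mask derived from the slice page-size
shift, so returned addresses stay naturally aligned to that page size. The
stand-alone program below just evaluates that expression for assumed values (64K base
pages, a 16MB slice psize); it is illustration, not kernel code.

#include <stdio.h>

int main(void)
{
	unsigned long page_shift = 16;		/* assumed 64K base page size */
	unsigned long page_mask = ~((1UL << page_shift) - 1);
	unsigned long pshift = 24;		/* assumed 16MB slice page size */
	unsigned long align_mask = page_mask & ((1UL << pshift) - 1);

	/* vm_unmapped_area() keeps these bits clear, giving 16MB alignment */
	printf("align_mask = %#lx\n", align_mask);	/* prints 0xff0000 */
	return 0;
}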
diff --git a/arch/powerpc/mm/book3s64/subpage_prot.c b/arch/powerpc/mm/book3s64/subpage_prot.c
index 2ef24a53f4c9..d73b3b4176e8 100644
--- a/arch/powerpc/mm/book3s64/subpage_prot.c
+++ b/arch/powerpc/mm/book3s64/subpage_prot.c
@@ -11,7 +11,7 @@
#include <linux/hugetlb.h>
#include <linux/syscalls.h>
-#include <asm/pgtable.h>
+#include <linux/pgtable.h>
#include <linux/uaccess.h>
/*
@@ -54,15 +54,17 @@ static void hpte_flush_range(struct mm_struct *mm, unsigned long addr,
int npages)
{
pgd_t *pgd;
+ p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
spinlock_t *ptl;
pgd = pgd_offset(mm, addr);
- if (pgd_none(*pgd))
+ p4d = p4d_offset(pgd, addr);
+ if (p4d_none(*p4d))
return;
- pud = pud_offset(pgd, addr);
+ pud = pud_offset(p4d, addr);
if (pud_none(*pud))
return;
pmd = pmd_offset(pud, addr);
@@ -92,7 +94,7 @@ static void subpage_prot_clear(unsigned long addr, unsigned long len)
size_t nw;
unsigned long next, limit;
- down_write(&mm->mmap_sem);
+ mmap_write_lock(mm);
spt = mm_ctx_subpage_prot(&mm->context);
if (!spt)
@@ -127,7 +129,7 @@ static void subpage_prot_clear(unsigned long addr, unsigned long len)
}
err_out:
- up_write(&mm->mmap_sem);
+ mmap_write_unlock(mm);
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -147,24 +149,15 @@ static void subpage_mark_vma_nohuge(struct mm_struct *mm, unsigned long addr,
unsigned long len)
{
struct vm_area_struct *vma;
+ VMA_ITERATOR(vmi, mm, addr);
/*
* We don't try too hard, we just mark all the vma in that range
* VM_NOHUGEPAGE and split them.
*/
- vma = find_vma(mm, addr);
- /*
- * If the range is in unmapped range, just return
- */
- if (vma && ((addr + len) <= vma->vm_start))
- return;
-
- while (vma) {
- if (vma->vm_start >= (addr + len))
- break;
+ for_each_vma_range(vmi, vma, addr + len) {
vma->vm_flags |= VM_NOHUGEPAGE;
walk_page_vma(vma, &subpage_walk_ops, NULL);
- vma = vma->vm_next;
}
}
#else
@@ -217,13 +210,13 @@ SYSCALL_DEFINE3(subpage_prot, unsigned long, addr,
if (!access_ok(map, (len >> PAGE_SHIFT) * sizeof(u32)))
return -EFAULT;
- down_write(&mm->mmap_sem);
+ mmap_write_lock(mm);
spt = mm_ctx_subpage_prot(&mm->context);
if (!spt) {
/*
* Allocate subpage prot table if not already done.
- * Do this with mmap_sem held
+ * Do this with mmap_lock held
*/
spt = kzalloc(sizeof(struct subpage_prot_table), GFP_KERNEL);
if (!spt) {
@@ -267,11 +260,11 @@ SYSCALL_DEFINE3(subpage_prot, unsigned long, addr,
if (addr + (nw << PAGE_SHIFT) > next)
nw = (next - addr) >> PAGE_SHIFT;
- up_write(&mm->mmap_sem);
+ mmap_write_unlock(mm);
if (__copy_from_user(spp, map, nw * sizeof(u32)))
return -EFAULT;
map += nw;
- down_write(&mm->mmap_sem);
+ mmap_write_lock(mm);
/* now flush any existing HPTEs for the range */
hpte_flush_range(mm, addr, nw);
@@ -280,6 +273,6 @@ SYSCALL_DEFINE3(subpage_prot, unsigned long, addr,
spt->maxaddr = limit;
err = 0;
out:
- up_write(&mm->mmap_sem);
+ mmap_write_unlock(mm);
return err;
}
diff --git a/arch/powerpc/mm/book3s64/trace.c b/arch/powerpc/mm/book3s64/trace.c
new file mode 100644
index 000000000000..ccd64b5e6cac
--- /dev/null
+++ b/arch/powerpc/mm/book3s64/trace.c
@@ -0,0 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * This file is for defining trace points and trace related helpers.
+ */
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#include <trace/events/thp.h>
+#endif
diff --git a/arch/powerpc/mm/cacheflush.c b/arch/powerpc/mm/cacheflush.c
new file mode 100644
index 000000000000..0e9b4879c0f9
--- /dev/null
+++ b/arch/powerpc/mm/cacheflush.c
@@ -0,0 +1,234 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/highmem.h>
+#include <linux/kprobes.h>
+
+/**
+ * flush_coherent_icache() - if a CPU has a coherent icache, flush it
+ * Return true if the cache was flushed, false otherwise
+ */
+static inline bool flush_coherent_icache(void)
+{
+ /*
+ * For a snooping icache, we still need a dummy icbi to purge all the
+ * prefetched instructions from the ifetch buffers. We also need a sync
+ * before the icbi to order the actual stores to memory that might
+ * have modified instructions with the icbi.
+ */
+ if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE)) {
+ mb(); /* sync */
+ icbi((void *)PAGE_OFFSET);
+ mb(); /* sync */
+ isync();
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * invalidate_icache_range() - Flush the icache by issuing icbi across an address range
+ * @start: the start address
+ * @stop: the stop address (exclusive)
+ */
+static void invalidate_icache_range(unsigned long start, unsigned long stop)
+{
+ unsigned long shift = l1_icache_shift();
+ unsigned long bytes = l1_icache_bytes();
+ char *addr = (char *)(start & ~(bytes - 1));
+ unsigned long size = stop - (unsigned long)addr + (bytes - 1);
+ unsigned long i;
+
+ for (i = 0; i < size >> shift; i++, addr += bytes)
+ icbi(addr);
+
+ mb(); /* sync */
+ isync();
+}
+
+/**
+ * flush_icache_range: Write any modified data cache blocks out to memory
+ * and invalidate the corresponding blocks in the instruction cache
+ *
+ * Generic code will call this after writing memory, before executing from it.
+ *
+ * @start: the start address
+ * @stop: the stop address (exclusive)
+ */
+void flush_icache_range(unsigned long start, unsigned long stop)
+{
+ if (flush_coherent_icache())
+ return;
+
+ clean_dcache_range(start, stop);
+
+ if (IS_ENABLED(CONFIG_44x)) {
+ /*
+ * Flash invalidate on 44x because we are passed kmapped
+ * addresses and this doesn't work for userspace pages due to
+ * the virtually tagged icache.
+ */
+ iccci((void *)start);
+ mb(); /* sync */
+ isync();
+ } else
+ invalidate_icache_range(start, stop);
+}
+EXPORT_SYMBOL(flush_icache_range);
+
+#ifdef CONFIG_HIGHMEM
+/**
+ * flush_dcache_icache_phys() - Flush a page by its physical address
+ * @physaddr: the physical address of the page
+ */
+static void flush_dcache_icache_phys(unsigned long physaddr)
+{
+ unsigned long bytes = l1_dcache_bytes();
+ unsigned long nb = PAGE_SIZE / bytes;
+ unsigned long addr = physaddr & PAGE_MASK;
+ unsigned long msr, msr0;
+ unsigned long loop1 = addr, loop2 = addr;
+
+ msr0 = mfmsr();
+ msr = msr0 & ~MSR_DR;
+ /*
+ * This must remain as ASM to prevent potential memory accesses
+ * while the data MMU is disabled
+ */
+ asm volatile(
+ " mtctr %2;\n"
+ " mtmsr %3;\n"
+ " isync;\n"
+ "0: dcbst 0, %0;\n"
+ " addi %0, %0, %4;\n"
+ " bdnz 0b;\n"
+ " sync;\n"
+ " mtctr %2;\n"
+ "1: icbi 0, %1;\n"
+ " addi %1, %1, %4;\n"
+ " bdnz 1b;\n"
+ " sync;\n"
+ " mtmsr %5;\n"
+ " isync;\n"
+ : "+&r" (loop1), "+&r" (loop2)
+ : "r" (nb), "r" (msr), "i" (bytes), "r" (msr0)
+ : "ctr", "memory");
+}
+NOKPROBE_SYMBOL(flush_dcache_icache_phys);
+#else
+static void flush_dcache_icache_phys(unsigned long physaddr)
+{
+}
+#endif
+
+/**
+ * __flush_dcache_icache(): Flush a particular page from the data cache to RAM.
+ * Note: this is necessary because the instruction cache does *not*
+ * snoop from the data cache.
+ *
+ * @p: the address of the page to flush
+ */
+static void __flush_dcache_icache(void *p)
+{
+ unsigned long addr = (unsigned long)p & PAGE_MASK;
+
+ clean_dcache_range(addr, addr + PAGE_SIZE);
+
+ /*
+ * We don't flush the icache on 44x. Those have a virtual icache and we
+ * don't have access to the virtual address here (it's not the page
+ * vaddr but where it's mapped in user space). The flushing of the
+ * icache on these is handled elsewhere, when a change in the address
+ * space occurs, before returning to user space.
+ */
+
+ if (mmu_has_feature(MMU_FTR_TYPE_44x))
+ return;
+
+ invalidate_icache_range(addr, addr + PAGE_SIZE);
+}
+
+static void flush_dcache_icache_hugepage(struct page *page)
+{
+ int i;
+ int nr = compound_nr(page);
+
+ if (!PageHighMem(page)) {
+ for (i = 0; i < nr; i++)
+ __flush_dcache_icache(lowmem_page_address(page + i));
+ } else {
+ for (i = 0; i < nr; i++) {
+ void *start = kmap_local_page(page + i);
+
+ __flush_dcache_icache(start);
+ kunmap_local(start);
+ }
+ }
+}
+
+void flush_dcache_icache_page(struct page *page)
+{
+ if (flush_coherent_icache())
+ return;
+
+ if (PageCompound(page))
+ return flush_dcache_icache_hugepage(page);
+
+ if (!PageHighMem(page)) {
+ __flush_dcache_icache(lowmem_page_address(page));
+ } else if (IS_ENABLED(CONFIG_BOOKE) || sizeof(phys_addr_t) > sizeof(void *)) {
+ void *start = kmap_local_page(page);
+
+ __flush_dcache_icache(start);
+ kunmap_local(start);
+ } else {
+ flush_dcache_icache_phys(page_to_phys(page));
+ }
+}
+EXPORT_SYMBOL(flush_dcache_icache_page);
+
+void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
+{
+ clear_page(page);
+
+ /*
+ * We shouldn't have to do this, but some versions of glibc
+ * require it (ld.so assumes zero filled pages are icache clean)
+ * - Anton
+ */
+ flush_dcache_page(pg);
+}
+EXPORT_SYMBOL(clear_user_page);
+
+void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
+ struct page *pg)
+{
+ copy_page(vto, vfrom);
+
+ /*
+ * We should be able to use the following optimisation, however
+ * there are two problems.
+ * Firstly a bug in some versions of binutils meant PLT sections
+ * were not marked executable.
+ * Secondly the first word in the GOT section is blrl, used
+ * to establish the GOT address. Until recently the GOT was
+ * not marked executable.
+ * - Anton
+ */
+#if 0
+ if (!vma->vm_file && ((vma->vm_flags & VM_EXEC) == 0))
+ return;
+#endif
+
+ flush_dcache_page(pg);
+}
+
+void flush_icache_user_page(struct vm_area_struct *vma, struct page *page,
+ unsigned long addr, int len)
+{
+ void *maddr;
+
+ maddr = kmap_local_page(page) + (addr & ~PAGE_MASK);
+ flush_icache_range((unsigned long)maddr, (unsigned long)maddr + len);
+ kunmap_local(maddr);
+}
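Editorial note (not part of the patch): invalidate_icache_range() above rounds the
start address down to a cache-block boundary and issues one icbi per block covering
the range. The stand-alone program below reproduces that arithmetic with an assumed
128-byte L1 icache line; the real code reads the line size from the CPU feature data.

#include <stdio.h>

int main(void)
{
	unsigned long bytes = 128, shift = 7;	/* assumed icache block size and its log2 */
	unsigned long start = 0x10000043, stop = 0x10000243;

	unsigned long addr = start & ~(bytes - 1);	/* round down to a block boundary */
	unsigned long size = stop - addr + (bytes - 1);	/* round the span up to whole blocks */
	unsigned long nr_blocks = size >> shift;

	/* each of these would be an icbi on real hardware */
	printf("first block %#lx, %lu icbi operations\n", addr, nr_blocks);
	return 0;
}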
diff --git a/arch/powerpc/mm/copro_fault.c b/arch/powerpc/mm/copro_fault.c
index beb060b96632..7c507fb48182 100644
--- a/arch/powerpc/mm/copro_fault.c
+++ b/arch/powerpc/mm/copro_fault.c
@@ -33,7 +33,7 @@ int copro_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
if (mm->pgd == NULL)
return -EFAULT;
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
ret = -EFAULT;
vma = find_vma(mm, ea);
if (!vma)
@@ -64,7 +64,12 @@ int copro_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
}
ret = 0;
- *flt = handle_mm_fault(vma, ea, is_write ? FAULT_FLAG_WRITE : 0);
+ *flt = handle_mm_fault(vma, ea, is_write ? FAULT_FLAG_WRITE : 0, NULL);
+
+ /* The fault is fully completed (including releasing mmap lock) */
+ if (*flt & VM_FAULT_COMPLETED)
+ return 0;
+
if (unlikely(*flt & VM_FAULT_ERROR)) {
if (*flt & VM_FAULT_OOM) {
ret = -ENOMEM;
@@ -76,17 +81,13 @@ int copro_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
BUG();
}
- if (*flt & VM_FAULT_MAJOR)
- current->maj_flt++;
- else
- current->min_flt++;
-
out_unlock:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
return ret;
}
EXPORT_SYMBOL_GPL(copro_handle_mm_fault);
+#ifdef CONFIG_PPC_64S_HASH_MMU
int copro_calculate_slb(struct mm_struct *mm, u64 ea, struct copro_slb *slb)
{
u64 vsid, vsidkey;
@@ -151,3 +152,4 @@ void copro_flush_all_slbs(struct mm_struct *mm)
cxl_slbia(mm);
}
EXPORT_SYMBOL_GPL(copro_flush_all_slbs);
+#endif
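Editorial note (not part of the patch): copro_handle_mm_fault() above adopts the
VM_FAULT_COMPLETED convention, where the core MM may finish the fault and drop the
mmap lock itself. A condensed sketch of that calling pattern follows; the function
name is hypothetical and the error handling is simplified.

#include <linux/mm.h>

static int example_fault_path(struct mm_struct *mm, unsigned long ea, bool is_write)
{
	struct vm_area_struct *vma;
	vm_fault_t flt;

	mmap_read_lock(mm);
	vma = find_vma(mm, ea);
	if (!vma || vma->vm_start > ea) {
		mmap_read_unlock(mm);
		return -EFAULT;
	}

	flt = handle_mm_fault(vma, ea, is_write ? FAULT_FLAG_WRITE : 0, NULL);

	/* The core MM completed the fault and already released the mmap lock. */
	if (flt & VM_FAULT_COMPLETED)
		return 0;

	mmap_read_unlock(mm);
	return (flt & VM_FAULT_ERROR) ? -EFAULT : 0;
}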
diff --git a/arch/powerpc/mm/dma-noncoherent.c b/arch/powerpc/mm/dma-noncoherent.c
index 5ab4f868e919..30260b5d146d 100644
--- a/arch/powerpc/mm/dma-noncoherent.c
+++ b/arch/powerpc/mm/dma-noncoherent.c
@@ -11,7 +11,7 @@
#include <linux/types.h>
#include <linux/highmem.h>
#include <linux/dma-direct.h>
-#include <linux/dma-noncoherent.h>
+#include <linux/dma-map-ops.h>
#include <asm/tlbflush.h>
#include <asm/dma.h>
diff --git a/arch/powerpc/mm/drmem.c b/arch/powerpc/mm/drmem.c
index 59327cefbc6a..2369d1bf2411 100644
--- a/arch/powerpc/mm/drmem.c
+++ b/arch/powerpc/mm/drmem.c
@@ -11,11 +11,14 @@
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/memblock.h>
-#include <asm/prom.h>
+#include <linux/slab.h>
#include <asm/drmem.h>
+static int n_root_addr_cells, n_root_size_cells;
+
static struct drmem_lmb_info __drmem_info;
struct drmem_lmb_info *drmem_info = &__drmem_info;
+static bool in_drmem_update;
u64 drmem_lmb_memory_max(void)
{
@@ -176,6 +179,11 @@ int drmem_update_dt(void)
if (!memory)
return -1;
+ /*
+	 * Set in_drmem_update to prevent the notifier callback from processing
+	 * the DT property again, since the change is coming from the LMB tree.
+ */
+ in_drmem_update = true;
prop = of_find_property(memory, "ibm,dynamic-memory", NULL);
if (prop) {
rc = drmem_update_dt_v1(memory, prop);
@@ -184,17 +192,19 @@ int drmem_update_dt(void)
if (prop)
rc = drmem_update_dt_v2(memory, prop);
}
+ in_drmem_update = false;
of_node_put(memory);
return rc;
}
-static void __init read_drconf_v1_cell(struct drmem_lmb *lmb,
+static void read_drconf_v1_cell(struct drmem_lmb *lmb,
const __be32 **prop)
{
const __be32 *p = *prop;
- lmb->base_addr = dt_mem_next_cell(dt_root_addr_cells, &p);
+ lmb->base_addr = of_read_number(p, n_root_addr_cells);
+ p += n_root_addr_cells;
lmb->drc_index = of_read_number(p++, 1);
p++; /* skip reserved field */
@@ -205,29 +215,33 @@ static void __init read_drconf_v1_cell(struct drmem_lmb *lmb,
*prop = p;
}
-static void __init __walk_drmem_v1_lmbs(const __be32 *prop, const __be32 *usm,
- void (*func)(struct drmem_lmb *, const __be32 **))
+static int
+__walk_drmem_v1_lmbs(const __be32 *prop, const __be32 *usm, void *data,
+ int (*func)(struct drmem_lmb *, const __be32 **, void *))
{
struct drmem_lmb lmb;
u32 i, n_lmbs;
+ int ret = 0;
n_lmbs = of_read_number(prop++, 1);
- if (n_lmbs == 0)
- return;
-
for (i = 0; i < n_lmbs; i++) {
read_drconf_v1_cell(&lmb, &prop);
- func(&lmb, &usm);
+ ret = func(&lmb, &usm, data);
+ if (ret)
+ break;
}
+
+ return ret;
}
-static void __init read_drconf_v2_cell(struct of_drconf_cell_v2 *dr_cell,
+static void read_drconf_v2_cell(struct of_drconf_cell_v2 *dr_cell,
const __be32 **prop)
{
const __be32 *p = *prop;
dr_cell->seq_lmbs = of_read_number(p++, 1);
- dr_cell->base_addr = dt_mem_next_cell(dt_root_addr_cells, &p);
+ dr_cell->base_addr = of_read_number(p, n_root_addr_cells);
+ p += n_root_addr_cells;
dr_cell->drc_index = of_read_number(p++, 1);
dr_cell->aa_index = of_read_number(p++, 1);
dr_cell->flags = of_read_number(p++, 1);
@@ -235,17 +249,16 @@ static void __init read_drconf_v2_cell(struct of_drconf_cell_v2 *dr_cell,
*prop = p;
}
-static void __init __walk_drmem_v2_lmbs(const __be32 *prop, const __be32 *usm,
- void (*func)(struct drmem_lmb *, const __be32 **))
+static int
+__walk_drmem_v2_lmbs(const __be32 *prop, const __be32 *usm, void *data,
+ int (*func)(struct drmem_lmb *, const __be32 **, void *))
{
struct of_drconf_cell_v2 dr_cell;
struct drmem_lmb lmb;
u32 i, j, lmb_sets;
+ int ret = 0;
lmb_sets = of_read_number(prop++, 1);
- if (lmb_sets == 0)
- return;
-
for (i = 0; i < lmb_sets; i++) {
read_drconf_v2_cell(&dr_cell, &prop);
@@ -259,21 +272,29 @@ static void __init __walk_drmem_v2_lmbs(const __be32 *prop, const __be32 *usm,
lmb.aa_index = dr_cell.aa_index;
lmb.flags = dr_cell.flags;
- func(&lmb, &usm);
+ ret = func(&lmb, &usm, data);
+ if (ret)
+ break;
}
}
+
+ return ret;
}
#ifdef CONFIG_PPC_PSERIES
-void __init walk_drmem_lmbs_early(unsigned long node,
- void (*func)(struct drmem_lmb *, const __be32 **))
+int __init walk_drmem_lmbs_early(unsigned long node, void *data,
+ int (*func)(struct drmem_lmb *, const __be32 **, void *))
{
const __be32 *prop, *usm;
- int len;
+ int len, ret = -ENODEV;
prop = of_get_flat_dt_prop(node, "ibm,lmb-size", &len);
if (!prop || len < dt_root_size_cells * sizeof(__be32))
- return;
+ return ret;
+
+ /* Get the address & size cells */
+ n_root_addr_cells = dt_root_addr_cells;
+ n_root_size_cells = dt_root_size_cells;
drmem_info->lmb_size = dt_mem_next_cell(dt_root_size_cells, &prop);
@@ -281,20 +302,60 @@ void __init walk_drmem_lmbs_early(unsigned long node,
prop = of_get_flat_dt_prop(node, "ibm,dynamic-memory", &len);
if (prop) {
- __walk_drmem_v1_lmbs(prop, usm, func);
+ ret = __walk_drmem_v1_lmbs(prop, usm, data, func);
} else {
prop = of_get_flat_dt_prop(node, "ibm,dynamic-memory-v2",
&len);
if (prop)
- __walk_drmem_v2_lmbs(prop, usm, func);
+ ret = __walk_drmem_v2_lmbs(prop, usm, data, func);
}
memblock_dump_all();
+ return ret;
}
+/*
+ * Update the LMB associativity index.
+ */
+static int update_lmb(struct drmem_lmb *updated_lmb,
+ __maybe_unused const __be32 **usm,
+ __maybe_unused void *data)
+{
+ struct drmem_lmb *lmb;
+
+ for_each_drmem_lmb(lmb) {
+ if (lmb->drc_index != updated_lmb->drc_index)
+ continue;
+
+ lmb->aa_index = updated_lmb->aa_index;
+ break;
+ }
+ return 0;
+}
+
+/*
+ * Update the LMB associativity index.
+ *
+ * This needs to be called when the hypervisor is updating the
+ * dynamic-reconfiguration-memory node property.
+ */
+void drmem_update_lmbs(struct property *prop)
+{
+ /*
+ * Don't update the LMBs if triggered by the update done in
+ * drmem_update_dt(); the LMB values have been used to update the DT
+ * property in that case.
+ */
+ if (in_drmem_update)
+ return;
+ if (!strcmp(prop->name, "ibm,dynamic-memory"))
+ __walk_drmem_v1_lmbs(prop->value, NULL, NULL, update_lmb);
+ else if (!strcmp(prop->name, "ibm,dynamic-memory-v2"))
+ __walk_drmem_v2_lmbs(prop->value, NULL, NULL, update_lmb);
+}
#endif
-static int __init init_drmem_lmb_size(struct device_node *dn)
+static int init_drmem_lmb_size(struct device_node *dn)
{
const __be32 *prop;
int len;
@@ -303,12 +364,12 @@ static int __init init_drmem_lmb_size(struct device_node *dn)
return 0;
prop = of_get_property(dn, "ibm,lmb-size", &len);
- if (!prop || len < dt_root_size_cells * sizeof(__be32)) {
+ if (!prop || len < n_root_size_cells * sizeof(__be32)) {
pr_info("Could not determine LMB size\n");
return -1;
}
- drmem_info->lmb_size = dt_mem_next_cell(dt_root_size_cells, &prop);
+ drmem_info->lmb_size = of_read_number(prop, n_root_size_cells);
return 0;
}
@@ -329,24 +390,36 @@ static const __be32 *of_get_usable_memory(struct device_node *dn)
return prop;
}
-void __init walk_drmem_lmbs(struct device_node *dn,
- void (*func)(struct drmem_lmb *, const __be32 **))
+int walk_drmem_lmbs(struct device_node *dn, void *data,
+ int (*func)(struct drmem_lmb *, const __be32 **, void *))
{
const __be32 *prop, *usm;
+ int ret = -ENODEV;
+
+ if (!of_root)
+ return ret;
+
+ /* Get the address & size cells */
+ of_node_get(of_root);
+ n_root_addr_cells = of_n_addr_cells(of_root);
+ n_root_size_cells = of_n_size_cells(of_root);
+ of_node_put(of_root);
if (init_drmem_lmb_size(dn))
- return;
+ return ret;
usm = of_get_usable_memory(dn);
prop = of_get_property(dn, "ibm,dynamic-memory", NULL);
if (prop) {
- __walk_drmem_v1_lmbs(prop, usm, func);
+ ret = __walk_drmem_v1_lmbs(prop, usm, data, func);
} else {
prop = of_get_property(dn, "ibm,dynamic-memory-v2", NULL);
if (prop)
- __walk_drmem_v2_lmbs(prop, usm, func);
+ ret = __walk_drmem_v2_lmbs(prop, usm, data, func);
}
+
+ return ret;
}
static void __init init_drmem_v1_lmbs(const __be32 *prop)
@@ -362,10 +435,8 @@ static void __init init_drmem_v1_lmbs(const __be32 *prop)
if (!drmem_info->lmbs)
return;
- for_each_drmem_lmb(lmb) {
+ for_each_drmem_lmb(lmb)
read_drconf_v1_cell(lmb, &prop);
- lmb_set_nid(lmb);
- }
}
static void __init init_drmem_v2_lmbs(const __be32 *prop)
@@ -410,8 +481,6 @@ static void __init init_drmem_v2_lmbs(const __be32 *prop)
lmb->aa_index = dr_cell.aa_index;
lmb->flags = dr_cell.flags;
-
- lmb_set_nid(lmb);
}
}
}
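Editorial note (not part of the patch): the drmem walkers above now take a void *
cookie and an int-returning callback, so a walk can carry state and stop early. A
hypothetical caller matching the new signature might look like this; count_lmbs is
illustrative only.

#include <linux/types.h>
#include <asm/drmem.h>

/* Count LMBs; returning non-zero from a callback stops the walk early. */
static int count_lmbs(struct drmem_lmb *lmb, const __be32 **usm, void *data)
{
	unsigned int *count = data;

	(*count)++;
	return 0;
}

/* Usage sketch: unsigned int n = 0; walk_drmem_lmbs(dn, &n, count_lmbs); */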
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index 8db0507619e2..2bef19cc1b98 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -32,49 +32,20 @@
#include <linux/context_tracking.h>
#include <linux/hugetlb.h>
#include <linux/uaccess.h>
+#include <linux/kfence.h>
+#include <linux/pkeys.h>
#include <asm/firmware.h>
+#include <asm/interrupt.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/siginfo.h>
#include <asm/debug.h>
#include <asm/kup.h>
+#include <asm/inst.h>
+
-/*
- * Check whether the instruction inst is a store using
- * an update addressing form which will update r1.
- */
-static bool store_updates_sp(unsigned int inst)
-{
- /* check for 1 in the rA field */
- if (((inst >> 16) & 0x1f) != 1)
- return false;
- /* check major opcode */
- switch (inst >> 26) {
- case OP_STWU:
- case OP_STBU:
- case OP_STHU:
- case OP_STFSU:
- case OP_STFDU:
- return true;
- case OP_STD: /* std or stdu */
- return (inst & 3) == 1;
- case OP_31:
- /* check minor opcode */
- switch ((inst >> 1) & 0x3ff) {
- case OP_31_XOP_STDUX:
- case OP_31_XOP_STWUX:
- case OP_31_XOP_STBUX:
- case OP_31_XOP_STHUX:
- case OP_31_XOP_STFSUX:
- case OP_31_XOP_STFDUX:
- return true;
- }
- }
- return false;
-}
/*
* do_page_fault error handling helpers
*/
@@ -108,7 +79,7 @@ static int __bad_area(struct pt_regs *regs, unsigned long address, int si_code)
* Something tried to access memory that isn't in our memory map..
* Fix it, but check if it's kernel or user first..
*/
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
return __bad_area_nosemaphore(regs, address, si_code);
}
@@ -118,9 +89,33 @@ static noinline int bad_area(struct pt_regs *regs, unsigned long address)
return __bad_area(regs, address, SEGV_MAPERR);
}
-static int bad_key_fault_exception(struct pt_regs *regs, unsigned long address,
- int pkey)
+static noinline int bad_access_pkey(struct pt_regs *regs, unsigned long address,
+ struct vm_area_struct *vma)
{
+ struct mm_struct *mm = current->mm;
+ int pkey;
+
+ /*
+ * We don't try to fetch the pkey from page table because reading
+ * page table without locking doesn't guarantee stable pte value.
+ * Hence the pkey value that we return to userspace can be different
+ * from the pkey that actually caused access error.
+ *
+ * It does *not* guarantee that the VMA we find here
+ * was the one that we faulted on.
+ *
+ * 1. T1 : mprotect_key(foo, PAGE_SIZE, pkey=4);
+	 * 2. T1 : set AMR to deny access to pkey=4, touches page
+ * 3. T1 : faults...
+ * 4. T2: mprotect_key(foo, PAGE_SIZE, pkey=5);
+ * 5. T1 : enters fault handler, takes mmap_lock, etc...
+ * 6. T1 : reaches here, sees vma_pkey(vma)=5, when we really
+ * faulted on a pte with its pkey=4.
+ */
+ pkey = vma_pkey(vma);
+
+ mmap_read_unlock(mm);
+
/*
* If we are in kernel mode, bail out with a SEGV, this will
* be caught by the assembly which will restore the non-volatile
@@ -202,11 +197,9 @@ static int mm_fault_error(struct pt_regs *regs, unsigned long addr,
static bool bad_kernel_fault(struct pt_regs *regs, unsigned long error_code,
unsigned long address, bool is_write)
{
- int is_exec = TRAP(regs) == 0x400;
+ int is_exec = TRAP(regs) == INTERRUPT_INST_STORAGE;
- /* NX faults set DSISR_PROTFAULT on the 8xx, DSISR_NOEXEC_OR_G on others */
- if (is_exec && (error_code & (DSISR_NOEXEC_OR_G | DSISR_KEYFAULT |
- DSISR_PROTFAULT))) {
+ if (is_exec) {
pr_crit_ratelimited("kernel tried to execute %s page (%lx) - exploit attempt? (uid: %d)\n",
address >= TASK_SIZE ? "exec-protected" : "user",
address,
@@ -216,81 +209,44 @@ static bool bad_kernel_fault(struct pt_regs *regs, unsigned long error_code,
return true;
}
- if (!is_exec && address < TASK_SIZE && (error_code & DSISR_PROTFAULT) &&
- !search_exception_tables(regs->nip)) {
- pr_crit_ratelimited("Kernel attempted to access user page (%lx) - exploit attempt? (uid: %d)\n",
- address,
- from_kuid(&init_user_ns, current_uid()));
- }
-
// Kernel fault on kernel address is bad
if (address >= TASK_SIZE)
return true;
- // Fault on user outside of certain regions (eg. copy_tofrom_user()) is bad
- if (!search_exception_tables(regs->nip))
- return true;
+ // Read/write fault blocked by KUAP is bad, it can never succeed.
+ if (bad_kuap_fault(regs, address, is_write)) {
+ pr_crit_ratelimited("Kernel attempted to %s user page (%lx) - exploit attempt? (uid: %d)\n",
+ is_write ? "write" : "read", address,
+ from_kuid(&init_user_ns, current_uid()));
- // Read/write fault in a valid region (the exception table search passed
- // above), but blocked by KUAP is bad, it can never succeed.
- if (bad_kuap_fault(regs, address, is_write))
- return true;
+ // Fault on user outside of certain regions (eg. copy_tofrom_user()) is bad
+ if (!search_exception_tables(regs->nip))
+ return true;
- // What's left? Kernel fault on user in well defined regions (extable
- // matched), and allowed by KUAP in the faulting context.
+ // Read/write fault in a valid region (the exception table search passed
+ // above), but blocked by KUAP is bad, it can never succeed.
+ return WARN(true, "Bug: %s fault blocked by KUAP!", is_write ? "Write" : "Read");
+ }
+
+ // What's left? Kernel fault on user and allowed by KUAP in the faulting context.
return false;
}
-static bool bad_stack_expansion(struct pt_regs *regs, unsigned long address,
- struct vm_area_struct *vma, unsigned int flags,
- bool *must_retry)
+static bool access_pkey_error(bool is_write, bool is_exec, bool is_pkey,
+ struct vm_area_struct *vma)
{
/*
- * N.B. The POWER/Open ABI allows programs to access up to
- * 288 bytes below the stack pointer.
- * The kernel signal delivery code writes up to about 1.5kB
- * below the stack pointer (r1) before decrementing it.
- * The exec code can write slightly over 640kB to the stack
- * before setting the user r1. Thus we allow the stack to
- * expand to 1MB without further checks.
+ * Make sure to check the VMA so that we do not perform
+ * faults just to hit a pkey fault as soon as we fill in a
+ * page. Only called for current mm, hence foreign == 0
*/
- if (address + 0x100000 < vma->vm_end) {
- unsigned int __user *nip = (unsigned int __user *)regs->nip;
- /* get user regs even if this fault is in kernel mode */
- struct pt_regs *uregs = current->thread.regs;
- if (uregs == NULL)
- return true;
-
- /*
- * A user-mode access to an address a long way below
- * the stack pointer is only valid if the instruction
- * is one which would update the stack pointer to the
- * address accessed if the instruction completed,
- * i.e. either stwu rs,n(r1) or stwux rs,r1,rb
- * (or the byte, halfword, float or double forms).
- *
- * If we don't check this then any write to the area
- * between the last mapped region and the stack will
- * expand the stack rather than segfaulting.
- */
- if (address + 2048 >= uregs->gpr[1])
- return false;
-
- if ((flags & FAULT_FLAG_WRITE) && (flags & FAULT_FLAG_USER) &&
- access_ok(nip, sizeof(*nip))) {
- unsigned int inst;
-
- if (!probe_user_read(&inst, nip, sizeof(inst)))
- return !store_updates_sp(inst);
- *must_retry = true;
- }
+ if (!arch_vma_access_permitted(vma, is_write, is_exec, 0))
return true;
- }
+
return false;
}
-static bool access_error(bool is_write, bool is_exec,
- struct vm_area_struct *vma)
+static bool access_error(bool is_write, bool is_exec, struct vm_area_struct *vma)
{
/*
* Allow execution from readable areas if the MMU does not
@@ -314,7 +270,11 @@ static bool access_error(bool is_write, bool is_exec,
return false;
}
- if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))))
+ /*
+ * Check for a read fault. This could be caused by a read on an
+ * inaccessible page (i.e. PROT_NONE), or a Radix MMU execute-only page.
+ */
+ if (unlikely(!(vma->vm_flags & VM_READ)))
return true;
/*
* We should ideally do the vma pkey access check here. But in the
@@ -342,7 +302,6 @@ static inline void cmo_account_page_fault(void)
static inline void cmo_account_page_fault(void) { }
#endif /* CONFIG_PPC_SMLPAR */
-#ifdef CONFIG_PPC_BOOK3S
static void sanity_check_fault(bool is_write, bool is_user,
unsigned long error_code, unsigned long address)
{
@@ -359,6 +318,9 @@ static void sanity_check_fault(bool is_write, bool is_user,
return;
}
+ if (!IS_ENABLED(CONFIG_PPC_BOOK3S))
+ return;
+
/*
* For hash translation mode, we should never get a
* PROTFAULT. Any update to pte to reduce access will result in us
@@ -393,10 +355,6 @@ static void sanity_check_fault(bool is_write, bool is_user,
WARN_ON_ONCE(error_code & DSISR_PROTFAULT);
}
-#else
-static void sanity_check_fault(bool is_write, bool is_user,
- unsigned long error_code, unsigned long address) { }
-#endif /* CONFIG_PPC_BOOK3S */
/*
* Define the correct "is_write" bit in error_code based
@@ -404,42 +362,56 @@ static void sanity_check_fault(bool is_write, bool is_user,
*/
#if (defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
#define page_fault_is_write(__err) ((__err) & ESR_DST)
-#define page_fault_is_bad(__err) (0)
#else
#define page_fault_is_write(__err) ((__err) & DSISR_ISSTORE)
-#if defined(CONFIG_PPC_8xx)
+#endif
+
+#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
+#define page_fault_is_bad(__err) (0)
+#elif defined(CONFIG_PPC_8xx)
#define page_fault_is_bad(__err) ((__err) & DSISR_NOEXEC_OR_G)
#elif defined(CONFIG_PPC64)
-#define page_fault_is_bad(__err) ((__err) & DSISR_BAD_FAULT_64S)
+static int page_fault_is_bad(unsigned long err)
+{
+ unsigned long flag = DSISR_BAD_FAULT_64S;
+
+ /*
+ * PAPR+ v2.11 § 14.15.3.4.1 (unreleased)
+ * If byte 0, bit 3 of pi-attribute-specifier-type in
+ * ibm,pi-features property is defined, ignore the DSI error
+ * which is caused by the paste instruction on the
+ * suspended NX window.
+ */
+ if (mmu_has_feature(MMU_FTR_NX_DSI))
+ flag &= ~DSISR_BAD_COPYPASTE;
+
+ return err & flag;
+}
#else
#define page_fault_is_bad(__err) ((__err) & DSISR_BAD_FAULT_32S)
#endif
-#endif
/*
* For 600- and 800-family processors, the error_code parameter is DSISR
- * for a data fault, SRR1 for an instruction fault. For 400-family processors
- * the error_code parameter is ESR for a data fault, 0 for an instruction
- * fault.
- * For 64-bit processors, the error_code parameter is
- * - DSISR for a non-SLB data access fault,
- * - SRR1 & 0x08000000 for a non-SLB instruction access fault
- * - 0 any SLB fault.
+ * for a data fault, SRR1 for an instruction fault.
+ * For 400-family processors the error_code parameter is ESR for a data fault,
+ * 0 for an instruction fault.
+ * For 64-bit processors, the error_code parameter is DSISR for a data access
+ * fault, SRR1 & 0x08000000 for an instruction access fault.
*
* The return value is 0 if the fault was handled, or the signal
* number if this is a kernel fault that can't be handled here.
*/
-static int __do_page_fault(struct pt_regs *regs, unsigned long address,
+static int ___do_page_fault(struct pt_regs *regs, unsigned long address,
unsigned long error_code)
{
struct vm_area_struct * vma;
struct mm_struct *mm = current->mm;
- unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
- int is_exec = TRAP(regs) == 0x400;
+ unsigned int flags = FAULT_FLAG_DEFAULT;
+ int is_exec = TRAP(regs) == INTERRUPT_INST_STORAGE;
int is_user = user_mode(regs);
int is_write = page_fault_is_write(error_code);
vm_fault_t fault, major = 0;
- bool must_retry = false;
bool kprobe_fault = kprobe_page_fault(regs, 11);
if (unlikely(debugger_fault_handler(regs) || kprobe_fault))
@@ -461,8 +433,12 @@ static int __do_page_fault(struct pt_regs *regs, unsigned long address,
* take a page fault to a kernel address or a page fault to a user
* address outside of dedicated places
*/
- if (unlikely(!is_user && bad_kernel_fault(regs, error_code, address, is_write)))
+ if (unlikely(!is_user && bad_kernel_fault(regs, error_code, address, is_write))) {
+ if (kfence_handle_page_fault(address, is_write, regs))
+ return 0;
+
return SIGSEGV;
+ }
/*
* If we're in an interrupt, have no user context or are running
@@ -477,20 +453,14 @@ static int __do_page_fault(struct pt_regs *regs, unsigned long address,
return bad_area_nosemaphore(regs, address);
}
- /* We restore the interrupt state now */
- if (!arch_irq_disabled_regs(regs))
- local_irq_enable();
+ interrupt_cond_local_irq_enable(regs);
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
- if (error_code & DSISR_KEYFAULT)
- return bad_key_fault_exception(regs, address,
- get_mm_addr_key(mm, address));
-
/*
- * We want to do this outside mmap_sem, because reading code around nip
+ * We want to do this outside mmap_lock, because reading code around nip
* can result in fault, which will cause a deadlock when called with
- * mmap_sem held
+ * mmap_lock held
*/
if (is_user)
flags |= FAULT_FLAG_USER;
@@ -502,7 +472,7 @@ static int __do_page_fault(struct pt_regs *regs, unsigned long address,
/* When running in the kernel we expect faults to occur only to
* addresses in user space. All other faults represent errors in the
* kernel and should generate an OOPS. Unfortunately, in the case of an
- * erroneous fault occurring in a code path which already holds mmap_sem
+ * erroneous fault occurring in a code path which already holds mmap_lock
* we will deadlock attempting to validate the fault against the
* address space. Luckily the kernel only validly references user
* space from well defined areas of code, which are listed in the
@@ -514,12 +484,12 @@ static int __do_page_fault(struct pt_regs *regs, unsigned long address,
* source. If this is invalid we can skip the address space check,
* thus avoiding the deadlock.
*/
- if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
+ if (unlikely(!mmap_read_trylock(mm))) {
if (!is_user && !search_exception_tables(regs->nip))
return bad_area_nosemaphore(regs, address);
retry:
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
} else {
/*
* The above down_read_trylock() might have succeeded in
@@ -532,29 +502,19 @@ retry:
vma = find_vma(mm, address);
if (unlikely(!vma))
return bad_area(regs, address);
- if (likely(vma->vm_start <= address))
- goto good_area;
- if (unlikely(!(vma->vm_flags & VM_GROWSDOWN)))
- return bad_area(regs, address);
- /* The stack is being expanded, check if it's valid */
- if (unlikely(bad_stack_expansion(regs, address, vma, flags,
- &must_retry))) {
- if (!must_retry)
+ if (unlikely(vma->vm_start > address)) {
+ if (unlikely(!(vma->vm_flags & VM_GROWSDOWN)))
return bad_area(regs, address);
- up_read(&mm->mmap_sem);
- if (fault_in_pages_readable((const char __user *)regs->nip,
- sizeof(unsigned int)))
- return bad_area_nosemaphore(regs, address);
- goto retry;
+ if (unlikely(expand_stack(vma, address)))
+ return bad_area(regs, address);
}
- /* Try to expand it */
- if (unlikely(expand_stack(vma, address)))
- return bad_area(regs, address);
+ if (unlikely(access_pkey_error(is_write, is_exec,
+ (error_code & DSISR_KEYFAULT), vma)))
+ return bad_access_pkey(regs, address, vma);
-good_area:
if (unlikely(access_error(is_write, is_exec, vma)))
return bad_access(regs, address);
@@ -563,112 +523,97 @@ good_area:
* make sure we exit gracefully rather than endlessly redo
* the fault.
*/
- fault = handle_mm_fault(vma, address, flags);
+ fault = handle_mm_fault(vma, address, flags, regs);
-#ifdef CONFIG_PPC_MEM_KEYS
- /*
- * we skipped checking for access error due to key earlier.
- * Check that using handle_mm_fault error return.
- */
- if (unlikely(fault & VM_FAULT_SIGSEGV) &&
- !arch_vma_access_permitted(vma, is_write, is_exec, 0)) {
+ major |= fault & VM_FAULT_MAJOR;
- int pkey = vma_pkey(vma);
+ if (fault_signal_pending(fault, regs))
+ return user_mode(regs) ? 0 : SIGBUS;
- up_read(&mm->mmap_sem);
- return bad_key_fault_exception(regs, address, pkey);
- }
-#endif /* CONFIG_PPC_MEM_KEYS */
-
- major |= fault & VM_FAULT_MAJOR;
+ /* The fault is fully completed (including releasing mmap lock) */
+ if (fault & VM_FAULT_COMPLETED)
+ goto out;
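VM_FAULT_COMPLETED tells the architecture handler that handle_mm_fault() finished the fault and already dropped mmap_lock itself, so the only correct action is to skip the unlock. A sketch of the caller-side contract, assuming the surrounding labels:

	fault = handle_mm_fault(vma, address, flags, regs);
	if (fault & VM_FAULT_COMPLETED)
		goto out;		/* core mm released mmap_lock already */
	/* ... retry / error handling ... */
	mmap_read_unlock(mm);
out:
	/* only accounting past this point; no lock is held */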
/*
- * Handle the retry right now, the mmap_sem has been released in that
+ * Handle the retry right now, the mmap_lock has been released in that
* case.
*/
if (unlikely(fault & VM_FAULT_RETRY)) {
- /* We retry only once */
- if (flags & FAULT_FLAG_ALLOW_RETRY) {
- /*
- * Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
- * of starvation.
- */
- flags &= ~FAULT_FLAG_ALLOW_RETRY;
- flags |= FAULT_FLAG_TRIED;
- if (!fatal_signal_pending(current))
- goto retry;
- }
-
- /*
- * User mode? Just return to handle the fatal exception otherwise
- * return to bad_page_fault
- */
- return is_user ? 0 : SIGBUS;
+ flags |= FAULT_FLAG_TRIED;
+ goto retry;
}
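The old open-coded retry logic cleared FAULT_FLAG_ALLOW_RETRY by hand; the core mm now owns that bookkeeping, and fault_signal_pending() above already filters out fatal signals, so the arch handler is reduced to roughly this (illustrative sketch):

	if (fault & VM_FAULT_RETRY) {
		flags |= FAULT_FLAG_TRIED;	/* core mm won't drop the lock a second time */
		goto retry;			/* mmap_lock was released before RETRY was returned */
	}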
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
if (unlikely(fault & VM_FAULT_ERROR))
return mm_fault_error(regs, address, fault);
+out:
/*
* Major/minor page fault accounting.
*/
- if (major) {
- current->maj_flt++;
- perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
+ if (major)
cmo_account_page_fault();
- } else {
- current->min_flt++;
- perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
- }
+
return 0;
}
-NOKPROBE_SYMBOL(__do_page_fault);
+NOKPROBE_SYMBOL(___do_page_fault);
-int do_page_fault(struct pt_regs *regs, unsigned long address,
- unsigned long error_code)
+static __always_inline void __do_page_fault(struct pt_regs *regs)
{
- enum ctx_state prev_state = exception_enter();
- int rc = __do_page_fault(regs, address, error_code);
- exception_exit(prev_state);
- return rc;
+ long err;
+
+ err = ___do_page_fault(regs, regs->dar, regs->dsisr);
+ if (unlikely(err))
+ bad_page_fault(regs, err);
+}
+
+DEFINE_INTERRUPT_HANDLER(do_page_fault)
+{
+ __do_page_fault(regs);
}
-NOKPROBE_SYMBOL(do_page_fault);
+
+#ifdef CONFIG_PPC_BOOK3S_64
+/* Same as do_page_fault but interrupt entry has already run in do_hash_fault */
+void hash__do_page_fault(struct pt_regs *regs)
+{
+ __do_page_fault(regs);
+}
+NOKPROBE_SYMBOL(hash__do_page_fault);
+#endif
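DEFINE_INTERRUPT_HANDLER() comes from asm/interrupt.h and supplies the entry/exit bookkeeping that the removed exception_enter()/exception_exit() pair used to provide. Very roughly, and glossing over the real macro's additional state handling, it generates something like:

	/* simplified sketch of the wrapper generated for do_page_fault() */
	void do_page_fault(struct pt_regs *regs)
	{
		interrupt_enter_prepare(regs);	/* context tracking, irq accounting */
		__do_page_fault(regs);
		interrupt_exit_prepare(regs);
	}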
/*
* bad_page_fault is called when we have a bad access from the kernel.
* It is called from the DSI and ISI handlers in head.S and from some
* of the procedures in traps.c.
*/
-void bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
+static void __bad_page_fault(struct pt_regs *regs, int sig)
{
- const struct exception_table_entry *entry;
int is_write = page_fault_is_write(regs->dsisr);
-
- /* Are we prepared to handle this fault? */
- if ((entry = search_exception_tables(regs->nip)) != NULL) {
- regs->nip = extable_fixup(entry);
- return;
- }
+ const char *msg;
/* kernel has accessed a bad area */
+ if (regs->dar < PAGE_SIZE)
+ msg = "Kernel NULL pointer dereference";
+ else
+ msg = "Unable to handle kernel data access";
+
switch (TRAP(regs)) {
- case 0x300:
- case 0x380:
- case 0xe00:
- pr_alert("BUG: %s on %s at 0x%08lx\n",
- regs->dar < PAGE_SIZE ? "Kernel NULL pointer dereference" :
- "Unable to handle kernel data access",
+ case INTERRUPT_DATA_STORAGE:
+ case INTERRUPT_H_DATA_STORAGE:
+ pr_alert("BUG: %s on %s at 0x%08lx\n", msg,
is_write ? "write" : "read", regs->dar);
break;
- case 0x400:
- case 0x480:
+ case INTERRUPT_DATA_SEGMENT:
+ pr_alert("BUG: %s at 0x%08lx\n", msg, regs->dar);
+ break;
+ case INTERRUPT_INST_STORAGE:
+ case INTERRUPT_INST_SEGMENT:
pr_alert("BUG: Unable to handle kernel instruction fetch%s",
regs->nip < PAGE_SIZE ? " (NULL pointer?)\n" : "\n");
break;
- case 0x600:
+ case INTERRUPT_ALIGNMENT:
pr_alert("BUG: Unable to handle kernel unaligned access at 0x%08lx\n",
regs->dar);
break;
@@ -685,3 +630,45 @@ void bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
die("Kernel access of bad area", regs, sig);
}
+
+void bad_page_fault(struct pt_regs *regs, int sig)
+{
+ const struct exception_table_entry *entry;
+
+ /* Are we prepared to handle this fault? */
+ entry = search_exception_tables(instruction_pointer(regs));
+ if (entry)
+ instruction_pointer_set(regs, extable_fixup(entry));
+ else
+ __bad_page_fault(regs, sig);
+}
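This is the standard exception-table pattern: any instruction listed in __ex_table, such as the accesses inside __get_user(), carries a fixup address, and a kernel fault on such an instruction is redirected to its fixup stub instead of being treated as a bug. The consumer side, as used above, is essentially:

	entry = search_exception_tables(instruction_pointer(regs));
	if (entry) {
		/* resume at the fixup stub, which typically makes the caller return -EFAULT */
		instruction_pointer_set(regs, extable_fixup(entry));
		return;
	}
	/* no fixup registered: genuine kernel bug, fall through to the oops path */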
+
+#ifdef CONFIG_PPC_BOOK3S_64
+DEFINE_INTERRUPT_HANDLER(do_bad_page_fault_segv)
+{
+ bad_page_fault(regs, SIGSEGV);
+}
+
+/*
+ * In radix, segment interrupts indicate the EA is not addressable by the
+ * page table geometry, so they are always sent here.
+ *
+ * In hash, this is called if do_slb_fault returns an error. Typically it is
+ * because the EA was outside the region allowed by software.
+ */
+DEFINE_INTERRUPT_HANDLER(do_bad_segment_interrupt)
+{
+ int err = regs->result;
+
+ if (err == -EFAULT) {
+ if (user_mode(regs))
+ _exception(SIGSEGV, regs, SEGV_BNDERR, regs->dar);
+ else
+ bad_page_fault(regs, SIGSEGV);
+ } else if (err == -EINVAL) {
+ unrecoverable_exception(regs);
+ } else {
+ BUG();
+ }
+}
+#endif
diff --git a/arch/powerpc/mm/highmem.c b/arch/powerpc/mm/highmem.c
deleted file mode 100644
index 320c1672b2ae..000000000000
--- a/arch/powerpc/mm/highmem.c
+++ /dev/null
@@ -1,83 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * highmem.c: virtual kernel memory mappings for high memory
- *
- * PowerPC version, stolen from the i386 version.
- *
- * Used in CONFIG_HIGHMEM systems for memory pages which
- * are not addressable by direct kernel virtual addresses.
- *
- * Copyright (C) 1999 Gerhard Wichert, Siemens AG
- * Gerhard.Wichert@pdb.siemens.de
- *
- *
- * Redesigned the x86 32-bit VM architecture to deal with
- * up to 16 Terrabyte physical memory. With current x86 CPUs
- * we now support up to 64 Gigabytes physical RAM.
- *
- * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
- *
- * Reworked for PowerPC by various contributors. Moved from
- * highmem.h by Benjamin Herrenschmidt (c) 2009 IBM Corp.
- */
-
-#include <linux/highmem.h>
-#include <linux/module.h>
-
-/*
- * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap
- * gives a more generic (and caching) interface. But kmap_atomic can
- * be used in IRQ contexts, so in some (very limited) cases we need
- * it.
- */
-void *kmap_atomic_prot(struct page *page, pgprot_t prot)
-{
- unsigned long vaddr;
- int idx, type;
-
- preempt_disable();
- pagefault_disable();
- if (!PageHighMem(page))
- return page_address(page);
-
- type = kmap_atomic_idx_push();
- idx = type + KM_TYPE_NR*smp_processor_id();
- vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
- WARN_ON(IS_ENABLED(CONFIG_DEBUG_HIGHMEM) && !pte_none(*(kmap_pte - idx)));
- __set_pte_at(&init_mm, vaddr, kmap_pte-idx, mk_pte(page, prot), 1);
- local_flush_tlb_page(NULL, vaddr);
-
- return (void*) vaddr;
-}
-EXPORT_SYMBOL(kmap_atomic_prot);
-
-void __kunmap_atomic(void *kvaddr)
-{
- unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
-
- if (vaddr < __fix_to_virt(FIX_KMAP_END)) {
- pagefault_enable();
- preempt_enable();
- return;
- }
-
- if (IS_ENABLED(CONFIG_DEBUG_HIGHMEM)) {
- int type = kmap_atomic_idx();
- unsigned int idx;
-
- idx = type + KM_TYPE_NR * smp_processor_id();
- WARN_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
-
- /*
- * force other mappings to Oops if they'll try to access
- * this pte without first remap it
- */
- pte_clear(&init_mm, vaddr, kmap_pte-idx);
- local_flush_tlb_page(NULL, vaddr);
- }
-
- kmap_atomic_idx_pop();
- pagefault_enable();
- preempt_enable();
-}
-EXPORT_SYMBOL(__kunmap_atomic);
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 33b3461d91e8..5852a86d990d 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -19,18 +19,19 @@
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/kmemleak.h>
-#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/setup.h>
#include <asm/hugetlb.h>
#include <asm/pte-walk.h>
+#include <asm/firmware.h>
bool hugetlb_disabled = false;
#define hugepd_none(hpd) (hpd_val(hpd) == 0)
-#define PTE_T_ORDER (__builtin_ffs(sizeof(pte_t)) - __builtin_ffs(sizeof(void *)))
+#define PTE_T_ORDER (__builtin_ffs(sizeof(pte_basic_t)) - \
+ __builtin_ffs(sizeof(void *)))
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr, unsigned long sz)
{
@@ -53,24 +54,17 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
if (pshift >= pdshift) {
cachep = PGT_CACHE(PTE_T_ORDER);
num_hugepd = 1 << (pshift - pdshift);
- new = NULL;
- } else if (IS_ENABLED(CONFIG_PPC_8xx)) {
- cachep = NULL;
- num_hugepd = 1;
- new = pte_alloc_one(mm);
} else {
cachep = PGT_CACHE(pdshift - pshift);
num_hugepd = 1;
- new = NULL;
}
- if (!cachep && !new) {
+ if (!cachep) {
WARN_ONCE(1, "No page table cache created for hugetlb tables");
return -ENOMEM;
}
- if (cachep)
- new = kmem_cache_alloc(cachep, pgtable_gfp_flags(mm, GFP_KERNEL));
+ new = kmem_cache_alloc(cachep, pgtable_gfp_flags(mm, GFP_KERNEL));
BUG_ON(pshift > HUGEPD_SHIFT_MASK);
BUG_ON((unsigned long)new & HUGEPD_SHIFT_MASK);
@@ -101,10 +95,7 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
if (i < num_hugepd) {
for (i = i - 1 ; i >= 0; i--, hpdp--)
*hpdp = __hugepd(0);
- if (cachep)
- kmem_cache_free(cachep, new);
- else
- pte_free(mm, new);
+ kmem_cache_free(cachep, new);
} else {
kmemleak_ignore(new);
}
@@ -116,9 +107,11 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
* At this point we do the placement change only for BOOK3S 64. This would
* possibly work on other subarchs.
*/
-pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz)
+pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
+ unsigned long addr, unsigned long sz)
{
pgd_t *pg;
+ p4d_t *p4;
pud_t *pu;
pmd_t *pm;
hugepd_t *hpdp = NULL;
@@ -128,20 +121,21 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz
addr &= ~(sz-1);
pg = pgd_offset(mm, addr);
+ p4 = p4d_offset(pg, addr);
#ifdef CONFIG_PPC_BOOK3S_64
if (pshift == PGDIR_SHIFT)
/* 16GB huge page */
- return (pte_t *) pg;
+ return (pte_t *) p4;
else if (pshift > PUD_SHIFT) {
/*
* We need to use hugepd table
*/
ptl = &mm->page_table_lock;
- hpdp = (hugepd_t *)pg;
+ hpdp = (hugepd_t *)p4;
} else {
pdshift = PUD_SHIFT;
- pu = pud_alloc(mm, pg, addr);
+ pu = pud_alloc(mm, p4, addr);
if (!pu)
return NULL;
if (pshift == PUD_SHIFT)
@@ -166,10 +160,10 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz
#else
if (pshift >= PGDIR_SHIFT) {
ptl = &mm->page_table_lock;
- hpdp = (hugepd_t *)pg;
+ hpdp = (hugepd_t *)p4;
} else {
pdshift = PUD_SHIFT;
- pu = pud_alloc(mm, pg, addr);
+ pu = pud_alloc(mm, p4, addr);
if (!pu)
return NULL;
if (pshift >= PUD_SHIFT) {
@@ -188,6 +182,9 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz
if (!hpdp)
return NULL;
+ if (IS_ENABLED(CONFIG_PPC_8xx) && pshift < PMD_SHIFT)
+ return pte_alloc_map(mm, (pmd_t *)hpdp, addr);
+
BUG_ON(!hugepd_none(*hpdp) && !hugepd_ok(*hpdp));
if (hugepd_none(*hpdp) && __hugepte_alloc(mm, hpdp, addr,
@@ -222,7 +219,7 @@ void __init pseries_add_gpage(u64 addr, u64 page_size, unsigned long number_of_p
}
}
-int __init pseries_alloc_bootmem_huge_page(struct hstate *hstate)
+static int __init pseries_alloc_bootmem_huge_page(struct hstate *hstate)
{
struct huge_bootmem_page *m;
if (nr_gpages == 0)
@@ -233,17 +230,22 @@ int __init pseries_alloc_bootmem_huge_page(struct hstate *hstate)
m->hstate = hstate;
return 1;
}
+
+bool __init hugetlb_node_alloc_supported(void)
+{
+ return false;
+}
#endif
-int __init alloc_bootmem_huge_page(struct hstate *h)
+int __init alloc_bootmem_huge_page(struct hstate *h, int nid)
{
#ifdef CONFIG_PPC_BOOK3S_64
if (firmware_has_feature(FW_FEATURE_LPAR) && !radix_enabled())
return pseries_alloc_bootmem_huge_page(h);
#endif
- return __alloc_bootmem_huge_page(h);
+ return __alloc_bootmem_huge_page(h, nid);
}
#ifndef CONFIG_PPC_BOOK3S_64
@@ -253,7 +255,7 @@ int __init alloc_bootmem_huge_page(struct hstate *h)
struct hugepd_freelist {
struct rcu_head rcu;
unsigned int index;
- void *ptes[0];
+ void *ptes[];
};
static DEFINE_PER_CPU(struct hugepd_freelist *, hugepd_freelist_cur);
@@ -299,6 +301,21 @@ static void hugepd_free(struct mmu_gather *tlb, void *hugepte)
static inline void hugepd_free(struct mmu_gather *tlb, void *hugepte) {}
#endif
+/* Return true when the entry to be freed maps more than the area being freed */
+static bool range_is_outside_limits(unsigned long start, unsigned long end,
+ unsigned long floor, unsigned long ceiling,
+ unsigned long mask)
+{
+ if ((start & mask) < floor)
+ return true;
+ if (ceiling) {
+ ceiling &= mask;
+ if (!ceiling)
+ return true;
+ }
+ return end - 1 > ceiling - 1;
+}
+
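A worked example of the helper above, with made-up numbers: take a 16MB page-table granule, so mask = ~0xffffffUL, and free [0x01800000, 0x01c00000) with floor = 0x01000000 and ceiling = 0x02000000.

	/*
	 * start & mask   = 0x01000000, not below floor
	 * ceiling & mask = 0x02000000, non-zero
	 * end - 1 = 0x01bfffff, not above ceiling - 1 = 0x01ffffff
	 * => returns false: the granule lies entirely inside [floor, ceiling)
	 *    and the backing page-table page may be freed.
	 * With floor = 0x01900000 instead, start & mask falls below floor and
	 * the helper returns true, keeping the shared page-table page alive.
	 */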
static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshift,
unsigned long start, unsigned long end,
unsigned long floor, unsigned long ceiling)
@@ -314,15 +331,7 @@ static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshif
if (shift > pdshift)
num_hugepd = 1 << (shift - pdshift);
- start &= pdmask;
- if (start < floor)
- return;
- if (ceiling) {
- ceiling &= pdmask;
- if (! ceiling)
- return;
- }
- if (end - 1 > ceiling - 1)
+ if (range_is_outside_limits(start, end, floor, ceiling, pdmask))
return;
for (i = 0; i < num_hugepd; i++, hpdp++)
@@ -330,13 +339,25 @@ static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshif
if (shift >= pdshift)
hugepd_free(tlb, hugepte);
- else if (IS_ENABLED(CONFIG_PPC_8xx))
- pgtable_free_tlb(tlb, hugepte, 0);
else
pgtable_free_tlb(tlb, hugepte,
get_hugepd_cache_index(pdshift - shift));
}
+static void hugetlb_free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
+ unsigned long addr, unsigned long end,
+ unsigned long floor, unsigned long ceiling)
+{
+ pgtable_t token = pmd_pgtable(*pmd);
+
+ if (range_is_outside_limits(addr, end, floor, ceiling, PMD_MASK))
+ return;
+
+ pmd_clear(pmd);
+ pte_free_tlb(tlb, token, addr);
+ mm_dec_nr_ptes(tlb->mm);
+}
+
static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
unsigned long addr, unsigned long end,
unsigned long floor, unsigned long ceiling)
@@ -352,11 +373,17 @@ static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
pmd = pmd_offset(pud, addr);
next = pmd_addr_end(addr, end);
if (!is_hugepd(__hugepd(pmd_val(*pmd)))) {
+ if (pmd_none_or_clear_bad(pmd))
+ continue;
+
/*
* if it is not hugepd pointer, we should already find
* it cleared.
*/
- WARN_ON(!pmd_none_or_clear_bad(pmd));
+ WARN_ON(!IS_ENABLED(CONFIG_PPC_8xx));
+
+ hugetlb_free_pte_range(tlb, pmd, addr, end, floor, ceiling);
+
continue;
}
/*
@@ -365,7 +392,7 @@ static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
* single hugepage, but all of them point to
* the same kmem cache that holds the hugepte.
*/
- more = addr + (1 << hugepd_shift(*(hugepd_t *)pmd));
+ more = addr + (1UL << hugepd_shift(*(hugepd_t *)pmd));
if (more > next)
next = more;
@@ -373,24 +400,16 @@ static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
addr, next, floor, ceiling);
} while (addr = next, addr != end);
- start &= PUD_MASK;
- if (start < floor)
- return;
- if (ceiling) {
- ceiling &= PUD_MASK;
- if (!ceiling)
- return;
- }
- if (end - 1 > ceiling - 1)
+ if (range_is_outside_limits(start, end, floor, ceiling, PUD_MASK))
return;
- pmd = pmd_offset(pud, start);
+ pmd = pmd_offset(pud, start & PUD_MASK);
pud_clear(pud);
- pmd_free_tlb(tlb, pmd, start);
+ pmd_free_tlb(tlb, pmd, start & PUD_MASK);
mm_dec_nr_pmds(tlb->mm);
}
-static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
+static void hugetlb_free_pud_range(struct mmu_gather *tlb, p4d_t *p4d,
unsigned long addr, unsigned long end,
unsigned long floor, unsigned long ceiling)
{
@@ -400,7 +419,7 @@ static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
start = addr;
do {
- pud = pud_offset(pgd, addr);
+ pud = pud_offset(p4d, addr);
next = pud_addr_end(addr, end);
if (!is_hugepd(__hugepd(pud_val(*pud)))) {
if (pud_none_or_clear_bad(pud))
@@ -415,7 +434,7 @@ static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
* single hugepage, but all of them point to
* the same kmem cache that holds the hugepte.
*/
- more = addr + (1 << hugepd_shift(*(hugepd_t *)pud));
+ more = addr + (1UL << hugepd_shift(*(hugepd_t *)pud));
if (more > next)
next = more;
@@ -424,20 +443,12 @@ static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
}
} while (addr = next, addr != end);
- start &= PGDIR_MASK;
- if (start < floor)
- return;
- if (ceiling) {
- ceiling &= PGDIR_MASK;
- if (!ceiling)
- return;
- }
- if (end - 1 > ceiling - 1)
+ if (range_is_outside_limits(start, end, floor, ceiling, PGDIR_MASK))
return;
- pud = pud_offset(pgd, start);
- pgd_clear(pgd);
- pud_free_tlb(tlb, pud, start);
+ pud = pud_offset(p4d, start & PGDIR_MASK);
+ p4d_clear(p4d);
+ pud_free_tlb(tlb, pud, start & PGDIR_MASK);
mm_dec_nr_puds(tlb->mm);
}
@@ -449,6 +460,7 @@ void hugetlb_free_pgd_range(struct mmu_gather *tlb,
unsigned long floor, unsigned long ceiling)
{
pgd_t *pgd;
+ p4d_t *p4d;
unsigned long next;
/*
@@ -471,10 +483,11 @@ void hugetlb_free_pgd_range(struct mmu_gather *tlb,
do {
next = pgd_addr_end(addr, end);
pgd = pgd_offset(tlb->mm, addr);
+ p4d = p4d_offset(pgd, addr);
if (!is_hugepd(__hugepd(pgd_val(*pgd)))) {
- if (pgd_none_or_clear_bad(pgd))
+ if (p4d_none_or_clear_bad(p4d))
continue;
- hugetlb_free_pud_range(tlb, pgd, addr, next, floor, ceiling);
+ hugetlb_free_pud_range(tlb, p4d, addr, next, floor, ceiling);
} else {
unsigned long more;
/*
@@ -483,11 +496,11 @@ void hugetlb_free_pgd_range(struct mmu_gather *tlb,
* for a single hugepage, but all of them point to the
* same kmem cache that holds the hugepte.
*/
- more = addr + (1 << hugepd_shift(*(hugepd_t *)pgd));
+ more = addr + (1UL << hugepd_shift(*(hugepd_t *)pgd));
if (more > next)
next = more;
- free_hugepd_range(tlb, (hugepd_t *)pgd, PGDIR_SHIFT,
+ free_hugepd_range(tlb, (hugepd_t *)p4d, PGDIR_SHIFT,
addr, next, floor, ceiling);
}
} while (addr = next, addr != end);
@@ -530,35 +543,7 @@ retry:
return page;
}
-#ifdef CONFIG_PPC_MM_SLICES
-unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
- unsigned long len, unsigned long pgoff,
- unsigned long flags)
-{
- struct hstate *hstate = hstate_file(file);
- int mmu_psize = shift_to_mmu_psize(huge_page_shift(hstate));
-
-#ifdef CONFIG_PPC_RADIX_MMU
- if (radix_enabled())
- return radix__hugetlb_get_unmapped_area(file, addr, len,
- pgoff, flags);
-#endif
- return slice_get_unmapped_area(addr, len, flags, mmu_psize, 1);
-}
-#endif
-
-unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
-{
- /* With radix we don't use slice, so derive it from vma*/
- if (IS_ENABLED(CONFIG_PPC_MM_SLICES) && !radix_enabled()) {
- unsigned int psize = get_slice_psize(vma->vm_mm, vma->vm_start);
-
- return 1UL << mmu_psize_to_shift(psize);
- }
- return vma_kernel_pagesize(vma);
-}
-
-static int __init add_huge_page_size(unsigned long long size)
+bool __init arch_hugetlb_valid_size(unsigned long size)
{
int shift = __ffs(size);
int mmu_psize;
@@ -566,37 +551,27 @@ static int __init add_huge_page_size(unsigned long long size)
/* Check that it is a page size supported by the hardware and
* that it fits within pagetable and slice limits. */
if (size <= PAGE_SIZE || !is_power_of_2(size))
- return -EINVAL;
+ return false;
mmu_psize = check_and_get_huge_psize(shift);
if (mmu_psize < 0)
- return -EINVAL;
+ return false;
BUG_ON(mmu_psize_defs[mmu_psize].shift != shift);
- /* Return if huge page size has already been setup */
- if (size_to_hstate(size))
- return 0;
-
- hugetlb_add_hstate(shift - PAGE_SHIFT);
-
- return 0;
+ return true;
}
-static int __init hugepage_setup_sz(char *str)
+static int __init add_huge_page_size(unsigned long long size)
{
- unsigned long long size;
-
- size = memparse(str, &str);
+ int shift = __ffs(size);
- if (add_huge_page_size(size) != 0) {
- hugetlb_bad_size();
- pr_err("Invalid huge page size specified(%llu)\n", size);
- }
+ if (!arch_hugetlb_valid_size((unsigned long)size))
+ return -EINVAL;
- return 1;
+ hugetlb_add_hstate(shift - PAGE_SHIFT);
+ return 0;
}
-__setup("hugepagesz=", hugepage_setup_sz);
static int __init hugetlbpage_init(void)
{
@@ -648,7 +623,7 @@ static int __init hugetlbpage_init(void)
if (pdshift > shift) {
if (!IS_ENABLED(CONFIG_PPC_8xx))
pgtable_cache_add(pdshift - shift);
- } else if (IS_ENABLED(CONFIG_PPC_FSL_BOOK3E) ||
+ } else if (IS_ENABLED(CONFIG_PPC_E500) ||
IS_ENABLED(CONFIG_PPC_8xx)) {
pgtable_cache_add(PTE_T_ORDER);
}
@@ -656,10 +631,7 @@ static int __init hugetlbpage_init(void)
configured = true;
}
- if (configured) {
- if (IS_ENABLED(CONFIG_HUGETLB_PAGE_SIZE_VARIABLE))
- hugetlbpage_init_default();
- } else
+ if (!configured)
pr_info("Failed to initialize. Disabling HugeTLB");
return 0;
@@ -667,20 +639,20 @@ static int __init hugetlbpage_init(void)
arch_initcall(hugetlbpage_init);
-void flush_dcache_icache_hugepage(struct page *page)
+void __init gigantic_hugetlb_cma_reserve(void)
{
- int i;
- void *start;
+ unsigned long order = 0;
- BUG_ON(!PageCompound(page));
+ if (radix_enabled())
+ order = PUD_SHIFT - PAGE_SHIFT;
+ else if (!firmware_has_feature(FW_FEATURE_LPAR) && mmu_psize_defs[MMU_PAGE_16G].shift)
+ /*
+		 * For pseries we use ibm,expected#pages to reserve 16G pages.
+ */
+ order = mmu_psize_to_shift(MMU_PAGE_16G) - PAGE_SHIFT;
- for (i = 0; i < compound_nr(page); i++) {
- if (!PageHighMem(page)) {
- __flush_dcache_icache(page_address(page+i));
- } else {
- start = kmap_atomic(page+i);
- __flush_dcache_icache(start);
- kunmap_atomic(start);
- }
+ if (order) {
+ VM_WARN_ON(order < MAX_ORDER);
+ hugetlb_cma_reserve(order);
}
}
diff --git a/arch/powerpc/mm/init-common.c b/arch/powerpc/mm/init-common.c
index 42ef7a6e6098..119ef491f797 100644
--- a/arch/powerpc/mm/init-common.c
+++ b/arch/powerpc/mm/init-common.c
@@ -17,9 +17,10 @@
#undef DEBUG
#include <linux/string.h>
+#include <linux/pgtable.h>
#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
#include <asm/kup.h>
+#include <asm/smp.h>
phys_addr_t memstart_addr __ro_after_init = (phys_addr_t)~0ull;
EXPORT_SYMBOL_GPL(memstart_addr);
@@ -28,11 +29,14 @@ EXPORT_SYMBOL_GPL(kernstart_addr);
unsigned long kernstart_virt_addr __ro_after_init = KERNELBASE;
EXPORT_SYMBOL_GPL(kernstart_virt_addr);
-static bool disable_kuep = !IS_ENABLED(CONFIG_PPC_KUEP);
-static bool disable_kuap = !IS_ENABLED(CONFIG_PPC_KUAP);
+bool disable_kuep = !IS_ENABLED(CONFIG_PPC_KUEP);
+bool disable_kuap = !IS_ENABLED(CONFIG_PPC_KUAP);
static int __init parse_nosmep(char *p)
{
+ if (!IS_ENABLED(CONFIG_PPC_BOOK3S_64))
+ return 0;
+
disable_kuep = true;
pr_warn("Disabling Kernel Userspace Execution Prevention\n");
return 0;
@@ -47,10 +51,21 @@ static int __init parse_nosmap(char *p)
}
early_param("nosmap", parse_nosmap);
-void __ref setup_kup(void)
+void __weak setup_kuep(bool disabled)
+{
+ if (!IS_ENABLED(CONFIG_PPC_KUEP) || disabled)
+ return;
+
+ if (smp_processor_id() != boot_cpuid)
+ return;
+
+ pr_info("Activating Kernel Userspace Execution Prevention\n");
+}
+
+void setup_kup(void)
{
- setup_kuep(disable_kuep);
setup_kuap(disable_kuap);
+ setup_kuep(disable_kuep);
}
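The new __weak setup_kuep() above is only a fallback that prints the activation message on the boot CPU; a platform with real hardware support overrides it with a strong definition. An illustrative override, not taken from any particular platform:

	void setup_kuep(bool disabled)
	{
		if (disabled)
			return;
		/* program the MMU / SPR state that enforces execution prevention */
		pr_info("Activating Kernel Userspace Execution Prevention\n");
	}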
#define CTOR(shift) static void ctor_##shift(void *addr) \
diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c
index 872df48ae41b..d4cc3749e621 100644
--- a/arch/powerpc/mm/init_32.c
+++ b/arch/powerpc/mm/init_32.c
@@ -29,10 +29,7 @@
#include <linux/slab.h>
#include <linux/hugetlb.h>
-#include <asm/pgalloc.h>
-#include <asm/prom.h>
#include <asm/io.h>
-#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/smp.h>
#include <asm/machdep.h>
@@ -72,39 +69,10 @@ EXPORT_SYMBOL(agp_special_page);
void MMU_init(void);
-/*
- * this tells the system to map all of ram with the segregs
- * (i.e. page tables) instead of the bats.
- * -- Cort
- */
-int __map_without_bats;
-int __map_without_ltlbs;
-
/* max amount of low RAM to map in */
unsigned long __max_low_memory = MAX_LOW_MEM;
/*
- * Check for command-line options that affect what MMU_init will do.
- */
-static void __init MMU_setup(void)
-{
- /* Check for nobats option (used in mapin_ram). */
- if (strstr(boot_command_line, "nobats")) {
- __map_without_bats = 1;
- }
-
- if (strstr(boot_command_line, "noltlbs")) {
- __map_without_ltlbs = 1;
- }
- if (debug_pagealloc_enabled()) {
- __map_without_bats = 1;
- __map_without_ltlbs = 1;
- }
- if (strict_kernel_rwx_enabled() && !IS_ENABLED(CONFIG_PPC_8xx))
- __map_without_ltlbs = 1;
-}
-
-/*
* MMU_init sets up the basic memory mappings for the kernel,
* including both RAM and possibly some I/O regions,
* and sets up the page tables and the MMU hardware ready to go.
@@ -114,31 +82,15 @@ void __init MMU_init(void)
if (ppc_md.progress)
ppc_md.progress("MMU:enter", 0x111);
- /* parse args from command line */
- MMU_setup();
-
- /*
- * Reserve gigantic pages for hugetlb. This MUST occur before
- * lowmem_end_addr is initialized below.
- */
- if (memblock.memory.cnt > 1) {
-#ifndef CONFIG_WII
- memblock_enforce_memory_limit(memblock.memory.regions[0].size);
- pr_warn("Only using first contiguous memory region\n");
-#else
- wii_memory_fixups();
-#endif
- }
-
total_lowmem = total_memory = memblock_end_of_DRAM() - memstart_addr;
lowmem_end_addr = memstart_addr + total_lowmem;
-#ifdef CONFIG_FSL_BOOKE
+#ifdef CONFIG_PPC_85xx
/* Freescale Book-E parts expect lowmem to be mapped by fixed TLB
* entries, so we need to adjust lowmem to match the amount we can map
* in the fixed entries */
adjust_total_lowmem();
-#endif /* CONFIG_FSL_BOOKE */
+#endif /* CONFIG_PPC_85xx */
if (total_lowmem > __max_low_memory) {
total_lowmem = __max_low_memory;
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 4002ced3596f..05b0d584e50b 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -47,7 +47,6 @@
#include <asm/rtas.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
-#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <linux/uaccess.h>
#include <asm/smp.h>
@@ -60,6 +59,7 @@
#include <asm/sections.h>
#include <asm/iommu.h>
#include <asm/vdso.h>
+#include <asm/hugetlb.h>
#include <mm/mmu_decl.h>
@@ -111,7 +111,7 @@ static int __meminit vmemmap_populated(unsigned long vmemmap_addr, int vmemmap_m
}
/*
- * vmemmap virtual address space management does not have a traditonal page
+ * vmemmap virtual address space management does not have a traditional page
* table to track which virtual struct pages are backed by physical mapping.
* The virtual to physical mappings are tracked in a simple linked list
* format. 'vmemmap_list' maintains the entire vmemmap physical mapping at
@@ -128,7 +128,7 @@ static struct vmemmap_backing *next;
/*
* The same pointer 'next' tracks individual chunks inside the allocated
- * full page during the boot time and again tracks the freeed nodes during
+ * full page during the boot time and again tracks the freed nodes during
 * runtime. It is racy, but the race does not occur in practice because the
 * two uses are separated by the boot process. It would only be a problem if
 * a memory hotplug operation somehow happened during boot!
@@ -163,16 +163,16 @@ static __meminit struct vmemmap_backing * vmemmap_list_alloc(int node)
return next++;
}
-static __meminit void vmemmap_list_populate(unsigned long phys,
- unsigned long start,
- int node)
+static __meminit int vmemmap_list_populate(unsigned long phys,
+ unsigned long start,
+ int node)
{
struct vmemmap_backing *vmem_back;
vmem_back = vmemmap_list_alloc(node);
if (unlikely(!vmem_back)) {
- WARN_ON(1);
- return;
+		pr_debug("vmemmap list allocation failed\n");
+ return -ENOMEM;
}
vmem_back->phys = phys;
@@ -180,6 +180,7 @@ static __meminit void vmemmap_list_populate(unsigned long phys,
vmem_back->list = vmemmap_list;
vmemmap_list = vmem_back;
+ return 0;
}
static bool altmap_cross_boundary(struct vmem_altmap *altmap, unsigned long start,
@@ -200,10 +201,11 @@ static bool altmap_cross_boundary(struct vmem_altmap *altmap, unsigned long star
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
struct vmem_altmap *altmap)
{
+ bool altmap_alloc;
unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;
/* Align to the page size of the linear mapping. */
- start = _ALIGN_DOWN(start, page_size);
+ start = ALIGN_DOWN(start, page_size);
pr_debug("vmemmap_populate %lx..%lx, node %d\n", start, end, node);
@@ -226,16 +228,35 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
* fall back to system memory if the altmap allocation fail.
*/
if (altmap && !altmap_cross_boundary(altmap, start, page_size)) {
- p = altmap_alloc_block_buf(page_size, altmap);
+ p = vmemmap_alloc_block_buf(page_size, node, altmap);
if (!p)
pr_debug("altmap block allocation failed, falling back to system memory");
+ else
+ altmap_alloc = true;
+ }
+ if (!p) {
+ p = vmemmap_alloc_block_buf(page_size, node, NULL);
+ altmap_alloc = false;
}
- if (!p)
- p = vmemmap_alloc_block_buf(page_size, node);
if (!p)
return -ENOMEM;
- vmemmap_list_populate(__pa(p), start, node);
+ if (vmemmap_list_populate(__pa(p), start, node)) {
+ /*
+		 * If we don't populate the vmemmap list, we don't have
+ * the ability to free the allocated vmemmap
+ * pages in section_deactivate. Hence free them
+ * here.
+ */
+ int nr_pfns = page_size >> PAGE_SHIFT;
+ unsigned long page_order = get_order(page_size);
+
+ if (altmap_alloc)
+ vmem_altmap_free(altmap, nr_pfns);
+ else
+ free_pages((unsigned long)p, page_order);
+ return -ENOMEM;
+ }
pr_debug(" * %016lx..%016lx allocated at %p\n",
start, start + page_size, p);
@@ -265,10 +286,8 @@ static unsigned long vmemmap_list_free(unsigned long start)
vmem_back_prev = vmem_back;
}
- if (unlikely(!vmem_back)) {
- WARN_ON(1);
+ if (unlikely(!vmem_back))
return 0;
- }
/* remove it from vmemmap_list */
if (vmem_back == vmemmap_list) /* remove head */
@@ -292,7 +311,7 @@ void __ref vmemmap_free(unsigned long start, unsigned long end,
unsigned long alt_start = ~0, alt_end = ~0;
unsigned long base_pfn;
- start = _ALIGN_DOWN(start, page_size);
+ start = ALIGN_DOWN(start, page_size);
if (altmap) {
alt_start = altmap->base_pfn;
alt_end = altmap->base_pfn + altmap->reserve +
@@ -352,6 +371,12 @@ void register_page_bootmem_memmap(unsigned long section_nr,
#endif /* CONFIG_SPARSEMEM_VMEMMAP */
#ifdef CONFIG_PPC_BOOK3S_64
+unsigned int mmu_lpid_bits;
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+EXPORT_SYMBOL_GPL(mmu_lpid_bits);
+#endif
+unsigned int mmu_pid_bits;
+
static bool disable_radix = !IS_ENABLED(CONFIG_PPC_RADIX_MMU_DEFAULT);
static int __init parse_disable_radix(char *p)
@@ -407,21 +432,68 @@ static void __init early_check_vec5(void)
}
if (!(vec5[OV5_INDX(OV5_RADIX_GTSE)] &
OV5_FEAT(OV5_RADIX_GTSE))) {
- pr_warn("WARNING: Hypervisor doesn't support RADIX with GTSE\n");
- }
+ cur_cpu_spec->mmu_features &= ~MMU_FTR_GTSE;
+ } else
+ cur_cpu_spec->mmu_features |= MMU_FTR_GTSE;
/* Do radix anyway - the hypervisor said we had to */
cur_cpu_spec->mmu_features |= MMU_FTR_TYPE_RADIX;
} else if (mmu_supported == OV5_FEAT(OV5_MMU_HASH)) {
/* Hypervisor only supports hash - disable radix */
cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
+ cur_cpu_spec->mmu_features &= ~MMU_FTR_GTSE;
}
}
+static int __init dt_scan_mmu_pid_width(unsigned long node,
+ const char *uname, int depth,
+ void *data)
+{
+ int size = 0;
+ const __be32 *prop;
+ const char *type = of_get_flat_dt_prop(node, "device_type", NULL);
+
+ /* We are scanning "cpu" nodes only */
+ if (type == NULL || strcmp(type, "cpu") != 0)
+ return 0;
+
+ /* Find MMU LPID, PID register size */
+ prop = of_get_flat_dt_prop(node, "ibm,mmu-lpid-bits", &size);
+ if (prop && size == 4)
+ mmu_lpid_bits = be32_to_cpup(prop);
+
+ prop = of_get_flat_dt_prop(node, "ibm,mmu-pid-bits", &size);
+ if (prop && size == 4)
+ mmu_pid_bits = be32_to_cpup(prop);
+
+ if (!mmu_pid_bits && !mmu_lpid_bits)
+ return 0;
+
+ return 1;
+}
+
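of_scan_flat_dt() walks every node of the flattened device tree and stops as soon as the callback returns non-zero, so the scan above ends at the first cpu node that advertises either register width. A sketch of how it is driven, matching the call added further down:

	of_scan_flat_dt(dt_scan_mmu_pid_width, NULL);	/* runs before MMU setup, flat DT only */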
void __init mmu_early_init_devtree(void)
{
+ bool hvmode = !!(mfmsr() & MSR_HV);
+
/* Disable radix mode based on kernel command line. */
- if (disable_radix)
- cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
+ if (disable_radix) {
+ if (IS_ENABLED(CONFIG_PPC_64S_HASH_MMU))
+ cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
+ else
+ pr_warn("WARNING: Ignoring cmdline option disable_radix\n");
+ }
+
+ of_scan_flat_dt(dt_scan_mmu_pid_width, NULL);
+ if (hvmode && !mmu_lpid_bits) {
+ if (early_cpu_has_feature(CPU_FTR_ARCH_207S))
+ mmu_lpid_bits = 12; /* POWER8-10 */
+ else
+ mmu_lpid_bits = 10; /* POWER7 */
+ }
+ if (!mmu_pid_bits) {
+ if (early_cpu_has_feature(CPU_FTR_ARCH_300))
+ mmu_pid_bits = 20; /* POWER9-10 */
+ }
/*
* Check /chosen/ibm,architecture-vec-5 if running as a guest.
@@ -429,12 +501,27 @@ void __init mmu_early_init_devtree(void)
* even though the ibm,architecture-vec-5 property created by
* skiboot doesn't have the necessary bits set.
*/
- if (!(mfmsr() & MSR_HV))
+ if (!hvmode)
early_check_vec5();
- if (early_radix_enabled())
+ if (early_radix_enabled()) {
radix__early_init_devtree();
- else
+
+ /*
+ * We have finalized the translation we are going to use by now.
+ * Radix mode is not limited by RMA / VRMA addressing.
+ * Hence don't limit memblock allocations.
+ */
+ ppc64_rma_size = ULONG_MAX;
+ memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);
+ } else
hash__early_init_devtree();
+
+ if (IS_ENABLED(CONFIG_HUGETLB_PAGE_SIZE_VARIABLE))
+ hugetlbpage_init_defaultsize();
+
+ if (!(cur_cpu_spec->mmu_features & MMU_FTR_HPTE_TABLE) &&
+ !(cur_cpu_spec->mmu_features & MMU_FTR_TYPE_RADIX))
+ panic("kernel does not support any MMU type offered by platform");
}
#endif /* CONFIG_PPC_BOOK3S_64 */
diff --git a/arch/powerpc/mm/ioremap.c b/arch/powerpc/mm/ioremap.c
index fc669643ce6a..4f12504fb405 100644
--- a/arch/powerpc/mm/ioremap.c
+++ b/arch/powerpc/mm/ioremap.c
@@ -2,6 +2,7 @@
#include <linux/io.h>
#include <linux/slab.h>
+#include <linux/mmzone.h>
#include <linux/vmalloc.h>
#include <asm/io-workarounds.h>
@@ -92,7 +93,7 @@ void __iomem *do_ioremap(phys_addr_t pa, phys_addr_t offset, unsigned long size,
if (!ret)
return (void __iomem *)area->addr + offset;
- unmap_kernel_range(va, size);
+ vunmap_range(va, va + size);
free_vm_area(area);
return NULL;
diff --git a/arch/powerpc/mm/ioremap_32.c b/arch/powerpc/mm/ioremap_32.c
index 743e11384dea..9d13143b8be4 100644
--- a/arch/powerpc/mm/ioremap_32.c
+++ b/arch/powerpc/mm/ioremap_32.c
@@ -70,10 +70,10 @@ __ioremap_caller(phys_addr_t addr, unsigned long size, pgprot_t prot, void *call
*/
pr_warn("ioremap() called early from %pS. Use early_ioremap() instead\n", caller);
- err = early_ioremap_range(ioremap_bot - size, p, size, prot);
+ err = early_ioremap_range(ioremap_bot - size - PAGE_SIZE, p, size, prot);
if (err)
return NULL;
- ioremap_bot -= size;
+ ioremap_bot -= size + PAGE_SIZE;
return (void __iomem *)ioremap_bot + offset;
}
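The extra PAGE_SIZE leaves an unmapped guard page between successive early ioremaps, so running off the end of a mapping faults instead of silently hitting its neighbour. A worked example with made-up numbers:

	/*
	 * ioremap_bot = 0xff000000, size = 64K:
	 *   va     = 0xff000000 - 0x10000 - 0x1000 = 0xfefef000
	 *   mapped = 0xfefef000 .. 0xfeffefff
	 *   guard  = 0xfefff000 .. 0xfeffffff   (left unmapped)
	 *   ioremap_bot becomes 0xfefef000 for the next caller
	 */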
diff --git a/arch/powerpc/mm/ioremap_64.c b/arch/powerpc/mm/ioremap_64.c
index 50a99d9684f7..3acece00b33e 100644
--- a/arch/powerpc/mm/ioremap_64.c
+++ b/arch/powerpc/mm/ioremap_64.c
@@ -4,56 +4,6 @@
#include <linux/slab.h>
#include <linux/vmalloc.h>
-/**
- * Low level function to establish the page tables for an IO mapping
- */
-void __iomem *__ioremap_at(phys_addr_t pa, void *ea, unsigned long size, pgprot_t prot)
-{
- int ret;
- unsigned long va = (unsigned long)ea;
-
- /* We don't support the 4K PFN hack with ioremap */
- if (pgprot_val(prot) & H_PAGE_4K_PFN)
- return NULL;
-
- if ((ea + size) >= (void *)IOREMAP_END) {
- pr_warn("Outside the supported range\n");
- return NULL;
- }
-
- WARN_ON(pa & ~PAGE_MASK);
- WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
- WARN_ON(size & ~PAGE_MASK);
-
- if (slab_is_available()) {
- ret = ioremap_page_range(va, va + size, pa, prot);
- if (ret)
- unmap_kernel_range(va, size);
- } else {
- ret = early_ioremap_range(va, pa, size, prot);
- }
-
- if (ret)
- return NULL;
-
- return (void __iomem *)ea;
-}
-EXPORT_SYMBOL(__ioremap_at);
-
-/**
- * Low level function to tear down the page tables for an IO mapping. This is
- * used for mappings that are manipulated manually, like partial unmapping of
- * PCI IOs or ISA space.
- */
-void __iounmap_at(void *ea, unsigned long size)
-{
- WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
- WARN_ON(size & ~PAGE_MASK);
-
- unmap_kernel_range((unsigned long)ea, size);
-}
-EXPORT_SYMBOL(__iounmap_at);
-
void __iomem *__ioremap_caller(phys_addr_t addr, unsigned long size,
pgprot_t prot, void *caller)
{
@@ -88,7 +38,7 @@ void __iomem *__ioremap_caller(phys_addr_t addr, unsigned long size,
return NULL;
ret = (void __iomem *)ioremap_bot + offset;
- ioremap_bot += size;
+ ioremap_bot += size + PAGE_SIZE;
return ret;
}
diff --git a/arch/powerpc/mm/kasan/8xx.c b/arch/powerpc/mm/kasan/8xx.c
new file mode 100644
index 000000000000..2784224054f8
--- /dev/null
+++ b/arch/powerpc/mm/kasan/8xx.c
@@ -0,0 +1,73 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#define DISABLE_BRANCH_PROFILING
+
+#include <linux/kasan.h>
+#include <linux/memblock.h>
+#include <linux/hugetlb.h>
+
+static int __init
+kasan_init_shadow_8M(unsigned long k_start, unsigned long k_end, void *block)
+{
+ pmd_t *pmd = pmd_off_k(k_start);
+ unsigned long k_cur, k_next;
+
+ for (k_cur = k_start; k_cur != k_end; k_cur = k_next, pmd += 2, block += SZ_8M) {
+ pte_basic_t *new;
+
+ k_next = pgd_addr_end(k_cur, k_end);
+ k_next = pgd_addr_end(k_next, k_end);
+ if ((void *)pmd_page_vaddr(*pmd) != kasan_early_shadow_pte)
+ continue;
+
+ new = memblock_alloc(sizeof(pte_basic_t), SZ_4K);
+ if (!new)
+ return -ENOMEM;
+
+ *new = pte_val(pte_mkhuge(pfn_pte(PHYS_PFN(__pa(block)), PAGE_KERNEL)));
+
+ hugepd_populate_kernel((hugepd_t *)pmd, (pte_t *)new, PAGE_SHIFT_8M);
+ hugepd_populate_kernel((hugepd_t *)pmd + 1, (pte_t *)new, PAGE_SHIFT_8M);
+ }
+ return 0;
+}
+
+int __init kasan_init_region(void *start, size_t size)
+{
+ unsigned long k_start = (unsigned long)kasan_mem_to_shadow(start);
+ unsigned long k_end = (unsigned long)kasan_mem_to_shadow(start + size);
+ unsigned long k_cur;
+ int ret;
+ void *block;
+
+ block = memblock_alloc(k_end - k_start, SZ_8M);
+ if (!block)
+ return -ENOMEM;
+
+ if (IS_ALIGNED(k_start, SZ_8M)) {
+ kasan_init_shadow_8M(k_start, ALIGN_DOWN(k_end, SZ_8M), block);
+ k_cur = ALIGN_DOWN(k_end, SZ_8M);
+ if (k_cur == k_end)
+ goto finish;
+ } else {
+ k_cur = k_start;
+ }
+
+ ret = kasan_init_shadow_page_tables(k_start, k_end);
+ if (ret)
+ return ret;
+
+ for (; k_cur < k_end; k_cur += PAGE_SIZE) {
+ pmd_t *pmd = pmd_off_k(k_cur);
+ void *va = block + k_cur - k_start;
+ pte_t pte = pfn_pte(PHYS_PFN(__pa(va)), PAGE_KERNEL);
+
+ if (k_cur < ALIGN_DOWN(k_end, SZ_512K))
+ pte = pte_mkhuge(pte);
+
+ __set_pte_at(&init_mm, k_cur, pte_offset_kernel(pmd, k_cur), pte, 0);
+ }
+finish:
+ flush_tlb_kernel_range(k_start, k_end);
+ return 0;
+}
diff --git a/arch/powerpc/mm/kasan/Makefile b/arch/powerpc/mm/kasan/Makefile
index 6577897673dd..699eeffd9f55 100644
--- a/arch/powerpc/mm/kasan/Makefile
+++ b/arch/powerpc/mm/kasan/Makefile
@@ -2,4 +2,8 @@
KASAN_SANITIZE := n
-obj-$(CONFIG_PPC32) += kasan_init_32.o
+obj-$(CONFIG_PPC32) += init_32.o
+obj-$(CONFIG_PPC_8xx) += 8xx.o
+obj-$(CONFIG_PPC_BOOK3S_32) += book3s_32.o
+obj-$(CONFIG_PPC_BOOK3S_64) += init_book3s_64.o
+obj-$(CONFIG_PPC_BOOK3E_64) += init_book3e_64.o
diff --git a/arch/powerpc/mm/kasan/book3s_32.c b/arch/powerpc/mm/kasan/book3s_32.c
new file mode 100644
index 000000000000..450a67ef0bbe
--- /dev/null
+++ b/arch/powerpc/mm/kasan/book3s_32.c
@@ -0,0 +1,60 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#define DISABLE_BRANCH_PROFILING
+
+#include <linux/kasan.h>
+#include <linux/memblock.h>
+#include <mm/mmu_decl.h>
+
+int __init kasan_init_region(void *start, size_t size)
+{
+ unsigned long k_start = (unsigned long)kasan_mem_to_shadow(start);
+ unsigned long k_end = (unsigned long)kasan_mem_to_shadow(start + size);
+ unsigned long k_nobat = k_start;
+ unsigned long k_cur;
+ phys_addr_t phys;
+ int ret;
+
+ while (k_nobat < k_end) {
+ unsigned int k_size = bat_block_size(k_nobat, k_end);
+ int idx = find_free_bat();
+
+ if (idx == -1)
+ break;
+ if (k_size < SZ_128K)
+ break;
+ phys = memblock_phys_alloc_range(k_size, k_size, 0,
+ MEMBLOCK_ALLOC_ANYWHERE);
+ if (!phys)
+ break;
+
+ setbat(idx, k_nobat, phys, k_size, PAGE_KERNEL);
+ k_nobat += k_size;
+ }
+ if (k_nobat != k_start)
+ update_bats();
+
+ if (k_nobat < k_end) {
+ phys = memblock_phys_alloc_range(k_end - k_nobat, PAGE_SIZE, 0,
+ MEMBLOCK_ALLOC_ANYWHERE);
+ if (!phys)
+ return -ENOMEM;
+ }
+
+ ret = kasan_init_shadow_page_tables(k_start, k_end);
+ if (ret)
+ return ret;
+
+ kasan_update_early_region(k_start, k_nobat, __pte(0));
+
+ for (k_cur = k_nobat; k_cur < k_end; k_cur += PAGE_SIZE) {
+ pmd_t *pmd = pmd_off_k(k_cur);
+ pte_t pte = pfn_pte(PHYS_PFN(phys + k_cur - k_nobat), PAGE_KERNEL);
+
+ __set_pte_at(&init_mm, k_cur, pte_offset_kernel(pmd, k_cur), pte, 0);
+ }
+ flush_tlb_kernel_range(k_start, k_end);
+ memset(kasan_mem_to_shadow(start), 0, k_end - k_start);
+
+ return 0;
+}
diff --git a/arch/powerpc/mm/kasan/kasan_init_32.c b/arch/powerpc/mm/kasan/init_32.c
index d2bed3fcb719..a70828a6d935 100644
--- a/arch/powerpc/mm/kasan/kasan_init_32.c
+++ b/arch/powerpc/mm/kasan/init_32.c
@@ -5,9 +5,7 @@
#include <linux/kasan.h>
#include <linux/printk.h>
#include <linux/memblock.h>
-#include <linux/moduleloader.h>
#include <linux/sched/task.h>
-#include <linux/vmalloc.h>
#include <asm/pgalloc.h>
#include <asm/code-patching.h>
#include <mm/mmu_decl.h>
@@ -27,43 +25,34 @@ static void __init kasan_populate_pte(pte_t *ptep, pgprot_t prot)
int i;
for (i = 0; i < PTRS_PER_PTE; i++, ptep++)
- __set_pte_at(&init_mm, va, ptep, pfn_pte(PHYS_PFN(pa), prot), 0);
+ __set_pte_at(&init_mm, va, ptep, pfn_pte(PHYS_PFN(pa), prot), 1);
}
-static int __init kasan_init_shadow_page_tables(unsigned long k_start, unsigned long k_end)
+int __init kasan_init_shadow_page_tables(unsigned long k_start, unsigned long k_end)
{
pmd_t *pmd;
unsigned long k_cur, k_next;
- pte_t *new = NULL;
- pmd = pmd_offset(pud_offset(pgd_offset_k(k_start), k_start), k_start);
+ pmd = pmd_off_k(k_start);
for (k_cur = k_start; k_cur != k_end; k_cur = k_next, pmd++) {
+ pte_t *new;
+
k_next = pgd_addr_end(k_cur, k_end);
if ((void *)pmd_page_vaddr(*pmd) != kasan_early_shadow_pte)
continue;
- if (!new)
- new = memblock_alloc(PTE_FRAG_SIZE, PTE_FRAG_SIZE);
+ new = memblock_alloc(PTE_FRAG_SIZE, PTE_FRAG_SIZE);
if (!new)
return -ENOMEM;
kasan_populate_pte(new, PAGE_KERNEL);
-
- smp_wmb(); /* See comment in __pte_alloc */
-
- spin_lock(&init_mm.page_table_lock);
- /* Has another populated it ? */
- if (likely((void *)pmd_page_vaddr(*pmd) == kasan_early_shadow_pte)) {
- pmd_populate_kernel(&init_mm, pmd, new);
- new = NULL;
- }
- spin_unlock(&init_mm.page_table_lock);
+ pmd_populate_kernel(&init_mm, pmd, new);
}
return 0;
}
-static int __init kasan_init_region(void *start, size_t size)
+int __init __weak kasan_init_region(void *start, size_t size)
{
unsigned long k_start = (unsigned long)kasan_mem_to_shadow(start);
unsigned long k_end = (unsigned long)kasan_mem_to_shadow(start + size);
@@ -76,78 +65,83 @@ static int __init kasan_init_region(void *start, size_t size)
return ret;
block = memblock_alloc(k_end - k_start, PAGE_SIZE);
+ if (!block)
+ return -ENOMEM;
for (k_cur = k_start & PAGE_MASK; k_cur < k_end; k_cur += PAGE_SIZE) {
- pmd_t *pmd = pmd_offset(pud_offset(pgd_offset_k(k_cur), k_cur), k_cur);
+ pmd_t *pmd = pmd_off_k(k_cur);
void *va = block + k_cur - k_start;
pte_t pte = pfn_pte(PHYS_PFN(__pa(va)), PAGE_KERNEL);
- if (!va)
- return -ENOMEM;
-
__set_pte_at(&init_mm, k_cur, pte_offset_kernel(pmd, k_cur), pte, 0);
}
flush_tlb_kernel_range(k_start, k_end);
return 0;
}
-static void __init kasan_remap_early_shadow_ro(void)
+void __init
+kasan_update_early_region(unsigned long k_start, unsigned long k_end, pte_t pte)
{
- pgprot_t prot = kasan_prot_ro();
- unsigned long k_start = KASAN_SHADOW_START;
- unsigned long k_end = KASAN_SHADOW_END;
unsigned long k_cur;
- phys_addr_t pa = __pa(kasan_early_shadow_page);
-
- kasan_populate_pte(kasan_early_shadow_pte, prot);
- for (k_cur = k_start & PAGE_MASK; k_cur < k_end; k_cur += PAGE_SIZE) {
- pmd_t *pmd = pmd_offset(pud_offset(pgd_offset_k(k_cur), k_cur), k_cur);
+ for (k_cur = k_start; k_cur != k_end; k_cur += PAGE_SIZE) {
+ pmd_t *pmd = pmd_off_k(k_cur);
pte_t *ptep = pte_offset_kernel(pmd, k_cur);
- if ((pte_val(*ptep) & PTE_RPN_MASK) != pa)
+ if (pte_page(*ptep) != virt_to_page(lm_alias(kasan_early_shadow_page)))
continue;
- __set_pte_at(&init_mm, k_cur, ptep, pfn_pte(PHYS_PFN(pa), prot), 0);
+ __set_pte_at(&init_mm, k_cur, ptep, pte, 0);
}
- flush_tlb_kernel_range(KASAN_SHADOW_START, KASAN_SHADOW_END);
+
+ flush_tlb_kernel_range(k_start, k_end);
+}
+
+static void __init kasan_remap_early_shadow_ro(void)
+{
+ pgprot_t prot = kasan_prot_ro();
+ phys_addr_t pa = __pa(kasan_early_shadow_page);
+
+ kasan_populate_pte(kasan_early_shadow_pte, prot);
+
+ kasan_update_early_region(KASAN_SHADOW_START, KASAN_SHADOW_END,
+ pfn_pte(PHYS_PFN(pa), prot));
}
static void __init kasan_unmap_early_shadow_vmalloc(void)
{
unsigned long k_start = (unsigned long)kasan_mem_to_shadow((void *)VMALLOC_START);
unsigned long k_end = (unsigned long)kasan_mem_to_shadow((void *)VMALLOC_END);
- unsigned long k_cur;
- phys_addr_t pa = __pa(kasan_early_shadow_page);
- for (k_cur = k_start & PAGE_MASK; k_cur < k_end; k_cur += PAGE_SIZE) {
- pmd_t *pmd = pmd_offset(pud_offset(pgd_offset_k(k_cur), k_cur), k_cur);
- pte_t *ptep = pte_offset_kernel(pmd, k_cur);
+ kasan_update_early_region(k_start, k_end, __pte(0));
- if ((pte_val(*ptep) & PTE_RPN_MASK) != pa)
- continue;
-
- __set_pte_at(&init_mm, k_cur, ptep, __pte(0), 0);
- }
- flush_tlb_kernel_range(k_start, k_end);
+#ifdef MODULES_VADDR
+ k_start = (unsigned long)kasan_mem_to_shadow((void *)MODULES_VADDR);
+ k_end = (unsigned long)kasan_mem_to_shadow((void *)MODULES_END);
+ kasan_update_early_region(k_start, k_end, __pte(0));
+#endif
}
void __init kasan_mmu_init(void)
{
int ret;
- struct memblock_region *reg;
- if (early_mmu_has_feature(MMU_FTR_HPTE_TABLE) ||
- IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
+ if (early_mmu_has_feature(MMU_FTR_HPTE_TABLE)) {
ret = kasan_init_shadow_page_tables(KASAN_SHADOW_START, KASAN_SHADOW_END);
if (ret)
panic("kasan: kasan_init_shadow_page_tables() failed");
}
+}
+
+void __init kasan_init(void)
+{
+ phys_addr_t base, end;
+ u64 i;
+ int ret;
- for_each_memblock(memory, reg) {
- phys_addr_t base = reg->base;
- phys_addr_t top = min(base + reg->size, total_lowmem);
+ for_each_mem_range(i, &base, &end) {
+ phys_addr_t top = min(end, total_lowmem);
if (base >= top)
continue;
@@ -156,10 +150,14 @@ void __init kasan_mmu_init(void)
if (ret)
panic("kasan: kasan_init_region() failed");
}
-}
-void __init kasan_init(void)
-{
+ if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
+ ret = kasan_init_shadow_page_tables(KASAN_SHADOW_START, KASAN_SHADOW_END);
+
+ if (ret)
+ panic("kasan: kasan_init_shadow_page_tables() failed");
+ }
+
kasan_remap_early_shadow_ro();
clear_page(kasan_early_shadow_page);
@@ -175,28 +173,12 @@ void __init kasan_late_init(void)
kasan_unmap_early_shadow_vmalloc();
}
-#ifdef CONFIG_PPC_BOOK3S_32
-u8 __initdata early_hash[256 << 10] __aligned(256 << 10) = {0};
-
-static void __init kasan_early_hash_table(void)
-{
- unsigned int hash = __pa(early_hash);
-
- modify_instruction_site(&patch__hash_page_A0, 0xffff, hash >> 16);
- modify_instruction_site(&patch__flush_hash_A0, 0xffff, hash >> 16);
-
- Hash = (struct hash_pte *)early_hash;
-}
-#else
-static void __init kasan_early_hash_table(void) {}
-#endif
-
void __init kasan_early_init(void)
{
unsigned long addr = KASAN_SHADOW_START;
unsigned long end = KASAN_SHADOW_END;
unsigned long next;
- pmd_t *pmd = pmd_offset(pud_offset(pgd_offset_k(addr), addr), addr);
+ pmd_t *pmd = pmd_off_k(addr);
BUILD_BUG_ON(KASAN_SHADOW_START & ~PGDIR_MASK);
@@ -206,7 +188,4 @@ void __init kasan_early_init(void)
next = pgd_addr_end(addr, end);
pmd_populate_kernel(&init_mm, pmd, kasan_early_shadow_pte);
} while (pmd++, addr = next, addr != end);
-
- if (early_mmu_has_feature(MMU_FTR_HPTE_TABLE))
- kasan_early_hash_table();
}
diff --git a/arch/powerpc/mm/kasan/init_book3e_64.c b/arch/powerpc/mm/kasan/init_book3e_64.c
new file mode 100644
index 000000000000..11519e88dc6b
--- /dev/null
+++ b/arch/powerpc/mm/kasan/init_book3e_64.c
@@ -0,0 +1,133 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * KASAN for 64-bit Book3e powerpc
+ *
+ * Copyright 2022, Christophe Leroy, CS GROUP France
+ */
+
+#define DISABLE_BRANCH_PROFILING
+
+#include <linux/kasan.h>
+#include <linux/printk.h>
+#include <linux/memblock.h>
+#include <linux/set_memory.h>
+
+#include <asm/pgalloc.h>
+
+static inline bool kasan_pud_table(p4d_t p4d)
+{
+ return p4d_page(p4d) == virt_to_page(lm_alias(kasan_early_shadow_pud));
+}
+
+static inline bool kasan_pmd_table(pud_t pud)
+{
+ return pud_page(pud) == virt_to_page(lm_alias(kasan_early_shadow_pmd));
+}
+
+static inline bool kasan_pte_table(pmd_t pmd)
+{
+ return pmd_page(pmd) == virt_to_page(lm_alias(kasan_early_shadow_pte));
+}
+
+static int __init kasan_map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot)
+{
+ pgd_t *pgdp;
+ p4d_t *p4dp;
+ pud_t *pudp;
+ pmd_t *pmdp;
+ pte_t *ptep;
+
+ pgdp = pgd_offset_k(ea);
+ p4dp = p4d_offset(pgdp, ea);
+ if (kasan_pud_table(*p4dp)) {
+ pudp = memblock_alloc(PUD_TABLE_SIZE, PUD_TABLE_SIZE);
+ memcpy(pudp, kasan_early_shadow_pud, PUD_TABLE_SIZE);
+ p4d_populate(&init_mm, p4dp, pudp);
+ }
+ pudp = pud_offset(p4dp, ea);
+ if (kasan_pmd_table(*pudp)) {
+ pmdp = memblock_alloc(PMD_TABLE_SIZE, PMD_TABLE_SIZE);
+ memcpy(pmdp, kasan_early_shadow_pmd, PMD_TABLE_SIZE);
+ pud_populate(&init_mm, pudp, pmdp);
+ }
+ pmdp = pmd_offset(pudp, ea);
+ if (kasan_pte_table(*pmdp)) {
+ ptep = memblock_alloc(PTE_TABLE_SIZE, PTE_TABLE_SIZE);
+ memcpy(ptep, kasan_early_shadow_pte, PTE_TABLE_SIZE);
+ pmd_populate_kernel(&init_mm, pmdp, ptep);
+ }
+ ptep = pte_offset_kernel(pmdp, ea);
+
+ __set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT, prot), 0);
+
+ return 0;
+}
+
+static void __init kasan_init_phys_region(void *start, void *end)
+{
+ unsigned long k_start, k_end, k_cur;
+ void *va;
+
+ if (start >= end)
+ return;
+
+ k_start = ALIGN_DOWN((unsigned long)kasan_mem_to_shadow(start), PAGE_SIZE);
+ k_end = ALIGN((unsigned long)kasan_mem_to_shadow(end), PAGE_SIZE);
+
+ va = memblock_alloc(k_end - k_start, PAGE_SIZE);
+ for (k_cur = k_start; k_cur < k_end; k_cur += PAGE_SIZE, va += PAGE_SIZE)
+ kasan_map_kernel_page(k_cur, __pa(va), PAGE_KERNEL);
+}
+
+void __init kasan_early_init(void)
+{
+ int i;
+ unsigned long addr;
+ pgd_t *pgd = pgd_offset_k(KASAN_SHADOW_START);
+ pte_t zero_pte = pfn_pte(virt_to_pfn(kasan_early_shadow_page), PAGE_KERNEL);
+
+ BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, PGDIR_SIZE));
+ BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, PGDIR_SIZE));
+
+ for (i = 0; i < PTRS_PER_PTE; i++)
+ __set_pte_at(&init_mm, (unsigned long)kasan_early_shadow_page,
+ &kasan_early_shadow_pte[i], zero_pte, 0);
+
+ for (i = 0; i < PTRS_PER_PMD; i++)
+ pmd_populate_kernel(&init_mm, &kasan_early_shadow_pmd[i],
+ kasan_early_shadow_pte);
+
+ for (i = 0; i < PTRS_PER_PUD; i++)
+ pud_populate(&init_mm, &kasan_early_shadow_pud[i],
+ kasan_early_shadow_pmd);
+
+ for (addr = KASAN_SHADOW_START; addr != KASAN_SHADOW_END; addr += PGDIR_SIZE)
+ p4d_populate(&init_mm, p4d_offset(pgd++, addr), kasan_early_shadow_pud);
+}
+
+void __init kasan_init(void)
+{
+ phys_addr_t start, end;
+ u64 i;
+ pte_t zero_pte = pfn_pte(virt_to_pfn(kasan_early_shadow_page), PAGE_KERNEL_RO);
+
+ for_each_mem_range(i, &start, &end)
+ kasan_init_phys_region((void *)start, (void *)end);
+
+ if (IS_ENABLED(CONFIG_KASAN_VMALLOC))
+ kasan_remove_zero_shadow((void *)VMALLOC_START, VMALLOC_SIZE);
+
+ for (i = 0; i < PTRS_PER_PTE; i++)
+ __set_pte_at(&init_mm, (unsigned long)kasan_early_shadow_page,
+ &kasan_early_shadow_pte[i], zero_pte, 0);
+
+ flush_tlb_kernel_range(KASAN_SHADOW_START, KASAN_SHADOW_END);
+
+ memset(kasan_early_shadow_page, 0, PAGE_SIZE);
+
+ /* Enable error messages */
+ init_task.kasan_depth = 0;
+ pr_info("KASAN init done\n");
+}
+
+void __init kasan_late_init(void) { }
diff --git a/arch/powerpc/mm/kasan/init_book3s_64.c b/arch/powerpc/mm/kasan/init_book3s_64.c
new file mode 100644
index 000000000000..9300d641cf9a
--- /dev/null
+++ b/arch/powerpc/mm/kasan/init_book3s_64.c
@@ -0,0 +1,104 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * KASAN for 64-bit Book3S powerpc
+ *
+ * Copyright 2019-2022, Daniel Axtens, IBM Corporation.
+ */
+
+/*
+ * ppc64 turns on virtual memory late in boot, after calling into generic code
+ * like the device-tree parser, so it uses this in conjunction with a hook in
+ * outline mode to avoid invalid access early in boot.
+ */
+
+#define DISABLE_BRANCH_PROFILING
+
+#include <linux/kasan.h>
+#include <linux/printk.h>
+#include <linux/sched/task.h>
+#include <linux/memblock.h>
+#include <asm/pgalloc.h>
+
+DEFINE_STATIC_KEY_FALSE(powerpc_kasan_enabled_key);
+
+static void __init kasan_init_phys_region(void *start, void *end)
+{
+ unsigned long k_start, k_end, k_cur;
+ void *va;
+
+ if (start >= end)
+ return;
+
+ k_start = ALIGN_DOWN((unsigned long)kasan_mem_to_shadow(start), PAGE_SIZE);
+ k_end = ALIGN((unsigned long)kasan_mem_to_shadow(end), PAGE_SIZE);
+
+ va = memblock_alloc(k_end - k_start, PAGE_SIZE);
+ for (k_cur = k_start; k_cur < k_end; k_cur += PAGE_SIZE, va += PAGE_SIZE)
+ map_kernel_page(k_cur, __pa(va), PAGE_KERNEL);
+}
+
+void __init kasan_init(void)
+{
+ /*
+ * We want to do the following things:
+ * 1) Map real memory into the shadow for all physical memblocks
+ * This takes us from c000... to c008...
+ * 2) Leave a hole over the shadow of vmalloc space. KASAN_VMALLOC
+ * will manage this for us.
+ * This takes us from c008... to c00a...
+ * 3) Map the 'early shadow'/zero page over iomap and vmemmap space.
+ * This takes us up to where we start at c00e...
+ */
+
+ void *k_start = kasan_mem_to_shadow((void *)RADIX_VMALLOC_END);
+ void *k_end = kasan_mem_to_shadow((void *)RADIX_VMEMMAP_END);
+ phys_addr_t start, end;
+ u64 i;
+ pte_t zero_pte = pfn_pte(virt_to_pfn(kasan_early_shadow_page), PAGE_KERNEL);
+
+ if (!early_radix_enabled()) {
+ pr_warn("KASAN not enabled as it requires radix!");
+ return;
+ }
+
+ for_each_mem_range(i, &start, &end)
+ kasan_init_phys_region((void *)start, (void *)end);
+
+ for (i = 0; i < PTRS_PER_PTE; i++)
+ __set_pte_at(&init_mm, (unsigned long)kasan_early_shadow_page,
+ &kasan_early_shadow_pte[i], zero_pte, 0);
+
+ for (i = 0; i < PTRS_PER_PMD; i++)
+ pmd_populate_kernel(&init_mm, &kasan_early_shadow_pmd[i],
+ kasan_early_shadow_pte);
+
+ for (i = 0; i < PTRS_PER_PUD; i++)
+ pud_populate(&init_mm, &kasan_early_shadow_pud[i],
+ kasan_early_shadow_pmd);
+
+ /* map the early shadow over the iomap and vmemmap space */
+ kasan_populate_early_shadow(k_start, k_end);
+
+ /* mark early shadow region as RO and wipe it */
+ zero_pte = pfn_pte(virt_to_pfn(kasan_early_shadow_page), PAGE_KERNEL_RO);
+ for (i = 0; i < PTRS_PER_PTE; i++)
+ __set_pte_at(&init_mm, (unsigned long)kasan_early_shadow_page,
+ &kasan_early_shadow_pte[i], zero_pte, 0);
+
+ /*
+ * clear_page relies on some cache info that hasn't been set up yet.
+ * It ends up looping ~forever and blows up other data.
+ * Use memset instead.
+ */
+ memset(kasan_early_shadow_page, 0, PAGE_SIZE);
+
+ static_branch_inc(&powerpc_kasan_enabled_key);
+
+ /* Enable error messages */
+ init_task.kasan_depth = 0;
+ pr_info("KASAN init done\n");
+}
+
+void __init kasan_early_init(void) { }
+
+void __init kasan_late_init(void) { }
diff --git a/arch/powerpc/mm/maccess.c b/arch/powerpc/mm/maccess.c
new file mode 100644
index 000000000000..ea821d0ffe16
--- /dev/null
+++ b/arch/powerpc/mm/maccess.c
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/uaccess.h>
+#include <linux/kernel.h>
+
+#include <asm/disassemble.h>
+#include <asm/inst.h>
+#include <asm/ppc-opcode.h>
+
+bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size)
+{
+ return is_kernel_addr((unsigned long)unsafe_src);
+}
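This hook is consulted by the generic accessor in mm/maccess.c; returning false makes probes of user addresses fail cleanly rather than being attempted with kernel privileges. A simplified sketch of the generic caller, not the exact implementation:

	long copy_from_kernel_nofault(void *dst, const void *src, size_t size)
	{
		if (!copy_from_kernel_nofault_allowed(src, size))
			return -ERANGE;		/* e.g. a user-space address on powerpc */
		/* pagefault_disable(); __get_kernel_nofault() copy loop; ... */
		return 0;
	}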
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 1c07d5a3f543..84d171953ba4 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -12,67 +12,27 @@
* Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
*/
-#include <linux/export.h>
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/gfp.h>
-#include <linux/types.h>
-#include <linux/mm.h>
-#include <linux/stddef.h>
-#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/highmem.h>
-#include <linux/initrd.h>
-#include <linux/pagemap.h>
#include <linux/suspend.h>
-#include <linux/hugetlb.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-#include <linux/memremap.h>
#include <linux/dma-direct.h>
-#include <asm/pgalloc.h>
-#include <asm/prom.h>
-#include <asm/io.h>
-#include <asm/mmu_context.h>
-#include <asm/pgtable.h>
-#include <asm/mmu.h>
-#include <asm/smp.h>
-#include <asm/machdep.h>
-#include <asm/btext.h>
-#include <asm/tlb.h>
-#include <asm/sections.h>
-#include <asm/sparsemem.h>
-#include <asm/vdso.h>
-#include <asm/fixmap.h>
#include <asm/swiotlb.h>
+#include <asm/machdep.h>
#include <asm/rtas.h>
#include <asm/kasan.h>
+#include <asm/svm.h>
+#include <asm/mmzone.h>
+#include <asm/ftrace.h>
+#include <asm/code-patching.h>
+#include <asm/setup.h>
#include <mm/mmu_decl.h>
-#ifndef CPU_FTR_COHERENT_ICACHE
-#define CPU_FTR_COHERENT_ICACHE 0 /* XXX for now */
-#define CPU_FTR_NOEXECUTE 0
-#endif
-
unsigned long long memory_limit;
-bool init_mem_is_free;
-#ifdef CONFIG_HIGHMEM
-pte_t *kmap_pte;
-EXPORT_SYMBOL(kmap_pte);
-pgprot_t kmap_prot;
-EXPORT_SYMBOL(kmap_prot);
-
-static inline pte_t *virt_to_kpte(unsigned long vaddr)
-{
- return pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr),
- vaddr), vaddr), vaddr);
-}
-#endif
+unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
+EXPORT_SYMBOL(empty_zero_page);
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
unsigned long size, pgprot_t vma_prot)
@@ -88,15 +48,18 @@ pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
EXPORT_SYMBOL(phys_mem_access_prot);
#ifdef CONFIG_MEMORY_HOTPLUG
+static DEFINE_MUTEX(linear_mapping_mutex);
#ifdef CONFIG_NUMA
int memory_add_physaddr_to_nid(u64 start)
{
return hot_add_scn_to_nid(start);
}
+EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif
-int __weak create_section_mapping(unsigned long start, unsigned long end, int nid)
+int __weak create_section_mapping(unsigned long start, unsigned long end,
+ int nid, pgprot_t prot)
{
return -ENODEV;
}
@@ -106,74 +69,102 @@ int __weak remove_section_mapping(unsigned long start, unsigned long end)
return -ENODEV;
}
-#define FLUSH_CHUNK_SIZE SZ_1G
-/**
- * flush_dcache_range_chunked(): Write any modified data cache blocks out to
- * memory and invalidate them, in chunks of up to FLUSH_CHUNK_SIZE
- * Does not invalidate the corresponding instruction cache blocks.
- *
- * @start: the start address
- * @stop: the stop address (exclusive)
- * @chunk: the max size of the chunks
- */
-static void flush_dcache_range_chunked(unsigned long start, unsigned long stop,
- unsigned long chunk)
-{
- unsigned long i;
-
- for (i = start; i < stop; i += chunk) {
- flush_dcache_range(i, min(stop, i + chunk));
- cond_resched();
- }
-}
-
-int __ref arch_add_memory(int nid, u64 start, u64 size,
- struct mhp_restrictions *restrictions)
+int __ref arch_create_linear_mapping(int nid, u64 start, u64 size,
+ struct mhp_params *params)
{
- unsigned long start_pfn = start >> PAGE_SHIFT;
- unsigned long nr_pages = size >> PAGE_SHIFT;
int rc;
- resize_hpt_for_hotplug(memblock_phys_mem_size());
-
start = (unsigned long)__va(start);
- rc = create_section_mapping(start, start + size, nid);
+ mutex_lock(&linear_mapping_mutex);
+ rc = create_section_mapping(start, start + size, nid,
+ params->pgprot);
+ mutex_unlock(&linear_mapping_mutex);
if (rc) {
- pr_warn("Unable to create mapping for hot added memory 0x%llx..0x%llx: %d\n",
+ pr_warn("Unable to create linear mapping for 0x%llx..0x%llx: %d\n",
start, start + size, rc);
return -EFAULT;
}
-
- return __add_pages(nid, start_pfn, nr_pages, restrictions);
+ return 0;
}
-void __ref arch_remove_memory(int nid, u64 start, u64 size,
- struct vmem_altmap *altmap)
+void __ref arch_remove_linear_mapping(u64 start, u64 size)
{
- unsigned long start_pfn = start >> PAGE_SHIFT;
- unsigned long nr_pages = size >> PAGE_SHIFT;
int ret;
- __remove_pages(start_pfn, nr_pages, altmap);
-
/* Remove htab bolted mappings for this section of memory */
start = (unsigned long)__va(start);
- flush_dcache_range_chunked(start, start + size, FLUSH_CHUNK_SIZE);
+ mutex_lock(&linear_mapping_mutex);
ret = remove_section_mapping(start, start + size);
- WARN_ON_ONCE(ret);
+ mutex_unlock(&linear_mapping_mutex);
+ if (ret)
+ pr_warn("Unable to remove linear mapping for 0x%llx..0x%llx: %d\n",
+ start, start + size, ret);
/* Ensure all vmalloc mappings are flushed in case they also
* hit that section of memory
*/
vm_unmap_aliases();
+}
+
+/*
+ * After memory hotplug the variables max_pfn, max_low_pfn and high_memory need
+ * updating.
+ */
+static void update_end_of_memory_vars(u64 start, u64 size)
+{
+ unsigned long end_pfn = PFN_UP(start + size);
+
+ if (end_pfn > max_pfn) {
+ max_pfn = end_pfn;
+ max_low_pfn = end_pfn;
+ high_memory = (void *)__va(max_pfn * PAGE_SIZE - 1) + 1;
+ }
+}
+
+int __ref add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
+ struct mhp_params *params)
+{
+ int ret;
+
+ ret = __add_pages(nid, start_pfn, nr_pages, params);
+ if (ret)
+ return ret;
+
+ /* update max_pfn, max_low_pfn and high_memory */
+ update_end_of_memory_vars(start_pfn << PAGE_SHIFT,
+ nr_pages << PAGE_SHIFT);
+
+ return ret;
+}
+
+int __ref arch_add_memory(int nid, u64 start, u64 size,
+ struct mhp_params *params)
+{
+ unsigned long start_pfn = start >> PAGE_SHIFT;
+ unsigned long nr_pages = size >> PAGE_SHIFT;
+ int rc;
+
+ rc = arch_create_linear_mapping(nid, start, size, params);
+ if (rc)
+ return rc;
+ rc = add_pages(nid, start_pfn, nr_pages, params);
+ if (rc)
+ arch_remove_linear_mapping(start, size);
+ return rc;
+}
+
+void __ref arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
+{
+ unsigned long start_pfn = start >> PAGE_SHIFT;
+ unsigned long nr_pages = size >> PAGE_SHIFT;
- if (resize_hpt_for_hotplug(memblock_phys_mem_size()) == -ENOSPC)
- pr_warn("Hash collision while resizing HPT\n");
+ __remove_pages(start_pfn, nr_pages, altmap);
+ arch_remove_linear_mapping(start, size);
}
#endif
-#ifndef CONFIG_NEED_MULTIPLE_NODES
+#ifndef CONFIG_NUMA
void __init mem_topology_setup(void)
{
max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
@@ -190,26 +181,25 @@ void __init mem_topology_setup(void)
void __init initmem_init(void)
{
- /* XXX need to clip this if using highmem? */
- sparse_memory_present_with_active_regions(0);
sparse_init();
}
/* mark pages that don't exist as nosave */
static int __init mark_nonram_nosave(void)
{
- struct memblock_region *reg, *prev = NULL;
-
- for_each_memblock(memory, reg) {
- if (prev &&
- memblock_region_memory_end_pfn(prev) < memblock_region_memory_base_pfn(reg))
- register_nosave_region(memblock_region_memory_end_pfn(prev),
- memblock_region_memory_base_pfn(reg));
- prev = reg;
+ unsigned long spfn, epfn, prev = 0;
+ int i;
+
+ for_each_mem_pfn_range(i, MAX_NUMNODES, &spfn, &epfn, NULL) {
+ if (prev && prev < spfn)
+ register_nosave_region(prev, spfn);
+
+ prev = epfn;
}
+
return 0;
}
-#else /* CONFIG_NEED_MULTIPLE_NODES */
+#else /* CONFIG_NUMA */
static int __init mark_nonram_nosave(void)
{
return 0;
@@ -247,9 +237,6 @@ void __init paging_init(void)
map_kernel_page(PKMAP_BASE, 0, __pgprot(0)); /* XXX gross */
pkmap_page_table = virt_to_kpte(PKMAP_BASE);
-
- kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
- kmap_prot = PAGE_KERNEL;
#endif /* CONFIG_HIGHMEM */
printk(KERN_DEBUG "Top of RAM: 0x%llx, Total RAM: 0x%llx\n",
@@ -275,7 +262,7 @@ void __init paging_init(void)
max_zone_pfns[ZONE_HIGHMEM] = max_pfn;
#endif
- free_area_init_nodes(max_zone_pfns);
+ free_area_init(max_zone_pfns);
mark_nonram_nosave();
}
@@ -297,7 +284,7 @@ void __init mem_init(void)
	 * back to top-down.
*/
memblock_set_bottom_up(true);
- swiotlb_init(0);
+ swiotlb_init(ppc_swiotlb_enable, ppc_swiotlb_flags);
#endif
high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
@@ -315,13 +302,13 @@ void __init mem_init(void)
for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
phys_addr_t paddr = (phys_addr_t)pfn << PAGE_SHIFT;
struct page *page = pfn_to_page(pfn);
- if (!memblock_is_reserved(paddr))
+ if (memblock_is_memory(paddr) && !memblock_is_reserved(paddr))
free_highmem_page(page);
}
}
#endif /* CONFIG_HIGHMEM */
-#if defined(CONFIG_PPC_FSL_BOOK3E) && !defined(CONFIG_SMP)
+#if defined(CONFIG_PPC_E500) && !defined(CONFIG_SMP)
/*
* If smp is enabled, next_tlbcam_idx is initialized in the cpu up
* functions.... do it here for the non-smp case.
@@ -330,7 +317,6 @@ void __init mem_init(void)
(mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
#endif
- mem_init_print_info(NULL);
#ifdef CONFIG_PPC32
pr_info("Kernel virtual memory layout:\n");
#ifdef CONFIG_KASAN
@@ -347,6 +333,10 @@ void __init mem_init(void)
ioremap_bot, IOREMAP_TOP);
pr_info(" * 0x%08lx..0x%08lx : vmalloc & ioremap\n",
VMALLOC_START, VMALLOC_END);
+#ifdef MODULES_VADDR
+ pr_info(" * 0x%08lx..0x%08lx : modules\n",
+ MODULES_VADDR, MODULES_END);
+#endif
#endif /* CONFIG_PPC32 */
}
@@ -354,265 +344,35 @@ void free_initmem(void)
{
ppc_md.progress = ppc_printk_progress;
mark_initmem_nx();
- init_mem_is_free = true;
+ static_branch_enable(&init_mem_is_free);
free_initmem_default(POISON_FREE_INITMEM);
+ ftrace_free_init_tramp();
}
-/**
- * flush_coherent_icache() - if a CPU has a coherent icache, flush it
- * @addr: The base address to use (can be any valid address, the whole cache will be flushed)
- * Return true if the cache was flushed, false otherwise
- */
-static inline bool flush_coherent_icache(unsigned long addr)
-{
- /*
- * For a snooping icache, we still need a dummy icbi to purge all the
- * prefetched instructions from the ifetch buffers. We also need a sync
- * before the icbi to order the the actual stores to memory that might
- * have modified instructions with the icbi.
- */
- if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE)) {
- mb(); /* sync */
- allow_read_from_user((const void __user *)addr, L1_CACHE_BYTES);
- icbi((void *)addr);
- prevent_read_from_user((const void __user *)addr, L1_CACHE_BYTES);
- mb(); /* sync */
- isync();
- return true;
- }
-
- return false;
-}
-
-/**
- * invalidate_icache_range() - Flush the icache by issuing icbi across an address range
- * @start: the start address
- * @stop: the stop address (exclusive)
- */
-static void invalidate_icache_range(unsigned long start, unsigned long stop)
-{
- unsigned long shift = l1_icache_shift();
- unsigned long bytes = l1_icache_bytes();
- char *addr = (char *)(start & ~(bytes - 1));
- unsigned long size = stop - (unsigned long)addr + (bytes - 1);
- unsigned long i;
-
- for (i = 0; i < size >> shift; i++, addr += bytes)
- icbi(addr);
-
- mb(); /* sync */
- isync();
-}
-
-/**
- * flush_icache_range: Write any modified data cache blocks out to memory
- * and invalidate the corresponding blocks in the instruction cache
- *
- * Generic code will call this after writing memory, before executing from it.
- *
- * @start: the start address
- * @stop: the stop address (exclusive)
- */
-void flush_icache_range(unsigned long start, unsigned long stop)
-{
- if (flush_coherent_icache(start))
- return;
-
- clean_dcache_range(start, stop);
-
- if (IS_ENABLED(CONFIG_44x)) {
- /*
- * Flash invalidate on 44x because we are passed kmapped
- * addresses and this doesn't work for userspace pages due to
- * the virtually tagged icache.
- */
- iccci((void *)start);
- mb(); /* sync */
- isync();
- } else
- invalidate_icache_range(start, stop);
-}
-EXPORT_SYMBOL(flush_icache_range);
-
-#if !defined(CONFIG_PPC_8xx) && !defined(CONFIG_PPC64)
-/**
- * flush_dcache_icache_phys() - Flush a page by it's physical address
- * @physaddr: the physical address of the page
- */
-static void flush_dcache_icache_phys(unsigned long physaddr)
-{
- unsigned long bytes = l1_dcache_bytes();
- unsigned long nb = PAGE_SIZE / bytes;
- unsigned long addr = physaddr & PAGE_MASK;
- unsigned long msr, msr0;
- unsigned long loop1 = addr, loop2 = addr;
-
- msr0 = mfmsr();
- msr = msr0 & ~MSR_DR;
- /*
- * This must remain as ASM to prevent potential memory accesses
- * while the data MMU is disabled
- */
- asm volatile(
- " mtctr %2;\n"
- " mtmsr %3;\n"
- " isync;\n"
- "0: dcbst 0, %0;\n"
- " addi %0, %0, %4;\n"
- " bdnz 0b;\n"
- " sync;\n"
- " mtctr %2;\n"
- "1: icbi 0, %1;\n"
- " addi %1, %1, %4;\n"
- " bdnz 1b;\n"
- " sync;\n"
- " mtmsr %5;\n"
- " isync;\n"
- : "+&r" (loop1), "+&r" (loop2)
- : "r" (nb), "r" (msr), "i" (bytes), "r" (msr0)
- : "ctr", "memory");
-}
-#endif // !defined(CONFIG_PPC_8xx) && !defined(CONFIG_PPC64)
-
-/*
- * This is called when a page has been modified by the kernel.
- * It just marks the page as not i-cache clean. We do the i-cache
- * flush later when the page is given to a user process, if necessary.
- */
-void flush_dcache_page(struct page *page)
-{
- if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
- return;
- /* avoid an atomic op if possible */
- if (test_bit(PG_arch_1, &page->flags))
- clear_bit(PG_arch_1, &page->flags);
-}
-EXPORT_SYMBOL(flush_dcache_page);
-
-void flush_dcache_icache_page(struct page *page)
-{
-#ifdef CONFIG_HUGETLB_PAGE
- if (PageCompound(page)) {
- flush_dcache_icache_hugepage(page);
- return;
- }
-#endif
-#if defined(CONFIG_PPC_8xx) || defined(CONFIG_PPC64)
- /* On 8xx there is no need to kmap since highmem is not supported */
- __flush_dcache_icache(page_address(page));
-#else
- if (IS_ENABLED(CONFIG_BOOKE) || sizeof(phys_addr_t) > sizeof(void *)) {
- void *start = kmap_atomic(page);
- __flush_dcache_icache(start);
- kunmap_atomic(start);
- } else {
- unsigned long addr = page_to_pfn(page) << PAGE_SHIFT;
-
- if (flush_coherent_icache(addr))
- return;
- flush_dcache_icache_phys(addr);
- }
-#endif
-}
-EXPORT_SYMBOL(flush_dcache_icache_page);
-
-/**
- * __flush_dcache_icache(): Flush a particular page from the data cache to RAM.
- * Note: this is necessary because the instruction cache does *not*
- * snoop from the data cache.
- *
- * @page: the address of the page to flush
- */
-void __flush_dcache_icache(void *p)
-{
- unsigned long addr = (unsigned long)p;
-
- if (flush_coherent_icache(addr))
- return;
-
- clean_dcache_range(addr, addr + PAGE_SIZE);
-
- /*
- * We don't flush the icache on 44x. Those have a virtual icache and we
- * don't have access to the virtual address here (it's not the page
- * vaddr but where it's mapped in user space). The flushing of the
- * icache on these is handled elsewhere, when a change in the address
- * space occurs, before returning to user space.
- */
-
- if (cpu_has_feature(MMU_FTR_TYPE_44x))
- return;
-
- invalidate_icache_range(addr, addr + PAGE_SIZE);
-}
-
-void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
-{
- clear_page(page);
-
- /*
- * We shouldn't have to do this, but some versions of glibc
- * require it (ld.so assumes zero filled pages are icache clean)
- * - Anton
- */
- flush_dcache_page(pg);
-}
-EXPORT_SYMBOL(clear_user_page);
-
-void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
- struct page *pg)
-{
- copy_page(vto, vfrom);
-
- /*
- * We should be able to use the following optimisation, however
- * there are two problems.
- * Firstly a bug in some versions of binutils meant PLT sections
- * were not marked executable.
- * Secondly the first word in the GOT section is blrl, used
- * to establish the GOT address. Until recently the GOT was
- * not marked executable.
- * - Anton
- */
-#if 0
- if (!vma->vm_file && ((vma->vm_flags & VM_EXEC) == 0))
- return;
-#endif
-
- flush_dcache_page(pg);
-}
-
-void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
- unsigned long addr, int len)
-{
- unsigned long maddr;
-
- maddr = (unsigned long) kmap(page) + (addr & ~PAGE_MASK);
- flush_icache_range(maddr, maddr + len);
- kunmap(page);
-}
-EXPORT_SYMBOL(flush_icache_user_range);
-
/*
* System memory should not be in /proc/iomem but various tools expect it
* (eg kdump).
*/
static int __init add_system_ram_resources(void)
{
- struct memblock_region *reg;
+ phys_addr_t start, end;
+ u64 i;
- for_each_memblock(memory, reg) {
+ for_each_mem_range(i, &start, &end) {
struct resource *res;
- unsigned long base = reg->base;
- unsigned long size = reg->size;
res = kzalloc(sizeof(struct resource), GFP_KERNEL);
WARN_ON(!res);
if (res) {
res->name = "System RAM";
- res->start = base;
- res->end = base + size - 1;
+ res->start = start;
+ /*
+ * In memblock, end points to the first byte after
+ * the range while in resources, end points to the
+ * last byte in the range.
+ */
+ res->end = end - 1;
res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
WARN_ON(request_resource(&iomem_resource, res) < 0);
}
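Note on the loop above: for_each_mem_range() yields half-open [start, end) physical ranges, while struct resource uses an inclusive end, hence the end - 1. A standalone sketch of the same conversion, outside the kernel:

#include <stdio.h>

/* Half-open memblock-style range printed with an inclusive resource-style end. */
struct range { unsigned long long start, end; };	/* end is exclusive */

static void print_as_resource(struct range r)
{
	printf("System RAM: %#llx-%#llx\n", r.start, r.end - 1);
}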
diff --git a/arch/powerpc/mm/mmap.c b/arch/powerpc/mm/mmap.c
deleted file mode 100644
index ae683fdc716c..000000000000
--- a/arch/powerpc/mm/mmap.c
+++ /dev/null
@@ -1,228 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * flexible mmap layout support
- *
- * Copyright 2003-2004 Red Hat Inc., Durham, North Carolina.
- * All Rights Reserved.
- *
- * Started by Ingo Molnar <mingo@elte.hu>
- */
-
-#include <linux/personality.h>
-#include <linux/mm.h>
-#include <linux/random.h>
-#include <linux/sched/signal.h>
-#include <linux/sched/mm.h>
-#include <linux/elf-randomize.h>
-#include <linux/security.h>
-#include <linux/mman.h>
-
-/*
- * Top of mmap area (just below the process stack).
- *
- * Leave at least a ~128 MB hole.
- */
-#define MIN_GAP (128*1024*1024)
-#define MAX_GAP (TASK_SIZE/6*5)
-
-static inline int mmap_is_legacy(struct rlimit *rlim_stack)
-{
- if (current->personality & ADDR_COMPAT_LAYOUT)
- return 1;
-
- if (rlim_stack->rlim_cur == RLIM_INFINITY)
- return 1;
-
- return sysctl_legacy_va_layout;
-}
-
-unsigned long arch_mmap_rnd(void)
-{
- unsigned long shift, rnd;
-
- shift = mmap_rnd_bits;
-#ifdef CONFIG_COMPAT
- if (is_32bit_task())
- shift = mmap_rnd_compat_bits;
-#endif
- rnd = get_random_long() % (1ul << shift);
-
- return rnd << PAGE_SHIFT;
-}
-
-static inline unsigned long stack_maxrandom_size(void)
-{
- if (!(current->flags & PF_RANDOMIZE))
- return 0;
-
- /* 8MB for 32bit, 1GB for 64bit */
- if (is_32bit_task())
- return (1<<23);
- else
- return (1<<30);
-}
-
-static inline unsigned long mmap_base(unsigned long rnd,
- struct rlimit *rlim_stack)
-{
- unsigned long gap = rlim_stack->rlim_cur;
- unsigned long pad = stack_maxrandom_size() + stack_guard_gap;
-
- /* Values close to RLIM_INFINITY can overflow. */
- if (gap + pad > gap)
- gap += pad;
-
- if (gap < MIN_GAP)
- gap = MIN_GAP;
- else if (gap > MAX_GAP)
- gap = MAX_GAP;
-
- return PAGE_ALIGN(DEFAULT_MAP_WINDOW - gap - rnd);
-}
-
-#ifdef CONFIG_PPC_RADIX_MMU
-/*
- * Same function as generic code used only for radix, because we don't need to overload
- * the generic one. But we will have to duplicate, because hash select
- * HAVE_ARCH_UNMAPPED_AREA
- */
-static unsigned long
-radix__arch_get_unmapped_area(struct file *filp, unsigned long addr,
- unsigned long len, unsigned long pgoff,
- unsigned long flags)
-{
- struct mm_struct *mm = current->mm;
- struct vm_area_struct *vma;
- int fixed = (flags & MAP_FIXED);
- unsigned long high_limit;
- struct vm_unmapped_area_info info;
-
- high_limit = DEFAULT_MAP_WINDOW;
- if (addr >= high_limit || (fixed && (addr + len > high_limit)))
- high_limit = TASK_SIZE;
-
- if (len > high_limit)
- return -ENOMEM;
-
- if (fixed) {
- if (addr > high_limit - len)
- return -ENOMEM;
- return addr;
- }
-
- if (addr) {
- addr = PAGE_ALIGN(addr);
- vma = find_vma(mm, addr);
- if (high_limit - len >= addr && addr >= mmap_min_addr &&
- (!vma || addr + len <= vm_start_gap(vma)))
- return addr;
- }
-
- info.flags = 0;
- info.length = len;
- info.low_limit = mm->mmap_base;
- info.high_limit = high_limit;
- info.align_mask = 0;
-
- return vm_unmapped_area(&info);
-}
-
-static unsigned long
-radix__arch_get_unmapped_area_topdown(struct file *filp,
- const unsigned long addr0,
- const unsigned long len,
- const unsigned long pgoff,
- const unsigned long flags)
-{
- struct vm_area_struct *vma;
- struct mm_struct *mm = current->mm;
- unsigned long addr = addr0;
- int fixed = (flags & MAP_FIXED);
- unsigned long high_limit;
- struct vm_unmapped_area_info info;
-
- high_limit = DEFAULT_MAP_WINDOW;
- if (addr >= high_limit || (fixed && (addr + len > high_limit)))
- high_limit = TASK_SIZE;
-
- if (len > high_limit)
- return -ENOMEM;
-
- if (fixed) {
- if (addr > high_limit - len)
- return -ENOMEM;
- return addr;
- }
-
- if (addr) {
- addr = PAGE_ALIGN(addr);
- vma = find_vma(mm, addr);
- if (high_limit - len >= addr && addr >= mmap_min_addr &&
- (!vma || addr + len <= vm_start_gap(vma)))
- return addr;
- }
-
- info.flags = VM_UNMAPPED_AREA_TOPDOWN;
- info.length = len;
- info.low_limit = max(PAGE_SIZE, mmap_min_addr);
- info.high_limit = mm->mmap_base + (high_limit - DEFAULT_MAP_WINDOW);
- info.align_mask = 0;
-
- addr = vm_unmapped_area(&info);
- if (!(addr & ~PAGE_MASK))
- return addr;
- VM_BUG_ON(addr != -ENOMEM);
-
- /*
- * A failed mmap() very likely causes application failure,
- * so fall back to the bottom-up function here. This scenario
- * can happen with large stack limits and large mmap()
- * allocations.
- */
- return radix__arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
-}
-
-static void radix__arch_pick_mmap_layout(struct mm_struct *mm,
- unsigned long random_factor,
- struct rlimit *rlim_stack)
-{
- if (mmap_is_legacy(rlim_stack)) {
- mm->mmap_base = TASK_UNMAPPED_BASE;
- mm->get_unmapped_area = radix__arch_get_unmapped_area;
- } else {
- mm->mmap_base = mmap_base(random_factor, rlim_stack);
- mm->get_unmapped_area = radix__arch_get_unmapped_area_topdown;
- }
-}
-#else
-/* dummy */
-extern void radix__arch_pick_mmap_layout(struct mm_struct *mm,
- unsigned long random_factor,
- struct rlimit *rlim_stack);
-#endif
-/*
- * This function, called very early during the creation of a new
- * process VM image, sets up which VM layout function to use:
- */
-void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
-{
- unsigned long random_factor = 0UL;
-
- if (current->flags & PF_RANDOMIZE)
- random_factor = arch_mmap_rnd();
-
- if (radix_enabled())
- return radix__arch_pick_mmap_layout(mm, random_factor,
- rlim_stack);
- /*
- * Fall back to the standard layout if the personality
- * bit is set, or if the expected stack growth is unlimited:
- */
- if (mmap_is_legacy(rlim_stack)) {
- mm->mmap_base = TASK_UNMAPPED_BASE;
- mm->get_unmapped_area = arch_get_unmapped_area;
- } else {
- mm->mmap_base = mmap_base(random_factor, rlim_stack);
- mm->get_unmapped_area = arch_get_unmapped_area_topdown;
- }
-}
diff --git a/arch/powerpc/mm/mmu_context.c b/arch/powerpc/mm/mmu_context.c
index 18f20da0d348..1fb9c99f8679 100644
--- a/arch/powerpc/mm/mmu_context.c
+++ b/arch/powerpc/mm/mmu_context.c
@@ -18,6 +18,12 @@ static inline void switch_mm_pgdir(struct task_struct *tsk,
{
/* 32-bit keeps track of the current PGDIR in the thread struct */
tsk->thread.pgdir = mm->pgd;
+#ifdef CONFIG_PPC_BOOK3S_32
+ tsk->thread.sr0 = mm->context.sr0;
+#endif
+#if defined(CONFIG_BOOKE_OR_40x) && defined(CONFIG_PPC_KUAP)
+ tsk->thread.pid = mm->context.id;
+#endif
}
#elif defined(CONFIG_PPC_BOOK3E_64)
static inline void switch_mm_pgdir(struct task_struct *tsk,
@@ -25,6 +31,9 @@ static inline void switch_mm_pgdir(struct task_struct *tsk,
{
/* 64-bit Book3E keeps track of current PGD in the PACA */
get_paca()->pgd = mm->pgd;
+#ifdef CONFIG_PPC_KUAP
+ tsk->thread.pid = mm->context.id;
+#endif
}
#else
static inline void switch_mm_pgdir(struct task_struct *tsk,
@@ -43,24 +52,26 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
/*
* This full barrier orders the store to the cpumask above vs
- * a subsequent operation which allows this CPU to begin loading
- * translations for next.
+ * a subsequent load which allows this CPU/MMU to begin loading
+ * translations for 'next' from page table PTEs into the TLB.
*
- * When using the radix MMU that operation is the load of the
+ * When using the radix MMU, that operation is the load of the
* MMU context id, which is then moved to SPRN_PID.
*
* For the hash MMU it is either the first load from slb_cache
- * in switch_slb(), and/or the store of paca->mm_ctx_id in
- * copy_mm_to_paca().
+ * in switch_slb() to preload the SLBs, or the load of
+ * get_user_context which loads the context for the VSID hash
+ * to insert a new SLB, in the SLB fault handler.
*
* On the other side, the barrier is in mm/tlb-radix.c for
- * radix which orders earlier stores to clear the PTEs vs
- * the load of mm_cpumask. And pte_xchg which does the same
- * thing for hash.
+ * radix which orders earlier stores to clear the PTEs before
+ * the load of mm_cpumask to check which CPU TLBs should be
+ * flushed. For hash, pte_xchg to clear the PTE includes the
+ * barrier.
*
- * This full barrier is needed by membarrier when switching
- * between processes after store to rq->curr, before user-space
- * memory accesses.
+ * This full barrier is also needed by membarrier when
+ * switching between processes after store to rq->curr, before
+ * user-space memory accesses.
*/
smp_mb();
@@ -79,11 +90,9 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
* context
*/
if (cpu_has_feature(CPU_FTR_ALTIVEC))
- asm volatile ("dssall");
+ asm volatile (PPC_DSSALL);
- if (new_on_cpu)
- radix_kvm_prefetch_workaround(next);
- else
+ if (!new_on_cpu)
membarrier_arch_switch_mm(prev, next, tsk);
/*
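The rewritten comment above describes a publish-then-read protocol: the switching CPU stores itself into mm_cpumask, issues a full barrier, and only then loads translations, while the TLB-flush side clears PTEs, issues its own barrier, and then reads mm_cpumask. Either side is guaranteed to see the other's store. A hedged sketch of that shape with placeholder helpers (set_cpu_in_mask() and load_translations() are illustrative names, not the powerpc functions):

static void switch_in_sketch(struct mm_struct *next, int cpu)
{
	set_cpu_in_mask(next, cpu);	/* publish: this CPU may now cache 'next' translations */
	smp_mb();			/* pairs with the barrier on the PTE-clearing side */
	load_translations(next);	/* e.g. load the context id that ends up in SPRN_PID */
}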
diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h
index 7097e07a209a..bd9784f77f2e 100644
--- a/arch/powerpc/mm/mmu_decl.h
+++ b/arch/powerpc/mm/mmu_decl.h
@@ -38,7 +38,7 @@ static inline void _tlbil_pid(unsigned int pid)
#else /* CONFIG_40x || CONFIG_PPC_8xx */
extern void _tlbil_all(void);
extern void _tlbil_pid(unsigned int pid);
-#ifdef CONFIG_PPC_BOOK3E
+#ifdef CONFIG_PPC_BOOK3E_64
extern void _tlbil_pid_noind(unsigned int pid);
#else
#define _tlbil_pid_noind(pid) _tlbil_pid(pid)
@@ -55,7 +55,7 @@ static inline void _tlbil_va(unsigned long address, unsigned int pid,
asm volatile ("tlbie %0; sync" : : "r" (address) : "memory");
trace_tlbie(0, 0, address, pid, 0, 0, 0);
}
-#elif defined(CONFIG_PPC_BOOK3E)
+#elif defined(CONFIG_PPC_BOOK3E_64)
extern void _tlbil_va(unsigned long address, unsigned int pid,
unsigned int tsize, unsigned int ind);
#else
@@ -67,7 +67,7 @@ static inline void _tlbil_va(unsigned long address, unsigned int pid,
}
#endif /* CONFIG_PPC_8xx */
-#if defined(CONFIG_PPC_BOOK3E) || defined(CONFIG_PPC_47x)
+#if defined(CONFIG_PPC_BOOK3E_64) || defined(CONFIG_PPC_47x)
extern void _tlbivax_bcast(unsigned long address, unsigned int pid,
unsigned int tsize, unsigned int ind);
#else
@@ -82,45 +82,26 @@ static inline void print_system_hash_info(void) {}
#else /* CONFIG_PPC_MMU_NOHASH */
-extern void _tlbie(unsigned long address);
-extern void _tlbia(void);
-
void print_system_hash_info(void);
#endif /* CONFIG_PPC_MMU_NOHASH */
#ifdef CONFIG_PPC32
-void hash_preload(struct mm_struct *mm, unsigned long ea);
-
extern void mapin_ram(void);
extern void setbat(int index, unsigned long virt, phys_addr_t phys,
unsigned int size, pgprot_t prot);
-extern int __map_without_bats;
-extern unsigned int rtas_data, rtas_size;
-
-struct hash_pte;
-extern struct hash_pte *Hash;
extern u8 early_hash[];
#endif /* CONFIG_PPC32 */
extern unsigned long __max_low_memory;
-extern phys_addr_t __initial_memory_limit_addr;
extern phys_addr_t total_memory;
extern phys_addr_t total_lowmem;
extern phys_addr_t memstart_addr;
extern phys_addr_t lowmem_end_addr;
-#ifdef CONFIG_WII
-extern unsigned long wii_hole_start;
-extern unsigned long wii_hole_size;
-
-extern unsigned long wii_mmu_mapin_mem2(unsigned long top);
-extern void wii_memory_fixups(void);
-#endif
-
/* ...and now those things that may be slightly different between processor
* architectures. -- Dan
*/
@@ -130,11 +111,9 @@ void MMU_init_hw_patch(void);
unsigned long mmu_mapin_ram(unsigned long base, unsigned long top);
#endif
-#ifdef CONFIG_PPC_FSL_BOOK3E
+#ifdef CONFIG_PPC_E500
extern unsigned long map_mem_in_cams(unsigned long ram, int max_cam_idx,
- bool dryrun);
-extern unsigned long calc_cam_sz(unsigned long ram, unsigned long virt,
- phys_addr_t phys);
+ bool dryrun, bool init);
#ifdef CONFIG_PPC32
extern void adjust_total_lowmem(void);
extern int switch_to_as1(void);
@@ -161,11 +140,15 @@ struct tlbcam {
u32 MAS3;
u32 MAS7;
};
+
+#define NUM_TLBCAMS 64
+
+extern struct tlbcam TLBCAM[NUM_TLBCAMS];
#endif
-#if defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_FSL_BOOKE) || defined(CONFIG_PPC_8xx)
+#if defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_PPC_85xx) || defined(CONFIG_PPC_8xx)
/* 6xx have BATS */
-/* FSL_BOOKE have TLBCAM */
+/* PPC_85xx have TLBCAM */
/* 8xx have LTLB */
phys_addr_t v_block_mapped(unsigned long va);
unsigned long p_block_mapped(phys_addr_t pa);
@@ -174,7 +157,7 @@ static inline phys_addr_t v_block_mapped(unsigned long va) { return 0; }
static inline unsigned long p_block_mapped(phys_addr_t pa) { return 0; }
#endif
-#if defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_PPC_8xx)
+#if defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_PPC_8xx) || defined(CONFIG_PPC_E500)
void mmu_mark_initmem_nx(void);
void mmu_mark_rodata_ro(void);
#else
@@ -182,8 +165,17 @@ static inline void mmu_mark_initmem_nx(void) { }
static inline void mmu_mark_rodata_ro(void) { }
#endif
-#ifdef CONFIG_PPC_DEBUG_WX
+#ifdef CONFIG_PPC_8xx
+void __init mmu_mapin_immr(void);
+#endif
+
+#ifdef CONFIG_DEBUG_WX
void ptdump_check_wx(void);
#else
static inline void ptdump_check_wx(void) { }
#endif
+
+static inline bool debug_pagealloc_enabled_or_kfence(void)
+{
+ return IS_ENABLED(CONFIG_KFENCE) || debug_pagealloc_enabled();
+}
diff --git a/arch/powerpc/mm/nohash/40x.c b/arch/powerpc/mm/nohash/40x.c
index f348104eb461..3684d6e570fb 100644
--- a/arch/powerpc/mm/nohash/40x.c
+++ b/arch/powerpc/mm/nohash/40x.c
@@ -32,11 +32,8 @@
#include <linux/highmem.h>
#include <linux/memblock.h>
-#include <asm/pgalloc.h>
-#include <asm/prom.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
-#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <linux/uaccess.h>
#include <asm/smp.h>
@@ -46,7 +43,6 @@
#include <mm/mmu_decl.h>
-extern int __map_without_ltlbs;
/*
* MMU_init_hw does the chip-specific initialization of the MMU hardware.
*/
@@ -97,14 +93,20 @@ unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
p = 0;
s = total_lowmem;
- if (__map_without_ltlbs)
+ if (IS_ENABLED(CONFIG_KFENCE))
+ return 0;
+
+ if (debug_pagealloc_enabled())
+ return 0;
+
+ if (strict_kernel_rwx_enabled())
return 0;
while (s >= LARGE_PAGE_SIZE_16M) {
pmd_t *pmdp;
- unsigned long val = p | _PMD_SIZE_16M | _PAGE_EXEC | _PAGE_HWWRITE;
+ unsigned long val = p | _PMD_SIZE_16M | _PAGE_EXEC | _PAGE_RW;
- pmdp = pmd_offset(pud_offset(pgd_offset_k(v), v), v);
+ pmdp = pmd_off_k(v);
*pmdp++ = __pmd(val);
*pmdp++ = __pmd(val);
*pmdp++ = __pmd(val);
@@ -117,9 +119,9 @@ unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
while (s >= LARGE_PAGE_SIZE_4M) {
pmd_t *pmdp;
- unsigned long val = p | _PMD_SIZE_4M | _PAGE_EXEC | _PAGE_HWWRITE;
+ unsigned long val = p | _PMD_SIZE_4M | _PAGE_EXEC | _PAGE_RW;
- pmdp = pmd_offset(pud_offset(pgd_offset_k(v), v), v);
+ pmdp = pmd_off_k(v);
*pmdp = __pmd(val);
v += LARGE_PAGE_SIZE_4M;
diff --git a/arch/powerpc/mm/nohash/44x.c b/arch/powerpc/mm/nohash/44x.c
index 3d6ae7c72412..1beae802bb1c 100644
--- a/arch/powerpc/mm/nohash/44x.c
+++ b/arch/powerpc/mm/nohash/44x.c
@@ -25,6 +25,7 @@
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/code-patching.h>
+#include <asm/smp.h>
#include <mm/mmu_decl.h>
@@ -37,7 +38,7 @@ int icache_44x_need_flush;
unsigned long tlb_47x_boltmap[1024/8];
-static void ppc44x_update_tlb_hwater(void)
+static void __init ppc44x_update_tlb_hwater(void)
{
	/* The TLB miss handlers hard code the watermark in a cmpli
	 * instruction to improve performance rather than loading it
@@ -121,7 +122,7 @@ static void __init ppc47x_update_boltmap(void)
/*
* "Pins" a 256MB TLB entry in AS0 for kernel lowmem for 47x type MMU
*/
-static void ppc47x_pin_tlb(unsigned int virt, unsigned int phys)
+static void __init ppc47x_pin_tlb(unsigned int virt, unsigned int phys)
{
unsigned int rA;
int bolted;
diff --git a/arch/powerpc/mm/nohash/8xx.c b/arch/powerpc/mm/nohash/8xx.c
index 3189308dece4..dbbfe897455d 100644
--- a/arch/powerpc/mm/nohash/8xx.c
+++ b/arch/powerpc/mm/nohash/8xx.c
@@ -8,16 +8,12 @@
*/
#include <linux/memblock.h>
-#include <linux/mmu_context.h>
-#include <asm/fixmap.h>
-#include <asm/code-patching.h>
+#include <linux/hugetlb.h>
#include <mm/mmu_decl.h>
#define IMMR_SIZE (FIX_IMMR_SIZE << PAGE_SHIFT)
-extern int __map_without_ltlbs;
-
static unsigned long block_mapped_ram;
/*
@@ -30,8 +26,6 @@ phys_addr_t v_block_mapped(unsigned long va)
if (va >= VIRT_IMMR_BASE && va < VIRT_IMMR_BASE + IMMR_SIZE)
return p + va - VIRT_IMMR_BASE;
- if (__map_without_ltlbs)
- return 0;
if (va >= PAGE_OFFSET && va < PAGE_OFFSET + block_mapped_ram)
return __pa(va);
return 0;
@@ -47,162 +41,149 @@ unsigned long p_block_mapped(phys_addr_t pa)
if (pa >= p && pa < p + IMMR_SIZE)
return VIRT_IMMR_BASE + pa - p;
- if (__map_without_ltlbs)
- return 0;
if (pa < block_mapped_ram)
return (unsigned long)__va(pa);
return 0;
}
-#define LARGE_PAGE_SIZE_8M (1<<23)
-
-/*
- * MMU_init_hw does the chip-specific initialization of the MMU hardware.
- */
-void __init MMU_init_hw(void)
+static pte_t __init *early_hugepd_alloc_kernel(hugepd_t *pmdp, unsigned long va)
{
- /* PIN up to the 3 first 8Mb after IMMR in DTLB table */
- if (IS_ENABLED(CONFIG_PIN_TLB_DATA)) {
- unsigned long ctr = mfspr(SPRN_MD_CTR) & 0xfe000000;
- unsigned long flags = 0xf0 | MD_SPS16K | _PAGE_SH | _PAGE_DIRTY;
- int i = IS_ENABLED(CONFIG_PIN_TLB_IMMR) ? 29 : 28;
- unsigned long addr = 0;
- unsigned long mem = total_lowmem;
-
- for (; i < 32 && mem >= LARGE_PAGE_SIZE_8M; i++) {
- mtspr(SPRN_MD_CTR, ctr | (i << 8));
- mtspr(SPRN_MD_EPN, (unsigned long)__va(addr) | MD_EVALID);
- mtspr(SPRN_MD_TWC, MD_PS8MEG | MD_SVALID);
- mtspr(SPRN_MD_RPN, addr | flags | _PAGE_PRESENT);
- addr += LARGE_PAGE_SIZE_8M;
- mem -= LARGE_PAGE_SIZE_8M;
- }
+ if (hpd_val(*pmdp) == 0) {
+ pte_t *ptep = memblock_alloc(sizeof(pte_basic_t), SZ_4K);
+
+ if (!ptep)
+ return NULL;
+
+ hugepd_populate_kernel((hugepd_t *)pmdp, ptep, PAGE_SHIFT_8M);
+ hugepd_populate_kernel((hugepd_t *)pmdp + 1, ptep, PAGE_SHIFT_8M);
}
+ return hugepte_offset(*(hugepd_t *)pmdp, va, PGDIR_SHIFT);
}
-static void __init mmu_mapin_immr(void)
+static int __ref __early_map_kernel_hugepage(unsigned long va, phys_addr_t pa,
+ pgprot_t prot, int psize, bool new)
{
- unsigned long p = PHYS_IMMR_BASE;
- unsigned long v = VIRT_IMMR_BASE;
- int offset;
+ pmd_t *pmdp = pmd_off_k(va);
+ pte_t *ptep;
+
+ if (WARN_ON(psize != MMU_PAGE_512K && psize != MMU_PAGE_8M))
+ return -EINVAL;
+
+ if (new) {
+ if (WARN_ON(slab_is_available()))
+ return -EINVAL;
+
+ if (psize == MMU_PAGE_512K)
+ ptep = early_pte_alloc_kernel(pmdp, va);
+ else
+ ptep = early_hugepd_alloc_kernel((hugepd_t *)pmdp, va);
+ } else {
+ if (psize == MMU_PAGE_512K)
+ ptep = pte_offset_kernel(pmdp, va);
+ else
+ ptep = hugepte_offset(*(hugepd_t *)pmdp, va, PGDIR_SHIFT);
+ }
+
+ if (WARN_ON(!ptep))
+ return -ENOMEM;
+
+ /* The PTE should never be already present */
+ if (new && WARN_ON(pte_present(*ptep) && pgprot_val(prot)))
+ return -EINVAL;
- for (offset = 0; offset < IMMR_SIZE; offset += PAGE_SIZE)
- map_kernel_page(v + offset, p + offset, PAGE_KERNEL_NCG);
+ set_huge_pte_at(&init_mm, va, ptep, pte_mkhuge(pfn_pte(pa >> PAGE_SHIFT, prot)));
+
+ return 0;
}
-static void mmu_patch_cmp_limit(s32 *site, unsigned long mapped)
+/*
+ * MMU_init_hw does the chip-specific initialization of the MMU hardware.
+ */
+void __init MMU_init_hw(void)
{
- modify_instruction_site(site, 0xffff, (unsigned long)__va(mapped) >> 16);
}
-static void mmu_patch_addis(s32 *site, long simm)
+static bool immr_is_mapped __initdata;
+
+void __init mmu_mapin_immr(void)
{
- unsigned int instr = *(unsigned int *)patch_site_addr(site);
+ if (immr_is_mapped)
+ return;
- instr &= 0xffff0000;
- instr |= ((unsigned long)simm) >> 16;
- patch_instruction_site(site, instr);
+ immr_is_mapped = true;
+
+ __early_map_kernel_hugepage(VIRT_IMMR_BASE, PHYS_IMMR_BASE,
+ PAGE_KERNEL_NCG, MMU_PAGE_512K, true);
}
-static void mmu_mapin_ram_chunk(unsigned long offset, unsigned long top, pgprot_t prot)
+static void mmu_mapin_ram_chunk(unsigned long offset, unsigned long top,
+ pgprot_t prot, bool new)
{
- unsigned long s = offset;
- unsigned long v = PAGE_OFFSET + s;
- phys_addr_t p = memstart_addr + s;
-
- for (; s < top; s += PAGE_SIZE) {
- map_kernel_page(v, p, prot);
- v += PAGE_SIZE;
- p += PAGE_SIZE;
- }
+ unsigned long v = PAGE_OFFSET + offset;
+ unsigned long p = offset;
+
+ WARN_ON(!IS_ALIGNED(offset, SZ_512K) || !IS_ALIGNED(top, SZ_512K));
+
+ for (; p < ALIGN(p, SZ_8M) && p < top; p += SZ_512K, v += SZ_512K)
+ __early_map_kernel_hugepage(v, p, prot, MMU_PAGE_512K, new);
+ for (; p < ALIGN_DOWN(top, SZ_8M) && p < top; p += SZ_8M, v += SZ_8M)
+ __early_map_kernel_hugepage(v, p, prot, MMU_PAGE_8M, new);
+ for (; p < ALIGN_DOWN(top, SZ_512K) && p < top; p += SZ_512K, v += SZ_512K)
+ __early_map_kernel_hugepage(v, p, prot, MMU_PAGE_512K, new);
+
+ if (!new)
+ flush_tlb_kernel_range(PAGE_OFFSET + v, PAGE_OFFSET + top);
}
unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
{
- unsigned long mapped;
-
- if (__map_without_ltlbs) {
- mapped = 0;
- mmu_mapin_immr();
- if (!IS_ENABLED(CONFIG_PIN_TLB_IMMR))
- patch_instruction_site(&patch__dtlbmiss_immr_jmp, PPC_INST_NOP);
- if (!IS_ENABLED(CONFIG_PIN_TLB_TEXT))
- mmu_patch_cmp_limit(&patch__itlbmiss_linmem_top, 0);
+ unsigned long etext8 = ALIGN(__pa(_etext), SZ_8M);
+ unsigned long sinittext = __pa(_sinittext);
+ bool strict_boundary = strict_kernel_rwx_enabled() || debug_pagealloc_enabled_or_kfence();
+ unsigned long boundary = strict_boundary ? sinittext : etext8;
+ unsigned long einittext8 = ALIGN(__pa(_einittext), SZ_8M);
+
+ WARN_ON(top < einittext8);
+
+ mmu_mapin_immr();
+
+ mmu_mapin_ram_chunk(0, boundary, PAGE_KERNEL_TEXT, true);
+ if (debug_pagealloc_enabled_or_kfence()) {
+ top = boundary;
} else {
- unsigned long einittext8 = ALIGN(__pa(_einittext), SZ_8M);
-
- mapped = top & ~(LARGE_PAGE_SIZE_8M - 1);
- if (!IS_ENABLED(CONFIG_PIN_TLB_TEXT))
- mmu_patch_cmp_limit(&patch__itlbmiss_linmem_top, einittext8);
-
- /*
- * Populate page tables to:
- * - have them appear in /sys/kernel/debug/kernel_page_tables
- * - allow the BDI to find the pages when they are not PINNED
- */
- mmu_mapin_ram_chunk(0, einittext8, PAGE_KERNEL_X);
- mmu_mapin_ram_chunk(einittext8, mapped, PAGE_KERNEL);
- mmu_mapin_immr();
+ mmu_mapin_ram_chunk(boundary, einittext8, PAGE_KERNEL_TEXT, true);
+ mmu_mapin_ram_chunk(einittext8, top, PAGE_KERNEL, true);
}
- mmu_patch_cmp_limit(&patch__dtlbmiss_linmem_top, mapped);
- mmu_patch_cmp_limit(&patch__fixupdar_linmem_top, mapped);
-
- /* If the size of RAM is not an exact power of two, we may not
- * have covered RAM in its entirety with 8 MiB
- * pages. Consequently, restrict the top end of RAM currently
- * allocable so that calls to the MEMBLOCK to allocate PTEs for "tail"
- * coverage with normal-sized pages (or other reasons) do not
- * attempt to allocate outside the allowed range.
- */
- if (mapped)
- memblock_set_current_limit(mapped);
+ if (top > SZ_32M)
+ memblock_set_current_limit(top);
- block_mapped_ram = mapped;
+ block_mapped_ram = top;
- return mapped;
+ return top;
}
void mmu_mark_initmem_nx(void)
{
- if (IS_ENABLED(CONFIG_STRICT_KERNEL_RWX) && CONFIG_ETEXT_SHIFT < 23)
- mmu_patch_addis(&patch__itlbmiss_linmem_top8,
- -((long)_etext & ~(LARGE_PAGE_SIZE_8M - 1)));
- if (!IS_ENABLED(CONFIG_PIN_TLB_TEXT)) {
- unsigned long einittext8 = ALIGN(__pa(_einittext), SZ_8M);
- unsigned long etext8 = ALIGN(__pa(_etext), SZ_8M);
- unsigned long etext = __pa(_etext);
-
- mmu_patch_cmp_limit(&patch__itlbmiss_linmem_top, __pa(_etext));
-
- /* Update page tables for PTDUMP and BDI */
- mmu_mapin_ram_chunk(0, einittext8, __pgprot(0));
- if (IS_ENABLED(CONFIG_STRICT_KERNEL_RWX)) {
- mmu_mapin_ram_chunk(0, etext, PAGE_KERNEL_TEXT);
- mmu_mapin_ram_chunk(etext, einittext8, PAGE_KERNEL);
- } else {
- mmu_mapin_ram_chunk(0, etext8, PAGE_KERNEL_TEXT);
- mmu_mapin_ram_chunk(etext8, einittext8, PAGE_KERNEL);
- }
- }
+ unsigned long etext8 = ALIGN(__pa(_etext), SZ_8M);
+ unsigned long sinittext = __pa(_sinittext);
+ unsigned long boundary = strict_kernel_rwx_enabled() ? sinittext : etext8;
+ unsigned long einittext8 = ALIGN(__pa(_einittext), SZ_8M);
+
+ if (!debug_pagealloc_enabled_or_kfence())
+ mmu_mapin_ram_chunk(boundary, einittext8, PAGE_KERNEL, false);
+
+ mmu_pin_tlb(block_mapped_ram, false);
}
#ifdef CONFIG_STRICT_KERNEL_RWX
void mmu_mark_rodata_ro(void)
{
unsigned long sinittext = __pa(_sinittext);
- unsigned long etext = __pa(_etext);
-
- if (CONFIG_DATA_SHIFT < 23)
- mmu_patch_addis(&patch__dtlbmiss_romem_top8,
- -__pa(((unsigned long)_sinittext) &
- ~(LARGE_PAGE_SIZE_8M - 1)));
- mmu_patch_addis(&patch__dtlbmiss_romem_top, -__pa(_sinittext));
-
- /* Update page tables for PTDUMP and BDI */
- mmu_mapin_ram_chunk(0, sinittext, __pgprot(0));
- mmu_mapin_ram_chunk(0, etext, PAGE_KERNEL_ROX);
- mmu_mapin_ram_chunk(etext, sinittext, PAGE_KERNEL_RO);
+
+ mmu_mapin_ram_chunk(0, sinittext, PAGE_KERNEL_ROX, false);
+ if (IS_ENABLED(CONFIG_PIN_TLB_DATA))
+ mmu_pin_tlb(block_mapped_ram, true);
}
#endif
@@ -215,67 +196,15 @@ void __init setup_initial_memory_limit(phys_addr_t first_memblock_base,
BUG_ON(first_memblock_base != 0);
/* 8xx can only access 32MB at the moment */
- memblock_set_current_limit(min_t(u64, first_memblock_size, 0x02000000));
-}
-
-/*
- * Set up to use a given MMU context.
- * id is context number, pgd is PGD pointer.
- *
- * We place the physical address of the new task page directory loaded
- * into the MMU base register, and set the ASID compare register with
- * the new "context."
- */
-void set_context(unsigned long id, pgd_t *pgd)
-{
- s16 offset = (s16)(__pa(swapper_pg_dir));
-
- /* Context switch the PTE pointer for the Abatron BDI2000.
- * The PGDIR is passed as second argument.
- */
- if (IS_ENABLED(CONFIG_BDI_SWITCH))
- abatron_pteptrs[1] = pgd;
-
- /* Register M_TWB will contain base address of level 1 table minus the
- * lower part of the kernel PGDIR base address, so that all accesses to
- * level 1 table are done relative to lower part of kernel PGDIR base
- * address.
- */
- mtspr(SPRN_M_TWB, __pa(pgd) - offset);
-
- /* Update context */
- mtspr(SPRN_M_CASID, id - 1);
- /* sync */
- mb();
+ memblock_set_current_limit(min_t(u64, first_memblock_size, SZ_32M));
}
-void flush_instruction_cache(void)
+int pud_clear_huge(pud_t *pud)
{
- isync();
- mtspr(SPRN_IC_CST, IDC_INVALL);
- isync();
+ return 0;
}
-#ifdef CONFIG_PPC_KUEP
-void __init setup_kuep(bool disabled)
+int pmd_clear_huge(pmd_t *pmd)
{
- if (disabled)
- return;
-
- pr_info("Activating Kernel Userspace Execution Prevention\n");
-
- mtspr(SPRN_MI_AP, MI_APG_KUEP);
+ return 0;
}
-#endif
-
-#ifdef CONFIG_PPC_KUAP
-void __init setup_kuap(bool disabled)
-{
- pr_info("Activating Kernel Userspace Access Protection\n");
-
- if (disabled)
- pr_warn("KUAP cannot be disabled yet on 8xx when compiled in\n");
-
- mtspr(SPRN_MD_AP, MD_APG_KUAP);
-}
-#endif
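The new mmu_mapin_ram_chunk() above covers a 512K-aligned range with the largest pages that fit: 512K pages up to the first 8M boundary, 8M pages across the aligned middle, then 512K pages for the tail. A standalone worked example of the split, assuming 23M of RAM starting at offset 0 (two 8M pages plus fourteen 512K pages):

#include <stdio.h>

#define SZ_512K			0x80000UL
#define SZ_8M			0x800000UL
#define ALIGN_UP(x, a)		(((x) + (a) - 1) & ~((a) - 1))
#define ALIGN_DOWN(x, a)	((x) & ~((a) - 1))

int main(void)
{
	unsigned long p = 0, top = 23UL << 20, head = 0, mid = 0, tail = 0;

	for (; p < ALIGN_UP(p, SZ_8M) && p < top; p += SZ_512K)
		head++;			/* 512K pages up to the first 8M boundary */
	for (; p < ALIGN_DOWN(top, SZ_8M) && p < top; p += SZ_8M)
		mid++;			/* 8M pages across the aligned middle */
	for (; p < ALIGN_DOWN(top, SZ_512K) && p < top; p += SZ_512K)
		tail++;			/* 512K pages for the tail */

	printf("head=%lu mid=%lu tail=%lu\n", head, mid, tail);	/* head=0 mid=2 tail=14 */
	return 0;
}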
diff --git a/arch/powerpc/mm/nohash/Makefile b/arch/powerpc/mm/nohash/Makefile
index 0424f6ce5bd8..f3894e79d5f7 100644
--- a/arch/powerpc/mm/nohash/Makefile
+++ b/arch/powerpc/mm/nohash/Makefile
@@ -2,18 +2,18 @@
ccflags-$(CONFIG_PPC64) := $(NO_MINIMAL_TOC)
-obj-y += mmu_context.o tlb.o tlb_low.o
+obj-y += mmu_context.o tlb.o tlb_low.o kup.o
obj-$(CONFIG_PPC_BOOK3E_64) += tlb_low_64e.o book3e_pgtable.o
obj-$(CONFIG_40x) += 40x.o
obj-$(CONFIG_44x) += 44x.o
obj-$(CONFIG_PPC_8xx) += 8xx.o
-obj-$(CONFIG_PPC_FSL_BOOK3E) += fsl_booke.o
+obj-$(CONFIG_PPC_E500) += e500.o
obj-$(CONFIG_RANDOMIZE_BASE) += kaslr_booke.o
ifdef CONFIG_HUGETLB_PAGE
-obj-$(CONFIG_PPC_FSL_BOOK3E) += book3e_hugetlbpage.o
+obj-$(CONFIG_PPC_E500) += e500_hugetlbpage.o
endif
# Disable kcov instrumentation on sensitive code
# This is necessary for booting with kcov enabled on book3e machines
KCOV_INSTRUMENT_tlb.o := n
-KCOV_INSTRUMENT_fsl_booke.o := n
+KCOV_INSTRUMENT_e500.o := n
diff --git a/arch/powerpc/mm/nohash/book3e_pgtable.c b/arch/powerpc/mm/nohash/book3e_pgtable.c
index 4637fdd469cf..b80fc4a91a53 100644
--- a/arch/powerpc/mm/nohash/book3e_pgtable.c
+++ b/arch/powerpc/mm/nohash/book3e_pgtable.c
@@ -10,6 +10,7 @@
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/dma.h>
+#include <asm/code-patching.h>
#include <mm/mmu_decl.h>
@@ -73,6 +74,7 @@ static void __init *early_alloc_pgtable(unsigned long size)
int __ref map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot)
{
pgd_t *pgdp;
+ p4d_t *p4dp;
pud_t *pudp;
pmd_t *pmdp;
pte_t *ptep;
@@ -80,7 +82,8 @@ int __ref map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot)
BUILD_BUG_ON(TASK_SIZE_USER64 > PGTABLE_RANGE);
if (slab_is_available()) {
pgdp = pgd_offset_k(ea);
- pudp = pud_alloc(&init_mm, pgdp, ea);
+ p4dp = p4d_offset(pgdp, ea);
+ pudp = pud_alloc(&init_mm, p4dp, ea);
if (!pudp)
return -ENOMEM;
pmdp = pmd_alloc(&init_mm, pudp, ea);
@@ -91,20 +94,19 @@ int __ref map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot)
return -ENOMEM;
} else {
pgdp = pgd_offset_k(ea);
-#ifndef __PAGETABLE_PUD_FOLDED
- if (pgd_none(*pgdp)) {
+ p4dp = p4d_offset(pgdp, ea);
+ if (p4d_none(*p4dp)) {
pudp = early_alloc_pgtable(PUD_TABLE_SIZE);
- pgd_populate(&init_mm, pgdp, pudp);
+ p4d_populate(&init_mm, p4dp, pudp);
}
-#endif /* !__PAGETABLE_PUD_FOLDED */
- pudp = pud_offset(pgdp, ea);
+ pudp = pud_offset(p4dp, ea);
if (pud_none(*pudp)) {
pmdp = early_alloc_pgtable(PMD_TABLE_SIZE);
pud_populate(&init_mm, pudp, pmdp);
}
pmdp = pmd_offset(pudp, ea);
if (!pmd_present(*pmdp)) {
- ptep = early_alloc_pgtable(PAGE_SIZE);
+ ptep = early_alloc_pgtable(PTE_TABLE_SIZE);
pmd_populate_kernel(&init_mm, pmdp, ptep);
}
ptep = pte_offset_kernel(pmdp, ea);
@@ -114,3 +116,17 @@ int __ref map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot)
smp_wmb();
return 0;
}
+
+void __patch_exception(int exc, unsigned long addr)
+{
+ unsigned int *ibase = &interrupt_base_book3e;
+
+ /*
+ * Our exceptions vectors start with a NOP and -then- a branch
+ * to deal with single stepping from userspace which stops on
+ * the second instruction. Thus we need to patch the second
+ * instruction of the exception, not the first one.
+ */
+
+ patch_branch(ibase + (exc / 4) + 1, addr, 0);
+}
diff --git a/arch/powerpc/mm/nohash/fsl_booke.c b/arch/powerpc/mm/nohash/e500.c
index b4eb06ceb189..40a4e69ae1a9 100644
--- a/arch/powerpc/mm/nohash/fsl_booke.c
+++ b/arch/powerpc/mm/nohash/e500.c
@@ -36,12 +36,10 @@
#include <linux/delay.h>
#include <linux/highmem.h>
#include <linux/memblock.h>
+#include <linux/of_fdt.h>
-#include <asm/pgalloc.h>
-#include <asm/prom.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
-#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <linux/uaccess.h>
#include <asm/smp.h>
@@ -53,21 +51,15 @@
unsigned int tlbcam_index;
-#define NUM_TLBCAMS (64)
struct tlbcam TLBCAM[NUM_TLBCAMS];
-struct tlbcamrange {
+static struct {
unsigned long start;
unsigned long limit;
phys_addr_t phys;
} tlbcam_addrs[NUM_TLBCAMS];
-unsigned long tlbcam_sz(int idx)
-{
- return tlbcam_addrs[idx].limit - tlbcam_addrs[idx].start + 1;
-}
-
-#ifdef CONFIG_FSL_BOOKE
+#ifdef CONFIG_PPC_85xx
/*
* Return PA for this VA if it is mapped by a CAM, or 0
*/
@@ -124,15 +116,18 @@ static void settlbcam(int index, unsigned long virt, phys_addr_t phys,
TLBCAM[index].MAS2 |= (flags & _PAGE_GUARDED) ? MAS2_G : 0;
TLBCAM[index].MAS2 |= (flags & _PAGE_ENDIAN) ? MAS2_E : 0;
- TLBCAM[index].MAS3 = (phys & MAS3_RPN) | MAS3_SX | MAS3_SR;
- TLBCAM[index].MAS3 |= ((flags & _PAGE_RW) ? MAS3_SW : 0);
+ TLBCAM[index].MAS3 = (phys & MAS3_RPN) | MAS3_SR;
+ TLBCAM[index].MAS3 |= (flags & _PAGE_RW) ? MAS3_SW : 0;
if (mmu_has_feature(MMU_FTR_BIG_PHYS))
TLBCAM[index].MAS7 = (u64)phys >> 32;
/* Below is unlikely -- only for large user pages or similar */
if (pte_user(__pte(flags))) {
- TLBCAM[index].MAS3 |= MAS3_UX | MAS3_UR;
- TLBCAM[index].MAS3 |= ((flags & _PAGE_RW) ? MAS3_UW : 0);
+ TLBCAM[index].MAS3 |= MAS3_UR;
+ TLBCAM[index].MAS3 |= (flags & _PAGE_EXEC) ? MAS3_UX : 0;
+ TLBCAM[index].MAS3 |= (flags & _PAGE_RW) ? MAS3_UW : 0;
+ } else {
+ TLBCAM[index].MAS3 |= (flags & _PAGE_EXEC) ? MAS3_SX : 0;
}
tlbcam_addrs[index].start = virt;
@@ -140,8 +135,8 @@ static void settlbcam(int index, unsigned long virt, phys_addr_t phys,
tlbcam_addrs[index].phys = phys;
}
-unsigned long calc_cam_sz(unsigned long ram, unsigned long virt,
- phys_addr_t phys)
+static unsigned long calc_cam_sz(unsigned long ram, unsigned long virt,
+ phys_addr_t phys)
{
unsigned int camsize = __ilog2(ram);
unsigned int align = __ffs(virt | phys);
@@ -167,19 +162,38 @@ unsigned long calc_cam_sz(unsigned long ram, unsigned long virt,
static unsigned long map_mem_in_cams_addr(phys_addr_t phys, unsigned long virt,
unsigned long ram, int max_cam_idx,
- bool dryrun)
+ bool dryrun, bool init)
{
int i;
unsigned long amount_mapped = 0;
+ unsigned long boundary;
+
+ if (strict_kernel_rwx_enabled())
+ boundary = (unsigned long)(_sinittext - _stext);
+ else
+ boundary = ram;
/* Calculate CAM values */
- for (i = 0; ram && i < max_cam_idx; i++) {
+ for (i = 0; boundary && i < max_cam_idx; i++) {
unsigned long cam_sz;
+ pgprot_t prot = init ? PAGE_KERNEL_X : PAGE_KERNEL_ROX;
+
+ cam_sz = calc_cam_sz(boundary, virt, phys);
+ if (!dryrun)
+ settlbcam(i, virt, phys, cam_sz, pgprot_val(prot), 0);
+
+ boundary -= cam_sz;
+ amount_mapped += cam_sz;
+ virt += cam_sz;
+ phys += cam_sz;
+ }
+ for (ram -= amount_mapped; ram && i < max_cam_idx; i++) {
+ unsigned long cam_sz;
+ pgprot_t prot = init ? PAGE_KERNEL_X : PAGE_KERNEL;
cam_sz = calc_cam_sz(ram, virt, phys);
if (!dryrun)
- settlbcam(i, virt, phys, cam_sz,
- pgprot_val(PAGE_KERNEL_X), 0);
+ settlbcam(i, virt, phys, cam_sz, pgprot_val(prot), 0);
ram -= cam_sz;
amount_mapped += cam_sz;
@@ -190,8 +204,13 @@ static unsigned long map_mem_in_cams_addr(phys_addr_t phys, unsigned long virt,
if (dryrun)
return amount_mapped;
- loadcam_multi(0, i, max_cam_idx);
- tlbcam_index = i;
+ if (init) {
+ loadcam_multi(0, i, max_cam_idx);
+ tlbcam_index = i;
+ } else {
+ loadcam_multi(0, i, 0);
+ WARN_ON(i > tlbcam_index);
+ }
#ifdef CONFIG_PPC64
get_paca()->tcd.esel_next = i;
@@ -202,12 +221,12 @@ static unsigned long map_mem_in_cams_addr(phys_addr_t phys, unsigned long virt,
return amount_mapped;
}
-unsigned long map_mem_in_cams(unsigned long ram, int max_cam_idx, bool dryrun)
+unsigned long map_mem_in_cams(unsigned long ram, int max_cam_idx, bool dryrun, bool init)
{
unsigned long virt = PAGE_OFFSET;
phys_addr_t phys = memstart_addr;
- return map_mem_in_cams_addr(phys, virt, ram, max_cam_idx, dryrun);
+ return map_mem_in_cams_addr(phys, virt, ram, max_cam_idx, dryrun, init);
}
#ifdef CONFIG_PPC32
@@ -221,6 +240,16 @@ unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
return tlbcam_addrs[tlbcam_index - 1].limit - PAGE_OFFSET + 1;
}
+void flush_instruction_cache(void)
+{
+ unsigned long tmp;
+
+ tmp = mfspr(SPRN_L1CSR1);
+ tmp |= L1CSR1_ICFI | L1CSR1_ICLFR;
+ mtspr(SPRN_L1CSR1, tmp);
+ isync();
+}
+
/*
* MMU_init_hw does the chip-specific initialization of the MMU hardware.
*/
@@ -229,6 +258,11 @@ void __init MMU_init_hw(void)
flush_instruction_cache();
}
+static unsigned long __init tlbcam_sz(int idx)
+{
+ return tlbcam_addrs[idx].limit - tlbcam_addrs[idx].start + 1;
+}
+
void __init adjust_total_lowmem(void)
{
unsigned long ram;
@@ -238,8 +272,8 @@ void __init adjust_total_lowmem(void)
ram = min((phys_addr_t)__max_low_memory, (phys_addr_t)total_lowmem);
i = switch_to_as1();
- __max_low_memory = map_mem_in_cams(ram, CONFIG_LOWMEM_CAM_NUM, false);
- restore_to_as0(i, 0, 0, 1);
+ __max_low_memory = map_mem_in_cams(ram, CONFIG_LOWMEM_CAM_NUM, false, true);
+ restore_to_as0(i, 0, NULL, 1);
pr_info("Memory CAM mapping: ");
for (i = 0; i < tlbcam_index - 1; i++)
@@ -250,6 +284,22 @@ void __init adjust_total_lowmem(void)
memblock_set_current_limit(memstart_addr + __max_low_memory);
}
+#ifdef CONFIG_STRICT_KERNEL_RWX
+void mmu_mark_rodata_ro(void)
+{
+ unsigned long remapped;
+
+ remapped = map_mem_in_cams(__max_low_memory, CONFIG_LOWMEM_CAM_NUM, false, false);
+
+ WARN_ON(__max_low_memory != remapped);
+}
+#endif
+
+void mmu_mark_initmem_nx(void)
+{
+ /* Everything is done in mmu_mark_rodata_ro() */
+}
+
void setup_initial_memory_limit(phys_addr_t first_memblock_base,
phys_addr_t first_memblock_size)
{
@@ -309,11 +359,11 @@ notrace void __init relocate_init(u64 dt_ptr, phys_addr_t start)
/* map a 64M area for the second relocation */
if (memstart_addr > start)
map_mem_in_cams(0x4000000, CONFIG_LOWMEM_CAM_NUM,
- false);
+ false, true);
else
map_mem_in_cams_addr(start, PAGE_OFFSET + offset,
0x4000000, CONFIG_LOWMEM_CAM_NUM,
- false);
+ false, true);
restore_to_as0(n, offset, __va(dt_ptr), 1);
/* We should never reach here */
panic("Relocation error");
diff --git a/arch/powerpc/mm/nohash/book3e_hugetlbpage.c b/arch/powerpc/mm/nohash/e500_hugetlbpage.c
index 8b88be91b622..c7d4b317a823 100644
--- a/arch/powerpc/mm/nohash/book3e_hugetlbpage.c
+++ b/arch/powerpc/mm/nohash/e500_hugetlbpage.c
@@ -103,21 +103,11 @@ static inline int book3e_tlb_exists(unsigned long ea, unsigned long pid)
int found = 0;
mtspr(SPRN_MAS6, pid << 16);
- if (mmu_has_feature(MMU_FTR_USE_TLBRSRV)) {
- asm volatile(
- "li %0,0\n"
- "tlbsx. 0,%1\n"
- "bne 1f\n"
- "li %0,1\n"
- "1:\n"
- : "=&r"(found) : "r"(ea));
- } else {
- asm volatile(
- "tlbsx 0,%1\n"
- "mfspr %0,0x271\n"
- "srwi %0,%0,31\n"
- : "=&r"(found) : "r"(ea));
- }
+ asm volatile(
+ "tlbsx 0,%1\n"
+ "mfspr %0,0x271\n"
+ "srwi %0,%0,31\n"
+ : "=&r"(found) : "r"(ea));
return found;
}
@@ -142,7 +132,7 @@ book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea, pte_t pte)
tsize = shift - 10;
/*
* We can't be interrupted while we're setting up the MAS
- * regusters or after we've confirmed that no tlb exists.
+ * registers or after we've confirmed that no tlb exists.
*/
local_irq_save(flags);
@@ -169,13 +159,9 @@ book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea, pte_t pte)
mtspr(SPRN_MAS1, mas1);
mtspr(SPRN_MAS2, mas2);
- if (mmu_has_feature(MMU_FTR_USE_PAIRED_MAS)) {
- mtspr(SPRN_MAS7_MAS3, mas7_3);
- } else {
- if (mmu_has_feature(MMU_FTR_BIG_PHYS))
- mtspr(SPRN_MAS7, upper_32_bits(mas7_3));
- mtspr(SPRN_MAS3, lower_32_bits(mas7_3));
- }
+ if (mmu_has_feature(MMU_FTR_BIG_PHYS))
+ mtspr(SPRN_MAS7, upper_32_bits(mas7_3));
+ mtspr(SPRN_MAS3, lower_32_bits(mas7_3));
asm volatile ("tlbwe");
diff --git a/arch/powerpc/mm/nohash/kaslr_booke.c b/arch/powerpc/mm/nohash/kaslr_booke.c
index 4a75f2d9bf0e..0d04f9d5da8d 100644
--- a/arch/powerpc/mm/nohash/kaslr_booke.c
+++ b/arch/powerpc/mm/nohash/kaslr_booke.c
@@ -14,11 +14,11 @@
#include <linux/memblock.h>
#include <linux/libfdt.h>
#include <linux/crash_core.h>
-#include <asm/pgalloc.h>
-#include <asm/prom.h>
+#include <linux/of.h>
+#include <linux/of_fdt.h>
+#include <asm/cacheflush.h>
#include <asm/kdump.h>
#include <mm/mmu_decl.h>
-#include <generated/compile.h>
#include <generated/utsrelease.h>
struct regions {
@@ -36,17 +36,11 @@ struct regions {
int reserved_mem_size_cells;
};
-/* Simplified build-specific string for starting entropy. */
-static const char build_str[] = UTS_RELEASE " (" LINUX_COMPILE_BY "@"
- LINUX_COMPILE_HOST ") (" LINUX_COMPILER ") " UTS_VERSION;
-
struct regions __initdata regions;
static __init void kaslr_get_cmdline(void *fdt)
{
- int node = fdt_path_offset(fdt, "/chosen");
-
- early_init_dt_scan_chosen(node, "chosen", 1, boot_command_line);
+ early_init_dt_scan_chosen(boot_command_line);
}
static unsigned long __init rotate_xor(unsigned long hash, const void *area,
@@ -72,7 +66,8 @@ static unsigned long __init get_boot_seed(void *fdt)
{
unsigned long hash = 0;
- hash = rotate_xor(hash, build_str, sizeof(build_str));
+ /* build-specific string for starting entropy. */
+ hash = rotate_xor(hash, linux_banner, strlen(linux_banner));
hash = rotate_xor(hash, fdt, fdt_totalsize(fdt));
return hash;
@@ -314,10 +309,10 @@ static unsigned long __init kaslr_choose_location(void *dt_ptr, phys_addr_t size
pr_warn("KASLR: No safe seed for randomizing the kernel base.\n");
ram = min_t(phys_addr_t, __max_low_memory, size);
- ram = map_mem_in_cams(ram, CONFIG_LOWMEM_CAM_NUM, true);
+ ram = map_mem_in_cams(ram, CONFIG_LOWMEM_CAM_NUM, true, true);
linear_sz = min_t(unsigned long, ram, SZ_512M);
- /* If the linear size is smaller than 64M, do not randmize */
+ /* If the linear size is smaller than 64M, do not randomize */
if (linear_sz < SZ_64M)
return 0;
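
As the hunks above show, the build string is replaced by linux_banner as the starting entropy, folded into the seed by the existing rotate_xor() helper. A self-contained sketch of that kind of rotate-and-xor mixing (mix_buffer is not the in-tree routine, just the same idea):

    static unsigned long __init mix_buffer(unsigned long hash,
                                           const void *area, size_t size)
    {
            const unsigned long *ptr = area;
            size_t i;

            for (i = 0; i < size / sizeof(hash); i++) {
                    /* rotate right by an odd count, then fold in the next word */
                    hash = (hash << (8 * sizeof(hash) - 7)) | (hash >> 7);
                    hash ^= ptr[i];
            }
            return hash;
    }
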
diff --git a/arch/powerpc/mm/nohash/kup.c b/arch/powerpc/mm/nohash/kup.c
new file mode 100644
index 000000000000..552becf90e97
--- /dev/null
+++ b/arch/powerpc/mm/nohash/kup.c
@@ -0,0 +1,33 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * This file contains the routines for initializing kernel userspace protection
+ */
+
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/jump_label.h>
+#include <linux/printk.h>
+#include <linux/smp.h>
+
+#include <asm/kup.h>
+#include <asm/smp.h>
+
+#ifdef CONFIG_PPC_KUAP
+struct static_key_false disable_kuap_key;
+EXPORT_SYMBOL(disable_kuap_key);
+
+void setup_kuap(bool disabled)
+{
+ if (disabled) {
+ if (IS_ENABLED(CONFIG_40x))
+ disable_kuep = true;
+ if (smp_processor_id() == boot_cpuid)
+ static_branch_enable(&disable_kuap_key);
+ return;
+ }
+
+ pr_info("Activating Kernel Userspace Access Protection\n");
+
+ __prevent_user_access(KUAP_READ_WRITE);
+}
+#endif
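
The new file above only sets up disable_kuap_key; consumers in the nohash KUP headers are expected to test it through a static branch so the protection SPR writes vanish when KUAP is disabled on the command line. A minimal sketch of such a reader — the name mirrors kuap_is_disabled() used later in this diff, but the body shown here is an assumption:

    #include <linux/jump_label.h>

    DECLARE_STATIC_KEY_FALSE(disable_kuap_key);

    static __always_inline bool kuap_is_disabled(void)
    {
            /* Patched to a straight-line branch once the key is enabled. */
            return static_branch_unlikely(&disable_kuap_key);
    }
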
diff --git a/arch/powerpc/mm/nohash/mmu_context.c b/arch/powerpc/mm/nohash/mmu_context.c
index aac81c9f84a5..ccd5819b1bd9 100644
--- a/arch/powerpc/mm/nohash/mmu_context.c
+++ b/arch/powerpc/mm/nohash/mmu_context.c
@@ -21,21 +21,6 @@
* also clear mm->cpu_vm_mask bits when processes are migrated
*/
-//#define DEBUG_MAP_CONSISTENCY
-//#define DEBUG_CLAMP_LAST_CONTEXT 31
-//#define DEBUG_HARDER
-
-/* We don't use DEBUG because it tends to be compiled in always nowadays
- * and this would generate way too much output
- */
-#ifdef DEBUG_HARDER
-#define pr_hard(args...) printk(KERN_DEBUG args)
-#define pr_hardcont(args...) printk(KERN_CONT args)
-#else
-#define pr_hard(args...) do { } while(0)
-#define pr_hardcont(args...) do { } while(0)
-#endif
-
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
@@ -47,10 +32,18 @@
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
+#include <asm/smp.h>
+#include <asm/kup.h>
#include <mm/mmu_decl.h>
/*
+ * Room for two PTE table pointers, usually the kernel and current user
+ * pointer to their respective root page table (pgdir).
+ */
+void *abatron_pteptrs[2];
+
+/*
* The MPC8xx has only 16 contexts. We rotate through them on each task switch.
* A better way would be to keep track of tasks that own contexts, and implement
* an LRU usage. That way very active tasks don't always have to pay the TLB
@@ -68,9 +61,7 @@
* -- BenH
*/
#define FIRST_CONTEXT 1
-#ifdef DEBUG_CLAMP_LAST_CONTEXT
-#define LAST_CONTEXT DEBUG_CLAMP_LAST_CONTEXT
-#elif defined(CONFIG_PPC_8xx)
+#if defined(CONFIG_PPC_8xx)
#define LAST_CONTEXT 16
#elif defined(CONFIG_PPC_47x)
#define LAST_CONTEXT 65535
@@ -80,9 +71,7 @@
static unsigned int next_context, nr_free_contexts;
static unsigned long *context_map;
-#ifdef CONFIG_SMP
static unsigned long *stale_map[NR_CPUS];
-#endif
static struct mm_struct **context_mm;
static DEFINE_RAW_SPINLOCK(context_lock);
@@ -105,7 +94,6 @@ static DEFINE_RAW_SPINLOCK(context_lock);
* the stale map as we can just flush the local CPU
* -- benh
*/
-#ifdef CONFIG_SMP
static unsigned int steal_context_smp(unsigned int id)
{
struct mm_struct *mm;
@@ -127,7 +115,6 @@ static unsigned int steal_context_smp(unsigned int id)
id = FIRST_CONTEXT;
continue;
}
- pr_hardcont(" | steal %d from 0x%p", id, mm);
	/* Mark this mm as having no context anymore */
mm->context.id = MMU_NO_CONTEXT;
@@ -158,34 +145,25 @@ static unsigned int steal_context_smp(unsigned int id)
/* This will cause the caller to try again */
return MMU_NO_CONTEXT;
}
-#endif /* CONFIG_SMP */
static unsigned int steal_all_contexts(void)
{
struct mm_struct *mm;
-#ifdef CONFIG_SMP
int cpu = smp_processor_id();
-#endif
unsigned int id;
for (id = FIRST_CONTEXT; id <= LAST_CONTEXT; id++) {
/* Pick up the victim mm */
mm = context_mm[id];
- pr_hardcont(" | steal %d from 0x%p", id, mm);
-
/* Mark this mm as having no context anymore */
mm->context.id = MMU_NO_CONTEXT;
if (id != FIRST_CONTEXT) {
context_mm[id] = NULL;
__clear_bit(id, context_map);
-#ifdef DEBUG_MAP_CONSISTENCY
- mm->context.active = 0;
-#endif
}
-#ifdef CONFIG_SMP
- __clear_bit(id, stale_map[cpu]);
-#endif
+ if (IS_ENABLED(CONFIG_SMP))
+ __clear_bit(id, stale_map[cpu]);
}
/* Flush the TLB for all contexts (not to be used on SMP) */
@@ -204,15 +182,11 @@ static unsigned int steal_all_contexts(void)
static unsigned int steal_context_up(unsigned int id)
{
struct mm_struct *mm;
-#ifdef CONFIG_SMP
int cpu = smp_processor_id();
-#endif
/* Pick up the victim mm */
mm = context_mm[id];
- pr_hardcont(" | steal %d from 0x%p", id, mm);
-
/* Flush the TLB for that context */
local_flush_tlb_mm(mm);
@@ -220,81 +194,64 @@ static unsigned int steal_context_up(unsigned int id)
mm->context.id = MMU_NO_CONTEXT;
/* XXX This clear should ultimately be part of local_flush_tlb_mm */
-#ifdef CONFIG_SMP
- __clear_bit(id, stale_map[cpu]);
-#endif
+ if (IS_ENABLED(CONFIG_SMP))
+ __clear_bit(id, stale_map[cpu]);
return id;
}
-#ifdef DEBUG_MAP_CONSISTENCY
-static void context_check_map(void)
+static void set_context(unsigned long id, pgd_t *pgd)
{
- unsigned int id, nrf, nact;
+ if (IS_ENABLED(CONFIG_PPC_8xx)) {
+ s16 offset = (s16)(__pa(swapper_pg_dir));
+
+ /*
+ * Register M_TWB will contain base address of level 1 table minus the
+ * lower part of the kernel PGDIR base address, so that all accesses to
+ * level 1 table are done relative to lower part of kernel PGDIR base
+ * address.
+ */
+ mtspr(SPRN_M_TWB, __pa(pgd) - offset);
- nrf = nact = 0;
- for (id = FIRST_CONTEXT; id <= LAST_CONTEXT; id++) {
- int used = test_bit(id, context_map);
- if (!used)
- nrf++;
- if (used != (context_mm[id] != NULL))
- pr_err("MMU: Context %d is %s and MM is %p !\n",
- id, used ? "used" : "free", context_mm[id]);
- if (context_mm[id] != NULL)
- nact += context_mm[id]->context.active;
- }
- if (nrf != nr_free_contexts) {
- pr_err("MMU: Free context count out of sync ! (%d vs %d)\n",
- nr_free_contexts, nrf);
- nr_free_contexts = nrf;
+ /* Update context */
+ mtspr(SPRN_M_CASID, id - 1);
+
+ /* sync */
+ mb();
+ } else if (kuap_is_disabled()) {
+ if (IS_ENABLED(CONFIG_40x))
+ mb(); /* sync */
+
+ mtspr(SPRN_PID, id);
+ isync();
}
- if (nact > num_online_cpus())
- pr_err("MMU: More active contexts than CPUs ! (%d vs %d)\n",
- nact, num_online_cpus());
- if (FIRST_CONTEXT > 0 && !test_bit(0, context_map))
- pr_err("MMU: Context 0 has been freed !!!\n");
}
-#else
-static void context_check_map(void) { }
-#endif
void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *tsk)
{
unsigned int id;
-#ifdef CONFIG_SMP
unsigned int i, cpu = smp_processor_id();
-#endif
unsigned long *map;
/* No lockless fast path .. yet */
raw_spin_lock(&context_lock);
- pr_hard("[%d] activating context for mm @%p, active=%d, id=%d",
- cpu, next, next->context.active, next->context.id);
-
-#ifdef CONFIG_SMP
- /* Mark us active and the previous one not anymore */
- next->context.active++;
- if (prev) {
- pr_hardcont(" (old=0x%p a=%d)", prev, prev->context.active);
- WARN_ON(prev->context.active < 1);
- prev->context.active--;
+ if (IS_ENABLED(CONFIG_SMP)) {
+ /* Mark us active and the previous one not anymore */
+ next->context.active++;
+ if (prev) {
+ WARN_ON(prev->context.active < 1);
+ prev->context.active--;
+ }
}
again:
-#endif /* CONFIG_SMP */
/* If we already have a valid assigned context, skip all that */
id = next->context.id;
- if (likely(id != MMU_NO_CONTEXT)) {
-#ifdef DEBUG_MAP_CONSISTENCY
- if (context_mm[id] != next)
- pr_err("MMU: mm 0x%p has id %d but context_mm[%d] says 0x%p\n",
- next, id, id, context_mm[id]);
-#endif
+ if (likely(id != MMU_NO_CONTEXT))
goto ctxt_ok;
- }
/* We really don't have a context, let's try to acquire one */
id = next_context;
@@ -304,14 +261,12 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next,
/* No more free contexts, let's try to steal one */
if (nr_free_contexts == 0) {
-#ifdef CONFIG_SMP
if (num_online_cpus() > 1) {
id = steal_context_smp(id);
if (id == MMU_NO_CONTEXT)
goto again;
goto stolen;
}
-#endif /* CONFIG_SMP */
if (IS_ENABLED(CONFIG_PPC_8xx))
id = steal_all_contexts();
else
@@ -330,20 +285,13 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next,
next_context = id + 1;
context_mm[id] = next;
next->context.id = id;
- pr_hardcont(" | new id=%d,nrf=%d", id, nr_free_contexts);
- context_check_map();
ctxt_ok:
/* If that context got marked stale on this CPU, then flush the
* local TLB for it and unmark it before we use it
*/
-#ifdef CONFIG_SMP
- if (test_bit(id, stale_map[cpu])) {
- pr_hardcont(" | stale flush %d [%d..%d]",
- id, cpu_first_thread_sibling(cpu),
- cpu_last_thread_sibling(cpu));
-
+ if (IS_ENABLED(CONFIG_SMP) && test_bit(id, stale_map[cpu])) {
local_flush_tlb_mm(next);
/* XXX This clear should ultimately be part of local_flush_tlb_mm */
@@ -353,11 +301,14 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next,
__clear_bit(id, stale_map[i]);
}
}
-#endif
/* Flick the MMU and release lock */
- pr_hardcont(" -> %d\n", id);
+ if (IS_ENABLED(CONFIG_BDI_SWITCH))
+ abatron_pteptrs[1] = next->pgd;
set_context(id, next->pgd);
+#if defined(CONFIG_BOOKE_OR_40x) && defined(CONFIG_PPC_KUAP)
+ tsk->thread.pid = id;
+#endif
raw_spin_unlock(&context_lock);
}
@@ -366,17 +317,6 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next,
*/
int init_new_context(struct task_struct *t, struct mm_struct *mm)
{
- pr_hard("initing context for mm @%p\n", mm);
-
- /*
- * We have MMU_NO_CONTEXT set to be ~0. Hence check
- * explicitly against context.id == 0. This ensures that we properly
- * initialize context slice details for newly allocated mm's (which will
- * have id == 0) and don't alter context slice inherited via fork (which
- * will have id != 0).
- */
- if (mm->context.id == 0)
- slice_init_new_context_exec(mm);
mm->context.id = MMU_NO_CONTEXT;
mm->context.active = 0;
pte_frag_set(&mm->context, NULL);
@@ -401,16 +341,12 @@ void destroy_context(struct mm_struct *mm)
if (id != MMU_NO_CONTEXT) {
__clear_bit(id, context_map);
mm->context.id = MMU_NO_CONTEXT;
-#ifdef DEBUG_MAP_CONSISTENCY
- mm->context.active = 0;
-#endif
context_mm[id] = NULL;
nr_free_contexts++;
}
raw_spin_unlock_irqrestore(&context_lock, flags);
}
-#ifdef CONFIG_SMP
static int mmu_ctx_cpu_prepare(unsigned int cpu)
{
	/* We don't touch CPU 0 map, it's allocated at boot and kept
@@ -419,7 +355,6 @@ static int mmu_ctx_cpu_prepare(unsigned int cpu)
if (cpu == boot_cpuid)
return 0;
- pr_devel("MMU: Allocating stale context map for CPU %d\n", cpu);
stale_map[cpu] = kzalloc(CTX_MAP_SIZE, GFP_KERNEL);
return 0;
}
@@ -430,7 +365,6 @@ static int mmu_ctx_cpu_dead(unsigned int cpu)
if (cpu == boot_cpuid)
return 0;
- pr_devel("MMU: Freeing stale context map for CPU %d\n", cpu);
kfree(stale_map[cpu]);
stale_map[cpu] = NULL;
@@ -440,8 +374,6 @@ static int mmu_ctx_cpu_dead(unsigned int cpu)
return 0;
}
-#endif /* CONFIG_SMP */
-
/*
* Initialize the context management stuff.
*/
@@ -465,16 +397,16 @@ void __init mmu_context_init(void)
if (!context_mm)
panic("%s: Failed to allocate %zu bytes\n", __func__,
sizeof(void *) * (LAST_CONTEXT + 1));
-#ifdef CONFIG_SMP
- stale_map[boot_cpuid] = memblock_alloc(CTX_MAP_SIZE, SMP_CACHE_BYTES);
- if (!stale_map[boot_cpuid])
- panic("%s: Failed to allocate %zu bytes\n", __func__,
- CTX_MAP_SIZE);
-
- cpuhp_setup_state_nocalls(CPUHP_POWERPC_MMU_CTX_PREPARE,
- "powerpc/mmu/ctx:prepare",
- mmu_ctx_cpu_prepare, mmu_ctx_cpu_dead);
-#endif
+ if (IS_ENABLED(CONFIG_SMP)) {
+ stale_map[boot_cpuid] = memblock_alloc(CTX_MAP_SIZE, SMP_CACHE_BYTES);
+ if (!stale_map[boot_cpuid])
+ panic("%s: Failed to allocate %zu bytes\n", __func__,
+ CTX_MAP_SIZE);
+
+ cpuhp_setup_state_nocalls(CPUHP_POWERPC_MMU_CTX_PREPARE,
+ "powerpc/mmu/ctx:prepare",
+ mmu_ctx_cpu_prepare, mmu_ctx_cpu_dead);
+ }
printk(KERN_INFO
"MMU: Allocated %zu bytes of context maps for %d contexts\n",
diff --git a/arch/powerpc/mm/nohash/tlb.c b/arch/powerpc/mm/nohash/tlb.c
index 696f568253a0..2c15c86c7015 100644
--- a/arch/powerpc/mm/nohash/tlb.c
+++ b/arch/powerpc/mm/nohash/tlb.c
@@ -34,6 +34,7 @@
#include <linux/of_fdt.h>
#include <linux/hugetlb.h>
+#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/code-patching.h>
@@ -48,8 +49,7 @@
* other sizes not listed here. The .ind field is only used on MMUs that have
* indirect page table entries.
*/
-#if defined(CONFIG_PPC_BOOK3E_MMU) || defined(CONFIG_PPC_8xx)
-#ifdef CONFIG_PPC_FSL_BOOK3E
+#ifdef CONFIG_PPC_E500
struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
[MMU_PAGE_4K] = {
.shift = 12,
@@ -80,18 +80,27 @@ struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
.enc = BOOK3E_PAGESZ_1GB,
},
};
-#elif defined(CONFIG_PPC_8xx)
+
+static inline int mmu_get_tsize(int psize)
+{
+ return mmu_psize_defs[psize].enc;
+}
+#else
+static inline int mmu_get_tsize(int psize)
+{
+ /* This isn't used on !Book3E for now */
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_PPC_8xx
struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
- /* we only manage 4k and 16k pages as normal pages */
-#ifdef CONFIG_PPC_4K_PAGES
[MMU_PAGE_4K] = {
.shift = 12,
},
-#else
[MMU_PAGE_16K] = {
.shift = 14,
},
-#endif
[MMU_PAGE_512K] = {
.shift = 19,
},
@@ -99,53 +108,7 @@ struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
.shift = 23,
},
};
-#else
-struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
- [MMU_PAGE_4K] = {
- .shift = 12,
- .ind = 20,
- .enc = BOOK3E_PAGESZ_4K,
- },
- [MMU_PAGE_16K] = {
- .shift = 14,
- .enc = BOOK3E_PAGESZ_16K,
- },
- [MMU_PAGE_64K] = {
- .shift = 16,
- .ind = 28,
- .enc = BOOK3E_PAGESZ_64K,
- },
- [MMU_PAGE_1M] = {
- .shift = 20,
- .enc = BOOK3E_PAGESZ_1M,
- },
- [MMU_PAGE_16M] = {
- .shift = 24,
- .ind = 36,
- .enc = BOOK3E_PAGESZ_16M,
- },
- [MMU_PAGE_256M] = {
- .shift = 28,
- .enc = BOOK3E_PAGESZ_256M,
- },
- [MMU_PAGE_1G] = {
- .shift = 30,
- .enc = BOOK3E_PAGESZ_1GB,
- },
-};
-#endif /* CONFIG_FSL_BOOKE */
-
-static inline int mmu_get_tsize(int psize)
-{
- return mmu_psize_defs[psize].enc;
-}
-#else
-static inline int mmu_get_tsize(int psize)
-{
- /* This isn't used on !Book3E for now */
- return 0;
-}
-#endif /* CONFIG_PPC_BOOK3E_MMU */
+#endif
/* The variables below are currently only used on 64-bit Book3E
* though this will probably be made common with other nohash
@@ -153,7 +116,6 @@ static inline int mmu_get_tsize(int psize)
*/
#ifdef CONFIG_PPC64
-int mmu_linear_psize; /* Page size used for the linear mapping */
int mmu_pte_psize; /* Page size used for PTE pages */
int mmu_vmemmap_psize; /* Page size used for the virtual mem map */
int book3e_htw_mode; /* HW tablewalk? Value is PPC_HTW_* */
@@ -170,7 +132,7 @@ int extlb_level_exc;
#endif /* CONFIG_PPC64 */
-#ifdef CONFIG_PPC_FSL_BOOK3E
+#ifdef CONFIG_PPC_E500
/* next_tlbcam_idx is used to round-robin tlbcam entry assignment */
DEFINE_PER_CPU(int, next_tlbcam_idx);
EXPORT_PER_CPU_SYMBOL(next_tlbcam_idx);
@@ -188,6 +150,7 @@ EXPORT_PER_CPU_SYMBOL(next_tlbcam_idx);
* processor
*/
+#ifndef CONFIG_PPC_8xx
/*
* These are the base non-SMP variants of page and mm flushing
*/
@@ -221,6 +184,7 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
mmu_get_tsize(mmu_virtual_psize), 0);
}
EXPORT_SYMBOL(local_flush_tlb_page);
+#endif
/*
* And here are the SMP non-local implementations
@@ -360,6 +324,7 @@ void __init early_init_mmu_47x(void)
/*
* Flush kernel TLB entries in the given range
*/
+#ifndef CONFIG_PPC_8xx
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
#ifdef CONFIG_SMP
@@ -372,6 +337,7 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
#endif
}
EXPORT_SYMBOL(flush_tlb_kernel_range);
+#endif
/*
* Currently, for range flushing, we just do a full mm flush. This should
@@ -434,14 +400,14 @@ void tlb_flush_pgtable(struct mmu_gather *tlb, unsigned long address)
}
}
-static void setup_page_sizes(void)
+static void __init setup_page_sizes(void)
{
unsigned int tlb0cfg;
unsigned int tlb0ps;
unsigned int eptcfg;
int i, psize;
-#ifdef CONFIG_PPC_FSL_BOOK3E
+#ifdef CONFIG_PPC_E500
unsigned int mmucfg = mfspr(SPRN_MMUCFG);
int fsl_mmu = mmu_has_feature(MMU_FTR_TYPE_FSL_E);
@@ -572,7 +538,7 @@ out:
}
}
-static void setup_mmu_htw(void)
+static void __init setup_mmu_htw(void)
{
/*
* If we want to use HW tablewalk, enable it by patching the TLB miss
@@ -584,7 +550,7 @@ static void setup_mmu_htw(void)
patch_exception(0x1c0, exc_data_tlb_miss_htw_book3e);
patch_exception(0x1e0, exc_instruction_tlb_miss_htw_book3e);
break;
-#ifdef CONFIG_PPC_FSL_BOOK3E
+#ifdef CONFIG_PPC_E500
case PPC_HTW_E6500:
extlb_level_exc = EX_TLB_SIZE;
patch_exception(0x1c0, exc_data_tlb_miss_e6500_book3e);
@@ -627,7 +593,7 @@ static void early_init_this_mmu(void)
}
mtspr(SPRN_MAS4, mas4);
-#ifdef CONFIG_PPC_FSL_BOOK3E
+#ifdef CONFIG_PPC_E500
if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
unsigned int num_cams;
bool map = true;
@@ -646,7 +612,7 @@ static void early_init_this_mmu(void)
if (map)
linear_map_top = map_mem_in_cams(linear_map_top,
- num_cams, false);
+ num_cams, false, true);
}
#endif
@@ -658,14 +624,6 @@ static void early_init_this_mmu(void)
static void __init early_init_mmu_global(void)
{
- /* XXX This will have to be decided at runtime, but right
- * now our boot and TLB miss code hard wires it. Ideally
- * we should find out a suitable page size and patch the
- * TLB miss code (either that or use the PACA to store
- * the value we want)
- */
- mmu_linear_psize = MMU_PAGE_1G;
-
/* XXX This should be decided at runtime based on supported
* page sizes in the TLB, but for now let's assume 16M is
* always there and a good fit (which it probably is)
@@ -688,7 +646,7 @@ static void __init early_init_mmu_global(void)
/* Look for HW tablewalk support */
setup_mmu_htw();
-#ifdef CONFIG_PPC_FSL_BOOK3E
+#ifdef CONFIG_PPC_E500
if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
if (book3e_htw_mode == PPC_HTW_NONE) {
extlb_level_exc = EX_TLB_SIZE;
@@ -709,7 +667,7 @@ static void __init early_init_mmu_global(void)
static void __init early_mmu_set_memory_limit(void)
{
-#ifdef CONFIG_PPC_FSL_BOOK3E
+#ifdef CONFIG_PPC_E500
if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
/*
		 * Limit memory so we don't have linear faults.
@@ -758,7 +716,7 @@ void setup_initial_memory_limit(phys_addr_t first_memblock_base,
* We crop it to the size of the first MEMBLOCK to
* avoid going over total available memory just in case...
*/
-#ifdef CONFIG_PPC_FSL_BOOK3E
+#ifdef CONFIG_PPC_E500
if (early_mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
unsigned long linear_sz;
unsigned int num_cams;
@@ -767,7 +725,7 @@ void setup_initial_memory_limit(phys_addr_t first_memblock_base,
num_cams = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) / 4;
linear_sz = map_mem_in_cams(first_memblock_size, num_cams,
- true);
+ true, true);
ppc64_rma_size = min_t(u64, linear_sz, 0x40000000);
} else
@@ -783,9 +741,5 @@ void __init early_init_mmu(void)
#ifdef CONFIG_PPC_47x
early_init_mmu_47x();
#endif
-
-#ifdef CONFIG_PPC_MM_SLICES
- mm_ctx_set_slb_addr_limit(&init_mm.context, SLB_ADDR_LIMIT_DEFAULT);
-#endif
}
#endif /* CONFIG_PPC64 */
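
With the table consolidation above, the 8xx and e500 variants keep the same lookup shape: the shift gives the page size and (on e500) enc gives the hardware encoding. An illustrative fragment of a typical consumer, assuming the usual MAS1 field macros:

    unsigned int tsize = mmu_get_tsize(mmu_virtual_psize);    /* e.g. BOOK3E_PAGESZ_4K */
    unsigned int mas1  = MAS1_VALID | MAS1_TSIZE(tsize);      /* page-size field of a TLB entry */
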
diff --git a/arch/powerpc/mm/nohash/tlb_low.S b/arch/powerpc/mm/nohash/tlb_low.S
index 2ca407cedbe7..e1199608ff4d 100644
--- a/arch/powerpc/mm/nohash/tlb_low.S
+++ b/arch/powerpc/mm/nohash/tlb_low.S
@@ -92,36 +92,25 @@ _GLOBAL(__tlbil_va)
tlbsx. r6,0,r3
bne 10f
sync
-BEGIN_MMU_FTR_SECTION
- b 2f
-END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_47x)
+#ifndef CONFIG_PPC_47x
/* On 440 There are only 64 TLB entries, so r3 < 64, which means bit
* 22, is clear. Since 22 is the V bit in the TLB_PAGEID, loading this
* value will invalidate the TLB entry.
*/
tlbwe r6,r6,PPC44x_TLB_PAGEID
- isync
-10: wrtee r10
- blr
-2:
-#ifdef CONFIG_PPC_47x
+#else
oris r7,r6,0x8000 /* specify way explicitly */
clrrwi r4,r3,12 /* get an EPN for the hashing with V = 0 */
ori r4,r4,PPC47x_TLBE_SIZE
tlbwe r4,r7,0 /* write it */
+#endif /* !CONFIG_PPC_47x */
isync
- wrtee r10
+10: wrtee r10
blr
-#else /* CONFIG_PPC_47x */
-1: trap
- EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,0;
-#endif /* !CONFIG_PPC_47x */
_GLOBAL(_tlbil_all)
_GLOBAL(_tlbil_pid)
-BEGIN_MMU_FTR_SECTION
- b 2f
-END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_47x)
+#ifndef CONFIG_PPC_47x
li r3,0
sync
@@ -136,8 +125,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_47x)
isync
blr
-2:
-#ifdef CONFIG_PPC_47x
+#else
	/* 476 variant. There's no simple way to do this, hopefully we'll
* try to limit the amount of such full invalidates
*/
@@ -179,11 +167,8 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_47x)
b 1b /* Then loop */
1: isync /* Sync shadows */
wrtee r11
-#else /* CONFIG_PPC_47x */
-1: trap
- EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,0;
-#endif /* !CONFIG_PPC_47x */
blr
+#endif /* !CONFIG_PPC_47x */
#ifdef CONFIG_PPC_47x
@@ -201,7 +186,7 @@ _GLOBAL(_tlbivax_bcast)
isync
PPC_TLBIVAX(0, R3)
isync
- eieio
+ mbar
tlbsync
BEGIN_FTR_SECTION
b 1f
@@ -214,7 +199,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_476_DD2)
* Touch enough instruction cache lines to ensure cache hits
*/
1: mflr r9
- bl 2f
+ bcl 20,31,$+4
2: mflr r6
li r7,32
PPC_ICBT(0,R6,R7) /* touch next cache line */
@@ -236,7 +221,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_476_DD2)
blr
#endif /* CONFIG_PPC_47x */
-#elif defined(CONFIG_FSL_BOOKE)
+#elif defined(CONFIG_PPC_85xx)
/*
* FSL BookE implementations.
*
@@ -309,7 +294,7 @@ ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_USE_TLBILX)
isync
1: wrtee r10
blr
-#elif defined(CONFIG_PPC_BOOK3E)
+#elif defined(CONFIG_PPC_BOOK3E_64)
/*
* New Book3E (>= 2.06) implementation
*
@@ -370,34 +355,21 @@ _GLOBAL(_tlbivax_bcast)
rlwimi r4,r6,MAS6_SIND_SHIFT,MAS6_SIND
1: mtspr SPRN_MAS6,r4 /* assume AS=0 for now */
PPC_TLBIVAX(0,R3)
- eieio
+ mbar
tlbsync
sync
wrtee r10
blr
-
-_GLOBAL(set_context)
-#ifdef CONFIG_BDI_SWITCH
- /* Context switch the PTE pointer for the Abatron BDI2000.
- * The PGDIR is the second parameter.
- */
- lis r5, abatron_pteptrs@h
- ori r5, r5, abatron_pteptrs@l
- stw r4, 0x4(r5)
-#endif
- mtspr SPRN_PID,r3
- isync /* Force context change */
- blr
#else
#error Unsupported processor type !
#endif
-#if defined(CONFIG_PPC_FSL_BOOK3E)
+#if defined(CONFIG_PPC_E500)
/*
* extern void loadcam_entry(unsigned int index)
*
* Load TLBCAM[index] entry in to the L2 CAM MMU
- * Must preserve r7, r8, r9, and r10
+ * Must preserve r7, r8, r9, r10, r11, r12
*/
_GLOBAL(loadcam_entry)
mflr r5
@@ -429,16 +401,22 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_BIG_PHYS)
*
* r3 = first entry to write
* r4 = number of entries to write
- * r5 = temporary tlb entry
+ * r5 = temporary tlb entry (0 means no switch to AS1)
*/
_GLOBAL(loadcam_multi)
mflr r8
+ /* Don't switch to AS=1 if already there */
+ mfmsr r11
+ andi. r11,r11,MSR_IS
+ bne 10f
+ mr. r12, r5
+ beq 10f
/*
* Set up temporary TLB entry that is the same as what we're
* running from, but in AS=1.
*/
- bl 1f
+ bcl 20,31,$+4
1: mflr r6
tlbsx 0,r8
mfspr r6,SPRN_MAS1
@@ -458,6 +436,7 @@ _GLOBAL(loadcam_multi)
mtmsr r6
isync
+10:
mr r9,r3
add r10,r3,r4
2: bl loadcam_entry
@@ -466,6 +445,12 @@ _GLOBAL(loadcam_multi)
mr r3,r9
blt 2b
+ /* Don't return to AS=0 if we were in AS=1 at function start */
+ andi. r11,r11,MSR_IS
+ bne 3f
+ cmpwi r12, 0
+ beq 3f
+
/* Return to AS=0 and clear the temporary entry */
mfmsr r6
rlwinm. r6,r6,0,~(MSR_IS|MSR_DS)
@@ -481,6 +466,7 @@ _GLOBAL(loadcam_multi)
tlbwe
isync
+3:
mtlr r8
blr
#endif
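
The loadcam_multi() changes above skip the temporary AS=1 mapping when the CPU is already executing in AS=1, or when the caller passes 0 as the temporary entry, and likewise skip the switch back to AS=0 on exit. A rough C rendering of the new control flow — the two helpers are hypothetical, the real routine stays in assembly:

    static void loadcam_multi_sketch(int first, int count, int tmp_entry)
    {
            bool was_as1 = mfmsr() & MSR_IS;
            bool use_tmp = !was_as1 && tmp_entry != 0;
            int i;

            if (use_tmp)
                    switch_to_as1(tmp_entry);               /* hypothetical helper */

            for (i = first; i < first + count; i++)
                    loadcam_entry(i);

            if (use_tmp)
                    restore_as0_and_clear(tmp_entry);       /* hypothetical helper */
    }
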
diff --git a/arch/powerpc/mm/nohash/tlb_low_64e.S b/arch/powerpc/mm/nohash/tlb_low_64e.S
index 1f110c3c48fb..76cf456d7976 100644
--- a/arch/powerpc/mm/nohash/tlb_low_64e.S
+++ b/arch/powerpc/mm/nohash/tlb_low_64e.S
@@ -6,6 +6,7 @@
* Ben. Herrenschmidt (benh@kernel.crashing.org), IBM Corp.
*/
+#include <linux/pgtable.h>
#include <asm/processor.h>
#include <asm/reg.h>
#include <asm/page.h>
@@ -13,7 +14,6 @@
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
-#include <asm/pgtable.h>
#include <asm/exception-64e.h>
#include <asm/ppc-opcode.h>
#include <asm/kvm_asm.h>
@@ -61,7 +61,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
ld r14,PACAPGD(r13)
std r15,EX_TLB_R15(r12)
std r10,EX_TLB_CR(r12)
-#ifdef CONFIG_PPC_FSL_BOOK3E
START_BTB_FLUSH_SECTION
mfspr r11, SPRN_SRR1
andi. r10,r11,MSR_PR
@@ -70,22 +69,17 @@ START_BTB_FLUSH_SECTION
1:
END_BTB_FLUSH_SECTION
std r7,EX_TLB_R7(r12)
-#endif
- TLB_MISS_PROLOG_STATS
.endm
.macro tlb_epilog_bolted
ld r14,EX_TLB_CR(r12)
-#ifdef CONFIG_PPC_FSL_BOOK3E
ld r7,EX_TLB_R7(r12)
-#endif
ld r10,EX_TLB_R10(r12)
ld r11,EX_TLB_R11(r12)
ld r13,EX_TLB_R13(r12)
mtcr r14
ld r14,EX_TLB_R14(r12)
ld r15,EX_TLB_R15(r12)
- TLB_MISS_RESTORE_STATS
ld r16,EX_TLB_R16(r12)
mfspr r12,SPRN_SPRG_GEN_SCRATCH
.endm
@@ -128,9 +122,15 @@ END_BTB_FLUSH_SECTION
ori r10,r10,_PAGE_PRESENT
oris r11,r10,_PAGE_ACCESSED@h
- TLB_MISS_STATS_SAVE_INFO_BOLTED
bne tlb_miss_kernel_bolted
+tlb_miss_user_bolted:
+#ifdef CONFIG_PPC_KUAP
+ mfspr r10,SPRN_MAS1
+ rlwinm. r10,r10,0,0x3fff0000
+ beq- tlb_miss_fault_bolted /* KUAP fault */
+#endif
+
tlb_miss_common_bolted:
/*
* This is the guts of the TLB miss handler for bolted-linear.
@@ -148,16 +148,7 @@ tlb_miss_common_bolted:
clrrdi r15,r15,3
beq tlb_miss_fault_bolted /* No PGDIR, bail */
-BEGIN_MMU_FTR_SECTION
- /* Set the TLB reservation and search for existing entry. Then load
- * the entry.
- */
- PPC_TLBSRX_DOT(0,R16)
- ldx r14,r14,r15 /* grab pgd entry */
- beq tlb_miss_done_bolted /* tlb exists already, bail */
-MMU_FTR_SECTION_ELSE
ldx r14,r14,r15 /* grab pgd entry */
-ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_USE_TLBRSRV)
rldicl r15,r16,64-PUD_SHIFT+3,64-PUD_INDEX_SIZE-3
clrrdi r15,r15,3
@@ -209,7 +200,6 @@ ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_USE_TLBRSRV)
tlbwe
tlb_miss_done_bolted:
- TLB_MISS_STATS_X(MMSTAT_TLB_MISS_NORM_OK)
tlb_epilog_bolted
rfi
@@ -219,21 +209,20 @@ itlb_miss_kernel_bolted:
tlb_miss_kernel_bolted:
mfspr r10,SPRN_MAS1
ld r14,PACA_KERNELPGD(r13)
- cmpldi cr0,r15,8 /* Check for vmalloc region */
+ srdi r15,r16,44 /* get kernel region */
+ andi. r15,r15,1 /* Check for vmalloc region */
rlwinm r10,r10,0,16,1 /* Clear TID */
mtspr SPRN_MAS1,r10
- beq+ tlb_miss_common_bolted
+ bne+ tlb_miss_common_bolted
tlb_miss_fault_bolted:
/* We need to check if it was an instruction miss */
- andi. r10,r11,_PAGE_EXEC|_PAGE_BAP_SX
+ andi. r10,r11,_PAGE_BAP_UX|_PAGE_BAP_SX
bne itlb_miss_fault_bolted
dtlb_miss_fault_bolted:
- TLB_MISS_STATS_D(MMSTAT_TLB_MISS_NORM_FAULT)
tlb_epilog_bolted
b exc_data_storage_book3e
itlb_miss_fault_bolted:
- TLB_MISS_STATS_I(MMSTAT_TLB_MISS_NORM_FAULT)
tlb_epilog_bolted
b exc_instruction_storage_book3e
@@ -243,20 +232,18 @@ itlb_miss_fault_bolted:
rldicl. r10,r16,64-PGTABLE_EADDR_SIZE,PGTABLE_EADDR_SIZE+4
srdi r15,r16,60 /* get region */
- TLB_MISS_STATS_SAVE_INFO_BOLTED
bne- itlb_miss_fault_bolted
- li r11,_PAGE_PRESENT|_PAGE_EXEC /* Base perm */
+ li r11,_PAGE_PRESENT|_PAGE_BAP_UX /* Base perm */
/* We do the user/kernel test for the PID here along with the RW test
*/
cmpldi cr0,r15,0 /* Check for user region */
oris r11,r11,_PAGE_ACCESSED@h
- beq tlb_miss_common_bolted
+ beq tlb_miss_user_bolted
b itlb_miss_kernel_bolted
-#ifdef CONFIG_PPC_FSL_BOOK3E
/*
* TLB miss handling for e6500 and derivatives, using hardware tablewalk.
*
@@ -276,7 +263,6 @@ itlb_miss_fault_bolted:
srdi. r15,r16,60 /* get region */
ori r16,r16,1
- TLB_MISS_STATS_SAVE_INFO_BOLTED
bne tlb_miss_kernel_e6500 /* user/kernel test */
b tlb_miss_common_e6500
@@ -288,7 +274,6 @@ itlb_miss_fault_bolted:
srdi. r15,r16,60 /* get region */
rldicr r16,r16,0,62
- TLB_MISS_STATS_SAVE_INFO_BOLTED
bne tlb_miss_kernel_e6500 /* user vs kernel check */
/*
@@ -460,7 +445,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_SMT)
.endm
tlb_unlock_e6500
- TLB_MISS_STATS_X(MMSTAT_TLB_MISS_NORM_OK)
tlb_epilog_bolted
rfi
@@ -510,7 +494,9 @@ tlb_miss_huge_e6500:
tlb_miss_kernel_e6500:
ld r14,PACA_KERNELPGD(r13)
- cmpldi cr1,r15,8 /* Check for vmalloc region */
+ srdi r15,r16,44 /* get kernel region */
+ xoris r15,r15,0xc /* Check for vmalloc region */
+ cmplwi cr1,r15,1
beq+ cr1,tlb_miss_common_e6500
tlb_miss_fault_e6500:
@@ -519,14 +505,11 @@ tlb_miss_fault_e6500:
andi. r16,r16,1
bne itlb_miss_fault_e6500
dtlb_miss_fault_e6500:
- TLB_MISS_STATS_D(MMSTAT_TLB_MISS_NORM_FAULT)
tlb_epilog_bolted
b exc_data_storage_book3e
itlb_miss_fault_e6500:
- TLB_MISS_STATS_I(MMSTAT_TLB_MISS_NORM_FAULT)
tlb_epilog_bolted
b exc_instruction_storage_book3e
-#endif /* CONFIG_PPC_FSL_BOOK3E */
/**********************************************************************
* *
@@ -546,17 +529,18 @@ itlb_miss_fault_e6500:
*/
mfspr r14,SPRN_ESR
mfspr r16,SPRN_DEAR /* get faulting address */
- srdi r15,r16,60 /* get region */
- cmpldi cr0,r15,0xc /* linear mapping ? */
- TLB_MISS_STATS_SAVE_INFO
+ srdi r15,r16,44 /* get region */
+ xoris r15,r15,0xc
+ cmpldi cr0,r15,0 /* linear mapping ? */
beq tlb_load_linear /* yes -> go to linear map load */
+ cmpldi cr1,r15,1 /* vmalloc mapping ? */
/* The page tables are mapped virtually linear. At this point, though,
* we don't know whether we are trying to fault in a first level
* virtual address or a virtual page table address. We can get that
* from bit 0x1 of the region ID which we have set for a page table
*/
- andi. r10,r15,0x1
+ andis. r10,r15,0x1
bne- virt_page_table_tlb_miss
std r14,EX_TLB_ESR(r12); /* save ESR */
@@ -568,7 +552,7 @@ itlb_miss_fault_e6500:
/* We do the user/kernel test for the PID here along with the RW test
*/
- cmpldi cr0,r15,0 /* Check for user region */
+ srdi. r15,r16,60 /* Check for user region */
/* We pre-test some combination of permissions to avoid double
* faults:
@@ -589,18 +573,16 @@ itlb_miss_fault_e6500:
*/
rlwimi r11,r14,32-19,27,27
rlwimi r11,r14,32-16,19,19
- beq normal_tlb_miss
+ beq normal_tlb_miss_user
/* XXX replace the RMW cycles with immediate loads + writes */
1: mfspr r10,SPRN_MAS1
- cmpldi cr0,r15,8 /* Check for vmalloc region */
rlwinm r10,r10,0,16,1 /* Clear TID */
mtspr SPRN_MAS1,r10
- beq+ normal_tlb_miss
+ beq+ cr1,normal_tlb_miss
/* We got a crappy address, just fault with whatever DEAR and ESR
* are here
*/
- TLB_MISS_STATS_D(MMSTAT_TLB_MISS_NORM_FAULT)
TLB_MISS_EPILOG_ERROR
b exc_data_storage_book3e
@@ -622,31 +604,30 @@ itlb_miss_fault_e6500:
*
* Faulting address is SRR0 which is already in r16
*/
- srdi r15,r16,60 /* get region */
- cmpldi cr0,r15,0xc /* linear mapping ? */
- TLB_MISS_STATS_SAVE_INFO
+ srdi r15,r16,44 /* get region */
+ xoris r15,r15,0xc
+ cmpldi cr0,r15,0 /* linear mapping ? */
beq tlb_load_linear /* yes -> go to linear map load */
+ cmpldi cr1,r15,1 /* vmalloc mapping ? */
/* We do the user/kernel test for the PID here along with the RW test
*/
- li r11,_PAGE_PRESENT|_PAGE_EXEC /* Base perm */
+ li r11,_PAGE_PRESENT|_PAGE_BAP_UX /* Base perm */
oris r11,r11,_PAGE_ACCESSED@h
- cmpldi cr0,r15,0 /* Check for user region */
+ srdi. r15,r16,60 /* Check for user region */
std r14,EX_TLB_ESR(r12) /* write crazy -1 to frame */
- beq normal_tlb_miss
+ beq normal_tlb_miss_user
li r11,_PAGE_PRESENT|_PAGE_BAP_SX /* Base perm */
oris r11,r11,_PAGE_ACCESSED@h
/* XXX replace the RMW cycles with immediate loads + writes */
mfspr r10,SPRN_MAS1
- cmpldi cr0,r15,8 /* Check for vmalloc region */
rlwinm r10,r10,0,16,1 /* Clear TID */
mtspr SPRN_MAS1,r10
- beq+ normal_tlb_miss
+ beq+ cr1,normal_tlb_miss
/* We got a crappy address, just fault */
- TLB_MISS_STATS_I(MMSTAT_TLB_MISS_NORM_FAULT)
TLB_MISS_EPILOG_ERROR
b exc_instruction_storage_book3e
@@ -662,6 +643,12 @@ itlb_miss_fault_e6500:
* r11 = PTE permission mask
* r10 = crap (free to use)
*/
+normal_tlb_miss_user:
+#ifdef CONFIG_PPC_KUAP
+ mfspr r14,SPRN_MAS1
+ rlwinm. r14,r14,0,0x3fff0000
+ beq- normal_tlb_miss_access_fault /* KUAP fault */
+#endif
normal_tlb_miss:
/* So we first construct the page table address. We do that by
* shifting the bottom of the address (not the region ID) by
@@ -671,22 +658,14 @@ normal_tlb_miss:
* NOTE: For 64K pages, we do things slightly differently in
* order to handle the weird page table format used by linux
*/
- ori r10,r15,0x1
+ srdi r15,r16,44
+ oris r10,r15,0x1
rldicl r14,r16,64-(PAGE_SHIFT-3),PAGE_SHIFT-3+4
- sldi r15,r10,60
- clrrdi r14,r14,3
+ sldi r15,r10,44
+ clrrdi r14,r14,19
or r10,r15,r14
-BEGIN_MMU_FTR_SECTION
- /* Set the TLB reservation and search for existing entry. Then load
- * the entry.
- */
- PPC_TLBSRX_DOT(0,R16)
ld r14,0(r10)
- beq normal_tlb_miss_done
-MMU_FTR_SECTION_ELSE
- ld r14,0(r10)
-ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_USE_TLBRSRV)
finish_normal_tlb_miss:
/* Check if required permissions are met */
@@ -705,13 +684,13 @@ finish_normal_tlb_miss:
*
* TODO: mix up code below for better scheduling
*/
- clrrdi r11,r16,12 /* Clear low crap in EA */
- rlwimi r11,r14,32-19,27,31 /* Insert WIMGE */
- mtspr SPRN_MAS2,r11
+ clrrdi r10,r16,12 /* Clear low crap in EA */
+ rlwimi r10,r14,32-19,27,31 /* Insert WIMGE */
+ mtspr SPRN_MAS2,r10
/* Check page size, if not standard, update MAS1 */
- rldicl r11,r14,64-8,64-8
- cmpldi cr0,r11,BOOK3E_PAGESZ_4K
+ rldicl r10,r14,64-8,64-8
+ cmpldi cr0,r10,BOOK3E_PAGESZ_4K
beq- 1f
mfspr r11,SPRN_MAS1
rlwimi r11,r14,31,21,24
@@ -730,13 +709,9 @@ finish_normal_tlb_miss:
li r11,MAS3_SW|MAS3_UW
andc r15,r15,r11
1:
-BEGIN_MMU_FTR_SECTION
srdi r16,r15,32
mtspr SPRN_MAS3,r15
mtspr SPRN_MAS7,r16
-MMU_FTR_SECTION_ELSE
- mtspr SPRN_MAS7_MAS3,r15
-ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_USE_PAIRED_MAS)
tlbwe
@@ -745,23 +720,20 @@ normal_tlb_miss_done:
* level 0 and just going back to userland. They are only needed
* if you are going to take an access fault
*/
- TLB_MISS_STATS_X(MMSTAT_TLB_MISS_NORM_OK)
TLB_MISS_EPILOG_SUCCESS
rfi
normal_tlb_miss_access_fault:
/* We need to check if it was an instruction miss */
- andi. r10,r11,_PAGE_EXEC
+ andi. r10,r11,_PAGE_BAP_UX
bne 1f
ld r14,EX_TLB_DEAR(r12)
ld r15,EX_TLB_ESR(r12)
mtspr SPRN_DEAR,r14
mtspr SPRN_ESR,r15
- TLB_MISS_STATS_D(MMSTAT_TLB_MISS_NORM_FAULT)
TLB_MISS_EPILOG_ERROR
b exc_data_storage_book3e
-1: TLB_MISS_STATS_I(MMSTAT_TLB_MISS_NORM_FAULT)
- TLB_MISS_EPILOG_ERROR
+1: TLB_MISS_EPILOG_ERROR
b exc_instruction_storage_book3e
@@ -791,6 +763,7 @@ normal_tlb_miss_access_fault:
*/
virt_page_table_tlb_miss:
/* Are we hitting a kernel page table ? */
+ srdi r15,r16,60
andi. r10,r15,0x8
/* The cool thing now is that r10 contains 0 for user and 8 for kernel,
@@ -805,19 +778,22 @@ virt_page_table_tlb_miss:
mfspr r10,SPRN_MAS1
rlwinm r10,r10,0,16,1 /* Clear TID */
mtspr SPRN_MAS1,r10
+#ifdef CONFIG_PPC_KUAP
+ b 2f
1:
-BEGIN_MMU_FTR_SECTION
- /* Search if we already have a TLB entry for that virtual address, and
- * if we do, bail out.
- */
- PPC_TLBSRX_DOT(0,R16)
- beq virt_page_table_tlb_miss_done
-END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_TLBRSRV)
+ mfspr r10,SPRN_MAS1
+ rlwinm. r10,r10,0,0x3fff0000
+ beq- virt_page_table_tlb_miss_fault /* KUAP fault */
+2:
+#else
+1:
+#endif
/* Now, we need to walk the page tables. First check if we are in
* range.
*/
- rldicl. r10,r16,64-(VPTE_INDEX_SIZE+3),VPTE_INDEX_SIZE+3+4
+ rldicl r10,r16,64-(VPTE_INDEX_SIZE+3),VPTE_INDEX_SIZE+3+4
+ cmpldi r10,0x80
bne- virt_page_table_tlb_miss_fault
/* Get the PGD pointer */
@@ -863,43 +839,13 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_TLBRSRV)
clrldi r11,r15,4 /* remove region ID from RPN */
ori r10,r11,1 /* Or-in SR */
-BEGIN_MMU_FTR_SECTION
srdi r16,r10,32
mtspr SPRN_MAS3,r10
mtspr SPRN_MAS7,r16
-MMU_FTR_SECTION_ELSE
- mtspr SPRN_MAS7_MAS3,r10
-ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_USE_PAIRED_MAS)
tlbwe
-BEGIN_MMU_FTR_SECTION
-virt_page_table_tlb_miss_done:
-
- /* We have overridden MAS2:EPN but currently our primary TLB miss
- * handler will always restore it so that should not be an issue,
- * if we ever optimize the primary handler to not write MAS2 on
- * some cases, we'll have to restore MAS2:EPN here based on the
- * original fault's DEAR. If we do that we have to modify the
- * ITLB miss handler to also store SRR0 in the exception frame
- * as DEAR.
- *
- * However, one nasty thing we did is we cleared the reservation
- * (well, potentially we did). We do a trick here thus if we
- * are not a level 0 exception (we interrupted the TLB miss) we
- * offset the return address by -4 in order to replay the tlbsrx
- * instruction there
- */
- subf r10,r13,r12
- cmpldi cr0,r10,PACA_EXTLB+EX_TLB_SIZE
- bne- 1f
- ld r11,PACA_EXTLB+EX_TLB_SIZE+EX_TLB_SRR0(r13)
- addi r10,r11,-4
- std r10,PACA_EXTLB+EX_TLB_SIZE+EX_TLB_SRR0(r13)
-1:
-END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_TLBRSRV)
/* Return to caller, normal case */
- TLB_MISS_STATS_X(MMSTAT_TLB_MISS_PT_OK);
TLB_MISS_EPILOG_SUCCESS
rfi
@@ -935,18 +881,15 @@ virt_page_table_tlb_miss_fault:
beq 1f
mtspr SPRN_DEAR,r15
mtspr SPRN_ESR,r16
- TLB_MISS_STATS_D(MMSTAT_TLB_MISS_PT_FAULT);
TLB_MISS_EPILOG_ERROR
b exc_data_storage_book3e
-1: TLB_MISS_STATS_I(MMSTAT_TLB_MISS_PT_FAULT);
- TLB_MISS_EPILOG_ERROR
+1: TLB_MISS_EPILOG_ERROR
b exc_instruction_storage_book3e
virt_page_table_tlb_miss_whacko_fault:
/* The linear fault will restart everything so ESR and DEAR will
* not have been clobbered, let's just fault with what we have
*/
- TLB_MISS_STATS_X(MMSTAT_TLB_MISS_PT_FAULT);
TLB_MISS_EPILOG_ERROR
b exc_data_storage_book3e
@@ -969,29 +912,28 @@ virt_page_table_tlb_miss_whacko_fault:
*/
mfspr r14,SPRN_ESR
mfspr r16,SPRN_DEAR /* get faulting address */
- srdi r11,r16,60 /* get region */
- cmpldi cr0,r11,0xc /* linear mapping ? */
- TLB_MISS_STATS_SAVE_INFO
+ srdi r11,r16,44 /* get region */
+ xoris r11,r11,0xc
+ cmpldi cr0,r11,0 /* linear mapping ? */
beq tlb_load_linear /* yes -> go to linear map load */
+ cmpldi cr1,r11,1 /* vmalloc mapping ? */
/* We do the user/kernel test for the PID here along with the RW test
*/
- cmpldi cr0,r11,0 /* Check for user region */
+ srdi. r11,r16,60 /* Check for user region */
ld r15,PACAPGD(r13) /* Load user pgdir */
beq htw_tlb_miss
/* XXX replace the RMW cycles with immediate loads + writes */
1: mfspr r10,SPRN_MAS1
- cmpldi cr0,r11,8 /* Check for vmalloc region */
rlwinm r10,r10,0,16,1 /* Clear TID */
mtspr SPRN_MAS1,r10
ld r15,PACA_KERNELPGD(r13) /* Load kernel pgdir */
- beq+ htw_tlb_miss
+ beq+ cr1,htw_tlb_miss
/* We got a crappy address, just fault with whatever DEAR and ESR
* are here
*/
- TLB_MISS_STATS_D(MMSTAT_TLB_MISS_NORM_FAULT)
TLB_MISS_EPILOG_ERROR
b exc_data_storage_book3e
@@ -1013,27 +955,26 @@ virt_page_table_tlb_miss_whacko_fault:
*
* Faulting address is SRR0 which is already in r16
*/
- srdi r11,r16,60 /* get region */
- cmpldi cr0,r11,0xc /* linear mapping ? */
- TLB_MISS_STATS_SAVE_INFO
+ srdi r11,r16,44 /* get region */
+ xoris r11,r11,0xc
+ cmpldi cr0,r11,0 /* linear mapping ? */
beq tlb_load_linear /* yes -> go to linear map load */
+ cmpldi cr1,r11,1 /* vmalloc mapping ? */
/* We do the user/kernel test for the PID here along with the RW test
*/
- cmpldi cr0,r11,0 /* Check for user region */
+ srdi. r11,r16,60 /* Check for user region */
ld r15,PACAPGD(r13) /* Load user pgdir */
beq htw_tlb_miss
/* XXX replace the RMW cycles with immediate loads + writes */
1: mfspr r10,SPRN_MAS1
- cmpldi cr0,r11,8 /* Check for vmalloc region */
rlwinm r10,r10,0,16,1 /* Clear TID */
mtspr SPRN_MAS1,r10
ld r15,PACA_KERNELPGD(r13) /* Load kernel pgdir */
beq+ htw_tlb_miss
/* We got a crappy address, just fault */
- TLB_MISS_STATS_I(MMSTAT_TLB_MISS_NORM_FAULT)
TLB_MISS_EPILOG_ERROR
b exc_instruction_storage_book3e
@@ -1054,6 +995,11 @@ virt_page_table_tlb_miss_whacko_fault:
* avoid too much complication, it will save/restore things for us
*/
htw_tlb_miss:
+#ifdef CONFIG_PPC_KUAP
+ mfspr r10,SPRN_MAS1
+ rlwinm. r10,r10,0,0x3fff0000
+ beq- htw_tlb_miss_fault /* KUAP fault */
+#endif
/* Search if we already have a TLB entry for that virtual address, and
* if we do, bail out.
*
@@ -1115,13 +1061,9 @@ htw_tlb_miss:
*/
ori r10,r15,(BOOK3E_PAGESZ_4K << MAS3_SPSIZE_SHIFT)
-BEGIN_MMU_FTR_SECTION
srdi r16,r10,32
mtspr SPRN_MAS3,r10
mtspr SPRN_MAS7,r16
-MMU_FTR_SECTION_ELSE
- mtspr SPRN_MAS7_MAS3,r10
-ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_USE_PAIRED_MAS)
tlbwe
@@ -1130,7 +1072,6 @@ htw_tlb_miss_done:
* level 0 and just going back to userland. They are only needed
* if you are going to take an access fault
*/
- TLB_MISS_STATS_X(MMSTAT_TLB_MISS_PT_OK)
TLB_MISS_EPILOG_SUCCESS
rfi
@@ -1142,11 +1083,9 @@ htw_tlb_miss_fault:
beq 1f
mtspr SPRN_DEAR,r16
mtspr SPRN_ESR,r14
- TLB_MISS_STATS_D(MMSTAT_TLB_MISS_PT_FAULT)
TLB_MISS_EPILOG_ERROR
b exc_data_storage_book3e
-1: TLB_MISS_STATS_I(MMSTAT_TLB_MISS_PT_FAULT)
- TLB_MISS_EPILOG_ERROR
+1: TLB_MISS_EPILOG_ERROR
b exc_instruction_storage_book3e
/*
@@ -1179,8 +1118,8 @@ tlb_load_linear:
* we only use 1G pages for now. That might have to be changed in a
* final implementation, especially when dealing with hypervisors
*/
- ld r11,PACATOC(r13)
- ld r11,linear_map_top@got(r11)
+ __LOAD_PACA_TOC(r11)
+ LOAD_REG_ADDR_ALTTOC(r11, r11, linear_map_top)
ld r10,0(r11)
tovirt(10,10)
cmpld cr0,r16,r10
@@ -1205,13 +1144,9 @@ tlb_load_linear:
clrldi r10,r10,4 /* clear region bits */
ori r10,r10,MAS3_SR|MAS3_SW|MAS3_SX
-BEGIN_MMU_FTR_SECTION
srdi r16,r10,32
mtspr SPRN_MAS3,r10
mtspr SPRN_MAS7,r16
-MMU_FTR_SECTION_ELSE
- mtspr SPRN_MAS7_MAS3,r10
-ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_USE_PAIRED_MAS)
tlbwe
@@ -1221,7 +1156,6 @@ tlb_load_linear_done:
* We do that because we can't resume a fault within a TLB
* miss handler, due to MAS and TLB reservation being clobbered.
*/
- TLB_MISS_STATS_X(MMSTAT_TLB_MISS_LINEAR)
TLB_MISS_EPILOG_ERROR
rfi
@@ -1233,13 +1167,3 @@ tlb_load_linear_fault:
b exc_data_storage_book3e
1: TLB_MISS_EPILOG_ERROR_SPECIAL
b exc_instruction_storage_book3e
-
-
-#ifdef CONFIG_BOOK3E_MMU_TLB_STATS
-.tlb_stat_inc:
-1: ldarx r8,0,r9
- addi r8,r8,1
- stdcx. r8,0,r9
- bne- 1b
- blr
-#endif
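
Several hunks above replace the old 4-bit region test (srdi rN,rX,60; cmpldi 0xc) with a 20-bit one (srdi rN,rX,44; xoris rN,rN,0xc), so the comparisons now select a 16 TB granule rather than a whole top-nibble region; user addresses are still recognised by a zero top nibble (srdi.,60). Roughly what the new test computes for an effective address:

    /* Illustrative only: mirrors the srdi/xoris/cmpldi sequence above. */
    static inline int region_of(unsigned long ea)
    {
            unsigned long region = ea >> 44;        /* top 20 bits */

            if (region == 0xc0000)
                    return 0;       /* linear mapping  (cmpldi ...,0)     */
            if (region == 0xc0001)
                    return 1;       /* vmalloc region  (cmpldi cr1,...,1) */
            return -1;              /* anything else takes the fault path */
    }
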
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 3c7dec70cda0..b44ce71917d7 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -26,7 +26,6 @@
#include <linux/slab.h>
#include <asm/cputhreads.h>
#include <asm/sparsemem.h>
-#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/topology.h>
#include <asm/firmware.h>
@@ -40,9 +39,6 @@ static int numa_enabled = 1;
static char *cmdline __initdata;
-static int numa_debug;
-#define dbg(args...) if (numa_debug) { printk(KERN_INFO args); }
-
int numa_cpu_lookup_table[NR_CPUS];
cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
struct pglist_data *node_data[MAX_NUMNODES];
@@ -51,14 +47,22 @@ EXPORT_SYMBOL(numa_cpu_lookup_table);
EXPORT_SYMBOL(node_to_cpumask_map);
EXPORT_SYMBOL(node_data);
-static int min_common_depth;
+static int primary_domain_index;
static int n_mem_addr_cells, n_mem_size_cells;
-static int form1_affinity;
+
+#define FORM0_AFFINITY 0
+#define FORM1_AFFINITY 1
+#define FORM2_AFFINITY 2
+static int affinity_form;
#define MAX_DISTANCE_REF_POINTS 4
static int distance_ref_points_depth;
static const __be32 *distance_ref_points;
static int distance_lookup_table[MAX_NUMNODES][MAX_DISTANCE_REF_POINTS];
+static int numa_distance_table[MAX_NUMNODES][MAX_NUMNODES] = {
+ [0 ... MAX_NUMNODES - 1] = { [0 ... MAX_NUMNODES - 1] = -1 }
+};
+static int numa_id_index_table[MAX_NUMNODES] = { [0 ... MAX_NUMNODES - 1] = NUMA_NO_NODE };
/*
* Allocate node_to_cpumask_map based on number of available nodes
@@ -79,7 +83,7 @@ static void __init setup_node_to_cpumask_map(void)
alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]);
/* cpumask_of_node() will now work */
- dbg("Node to cpumask map for %u nodes\n", nr_node_ids);
+ pr_debug("Node to cpumask map for %u nodes\n", nr_node_ids);
}
static int __init fake_numa_create_new_node(unsigned long end_pfn,
@@ -123,13 +127,13 @@ static int __init fake_numa_create_new_node(unsigned long end_pfn,
cmdline = p;
fake_nid++;
*nid = fake_nid;
- dbg("created new fake_node with id %d\n", fake_nid);
+ pr_debug("created new fake_node with id %d\n", fake_nid);
return 1;
}
return 0;
}
-static void reset_numa_cpu_lookup_table(void)
+static void __init reset_numa_cpu_lookup_table(void)
{
unsigned int cpu;
@@ -137,33 +141,79 @@ static void reset_numa_cpu_lookup_table(void)
numa_cpu_lookup_table[cpu] = -1;
}
-static void map_cpu_to_node(int cpu, int node)
+void map_cpu_to_node(int cpu, int node)
{
update_numa_cpu_lookup_table(cpu, node);
- dbg("adding cpu %d to node %d\n", cpu, node);
-
- if (!(cpumask_test_cpu(cpu, node_to_cpumask_map[node])))
+ if (!(cpumask_test_cpu(cpu, node_to_cpumask_map[node]))) {
+ pr_debug("adding cpu %d to node %d\n", cpu, node);
cpumask_set_cpu(cpu, node_to_cpumask_map[node]);
+ }
}
#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_PPC_SPLPAR)
-static void unmap_cpu_from_node(unsigned long cpu)
+void unmap_cpu_from_node(unsigned long cpu)
{
int node = numa_cpu_lookup_table[cpu];
- dbg("removing cpu %lu from node %d\n", cpu, node);
-
if (cpumask_test_cpu(cpu, node_to_cpumask_map[node])) {
cpumask_clear_cpu(cpu, node_to_cpumask_map[node]);
+ pr_debug("removing cpu %lu from node %d\n", cpu, node);
} else {
- printk(KERN_ERR "WARNING: cpu %lu not found in node %d\n",
- cpu, node);
+ pr_warn("Warning: cpu %lu not found in node %d\n", cpu, node);
}
}
#endif /* CONFIG_HOTPLUG_CPU || CONFIG_PPC_SPLPAR */
-int cpu_distance(__be32 *cpu1_assoc, __be32 *cpu2_assoc)
+static int __associativity_to_nid(const __be32 *associativity,
+ int max_array_sz)
+{
+ int nid;
+ /*
+ * primary_domain_index is 1 based array index.
+ */
+ int index = primary_domain_index - 1;
+
+ if (!numa_enabled || index >= max_array_sz)
+ return NUMA_NO_NODE;
+
+ nid = of_read_number(&associativity[index], 1);
+
+ /* POWER4 LPAR uses 0xffff as invalid node */
+ if (nid == 0xffff || nid >= nr_node_ids)
+ nid = NUMA_NO_NODE;
+ return nid;
+}
+/*
+ * Returns nid in the range [0..nr_node_ids], or -1 if no useful NUMA
+ * info is found.
+ */
+static int associativity_to_nid(const __be32 *associativity)
+{
+ int array_sz = of_read_number(associativity, 1);
+
+ /* Skip the first element in the associativity array */
+ return __associativity_to_nid((associativity + 1), array_sz);
+}
+
+static int __cpu_form2_relative_distance(__be32 *cpu1_assoc, __be32 *cpu2_assoc)
+{
+ int dist;
+ int node1, node2;
+
+ node1 = associativity_to_nid(cpu1_assoc);
+ node2 = associativity_to_nid(cpu2_assoc);
+
+ dist = numa_distance_table[node1][node2];
+ if (dist <= LOCAL_DISTANCE)
+ return 0;
+ else if (dist <= REMOTE_DISTANCE)
+ return 1;
+ else
+ return 2;
+}
+
+static int __cpu_form1_relative_distance(__be32 *cpu1_assoc, __be32 *cpu2_assoc)
{
int dist = 0;
@@ -179,6 +229,15 @@ int cpu_distance(__be32 *cpu1_assoc, __be32 *cpu2_assoc)
return dist;
}
+int cpu_relative_distance(__be32 *cpu1_assoc, __be32 *cpu2_assoc)
+{
+ /* We should not get called with FORM0 */
+ VM_WARN_ON(affinity_form == FORM0_AFFINITY);
+ if (affinity_form == FORM1_AFFINITY)
+ return __cpu_form1_relative_distance(cpu1_assoc, cpu2_assoc);
+ return __cpu_form2_relative_distance(cpu1_assoc, cpu2_assoc);
+}
+
/* must hold reference to node during call */
static const __be32 *of_get_associativity(struct device_node *dev)
{
@@ -190,7 +249,9 @@ int __node_distance(int a, int b)
int i;
int distance = LOCAL_DISTANCE;
- if (!form1_affinity)
+ if (affinity_form == FORM2_AFFINITY)
+ return numa_distance_table[a][b];
+ else if (affinity_form == FORM0_AFFINITY)
return ((a == b) ? LOCAL_DISTANCE : REMOTE_DISTANCE);
for (i = 0; i < distance_ref_points_depth; i++) {
@@ -205,51 +266,6 @@ int __node_distance(int a, int b)
}
EXPORT_SYMBOL(__node_distance);
-static void initialize_distance_lookup_table(int nid,
- const __be32 *associativity)
-{
- int i;
-
- if (!form1_affinity)
- return;
-
- for (i = 0; i < distance_ref_points_depth; i++) {
- const __be32 *entry;
-
- entry = &associativity[be32_to_cpu(distance_ref_points[i]) - 1];
- distance_lookup_table[nid][i] = of_read_number(entry, 1);
- }
-}
-
-/* Returns nid in the range [0..MAX_NUMNODES-1], or -1 if no useful numa
- * info is found.
- */
-static int associativity_to_nid(const __be32 *associativity)
-{
- int nid = NUMA_NO_NODE;
-
- if (!numa_enabled)
- goto out;
-
- if (of_read_number(associativity, 1) >= min_common_depth)
- nid = of_read_number(&associativity[min_common_depth], 1);
-
- /* POWER4 LPAR uses 0xffff as invalid node */
- if (nid == 0xffff || nid >= MAX_NUMNODES)
- nid = NUMA_NO_NODE;
-
- if (nid > 0 &&
- of_read_number(associativity, 1) >= distance_ref_points_depth) {
- /*
- * Skip the length field and send start of associativity array
- */
- initialize_distance_lookup_table(nid, associativity + 1);
- }
-
-out:
- return nid;
-}
-
/* Returns the nid associated with the given device tree node,
* or -1 if not found.
*/
@@ -283,10 +299,154 @@ int of_node_to_nid(struct device_node *device)
}
EXPORT_SYMBOL(of_node_to_nid);
-static int __init find_min_common_depth(void)
+static void __initialize_form1_numa_distance(const __be32 *associativity,
+ int max_array_sz)
{
- int depth;
+ int i, nid;
+
+ if (affinity_form != FORM1_AFFINITY)
+ return;
+
+ nid = __associativity_to_nid(associativity, max_array_sz);
+ if (nid != NUMA_NO_NODE) {
+ for (i = 0; i < distance_ref_points_depth; i++) {
+ const __be32 *entry;
+ int index = be32_to_cpu(distance_ref_points[i]) - 1;
+
+ /*
+ * broken hierarchy, return with broken distance table
+ */
+ if (WARN(index >= max_array_sz, "Broken ibm,associativity property"))
+ return;
+
+ entry = &associativity[index];
+ distance_lookup_table[nid][i] = of_read_number(entry, 1);
+ }
+ }
+}
+
+static void initialize_form1_numa_distance(const __be32 *associativity)
+{
+ int array_sz;
+
+ array_sz = of_read_number(associativity, 1);
+ /* Skip the first element in the associativity array */
+ __initialize_form1_numa_distance(associativity + 1, array_sz);
+}
+
+/*
+ * Used to update distance information w.r.t newly added node.
+ */
+void update_numa_distance(struct device_node *node)
+{
+ int nid;
+
+ if (affinity_form == FORM0_AFFINITY)
+ return;
+ else if (affinity_form == FORM1_AFFINITY) {
+ const __be32 *associativity;
+
+ associativity = of_get_associativity(node);
+ if (!associativity)
+ return;
+
+ initialize_form1_numa_distance(associativity);
+ return;
+ }
+
+ /* FORM2 affinity */
+ nid = of_node_to_nid_single(node);
+ if (nid == NUMA_NO_NODE)
+ return;
+
+ /*
+ * With FORM2 we expect NUMA distance of all possible NUMA
+ * nodes to be provided during boot.
+ */
+ WARN(numa_distance_table[nid][nid] == -1,
+ "NUMA distance details for node %d not provided\n", nid);
+}
+
+/*
+ * ibm,numa-lookup-index-table= {N, domainid1, domainid2, ..... domainidN}
+ * ibm,numa-distance-table = { N, 1, 2, 4, 5, 1, 6, .... N elements}
+ */
+static void __init initialize_form2_numa_distance_lookup_table(void)
+{
+ int i, j;
struct device_node *root;
+ const __u8 *form2_distances;
+ const __be32 *numa_lookup_index;
+ int form2_distances_length;
+ int max_numa_index, distance_index;
+
+ if (firmware_has_feature(FW_FEATURE_OPAL))
+ root = of_find_node_by_path("/ibm,opal");
+ else
+ root = of_find_node_by_path("/rtas");
+ if (!root)
+ root = of_find_node_by_path("/");
+
+ numa_lookup_index = of_get_property(root, "ibm,numa-lookup-index-table", NULL);
+ max_numa_index = of_read_number(&numa_lookup_index[0], 1);
+
+ /* first element of the array is the size and is encode-int */
+ form2_distances = of_get_property(root, "ibm,numa-distance-table", NULL);
+ form2_distances_length = of_read_number((const __be32 *)&form2_distances[0], 1);
+ /* Skip the size which is encoded int */
+ form2_distances += sizeof(__be32);
+
+ pr_debug("form2_distances_len = %d, numa_dist_indexes_len = %d\n",
+ form2_distances_length, max_numa_index);
+
+ for (i = 0; i < max_numa_index; i++)
+ /* +1 skip the max_numa_index in the property */
+ numa_id_index_table[i] = of_read_number(&numa_lookup_index[i + 1], 1);
+
+
+ if (form2_distances_length != max_numa_index * max_numa_index) {
+ WARN(1, "Wrong NUMA distance information\n");
+ form2_distances = NULL; // don't use it
+ }
+ distance_index = 0;
+ for (i = 0; i < max_numa_index; i++) {
+ for (j = 0; j < max_numa_index; j++) {
+ int nodeA = numa_id_index_table[i];
+ int nodeB = numa_id_index_table[j];
+ int dist;
+
+ if (form2_distances)
+ dist = form2_distances[distance_index++];
+ else if (nodeA == nodeB)
+ dist = LOCAL_DISTANCE;
+ else
+ dist = REMOTE_DISTANCE;
+ numa_distance_table[nodeA][nodeB] = dist;
+ pr_debug("dist[%d][%d]=%d ", nodeA, nodeB, dist);
+ }
+ }
+
+ of_node_put(root);
+}
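
To make the property layout parsed above concrete, a small worked example — node ids and distance values are invented:

    /*
     * For two nodes with ids 0 and 4:
     *
     *   ibm,numa-lookup-index-table = <2 0 4>;
     *   ibm,numa-distance-table     = <4>, then bytes 10 20 20 10;
     *
     * the loop fills the matrix as:
     *   numa_distance_table[0][0] = 10;   numa_distance_table[0][4] = 20;
     *   numa_distance_table[4][0] = 20;   numa_distance_table[4][4] = 10;
     */
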
+
+static int __init find_primary_domain_index(void)
+{
+ int index;
+ struct device_node *root;
+
+ /*
+ * Check for which form of affinity.
+ */
+ if (firmware_has_feature(FW_FEATURE_OPAL)) {
+ affinity_form = FORM1_AFFINITY;
+ } else if (firmware_has_feature(FW_FEATURE_FORM2_AFFINITY)) {
+ pr_debug("Using form 2 affinity\n");
+ affinity_form = FORM2_AFFINITY;
+ } else if (firmware_has_feature(FW_FEATURE_FORM1_AFFINITY)) {
+ pr_debug("Using form 1 affinity\n");
+ affinity_form = FORM1_AFFINITY;
+ } else
+ affinity_form = FORM0_AFFINITY;
if (firmware_has_feature(FW_FEATURE_OPAL))
root = of_find_node_by_path("/ibm,opal");
@@ -312,42 +472,37 @@ static int __init find_min_common_depth(void)
&distance_ref_points_depth);
if (!distance_ref_points) {
- dbg("NUMA: ibm,associativity-reference-points not found.\n");
+ pr_debug("ibm,associativity-reference-points not found.\n");
goto err;
}
distance_ref_points_depth /= sizeof(int);
-
- if (firmware_has_feature(FW_FEATURE_OPAL) ||
- firmware_has_feature(FW_FEATURE_TYPE1_AFFINITY)) {
- dbg("Using form 1 affinity\n");
- form1_affinity = 1;
- }
-
- if (form1_affinity) {
- depth = of_read_number(distance_ref_points, 1);
- } else {
+ if (affinity_form == FORM0_AFFINITY) {
if (distance_ref_points_depth < 2) {
- printk(KERN_WARNING "NUMA: "
- "short ibm,associativity-reference-points\n");
+ pr_warn("short ibm,associativity-reference-points\n");
goto err;
}
- depth = of_read_number(&distance_ref_points[1], 1);
+ index = of_read_number(&distance_ref_points[1], 1);
+ } else {
+ /*
+ * Both FORM1 and FORM2 affinity find the primary domain details
+ * at the same offset.
+ */
+ index = of_read_number(distance_ref_points, 1);
}
-
/*
* Warn and cap if the hardware supports more than
* MAX_DISTANCE_REF_POINTS domains.
*/
if (distance_ref_points_depth > MAX_DISTANCE_REF_POINTS) {
- printk(KERN_WARNING "NUMA: distance array capped at "
- "%d entries\n", MAX_DISTANCE_REF_POINTS);
+ pr_warn("distance array capped at %d entries\n",
+ MAX_DISTANCE_REF_POINTS);
distance_ref_points_depth = MAX_DISTANCE_REF_POINTS;
}
of_node_put(root);
- return depth;
+ return index;
err:
of_node_put(root);
@@ -425,41 +580,119 @@ static int of_get_assoc_arrays(struct assoc_arrays *aa)
return 0;
}
+static int __init get_nid_and_numa_distance(struct drmem_lmb *lmb)
+{
+ struct assoc_arrays aa = { .arrays = NULL };
+ int default_nid = NUMA_NO_NODE;
+ int nid = default_nid;
+ int rc, index;
+
+ if ((primary_domain_index < 0) || !numa_enabled)
+ return default_nid;
+
+ rc = of_get_assoc_arrays(&aa);
+ if (rc)
+ return default_nid;
+
+ if (primary_domain_index <= aa.array_sz &&
+ !(lmb->flags & DRCONF_MEM_AI_INVALID) && lmb->aa_index < aa.n_arrays) {
+ const __be32 *associativity;
+
+ index = lmb->aa_index * aa.array_sz;
+ associativity = &aa.arrays[index];
+ nid = __associativity_to_nid(associativity, aa.array_sz);
+ if (nid > 0 && affinity_form == FORM1_AFFINITY) {
+ /*
+ * lookup array associativity entries have
+			 * entries in the associativity lookup array do not
+			 * have the array length as their first element.
+ __initialize_form1_numa_distance(associativity, aa.array_sz);
+ }
+ }
+ return nid;
+}
+
/*
* This is like of_node_to_nid_single() for memory represented in the
* ibm,dynamic-reconfiguration-memory node.
*/
-static int of_drconf_to_nid_single(struct drmem_lmb *lmb)
+int of_drconf_to_nid_single(struct drmem_lmb *lmb)
{
struct assoc_arrays aa = { .arrays = NULL };
int default_nid = NUMA_NO_NODE;
int nid = default_nid;
int rc, index;
- if ((min_common_depth < 0) || !numa_enabled)
+ if ((primary_domain_index < 0) || !numa_enabled)
return default_nid;
rc = of_get_assoc_arrays(&aa);
if (rc)
return default_nid;
- if (min_common_depth <= aa.array_sz &&
+ if (primary_domain_index <= aa.array_sz &&
!(lmb->flags & DRCONF_MEM_AI_INVALID) && lmb->aa_index < aa.n_arrays) {
- index = lmb->aa_index * aa.array_sz + min_common_depth - 1;
- nid = of_read_number(&aa.arrays[index], 1);
+ const __be32 *associativity;
- if (nid == 0xffff || nid >= MAX_NUMNODES)
- nid = default_nid;
+ index = lmb->aa_index * aa.array_sz;
+ associativity = &aa.arrays[index];
+ nid = __associativity_to_nid(associativity, aa.array_sz);
+ }
+ return nid;
+}
- if (nid > 0) {
- index = lmb->aa_index * aa.array_sz;
- initialize_distance_lookup_table(nid,
- &aa.arrays[index]);
- }
+#ifdef CONFIG_PPC_SPLPAR
+
+static int __vphn_get_associativity(long lcpu, __be32 *associativity)
+{
+ long rc, hwid;
+
+ /*
+ * On a shared lpar, device tree will not have node associativity.
+ * At this time lppaca, or its __old_status field may not be
+ * updated. Hence kernel cannot detect if its on a shared lpar. So
+	 * updated. Hence the kernel cannot detect if it's on a shared lpar. So
+ * lpar is shared or dedicated. Use the device tree property as a
+ * fallback. cpu_to_phys_id is only valid between
+ * smp_setup_cpu_maps() and smp_setup_pacas().
+ */
+ if (firmware_has_feature(FW_FEATURE_VPHN)) {
+ if (cpu_to_phys_id)
+ hwid = cpu_to_phys_id[lcpu];
+ else
+ hwid = get_hard_smp_processor_id(lcpu);
+
+ rc = hcall_vphn(hwid, VPHN_FLAG_VCPU, associativity);
+ if (rc == H_SUCCESS)
+ return 0;
}
- return nid;
+ return -1;
+}
+
+static int vphn_get_nid(long lcpu)
+{
+ __be32 associativity[VPHN_ASSOC_BUFSIZE] = {0};
+
+ if (!__vphn_get_associativity(lcpu, associativity))
+ return associativity_to_nid(associativity);
+
+ return NUMA_NO_NODE;
}
+#else
+
+static int __vphn_get_associativity(long lcpu, __be32 *associativity)
+{
+ return -1;
+}
+
+static int vphn_get_nid(long unused)
+{
+ return NUMA_NO_NODE;
+}
+#endif /* CONFIG_PPC_SPLPAR */
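A worked example of the buffer consumed here (values hypothetical; the layout is inferred from how associativity_to_nid() and cpu_to_coregroup_id() read a leading count cell and then index into the domain ids):

	/*
	 * __vphn_get_associativity() fills 'associativity' in the same
	 * layout as the ibm,associativity property, e.g.:
	 *
	 *   associativity = <5 0 0 0 2 6>
	 *                    ^ 5 domain ids follow, most general first
	 *
	 * With primary_domain_index == 4, associativity_to_nid() returns
	 * associativity[4] == 2.
	 */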
/*
* Figure out to which domain a cpu belongs and stick it there.
@@ -467,19 +700,33 @@ static int of_drconf_to_nid_single(struct drmem_lmb *lmb)
*/
static int numa_setup_cpu(unsigned long lcpu)
{
- int nid = NUMA_NO_NODE;
struct device_node *cpu;
+ int fcpu = cpu_first_thread_sibling(lcpu);
+ int nid = NUMA_NO_NODE;
+
+ if (!cpu_present(lcpu)) {
+ set_cpu_numa_node(lcpu, first_online_node);
+ return first_online_node;
+ }
/*
* If a valid cpu-to-node mapping is already available, use it
* directly instead of querying the firmware, since it represents
* the most recent mapping notified to us by the platform (eg: VPHN).
+	 * The cpu_to_node binding remains the same for all threads in a
+	 * core, so if a valid cpu-to-node mapping is already available
+	 * for the first thread in the core, use it.
*/
- if ((nid = numa_cpu_lookup_table[lcpu]) >= 0) {
+ nid = numa_cpu_lookup_table[fcpu];
+ if (nid >= 0) {
map_cpu_to_node(lcpu, nid);
return nid;
}
+ nid = vphn_get_nid(lcpu);
+ if (nid != NUMA_NO_NODE)
+ goto out_present;
+
cpu = of_get_cpu_node(lcpu, NULL);
if (!cpu) {
@@ -491,13 +738,26 @@ static int numa_setup_cpu(unsigned long lcpu)
}
nid = of_node_to_nid_single(cpu);
+ of_node_put(cpu);
out_present:
if (nid < 0 || !node_possible(nid))
nid = first_online_node;
+ /*
+ * Update for the first thread of the core. All threads of a core
+	 * have to be part of the same node. This not only avoids querying
+	 * for every other thread in the core, but also avoids a case
+	 * where a virtual node associativity change causes subsequent
+	 * threads of a core to be associated with a different nid.
+	 * However, if the first thread is already online, expect it to
+	 * have a valid mapping.
+ */
+ if (fcpu != lcpu) {
+ WARN_ON(cpu_online(fcpu));
+ map_cpu_to_node(fcpu, nid);
+ }
+
map_cpu_to_node(lcpu, nid);
- of_node_put(cpu);
out:
return nid;
}
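As a quick worked example of the sibling handling above (SMT8 assumed, purely illustrative):

	/*
	 * With threads_per_core == 8, cpus 8..15 form one core, so
	 * cpu_first_thread_sibling(13) == 8.  numa_setup_cpu(13) therefore
	 * reuses the nid already recorded for cpu 8 if there is one, and
	 * otherwise records the freshly resolved nid for cpu 8 as well,
	 * keeping every thread of the core on the same node.
	 */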
@@ -535,9 +795,6 @@ static int ppc_numa_cpu_prepare(unsigned int cpu)
static int ppc_numa_cpu_dead(unsigned int cpu)
{
-#ifdef CONFIG_HOTPLUG_CPU
- unmap_cpu_from_node(cpu);
-#endif
return 0;
}
@@ -587,8 +844,9 @@ static inline int __init read_usm_ranges(const __be32 **usm)
* Extract NUMA information from the ibm,dynamic-reconfiguration-memory
* node. This assumes n_mem_{addr,size}_cells have been set.
*/
-static void __init numa_setup_drmem_lmb(struct drmem_lmb *lmb,
- const __be32 **usm)
+static int __init numa_setup_drmem_lmb(struct drmem_lmb *lmb,
+ const __be32 **usm,
+ void *data)
{
unsigned int ranges, is_kexec_kdump = 0;
unsigned long base, size, sz;
@@ -600,7 +858,7 @@ static void __init numa_setup_drmem_lmb(struct drmem_lmb *lmb,
*/
if ((lmb->flags & DRCONF_MEM_RESERVED)
|| !(lmb->flags & DRCONF_MEM_ASSIGNED))
- return;
+ return 0;
if (*usm)
is_kexec_kdump = 1;
@@ -612,7 +870,7 @@ static void __init numa_setup_drmem_lmb(struct drmem_lmb *lmb,
if (is_kexec_kdump) {
ranges = read_usm_ranges(usm);
 		if (!ranges) /* there are no (base, size) tuples */
- return;
+ return 0;
}
do {
@@ -621,7 +879,7 @@ static void __init numa_setup_drmem_lmb(struct drmem_lmb *lmb,
size = read_n_cells(n_mem_size_cells, usm);
}
- nid = of_drconf_to_nid_single(lmb);
+ nid = get_nid_and_numa_distance(lmb);
fake_numa_create_new_node(((base + size) >> PAGE_SHIFT),
&nid);
node_set_online(nid);
@@ -629,6 +887,8 @@ static void __init numa_setup_drmem_lmb(struct drmem_lmb *lmb,
if (sz)
memblock_set_node(base, sz, &memblock.memory, nid);
} while (--ranges);
+
+ return 0;
}
static int __init parse_numa_properties(void)
@@ -636,24 +896,31 @@ static int __init parse_numa_properties(void)
struct device_node *memory;
int default_nid = 0;
unsigned long i;
+ const __be32 *associativity;
if (numa_enabled == 0) {
- printk(KERN_WARNING "NUMA disabled by user\n");
+ pr_warn("disabled by user\n");
return -1;
}
- min_common_depth = find_min_common_depth();
+ primary_domain_index = find_primary_domain_index();
- if (min_common_depth < 0) {
+ if (primary_domain_index < 0) {
/*
- * if we fail to parse min_common_depth from device tree
+		 * If we fail to parse primary_domain_index from the device
 		 * tree, mark NUMA as disabled and boot with NUMA disabled.
*/
numa_enabled = false;
- return min_common_depth;
+ return primary_domain_index;
}
- dbg("NUMA associativity depth for CPU/Memory: %d\n", min_common_depth);
+ pr_debug("associativity depth for CPU/Memory: %d\n", primary_domain_index);
+
+ /*
+	 * If it is FORM2 affinity, initialize the distance table here.
+ */
+ if (affinity_form == FORM2_AFFINITY)
+ initialize_form2_numa_distance_lookup_table();
/*
* Even though we connect cpus to numa domains later in SMP
@@ -661,22 +928,36 @@ static int __init parse_numa_properties(void)
* each node to be onlined must have NODE_DATA etc backing it.
*/
for_each_present_cpu(i) {
+ __be32 vphn_assoc[VPHN_ASSOC_BUFSIZE];
struct device_node *cpu;
- int nid;
-
- cpu = of_get_cpu_node(i, NULL);
- BUG_ON(!cpu);
- nid = of_node_to_nid_single(cpu);
- of_node_put(cpu);
+ int nid = NUMA_NO_NODE;
+
+ memset(vphn_assoc, 0, VPHN_ASSOC_BUFSIZE * sizeof(__be32));
+
+ if (__vphn_get_associativity(i, vphn_assoc) == 0) {
+ nid = associativity_to_nid(vphn_assoc);
+ initialize_form1_numa_distance(vphn_assoc);
+ } else {
+ /*
+ * Don't fall back to default_nid yet -- we will plug
+ * cpus into nodes once the memory scan has discovered
+ * the topology.
+ */
+ cpu = of_get_cpu_node(i, NULL);
+ BUG_ON(!cpu);
+
+ associativity = of_get_associativity(cpu);
+ if (associativity) {
+ nid = associativity_to_nid(associativity);
+ initialize_form1_numa_distance(associativity);
+ }
+ of_node_put(cpu);
+ }
- /*
- * Don't fall back to default_nid yet -- we will plug
- * cpus into nodes once the memory scan has discovered
- * the topology.
- */
- if (nid < 0)
- continue;
- node_set_online(nid);
+		/* node_set_online() is undefined behaviour if 'nid' is negative */
+ if (likely(nid >= 0))
+ node_set_online(nid);
}
get_n_mem_cells(&n_mem_addr_cells, &n_mem_size_cells);
@@ -708,8 +989,11 @@ new_range:
* have associativity properties. If none, then
* everything goes to default_nid.
*/
- nid = of_node_to_nid_single(memory);
- if (nid < 0)
+ associativity = of_get_associativity(memory);
+ if (associativity) {
+ nid = associativity_to_nid(associativity);
+ initialize_form1_numa_distance(associativity);
+ } else
nid = default_nid;
fake_numa_create_new_node(((start + size) >> PAGE_SHIFT), &nid);
@@ -730,7 +1014,7 @@ new_range:
*/
memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
if (memory) {
- walk_drmem_lmbs(memory, numa_setup_drmem_lmb);
+ walk_drmem_lmbs(memory, NULL, numa_setup_drmem_lmb);
of_node_put(memory);
}
@@ -743,17 +1027,12 @@ static void __init setup_nonnuma(void)
unsigned long total_ram = memblock_phys_mem_size();
unsigned long start_pfn, end_pfn;
unsigned int nid = 0;
- struct memblock_region *reg;
-
- printk(KERN_DEBUG "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
- top_of_ram, total_ram);
- printk(KERN_DEBUG "Memory hole size: %ldMB\n",
- (top_of_ram - total_ram) >> 20);
+ int i;
- for_each_memblock(memory, reg) {
- start_pfn = memblock_region_memory_base_pfn(reg);
- end_pfn = memblock_region_memory_end_pfn(reg);
+ pr_debug("Top of RAM: 0x%lx, Total RAM: 0x%lx\n", top_of_ram, total_ram);
+ pr_debug("Memory hole size: %ldMB\n", (top_of_ram - total_ram) >> 20);
+ for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, NULL) {
fake_numa_create_new_node(end_pfn, &nid);
memblock_set_node(PFN_PHYS(start_pfn),
PFN_PHYS(end_pfn - start_pfn),
@@ -830,7 +1109,9 @@ static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn)
static void __init find_possible_nodes(void)
{
struct device_node *rtas;
- u32 numnodes, i;
+ const __be32 *domains = NULL;
+ int prop_length, max_nodes;
+ u32 i;
if (!numa_enabled)
return;
@@ -839,16 +1120,38 @@ static void __init find_possible_nodes(void)
if (!rtas)
return;
- if (of_property_read_u32_index(rtas,
- "ibm,max-associativity-domains",
- min_common_depth, &numnodes))
- goto out;
+ /*
+ * ibm,current-associativity-domains is a fairly recent property. If
+	 * it doesn't exist, fall back to ibm,max-associativity-domains.
+	 * Current denotes what the platform can support, whereas max
+	 * denotes what the hypervisor can support.
+	 *
+	 * If the LPAR is migratable, new nodes might be activated after an
+	 * LPM, so we should consider the max number in that case.
+ */
+ if (!of_get_property(of_root, "ibm,migratable-partition", NULL))
+ domains = of_get_property(rtas,
+ "ibm,current-associativity-domains",
+ &prop_length);
+ if (!domains) {
+ domains = of_get_property(rtas, "ibm,max-associativity-domains",
+ &prop_length);
+ if (!domains)
+ goto out;
+ }
- for (i = 0; i < numnodes; i++) {
+ max_nodes = of_read_number(&domains[primary_domain_index], 1);
+ pr_info("Partition configured for %d NUMA nodes.\n", max_nodes);
+
+ for (i = 0; i < max_nodes; i++) {
if (!node_possible(i))
node_set(i, node_possible_map);
}
+ prop_length /= sizeof(int);
+ if (prop_length > primary_domain_index + 2)
+ coregroup_enabled = 1;
+
out:
of_node_put(rtas);
}
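A worked example of the property parsing above, with purely hypothetical device tree values:

	/*
	 * Suppose primary_domain_index == 4 and the property read above is
	 *
	 *   ibm,current-associativity-domains = <4 2 4 4 32 8 2>
	 *
	 * prop_length is then 7 ints, max_nodes = of_read_number(&domains[4], 1)
	 * == 32, so up to 32 nodes are marked possible, and since 7 > 4 + 2
	 * the extra levels set coregroup_enabled.
	 */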
@@ -857,6 +1160,19 @@ void __init mem_topology_setup(void)
{
int cpu;
+ max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
+ min_low_pfn = MEMORY_START >> PAGE_SHIFT;
+
+ /*
+	 * Linux/mm assumes node 0 to be online at boot. However, this is
+	 * not true on PowerPC, where node 0 is similar to any other node:
+	 * it could be a cpuless, memoryless node. So force node 0 to be
+	 * offline for now. This prevents a cpuless, memoryless node 0 from
+	 * showing up as online unnecessarily. If a node has cpus or memory
+	 * that need to be online, the node will be marked online anyway.
+ */
+ node_set_offline(0);
+
if (parse_numa_properties())
setup_nonnuma();
@@ -874,17 +1190,23 @@ void __init mem_topology_setup(void)
reset_numa_cpu_lookup_table();
- for_each_present_cpu(cpu)
+ for_each_possible_cpu(cpu) {
+ /*
+ * Powerpc with CONFIG_NUMA always used to have a node 0,
+ * even if it was memoryless or cpuless. For all cpus that
+ * are possible but not present, cpu_to_node() would point
+		 * to node 0. To remove a cpuless, memoryless dummy node,
+		 * powerpc needs to make sure the cpu_to_node mapping of all
+		 * possible but not present cpus is set to a proper node.
+ */
numa_setup_cpu(cpu);
+ }
}
void __init initmem_init(void)
{
int nid;
- max_low_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
- max_pfn = max_low_pfn;
-
memblock_dump_all();
for_each_online_node(nid) {
@@ -892,7 +1214,6 @@ void __init initmem_init(void)
get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
setup_node_data(nid, start_pfn, end_pfn);
- sparse_memory_present_with_active_regions(nid);
}
sparse_init();
@@ -916,9 +1237,6 @@ static int __init early_numa(char *p)
if (strstr(p, "off"))
numa_enabled = 0;
- if (strstr(p, "debug"))
- numa_debug = 1;
-
p = strstr(p, "fake=");
if (p)
cmdline = p + strlen("fake=");
@@ -927,28 +1245,6 @@ static int __init early_numa(char *p)
}
early_param("numa", early_numa);
-/*
- * The platform can inform us through one of several mechanisms
- * (post-migration device tree updates, PRRN or VPHN) that the NUMA
- * assignment of a resource has changed. This controls whether we act
- * on that. Disabled by default.
- */
-static bool topology_updates_enabled;
-
-static int __init early_topology_updates(char *p)
-{
- if (!p)
- return 0;
-
- if (!strcmp(p, "on")) {
- pr_warn("Caution: enabling topology updates\n");
- topology_updates_enabled = true;
- }
-
- return 0;
-}
-early_param("topology_updates", early_topology_updates);
-
#ifdef CONFIG_MEMORY_HOTPLUG
/*
* Find the node associated with a hot added memory section for
@@ -1087,98 +1383,9 @@ u64 memory_hotplug_max(void)
/* Virtual Processor Home Node (VPHN) support */
#ifdef CONFIG_PPC_SPLPAR
-struct topology_update_data {
- struct topology_update_data *next;
- unsigned int cpu;
- int old_nid;
- int new_nid;
-};
-
-#define TOPOLOGY_DEF_TIMER_SECS 60
-
-static u8 vphn_cpu_change_counts[NR_CPUS][MAX_DISTANCE_REF_POINTS];
-static cpumask_t cpu_associativity_changes_mask;
-static int vphn_enabled;
-static int prrn_enabled;
-static void reset_topology_timer(void);
-static int topology_timer_secs = 1;
static int topology_inited;
/*
- * Change polling interval for associativity changes.
- */
-int timed_topology_update(int nsecs)
-{
- if (vphn_enabled) {
- if (nsecs > 0)
- topology_timer_secs = nsecs;
- else
- topology_timer_secs = TOPOLOGY_DEF_TIMER_SECS;
-
- reset_topology_timer();
- }
-
- return 0;
-}
-
-/*
- * Store the current values of the associativity change counters in the
- * hypervisor.
- */
-static void setup_cpu_associativity_change_counters(void)
-{
- int cpu;
-
- /* The VPHN feature supports a maximum of 8 reference points */
- BUILD_BUG_ON(MAX_DISTANCE_REF_POINTS > 8);
-
- for_each_possible_cpu(cpu) {
- int i;
- u8 *counts = vphn_cpu_change_counts[cpu];
- volatile u8 *hypervisor_counts = lppaca_of(cpu).vphn_assoc_counts;
-
- for (i = 0; i < distance_ref_points_depth; i++)
- counts[i] = hypervisor_counts[i];
- }
-}
-
-/*
- * The hypervisor maintains a set of 8 associativity change counters in
- * the VPA of each cpu that correspond to the associativity levels in the
- * ibm,associativity-reference-points property. When an associativity
- * level changes, the corresponding counter is incremented.
- *
- * Set a bit in cpu_associativity_changes_mask for each cpu whose home
- * node associativity levels have changed.
- *
- * Returns the number of cpus with unhandled associativity changes.
- */
-static int update_cpu_associativity_changes_mask(void)
-{
- int cpu;
- cpumask_t *changes = &cpu_associativity_changes_mask;
-
- for_each_possible_cpu(cpu) {
- int i, changed = 0;
- u8 *counts = vphn_cpu_change_counts[cpu];
- volatile u8 *hypervisor_counts = lppaca_of(cpu).vphn_assoc_counts;
-
- for (i = 0; i < distance_ref_points_depth; i++) {
- if (hypervisor_counts[i] != counts[i]) {
- counts[i] = hypervisor_counts[i];
- changed = 1;
- }
- }
- if (changed) {
- cpumask_or(changes, changes, cpu_sibling_mask(cpu));
- cpu = cpu_last_thread_sibling(cpu);
- }
- }
-
- return cpumask_weight(changes);
-}
-
-/*
* Retrieve the new associativity information for a virtual processor's
* home node.
*/
@@ -1191,448 +1398,79 @@ static long vphn_get_associativity(unsigned long cpu,
VPHN_FLAG_VCPU, associativity);
switch (rc) {
+ case H_SUCCESS:
+ pr_debug("VPHN hcall succeeded. Reset polling...\n");
+ goto out;
+
case H_FUNCTION:
- printk_once(KERN_INFO
- "VPHN is not supported. Disabling polling...\n");
- stop_topology_update();
+ pr_err_ratelimited("VPHN unsupported. Disabling polling...\n");
break;
case H_HARDWARE:
- printk(KERN_ERR
- "hcall_vphn() experienced a hardware fault "
+ pr_err_ratelimited("hcall_vphn() experienced a hardware fault "
"preventing VPHN. Disabling polling...\n");
- stop_topology_update();
break;
- case H_SUCCESS:
- dbg("VPHN hcall succeeded. Reset polling...\n");
- timed_topology_update(0);
+ case H_PARAMETER:
+ pr_err_ratelimited("hcall_vphn() was passed an invalid parameter. "
+ "Disabling polling...\n");
+ break;
+ default:
+		pr_err_ratelimited("hcall_vphn() returned %ld. Disabling polling...\n",
+				   rc);
break;
}
-
+out:
return rc;
}
-int find_and_online_cpu_nid(int cpu)
+void find_and_update_cpu_nid(int cpu)
{
__be32 associativity[VPHN_ASSOC_BUFSIZE] = {0};
int new_nid;
/* Use associativity from first thread for all siblings */
if (vphn_get_associativity(cpu, associativity))
- return cpu_to_node(cpu);
+ return;
+	/* We do not have a previous associativity, so find it now. */
new_nid = associativity_to_nid(associativity);
- if (new_nid < 0 || !node_possible(new_nid))
- new_nid = first_online_node;
- if (NODE_DATA(new_nid) == NULL) {
-#ifdef CONFIG_MEMORY_HOTPLUG
- /*
- * Need to ensure that NODE_DATA is initialized for a node from
- * available memory (see memblock_alloc_try_nid). If unable to
- * init the node, then default to nearest node that has memory
- * installed. Skip onlining a node if the subsystems are not
- * yet initialized.
- */
- if (!topology_inited || try_online_node(new_nid))
- new_nid = first_online_node;
-#else
- /*
- * Default to using the nearest node that has memory installed.
- * Otherwise, it would be necessary to patch the kernel MM code
- * to deal with more memoryless-node error conditions.
- */
+ if (new_nid < 0 || !node_possible(new_nid))
new_nid = first_online_node;
-#endif
- }
-
- pr_debug("%s:%d cpu %d nid %d\n", __FUNCTION__, __LINE__,
- cpu, new_nid);
- return new_nid;
-}
-
-/*
- * Update the CPU maps and sysfs entries for a single CPU when its NUMA
- * characteristics change. This function doesn't perform any locking and is
- * only safe to call from stop_machine().
- */
-static int update_cpu_topology(void *data)
-{
- struct topology_update_data *update;
- unsigned long cpu;
-
- if (!data)
- return -EINVAL;
-
- cpu = smp_processor_id();
-
- for (update = data; update; update = update->next) {
- int new_nid = update->new_nid;
- if (cpu != update->cpu)
- continue;
-
- unmap_cpu_from_node(cpu);
- map_cpu_to_node(cpu, new_nid);
+ else
+ // Associate node <-> cpu, so cpu_up() calls
+ // try_online_node() on the right node.
set_cpu_numa_node(cpu, new_nid);
- set_cpu_numa_mem(cpu, local_memory_node(new_nid));
- vdso_getcpu_init();
- }
-
- return 0;
-}
-
-static int update_lookup_table(void *data)
-{
- struct topology_update_data *update;
-
- if (!data)
- return -EINVAL;
-
- /*
- * Upon topology update, the numa-cpu lookup table needs to be updated
- * for all threads in the core, including offline CPUs, to ensure that
- * future hotplug operations respect the cpu-to-node associativity
- * properly.
- */
- for (update = data; update; update = update->next) {
- int nid, base, j;
- nid = update->new_nid;
- base = cpu_first_thread_sibling(update->cpu);
-
- for (j = 0; j < threads_per_core; j++) {
- update_numa_cpu_lookup_table(base + j, nid);
- }
- }
-
- return 0;
+ pr_debug("%s:%d cpu %d nid %d\n", __func__, __LINE__, cpu, new_nid);
}
-/*
- * Update the node maps and sysfs entries for each cpu whose home node
- * has changed. Returns 1 when the topology has changed, and 0 otherwise.
- *
- * cpus_locked says whether we already hold cpu_hotplug_lock.
- */
-int numa_update_cpu_topology(bool cpus_locked)
+int cpu_to_coregroup_id(int cpu)
{
- unsigned int cpu, sibling, changed = 0;
- struct topology_update_data *updates, *ud;
- cpumask_t updated_cpus;
- struct device *dev;
- int weight, new_nid, i = 0;
-
- if (!prrn_enabled && !vphn_enabled && topology_inited)
- return 0;
-
- weight = cpumask_weight(&cpu_associativity_changes_mask);
- if (!weight)
- return 0;
-
- updates = kcalloc(weight, sizeof(*updates), GFP_KERNEL);
- if (!updates)
- return 0;
-
- cpumask_clear(&updated_cpus);
-
- for_each_cpu(cpu, &cpu_associativity_changes_mask) {
- /*
- * If siblings aren't flagged for changes, updates list
- * will be too short. Skip on this update and set for next
- * update.
- */
- if (!cpumask_subset(cpu_sibling_mask(cpu),
- &cpu_associativity_changes_mask)) {
- pr_info("Sibling bits not set for associativity "
- "change, cpu%d\n", cpu);
- cpumask_or(&cpu_associativity_changes_mask,
- &cpu_associativity_changes_mask,
- cpu_sibling_mask(cpu));
- cpu = cpu_last_thread_sibling(cpu);
- continue;
- }
-
- new_nid = find_and_online_cpu_nid(cpu);
-
- if (new_nid == numa_cpu_lookup_table[cpu]) {
- cpumask_andnot(&cpu_associativity_changes_mask,
- &cpu_associativity_changes_mask,
- cpu_sibling_mask(cpu));
- dbg("Assoc chg gives same node %d for cpu%d\n",
- new_nid, cpu);
- cpu = cpu_last_thread_sibling(cpu);
- continue;
- }
+ __be32 associativity[VPHN_ASSOC_BUFSIZE] = {0};
+ int index;
- for_each_cpu(sibling, cpu_sibling_mask(cpu)) {
- ud = &updates[i++];
- ud->next = &updates[i];
- ud->cpu = sibling;
- ud->new_nid = new_nid;
- ud->old_nid = numa_cpu_lookup_table[sibling];
- cpumask_set_cpu(sibling, &updated_cpus);
- }
- cpu = cpu_last_thread_sibling(cpu);
- }
+ if (cpu < 0 || cpu > nr_cpu_ids)
+ return -1;
- /*
- * Prevent processing of 'updates' from overflowing array
- * where last entry filled in a 'next' pointer.
- */
- if (i)
- updates[i-1].next = NULL;
-
- pr_debug("Topology update for the following CPUs:\n");
- if (cpumask_weight(&updated_cpus)) {
- for (ud = &updates[0]; ud; ud = ud->next) {
- pr_debug("cpu %d moving from node %d "
- "to %d\n", ud->cpu,
- ud->old_nid, ud->new_nid);
- }
- }
+ if (!coregroup_enabled)
+ goto out;
- /*
- * In cases where we have nothing to update (because the updates list
- * is too short or because the new topology is same as the old one),
- * skip invoking update_cpu_topology() via stop-machine(). This is
- * necessary (and not just a fast-path optimization) since stop-machine
- * can end up electing a random CPU to run update_cpu_topology(), and
- * thus trick us into setting up incorrect cpu-node mappings (since
- * 'updates' is kzalloc()'ed).
- *
- * And for the similar reason, we will skip all the following updating.
- */
- if (!cpumask_weight(&updated_cpus))
+ if (!firmware_has_feature(FW_FEATURE_VPHN))
goto out;
- if (cpus_locked)
- stop_machine_cpuslocked(update_cpu_topology, &updates[0],
- &updated_cpus);
- else
- stop_machine(update_cpu_topology, &updates[0], &updated_cpus);
+ if (vphn_get_associativity(cpu, associativity))
+ goto out;
- /*
- * Update the numa-cpu lookup table with the new mappings, even for
- * offline CPUs. It is best to perform this update from the stop-
- * machine context.
- */
- if (cpus_locked)
- stop_machine_cpuslocked(update_lookup_table, &updates[0],
- cpumask_of(raw_smp_processor_id()));
- else
- stop_machine(update_lookup_table, &updates[0],
- cpumask_of(raw_smp_processor_id()));
-
- for (ud = &updates[0]; ud; ud = ud->next) {
- unregister_cpu_under_node(ud->cpu, ud->old_nid);
- register_cpu_under_node(ud->cpu, ud->new_nid);
-
- dev = get_cpu_device(ud->cpu);
- if (dev)
- kobject_uevent(&dev->kobj, KOBJ_CHANGE);
- cpumask_clear_cpu(ud->cpu, &cpu_associativity_changes_mask);
- changed = 1;
- }
+ index = of_read_number(associativity, 1);
+ if (index > primary_domain_index + 1)
+ return of_read_number(&associativity[index - 1], 1);
out:
- kfree(updates);
- return changed;
-}
-
-int arch_update_cpu_topology(void)
-{
- return numa_update_cpu_topology(true);
-}
-
-static void topology_work_fn(struct work_struct *work)
-{
- rebuild_sched_domains();
-}
-static DECLARE_WORK(topology_work, topology_work_fn);
-
-static void topology_schedule_update(void)
-{
- schedule_work(&topology_work);
-}
-
-static void topology_timer_fn(struct timer_list *unused)
-{
- if (prrn_enabled && cpumask_weight(&cpu_associativity_changes_mask))
- topology_schedule_update();
- else if (vphn_enabled) {
- if (update_cpu_associativity_changes_mask() > 0)
- topology_schedule_update();
- reset_topology_timer();
- }
-}
-static struct timer_list topology_timer;
-
-static void reset_topology_timer(void)
-{
- if (vphn_enabled)
- mod_timer(&topology_timer, jiffies + topology_timer_secs * HZ);
-}
-
-#ifdef CONFIG_SMP
-
-static int dt_update_callback(struct notifier_block *nb,
- unsigned long action, void *data)
-{
- struct of_reconfig_data *update = data;
- int rc = NOTIFY_DONE;
-
- switch (action) {
- case OF_RECONFIG_UPDATE_PROPERTY:
- if (of_node_is_type(update->dn, "cpu") &&
- !of_prop_cmp(update->prop->name, "ibm,associativity")) {
- u32 core_id;
- of_property_read_u32(update->dn, "reg", &core_id);
- rc = dlpar_cpu_readd(core_id);
- rc = NOTIFY_OK;
- }
- break;
- }
-
- return rc;
-}
-
-static struct notifier_block dt_update_nb = {
- .notifier_call = dt_update_callback,
-};
-
-#endif
-
-/*
- * Start polling for associativity changes.
- */
-int start_topology_update(void)
-{
- int rc = 0;
-
- if (!topology_updates_enabled)
- return 0;
-
- if (firmware_has_feature(FW_FEATURE_PRRN)) {
- if (!prrn_enabled) {
- prrn_enabled = 1;
-#ifdef CONFIG_SMP
- rc = of_reconfig_notifier_register(&dt_update_nb);
-#endif
- }
- }
- if (firmware_has_feature(FW_FEATURE_VPHN) &&
- lppaca_shared_proc(get_lppaca())) {
- if (!vphn_enabled) {
- vphn_enabled = 1;
- setup_cpu_associativity_change_counters();
- timer_setup(&topology_timer, topology_timer_fn,
- TIMER_DEFERRABLE);
- reset_topology_timer();
- }
- }
-
- pr_info("Starting topology update%s%s\n",
- (prrn_enabled ? " prrn_enabled" : ""),
- (vphn_enabled ? " vphn_enabled" : ""));
-
- return rc;
+ return cpu_to_core_id(cpu);
}
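And a matching worked example for cpu_to_coregroup_id() (values hypothetical):

	/*
	 * Suppose vphn_get_associativity() fills:
	 *
	 *   associativity = <6 0 0 0 8 330 2>
	 *
	 * index = of_read_number(associativity, 1) == 6.  With
	 * primary_domain_index == 4, 6 > 4 + 1 holds, so the coregroup id
	 * returned is of_read_number(&associativity[5], 1) == 330.
	 */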
-/*
- * Disable polling for VPHN associativity changes.
- */
-int stop_topology_update(void)
-{
- int rc = 0;
-
- if (!topology_updates_enabled)
- return 0;
-
- if (prrn_enabled) {
- prrn_enabled = 0;
-#ifdef CONFIG_SMP
- rc = of_reconfig_notifier_unregister(&dt_update_nb);
-#endif
- }
- if (vphn_enabled) {
- vphn_enabled = 0;
- rc = del_timer_sync(&topology_timer);
- }
-
- pr_info("Stopping topology update\n");
-
- return rc;
-}
-
-int prrn_is_enabled(void)
-{
- return prrn_enabled;
-}
-
-void __init shared_proc_topology_init(void)
-{
- if (lppaca_shared_proc(get_lppaca())) {
- bitmap_fill(cpumask_bits(&cpu_associativity_changes_mask),
- nr_cpumask_bits);
- numa_update_cpu_topology(false);
- }
-}
-
-static int topology_read(struct seq_file *file, void *v)
-{
- if (vphn_enabled || prrn_enabled)
- seq_puts(file, "on\n");
- else
- seq_puts(file, "off\n");
-
- return 0;
-}
-
-static int topology_open(struct inode *inode, struct file *file)
-{
- return single_open(file, topology_read, NULL);
-}
-
-static ssize_t topology_write(struct file *file, const char __user *buf,
- size_t count, loff_t *off)
-{
- char kbuf[4]; /* "on" or "off" plus null. */
- int read_len;
-
- read_len = count < 3 ? count : 3;
- if (copy_from_user(kbuf, buf, read_len))
- return -EINVAL;
-
- kbuf[read_len] = '\0';
-
- if (!strncmp(kbuf, "on", 2)) {
- topology_updates_enabled = true;
- start_topology_update();
- } else if (!strncmp(kbuf, "off", 3)) {
- stop_topology_update();
- topology_updates_enabled = false;
- } else
- return -EINVAL;
-
- return count;
-}
-
-static const struct proc_ops topology_proc_ops = {
- .proc_read = seq_read,
- .proc_write = topology_write,
- .proc_open = topology_open,
- .proc_release = single_release,
-};
-
static int topology_update_init(void)
{
- start_topology_update();
-
- if (vphn_enabled)
- topology_schedule_update();
-
- if (!proc_create("powerpc/topology_updates", 0644, NULL, &topology_proc_ops))
- return -ENOMEM;
-
topology_inited = 1;
return 0;
}
diff --git a/arch/powerpc/mm/pageattr.c b/arch/powerpc/mm/pageattr.c
new file mode 100644
index 000000000000..6163e484bc6d
--- /dev/null
+++ b/arch/powerpc/mm/pageattr.c
@@ -0,0 +1,99 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * MMU-generic set_memory implementation for powerpc
+ *
+ * Copyright 2019-2021, IBM Corporation.
+ */
+
+#include <linux/mm.h>
+#include <linux/vmalloc.h>
+#include <linux/set_memory.h>
+
+#include <asm/mmu.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+
+
+static pte_basic_t pte_update_delta(pte_t *ptep, unsigned long addr,
+ unsigned long old, unsigned long new)
+{
+ return pte_update(&init_mm, addr, ptep, old & ~new, new & ~old, 0);
+}
+
+/*
+ * Updates the attributes of a page atomically.
+ *
+ * This sequence is safe against concurrent updates, and also allows updating the
+ * attributes of a page currently being executed or accessed.
+ */
+static int change_page_attr(pte_t *ptep, unsigned long addr, void *data)
+{
+ long action = (long)data;
+
+ addr &= PAGE_MASK;
+ /* modify the PTE bits as desired */
+ switch (action) {
+ case SET_MEMORY_RO:
+ /* Don't clear DIRTY bit */
+ pte_update_delta(ptep, addr, _PAGE_KERNEL_RW & ~_PAGE_DIRTY, _PAGE_KERNEL_RO);
+ break;
+ case SET_MEMORY_RW:
+ pte_update_delta(ptep, addr, _PAGE_KERNEL_RO, _PAGE_KERNEL_RW);
+ break;
+ case SET_MEMORY_NX:
+ pte_update_delta(ptep, addr, _PAGE_KERNEL_ROX, _PAGE_KERNEL_RO);
+ break;
+ case SET_MEMORY_X:
+ pte_update_delta(ptep, addr, _PAGE_KERNEL_RO, _PAGE_KERNEL_ROX);
+ break;
+ case SET_MEMORY_NP:
+ pte_update(&init_mm, addr, ptep, _PAGE_PRESENT, 0, 0);
+ break;
+ case SET_MEMORY_P:
+ pte_update(&init_mm, addr, ptep, 0, _PAGE_PRESENT, 0);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ break;
+ }
+
+ /* See ptesync comment in radix__set_pte_at() */
+ if (radix_enabled())
+ asm volatile("ptesync": : :"memory");
+
+ flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
+
+ return 0;
+}
+
+int change_memory_attr(unsigned long addr, int numpages, long action)
+{
+ unsigned long start = ALIGN_DOWN(addr, PAGE_SIZE);
+ unsigned long size = numpages * PAGE_SIZE;
+
+ if (!numpages)
+ return 0;
+
+ if (WARN_ON_ONCE(is_vmalloc_or_module_addr((void *)addr) &&
+ is_vm_area_hugepages((void *)addr)))
+ return -EINVAL;
+
+#ifdef CONFIG_PPC_BOOK3S_64
+ /*
+ * On hash, the linear mapping is not in the Linux page table so
+ * apply_to_existing_page_range() will have no effect. If in the future
+ * the set_memory_* functions are used on the linear map this will need
+ * to be updated.
+ */
+ if (!radix_enabled()) {
+ int region = get_region_id(addr);
+
+ if (WARN_ON_ONCE(region != VMALLOC_REGION_ID && region != IO_REGION_ID))
+ return -EINVAL;
+ }
+#endif
+
+ return apply_to_existing_page_range(&init_mm, start, size,
+ change_page_attr, (void *)action);
+}
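A minimal usage sketch for the new interface (hedged: it assumes the set_memory_ro()/set_memory_rw() wrappers that the same series adds in asm/set_memory.h, which forward to change_memory_attr(); error handling is omitted):

#include <linux/vmalloc.h>
#include <linux/set_memory.h>

static int example_protect_page(void)
{
	void *p = vmalloc(PAGE_SIZE);

	if (!p)
		return -ENOMEM;

	set_memory_ro((unsigned long)p, 1);	/* PAGE_KERNEL_RW -> PAGE_KERNEL_RO */
	/* ... the page can now only be read ... */
	set_memory_rw((unsigned long)p, 1);	/* restore write access */

	vfree(p);
	return 0;
}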
diff --git a/arch/powerpc/mm/pgtable-frag.c b/arch/powerpc/mm/pgtable-frag.c
index ee4bd6d38602..20652daa1d7e 100644
--- a/arch/powerpc/mm/pgtable-frag.c
+++ b/arch/powerpc/mm/pgtable-frag.c
@@ -83,7 +83,7 @@ static pte_t *__alloc_for_ptecache(struct mm_struct *mm, int kernel)
spin_lock(&mm->page_table_lock);
/*
* If we find pgtable_page set, we return
- * the allocated page with single fragement
+ * the allocated page with single fragment
* count.
*/
if (likely(!pte_frag_get(&mm->context))) {
@@ -110,6 +110,9 @@ void pte_fragment_free(unsigned long *table, int kernel)
{
struct page *page = virt_to_page(table);
+ if (PageReserved(page))
+ return free_reserved_page(page);
+
BUG_ON(atomic_read(&page->pt_frag_refcount) <= 0);
if (atomic_dec_and_test(&page->pt_frag_refcount)) {
if (!kernel)
diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c
index e3759b69f81b..cb2dcdb18f8e 100644
--- a/arch/powerpc/mm/pgtable.c
+++ b/arch/powerpc/mm/pgtable.c
@@ -23,10 +23,18 @@
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/hugetlb.h>
-#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/hugetlb.h>
+#include <asm/pte-walk.h>
+
+#ifdef CONFIG_PPC64
+#define PGD_ALIGN (sizeof(pgd_t) * MAX_PTRS_PER_PGD)
+#else
+#define PGD_ALIGN PAGE_SIZE
+#endif
+
+pgd_t swapper_pg_dir[MAX_PTRS_PER_PGD] __section(".bss..page_aligned") __aligned(PGD_ALIGN);
static inline int is_exec_fault(void)
{
@@ -73,18 +81,15 @@ static struct page *maybe_pte_to_page(pte_t pte)
static pte_t set_pte_filter_hash(pte_t pte)
{
- if (radix_enabled())
- return pte;
-
pte = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS);
if (pte_looks_normal(pte) && !(cpu_has_feature(CPU_FTR_COHERENT_ICACHE) ||
cpu_has_feature(CPU_FTR_NOEXECUTE))) {
struct page *pg = maybe_pte_to_page(pte);
if (!pg)
return pte;
- if (!test_bit(PG_arch_1, &pg->flags)) {
+ if (!test_bit(PG_dcache_clean, &pg->flags)) {
flush_dcache_icache_page(pg);
- set_bit(PG_arch_1, &pg->flags);
+ set_bit(PG_dcache_clean, &pg->flags);
}
}
return pte;
@@ -100,10 +105,13 @@ static pte_t set_pte_filter_hash(pte_t pte) { return pte; }
* as we don't have two bits to spare for _PAGE_EXEC and _PAGE_HWEXEC so
* instead we "filter out" the exec permission for non clean pages.
*/
-static pte_t set_pte_filter(pte_t pte)
+static inline pte_t set_pte_filter(pte_t pte)
{
struct page *pg;
+ if (radix_enabled())
+ return pte;
+
if (mmu_has_feature(MMU_FTR_HPTE_TABLE))
return set_pte_filter_hash(pte);
@@ -117,13 +125,13 @@ static pte_t set_pte_filter(pte_t pte)
return pte;
/* If the page clean, we move on */
- if (test_bit(PG_arch_1, &pg->flags))
+ if (test_bit(PG_dcache_clean, &pg->flags))
return pte;
/* If it's an exec fault, we flush the cache and make it clean */
if (is_exec_fault()) {
flush_dcache_icache_page(pg);
- set_bit(PG_arch_1, &pg->flags);
+ set_bit(PG_dcache_clean, &pg->flags);
return pte;
}
@@ -136,6 +144,9 @@ static pte_t set_access_flags_filter(pte_t pte, struct vm_area_struct *vma,
{
struct page *pg;
+ if (IS_ENABLED(CONFIG_PPC_BOOK3S_64))
+ return pte;
+
if (mmu_has_feature(MMU_FTR_HPTE_TABLE))
return pte;
@@ -162,12 +173,12 @@ static pte_t set_access_flags_filter(pte_t pte, struct vm_area_struct *vma,
goto bail;
/* If the page is already clean, we move on */
- if (test_bit(PG_arch_1, &pg->flags))
+ if (test_bit(PG_dcache_clean, &pg->flags))
goto bail;
- /* Clean the page and set PG_arch_1 */
+ /* Clean the page and set PG_dcache_clean */
flush_dcache_icache_page(pg);
- set_bit(PG_arch_1, &pg->flags);
+ set_bit(PG_dcache_clean, &pg->flags);
bail:
return pte_mkexec(pte);
@@ -185,9 +196,6 @@ void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
*/
VM_WARN_ON(pte_hw_valid(*ptep) && !pte_protnone(*ptep));
- /* Add the pte bit when trying to set a pte */
- pte = pte_mkpte(pte);
-
/* Note: mm->context.id might not yet have been assigned as
* this context might not have been activated yet when this
* is called.
@@ -198,6 +206,15 @@ void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
__set_pte_at(mm, addr, ptep, pte, 0);
}
+void unmap_kernel_page(unsigned long va)
+{
+ pmd_t *pmdp = pmd_off_k(va);
+ pte_t *ptep = pte_offset_kernel(pmdp, va);
+
+ pte_clear(&init_mm, va, ptep);
+ flush_tlb_kernel_range(va, va + PAGE_SIZE);
+}
+
/*
* This is called when relaxing access to a PTE. It's also called in the page
* fault path when we don't hit any of the major fault cases, ie, a minor
@@ -249,22 +266,49 @@ int huge_ptep_set_access_flags(struct vm_area_struct *vma,
#else
/*
- * Not used on non book3s64 platforms. But 8xx
- * can possibly use tsize derived from hstate.
+ * Not used on non book3s64 platforms.
+ * 8xx compares it with mmu_virtual_psize to
+ * know if it is a huge page or not.
*/
- psize = 0;
+ psize = MMU_PAGE_COUNT;
#endif
__ptep_set_access_flags(vma, ptep, pte, addr, psize);
}
return changed;
#endif
}
+
+#if defined(CONFIG_PPC_8xx)
+void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte)
+{
+ pmd_t *pmd = pmd_off(mm, addr);
+ pte_basic_t val;
+ pte_basic_t *entry = (pte_basic_t *)ptep;
+ int num, i;
+
+ /*
+ * Make sure hardware valid bit is not set. We don't do
+ * tlb flush for this update.
+ */
+ VM_WARN_ON(pte_hw_valid(*ptep) && !pte_protnone(*ptep));
+
+ pte = set_pte_filter(pte);
+
+ val = pte_val(pte);
+
+ num = number_of_cells_per_pte(pmd, val, 1);
+
+ for (i = 0; i < num; i++, entry++, val += SZ_4K)
+ *entry = val;
+}
+#endif
#endif /* CONFIG_HUGETLB_PAGE */
#ifdef CONFIG_DEBUG_VM
void assert_pte_locked(struct mm_struct *mm, unsigned long addr)
{
pgd_t *pgd;
+ p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
@@ -272,12 +316,14 @@ void assert_pte_locked(struct mm_struct *mm, unsigned long addr)
return;
pgd = mm->pgd + pgd_index(addr);
BUG_ON(pgd_none(*pgd));
- pud = pud_offset(pgd, addr);
+ p4d = p4d_offset(pgd, addr);
+ BUG_ON(p4d_none(*p4d));
+ pud = pud_offset(p4d, addr);
BUG_ON(pud_none(*pud));
pmd = pmd_offset(pud, addr);
/*
 	 * When khugepaged collapses normal pages to a hugepage, it first sets the
-	 * pmd to none to force page fault/gup to take mmap_sem. After
+	 * pmd to none to force page fault/gup to take mmap_lock. After
 	 * the pmd is set to none, it does a pte_clear which triggers this
 	 * assertion; so if we find the pmd none, just return.
*/
@@ -305,19 +351,20 @@ EXPORT_SYMBOL_GPL(vmalloc_to_phys);
* (4) hugepd pointer, _PAGE_PTE = 0 and bits [2..6] indicate size of table
*
* So long as we atomically load page table pointers we are safe against teardown,
- * we can follow the address down to the the page and take a ref on it.
+ * we can follow the address down to the page and take a ref on it.
* This function need to be called with interrupts disabled. We use this variant
* when we have MSR[EE] = 0 but the paca->irq_soft_mask = IRQS_ENABLED
*/
pte_t *__find_linux_pte(pgd_t *pgdir, unsigned long ea,
bool *is_thp, unsigned *hpage_shift)
{
- pgd_t pgd, *pgdp;
+ pgd_t *pgdp;
+ p4d_t p4d, *p4dp;
pud_t pud, *pudp;
pmd_t pmd, *pmdp;
pte_t *ret_pte;
hugepd_t *hpdp = NULL;
- unsigned pdshift = PGDIR_SHIFT;
+ unsigned pdshift;
if (hpage_shift)
*hpage_shift = 0;
@@ -325,24 +372,28 @@ pte_t *__find_linux_pte(pgd_t *pgdir, unsigned long ea,
if (is_thp)
*is_thp = false;
- pgdp = pgdir + pgd_index(ea);
- pgd = READ_ONCE(*pgdp);
/*
 	 * Always operate on the local stack value. This makes sure the
 	 * value doesn't get updated by a parallel THP split/collapse,
 	 * page fault or page unmap. The returned pte_t * is still not
 	 * stable, so it should be checked there for the above conditions.
+ * Top level is an exception because it is folded into p4d.
*/
- if (pgd_none(pgd))
+ pgdp = pgdir + pgd_index(ea);
+ p4dp = p4d_offset(pgdp, ea);
+ p4d = READ_ONCE(*p4dp);
+ pdshift = P4D_SHIFT;
+
+ if (p4d_none(p4d))
return NULL;
- if (pgd_is_leaf(pgd)) {
- ret_pte = (pte_t *)pgdp;
+ if (p4d_is_leaf(p4d)) {
+ ret_pte = (pte_t *)p4dp;
goto out;
}
- if (is_hugepd(__hugepd(pgd_val(pgd)))) {
- hpdp = (hugepd_t *)&pgd;
+ if (is_hugepd(__hugepd(p4d_val(p4d)))) {
+ hpdp = (hugepd_t *)&p4d;
goto out_huge;
}
@@ -352,7 +403,7 @@ pte_t *__find_linux_pte(pgd_t *pgdir, unsigned long ea,
* irq disabled
*/
pdshift = PUD_SHIFT;
- pudp = pud_offset(&pgd, ea);
+ pudp = pud_offset(&p4d, ea);
pud = READ_ONCE(*pudp);
if (pud_none(pud))
@@ -421,3 +472,27 @@ out:
return ret_pte;
}
EXPORT_SYMBOL_GPL(__find_linux_pte);
+
+/* Note due to the way vm flags are laid out, the bits are XWR */
+const pgprot_t protection_map[16] = {
+ [VM_NONE] = PAGE_NONE,
+ [VM_READ] = PAGE_READONLY,
+ [VM_WRITE] = PAGE_COPY,
+ [VM_WRITE | VM_READ] = PAGE_COPY,
+ [VM_EXEC] = PAGE_READONLY_X,
+ [VM_EXEC | VM_READ] = PAGE_READONLY_X,
+ [VM_EXEC | VM_WRITE] = PAGE_COPY_X,
+ [VM_EXEC | VM_WRITE | VM_READ] = PAGE_COPY_X,
+ [VM_SHARED] = PAGE_NONE,
+ [VM_SHARED | VM_READ] = PAGE_READONLY,
+ [VM_SHARED | VM_WRITE] = PAGE_SHARED,
+ [VM_SHARED | VM_WRITE | VM_READ] = PAGE_SHARED,
+ [VM_SHARED | VM_EXEC] = PAGE_READONLY_X,
+ [VM_SHARED | VM_EXEC | VM_READ] = PAGE_READONLY_X,
+ [VM_SHARED | VM_EXEC | VM_WRITE] = PAGE_SHARED_X,
+ [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = PAGE_SHARED_X
+};
+
+#ifndef CONFIG_PPC_BOOK3S_64
+DECLARE_VM_GET_PAGE_PROT
+#endif
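For context (hedged; based on the generic helper in include/linux/mm.h rather than on this diff), DECLARE_VM_GET_PAGE_PROT expands to roughly the following, which is how the protection_map table above gets consumed:

pgprot_t vm_get_page_prot(unsigned long vm_flags)
{
	return protection_map[vm_flags & (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)];
}
EXPORT_SYMBOL(vm_get_page_prot);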
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index 5fb90edd865e..5c02fd08d61e 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -23,16 +23,30 @@
#include <linux/highmem.h>
#include <linux/memblock.h>
#include <linux/slab.h>
+#include <linux/set_memory.h>
-#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/fixmap.h>
#include <asm/setup.h>
#include <asm/sections.h>
+#include <asm/early_ioremap.h>
#include <mm/mmu_decl.h>
-extern char etext[], _stext[], _sinittext[], _einittext[];
+static u8 early_fixmap_pagetable[FIXMAP_PTE_SIZE] __page_aligned_data;
+
+notrace void __init early_ioremap_init(void)
+{
+ unsigned long addr = ALIGN_DOWN(FIXADDR_START, PGDIR_SIZE);
+ pte_t *ptep = (pte_t *)early_fixmap_pagetable;
+ pmd_t *pmdp = pmd_off_k(addr);
+
+ for (; (s32)(FIXADDR_TOP - addr) > 0;
+ addr += PGDIR_SIZE, ptep += PTRS_PER_PTE, pmdp++)
+ pmd_populate_kernel(&init_mm, pmdp, ptep);
+
+ early_ioremap_setup();
+}
static void __init *early_alloc_pgtable(unsigned long size)
{
@@ -45,7 +59,7 @@ static void __init *early_alloc_pgtable(unsigned long size)
return ptr;
}
-static pte_t __init *early_pte_alloc_kernel(pmd_t *pmdp, unsigned long va)
+pte_t __init *early_pte_alloc_kernel(pmd_t *pmdp, unsigned long va)
{
if (pmd_none(*pmdp)) {
pte_t *ptep = early_alloc_pgtable(PTE_FRAG_SIZE);
@@ -63,13 +77,13 @@ int __ref map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot)
int err = -ENOMEM;
/* Use upper 10 bits of VA to index the first level map */
- pd = pmd_offset(pud_offset(pgd_offset_k(va), va), va);
+ pd = pmd_off_k(va);
/* Use middle 10 bits of VA to index the second-level map */
if (likely(slab_is_available()))
pg = pte_alloc_kernel(pd, va);
else
pg = early_pte_alloc_kernel(pd, va);
- if (pg != 0) {
+ if (pg) {
err = 0;
/* The PTE should never be already set nor present in the
* hash table
@@ -88,19 +102,14 @@ static void __init __mapin_ram_chunk(unsigned long offset, unsigned long top)
{
unsigned long v, s;
phys_addr_t p;
- int ktext;
+ bool ktext;
s = offset;
v = PAGE_OFFSET + s;
p = memstart_addr + s;
for (; s < top; s += PAGE_SIZE) {
- ktext = ((char *)v >= _stext && (char *)v < etext) ||
- ((char *)v >= _sinittext && (char *)v < _einittext);
+ ktext = core_kernel_text(v);
map_kernel_page(v, p, ktext ? PAGE_KERNEL_TEXT : PAGE_KERNEL);
-#ifdef CONFIG_PPC_BOOK3S_32
- if (ktext)
- hash_preload(&init_mm, v);
-#endif
v += PAGE_SIZE;
p += PAGE_SIZE;
}
@@ -108,11 +117,11 @@ static void __init __mapin_ram_chunk(unsigned long offset, unsigned long top)
void __init mapin_ram(void)
{
- struct memblock_region *reg;
+ phys_addr_t base, end;
+ u64 i;
- for_each_memblock(memory, reg) {
- phys_addr_t base = reg->base;
- phys_addr_t top = min(base + reg->size, total_lowmem);
+ for_each_mem_range(i, &base, &end) {
+ phys_addr_t top = min(end, total_lowmem);
if (base >= top)
continue;
@@ -121,133 +130,59 @@ void __init mapin_ram(void)
}
}
-/* Scan the real Linux page tables and return a PTE pointer for
- * a virtual address in a context.
- * Returns true (1) if PTE was found, zero otherwise. The pointer to
- * the PTE pointer is unmodified if PTE is not found.
- */
-static int
-get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep, pmd_t **pmdp)
-{
- pgd_t *pgd;
- pud_t *pud;
- pmd_t *pmd;
- pte_t *pte;
- int retval = 0;
-
- pgd = pgd_offset(mm, addr & PAGE_MASK);
- if (pgd) {
- pud = pud_offset(pgd, addr & PAGE_MASK);
- if (pud && pud_present(*pud)) {
- pmd = pmd_offset(pud, addr & PAGE_MASK);
- if (pmd_present(*pmd)) {
- pte = pte_offset_map(pmd, addr & PAGE_MASK);
- if (pte) {
- retval = 1;
- *ptep = pte;
- if (pmdp)
- *pmdp = pmd;
- /* XXX caller needs to do pte_unmap, yuck */
- }
- }
- }
- }
- return(retval);
-}
-
-static int __change_page_attr_noflush(struct page *page, pgprot_t prot)
-{
- pte_t *kpte;
- pmd_t *kpmd;
- unsigned long address;
-
- BUG_ON(PageHighMem(page));
- address = (unsigned long)page_address(page);
-
- if (v_block_mapped(address))
- return 0;
- if (!get_pteptr(&init_mm, address, &kpte, &kpmd))
- return -EINVAL;
- __set_pte_at(&init_mm, address, kpte, mk_pte(page, prot), 0);
- pte_unmap(kpte);
-
- return 0;
-}
-
-/*
- * Change the page attributes of an page in the linear mapping.
- *
- * THIS DOES NOTHING WITH BAT MAPPINGS, DEBUG USE ONLY
- */
-static int change_page_attr(struct page *page, int numpages, pgprot_t prot)
-{
- int i, err = 0;
- unsigned long flags;
- struct page *start = page;
-
- local_irq_save(flags);
- for (i = 0; i < numpages; i++, page++) {
- err = __change_page_attr_noflush(page, prot);
- if (err)
- break;
- }
- wmb();
- local_irq_restore(flags);
- flush_tlb_kernel_range((unsigned long)page_address(start),
- (unsigned long)page_address(page));
- return err;
-}
-
void mark_initmem_nx(void)
{
- struct page *page = virt_to_page(_sinittext);
unsigned long numpages = PFN_UP((unsigned long)_einittext) -
PFN_DOWN((unsigned long)_sinittext);
- if (v_block_mapped((unsigned long)_stext + 1))
- mmu_mark_initmem_nx();
- else
- change_page_attr(page, numpages, PAGE_KERNEL);
+ mmu_mark_initmem_nx();
+
+ if (!v_block_mapped((unsigned long)_sinittext)) {
+ set_memory_nx((unsigned long)_sinittext, numpages);
+ set_memory_rw((unsigned long)_sinittext, numpages);
+ }
}
#ifdef CONFIG_STRICT_KERNEL_RWX
void mark_rodata_ro(void)
{
- struct page *page;
unsigned long numpages;
- if (v_block_mapped((unsigned long)_sinittext)) {
+ if (IS_ENABLED(CONFIG_STRICT_MODULE_RWX) && mmu_has_feature(MMU_FTR_HPTE_TABLE))
+ pr_warn("This platform has HASH MMU, STRICT_MODULE_RWX won't work\n");
+
+ if (v_block_mapped((unsigned long)_stext + 1)) {
mmu_mark_rodata_ro();
ptdump_check_wx();
return;
}
- page = virt_to_page(_stext);
- numpages = PFN_UP((unsigned long)_etext) -
- PFN_DOWN((unsigned long)_stext);
-
- change_page_attr(page, numpages, PAGE_KERNEL_ROX);
/*
- * mark .rodata as read only. Use __init_begin rather than __end_rodata
- * to cover NOTES and EXCEPTION_TABLE.
+ * mark text and rodata as read only. __end_rodata is set by
+ * powerpc's linker script and includes tables and data
+ * requiring relocation which are not put in RO_DATA.
*/
- page = virt_to_page(__start_rodata);
- numpages = PFN_UP((unsigned long)__init_begin) -
- PFN_DOWN((unsigned long)__start_rodata);
+ numpages = PFN_UP((unsigned long)__end_rodata) -
+ PFN_DOWN((unsigned long)_stext);
- change_page_attr(page, numpages, PAGE_KERNEL_RO);
+ set_memory_ro((unsigned long)_stext, numpages);
// mark_initmem_nx() should have already run by now
ptdump_check_wx();
}
#endif
-#ifdef CONFIG_DEBUG_PAGEALLOC
+#if defined(CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC) && defined(CONFIG_DEBUG_PAGEALLOC)
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
+ unsigned long addr = (unsigned long)page_address(page);
+
if (PageHighMem(page))
return;
- change_page_attr(page, numpages, enable ? PAGE_KERNEL : __pgprot(0));
+ if (enable)
+ set_memory_p(addr, numpages);
+ else
+ set_memory_np(addr, numpages);
}
#endif /* CONFIG_DEBUG_PAGEALLOC */
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index e78832dce7bb..5ac1fd30341b 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -31,11 +31,8 @@
#include <linux/slab.h>
#include <linux/hugetlb.h>
-#include <asm/pgalloc.h>
#include <asm/page.h>
-#include <asm/prom.h>
#include <asm/mmu_context.h>
-#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/smp.h>
#include <asm/machdep.h>
@@ -101,23 +98,25 @@ EXPORT_SYMBOL(__pte_frag_size_shift);
#ifndef __PAGETABLE_PUD_FOLDED
/* 4 level page table */
-struct page *pgd_page(pgd_t pgd)
+struct page *p4d_page(p4d_t p4d)
{
- if (pgd_is_leaf(pgd)) {
- VM_WARN_ON(!pgd_huge(pgd));
- return pte_page(pgd_pte(pgd));
+ if (p4d_is_leaf(p4d)) {
+ if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMAP))
+ VM_WARN_ON(!p4d_huge(p4d));
+ return pte_page(p4d_pte(p4d));
}
- return virt_to_page(pgd_page_vaddr(pgd));
+ return virt_to_page(p4d_pgtable(p4d));
}
#endif
struct page *pud_page(pud_t pud)
{
if (pud_is_leaf(pud)) {
- VM_WARN_ON(!pud_huge(pud));
+ if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMAP))
+ VM_WARN_ON(!pud_huge(pud));
return pte_page(pud_pte(pud));
}
- return virt_to_page(pud_page_vaddr(pud));
+ return virt_to_page(pud_pgtable(pud));
}
/*
@@ -127,7 +126,13 @@ struct page *pud_page(pud_t pud)
struct page *pmd_page(pmd_t pmd)
{
if (pmd_is_leaf(pmd)) {
- VM_WARN_ON(!(pmd_large(pmd) || pmd_huge(pmd)));
+ /*
+ * vmalloc_to_page may be called on any vmap address (not only
+		 * vmalloc_to_page() may be called on any vmap address (not
+		 * only vmalloc), and it uses pmd_page() etc. when huge vmap
+		 * is enabled, so these checks can't be used.
+ if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMAP))
+ VM_WARN_ON(!(pmd_large(pmd) || pmd_huge(pmd)));
return pte_page(pmd_pte(pmd));
}
return virt_to_page(pmd_page_vaddr(pmd));
diff --git a/arch/powerpc/mm/ptdump/8xx.c b/arch/powerpc/mm/ptdump/8xx.c
index 9e2d8e847d6e..fac932eb8f9a 100644
--- a/arch/powerpc/mm/ptdump/8xx.c
+++ b/arch/powerpc/mm/ptdump/8xx.c
@@ -5,12 +5,22 @@
*
*/
#include <linux/kernel.h>
-#include <asm/pgtable.h>
+#include <linux/pgtable.h>
#include "ptdump.h"
static const struct flag_info flag_array[] = {
{
+#ifdef CONFIG_PPC_16K_PAGES
+ .mask = _PAGE_HUGE,
+ .val = _PAGE_HUGE,
+#else
+ .mask = _PAGE_SPS,
+ .val = _PAGE_SPS,
+#endif
+ .set = "huge",
+ .clear = " ",
+ }, {
.mask = _PAGE_SH,
.val = 0,
.set = "user",
@@ -65,8 +75,10 @@ static const struct flag_info flag_array[] = {
};
struct pgtable_level pg_level[5] = {
- {
- }, { /* pgd */
+ { /* pgd */
+ .flag = flag_array,
+ .num = ARRAY_SIZE(flag_array),
+ }, { /* p4d */
.flag = flag_array,
.num = ARRAY_SIZE(flag_array),
}, { /* pud */
diff --git a/arch/powerpc/mm/ptdump/Makefile b/arch/powerpc/mm/ptdump/Makefile
index 712762be3cb1..dc896d2874f3 100644
--- a/arch/powerpc/mm/ptdump/Makefile
+++ b/arch/powerpc/mm/ptdump/Makefile
@@ -4,6 +4,11 @@ obj-y += ptdump.o
obj-$(CONFIG_4xx) += shared.o
obj-$(CONFIG_PPC_8xx) += 8xx.o
-obj-$(CONFIG_PPC_BOOK3E_MMU) += shared.o
-obj-$(CONFIG_PPC_BOOK3S_32) += shared.o bats.o segment_regs.o
-obj-$(CONFIG_PPC_BOOK3S_64) += book3s64.o hashpagetable.o
+obj-$(CONFIG_PPC_E500) += shared.o
+obj-$(CONFIG_PPC_BOOK3S_32) += shared.o
+obj-$(CONFIG_PPC_BOOK3S_64) += book3s64.o
+
+ifdef CONFIG_PTDUMP_DEBUGFS
+obj-$(CONFIG_PPC_BOOK3S_32) += bats.o segment_regs.o
+obj-$(CONFIG_PPC_64S_HASH_MMU) += hashpagetable.o
+endif
diff --git a/arch/powerpc/mm/ptdump/bats.c b/arch/powerpc/mm/ptdump/bats.c
index 4154feac1da3..820c119013e4 100644
--- a/arch/powerpc/mm/ptdump/bats.c
+++ b/arch/powerpc/mm/ptdump/bats.c
@@ -6,67 +6,11 @@
* This dumps the content of BATS
*/
-#include <asm/debugfs.h>
-#include <asm/pgtable.h>
+#include <linux/pgtable.h>
+#include <linux/debugfs.h>
#include <asm/cpu_has_feature.h>
-static char *pp_601(int k, int pp)
-{
- if (pp == 0)
- return k ? "NA" : "RWX";
- if (pp == 1)
- return k ? "ROX" : "RWX";
- if (pp == 2)
- return k ? "RWX" : "RWX";
- return k ? "ROX" : "ROX";
-}
-
-static void bat_show_601(struct seq_file *m, int idx, u32 lower, u32 upper)
-{
- u32 blpi = upper & 0xfffe0000;
- u32 k = (upper >> 2) & 3;
- u32 pp = upper & 3;
- phys_addr_t pbn = PHYS_BAT_ADDR(lower);
- u32 bsm = lower & 0x3ff;
- u32 size = (bsm + 1) << 17;
-
- seq_printf(m, "%d: ", idx);
- if (!(lower & 0x40)) {
- seq_puts(m, " -\n");
- return;
- }
-
- seq_printf(m, "0x%08x-0x%08x ", blpi, blpi + size - 1);
-#ifdef CONFIG_PHYS_64BIT
- seq_printf(m, "0x%016llx ", pbn);
-#else
- seq_printf(m, "0x%08x ", pbn);
-#endif
-
- seq_printf(m, "Kernel %s User %s", pp_601(k & 2, pp), pp_601(k & 1, pp));
-
- if (lower & _PAGE_WRITETHRU)
- seq_puts(m, "write through ");
- if (lower & _PAGE_NO_CACHE)
- seq_puts(m, "no cache ");
- if (lower & _PAGE_COHERENT)
- seq_puts(m, "coherent ");
- seq_puts(m, "\n");
-}
-
-#define BAT_SHOW_601(_m, _n, _l, _u) bat_show_601(_m, _n, mfspr(_l), mfspr(_u))
-
-static int bats_show_601(struct seq_file *m, void *v)
-{
- seq_puts(m, "---[ Block Address Translation ]---\n");
-
- BAT_SHOW_601(m, 0, SPRN_IBAT0L, SPRN_IBAT0U);
- BAT_SHOW_601(m, 1, SPRN_IBAT1L, SPRN_IBAT1U);
- BAT_SHOW_601(m, 2, SPRN_IBAT2L, SPRN_IBAT2U);
- BAT_SHOW_601(m, 3, SPRN_IBAT3L, SPRN_IBAT3U);
-
- return 0;
-}
+#include "ptdump.h"
static void bat_show_603(struct seq_file *m, int idx, u32 lower, u32 upper, bool is_d)
{
@@ -88,6 +32,7 @@ static void bat_show_603(struct seq_file *m, int idx, u32 lower, u32 upper, bool
#else
seq_printf(m, "0x%08x ", brpn);
#endif
+ pt_dump_size(m, size);
if (k == 1)
seq_puts(m, "User ");
@@ -97,26 +42,22 @@ static void bat_show_603(struct seq_file *m, int idx, u32 lower, u32 upper, bool
seq_puts(m, "Kernel/User ");
if (lower & BPP_RX)
- seq_puts(m, is_d ? "RO " : "EXEC ");
+ seq_puts(m, is_d ? "r " : " x ");
else if (lower & BPP_RW)
- seq_puts(m, is_d ? "RW " : "EXEC ");
+ seq_puts(m, is_d ? "rw " : " x ");
else
- seq_puts(m, is_d ? "NA " : "NX ");
-
- if (lower & _PAGE_WRITETHRU)
- seq_puts(m, "write through ");
- if (lower & _PAGE_NO_CACHE)
- seq_puts(m, "no cache ");
- if (lower & _PAGE_COHERENT)
- seq_puts(m, "coherent ");
- if (lower & _PAGE_GUARDED)
- seq_puts(m, "guarded ");
+ seq_puts(m, is_d ? " " : " ");
+
+ seq_puts(m, lower & _PAGE_WRITETHRU ? "w " : " ");
+ seq_puts(m, lower & _PAGE_NO_CACHE ? "i " : " ");
+ seq_puts(m, lower & _PAGE_COHERENT ? "m " : " ");
+ seq_puts(m, lower & _PAGE_GUARDED ? "g " : " ");
seq_puts(m, "\n");
}
#define BAT_SHOW_603(_m, _n, _l, _u, _d) bat_show_603(_m, _n, mfspr(_l), mfspr(_u), _d)
-static int bats_show_603(struct seq_file *m, void *v)
+static int bats_show(struct seq_file *m, void *v)
{
seq_puts(m, "---[ Instruction Block Address Translation ]---\n");
@@ -147,27 +88,12 @@ static int bats_show_603(struct seq_file *m, void *v)
return 0;
}
-static int bats_open(struct inode *inode, struct file *file)
-{
- if (IS_ENABLED(CONFIG_PPC_BOOK3S_601))
- return single_open(file, bats_show_601, NULL);
-
- return single_open(file, bats_show_603, NULL);
-}
-
-static const struct file_operations bats_fops = {
- .open = bats_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(bats);
static int __init bats_init(void)
{
- struct dentry *debugfs_file;
-
- debugfs_file = debugfs_create_file("block_address_translation", 0400,
- powerpc_debugfs_root, NULL, &bats_fops);
- return debugfs_file ? 0 : -ENOMEM;
+ debugfs_create_file("block_address_translation", 0400,
+ arch_debugfs_dir, NULL, &bats_fops);
+ return 0;
}
device_initcall(bats_init);
diff --git a/arch/powerpc/mm/ptdump/book3s64.c b/arch/powerpc/mm/ptdump/book3s64.c
index 0dfca72cb9bd..5ad92d9dc5d1 100644
--- a/arch/powerpc/mm/ptdump/book3s64.c
+++ b/arch/powerpc/mm/ptdump/book3s64.c
@@ -5,7 +5,7 @@
*
*/
#include <linux/kernel.h>
-#include <asm/pgtable.h>
+#include <linux/pgtable.h>
#include "ptdump.h"
@@ -103,8 +103,10 @@ static const struct flag_info flag_array[] = {
};
struct pgtable_level pg_level[5] = {
- {
- }, { /* pgd */
+ { /* pgd */
+ .flag = flag_array,
+ .num = ARRAY_SIZE(flag_array),
+ }, { /* p4d */
.flag = flag_array,
.num = ARRAY_SIZE(flag_array),
}, { /* pud */
diff --git a/arch/powerpc/mm/ptdump/hashpagetable.c b/arch/powerpc/mm/ptdump/hashpagetable.c
index a07278027c6f..9a601587836b 100644
--- a/arch/powerpc/mm/ptdump/hashpagetable.c
+++ b/arch/powerpc/mm/ptdump/hashpagetable.c
@@ -15,13 +15,12 @@
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
-#include <asm/pgtable.h>
#include <linux/const.h>
#include <asm/page.h>
-#include <asm/pgalloc.h>
#include <asm/plpar_wrappers.h>
#include <linux/memblock.h>
#include <asm/firmware.h>
+#include <asm/pgalloc.h>
struct pg_state {
struct seq_file *seq;
@@ -239,7 +238,10 @@ static int native_find(unsigned long ea, int psize, bool primary, u64 *v, u64
static int pseries_find(unsigned long ea, int psize, bool primary, u64 *v, u64 *r)
{
- struct hash_pte ptes[4];
+ struct {
+ unsigned long v;
+ unsigned long r;
+ } ptes[4];
unsigned long vsid, vpn, hash, hpte_group, want_v;
int i, j, ssize = mmu_kernel_ssize;
long lpar_rc = 0;
@@ -259,7 +261,7 @@ static int pseries_find(unsigned long ea, int psize, bool primary, u64 *v, u64 *
for (i = 0; i < HPTES_PER_GROUP; i += 4, hpte_group += 4) {
lpar_rc = plpar_pte_read_4(0, hpte_group, (void *)ptes);
- if (lpar_rc != H_SUCCESS)
+ if (lpar_rc)
continue;
for (j = 0; j < 4; j++) {
if (HPTE_V_COMPARE(ptes[j].v, want_v) &&
@@ -417,9 +419,9 @@ static void walk_pmd(struct pg_state *st, pud_t *pud, unsigned long start)
}
}
-static void walk_pud(struct pg_state *st, pgd_t *pgd, unsigned long start)
+static void walk_pud(struct pg_state *st, p4d_t *p4d, unsigned long start)
{
- pud_t *pud = pud_offset(pgd, 0);
+ pud_t *pud = pud_offset(p4d, 0);
unsigned long addr;
unsigned int i;
@@ -431,6 +433,20 @@ static void walk_pud(struct pg_state *st, pgd_t *pgd, unsigned long start)
}
}
+static void walk_p4d(struct pg_state *st, pgd_t *pgd, unsigned long start)
+{
+ p4d_t *p4d = p4d_offset(pgd, 0);
+ unsigned long addr;
+ unsigned int i;
+
+ for (i = 0; i < PTRS_PER_P4D; i++, p4d++) {
+ addr = start + i * P4D_SIZE;
+ if (!p4d_none(*p4d))
+ /* p4d exists */
+ walk_pud(st, p4d, addr);
+ }
+}
+
static void walk_pagetables(struct pg_state *st)
{
pgd_t *pgd = pgd_offset_k(0UL);
@@ -445,7 +461,7 @@ static void walk_pagetables(struct pg_state *st)
addr = KERN_VIRT_START + i * PGDIR_SIZE;
if (!pgd_none(*pgd))
/* pgd exists */
- walk_pud(st, pgd, addr);
+ walk_p4d(st, pgd, addr);
}
}
@@ -513,27 +529,14 @@ static int ptdump_show(struct seq_file *m, void *v)
return 0;
}
-static int ptdump_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ptdump_show, NULL);
-}
-
-static const struct file_operations ptdump_fops = {
- .open = ptdump_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(ptdump);
static int ptdump_init(void)
{
- struct dentry *debugfs_file;
-
if (!radix_enabled()) {
populate_markers();
- debugfs_file = debugfs_create_file("kernel_hash_pagetable",
- 0400, NULL, NULL, &ptdump_fops);
- return debugfs_file ? 0 : -ENOMEM;
+ debugfs_create_file("kernel_hash_pagetable", 0400, NULL, NULL,
+ &ptdump_fops);
}
return 0;
}
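/*
 * Editorial sketch, not part of the patch: walk_p4d() slots the (normally
 * folded) p4d level into the descent, so the dumper now mirrors the generic
 * offset-function chain. lookup_kernel_pte() is a hypothetical helper that
 * shows only the chain; the p*_none() presence checks are omitted:
 */
static pte_t *lookup_kernel_pte(unsigned long addr)
{
	pgd_t *pgd = pgd_offset_k(addr);
	p4d_t *p4d = p4d_offset(pgd, addr);
	pud_t *pud = pud_offset(p4d, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return pte_offset_kernel(pmd, addr);
}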
diff --git a/arch/powerpc/mm/ptdump/ptdump.c b/arch/powerpc/mm/ptdump/ptdump.c
index 206156255247..2313053fe679 100644
--- a/arch/powerpc/mm/ptdump/ptdump.c
+++ b/arch/powerpc/mm/ptdump/ptdump.c
@@ -16,13 +16,14 @@
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/highmem.h>
+#include <linux/ptdump.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <asm/fixmap.h>
-#include <asm/pgtable.h>
#include <linux/const.h>
+#include <linux/kasan.h>
#include <asm/page.h>
-#include <asm/pgalloc.h>
+#include <asm/hugetlb.h>
#include <mm/mmu_decl.h>
@@ -55,12 +56,12 @@
*
*/
struct pg_state {
+ struct ptdump_state ptdump;
struct seq_file *seq;
const struct addr_marker *marker;
unsigned long start_address;
unsigned long start_pa;
- unsigned long last_pa;
- unsigned int level;
+ int level;
u64 current_flags;
bool check_wx;
unsigned long wx_pages;
@@ -73,6 +74,10 @@ struct addr_marker {
static struct addr_marker address_markers[] = {
{ 0, "Start of kernel VM" },
+#ifdef MODULES_VADDR
+ { 0, "modules start" },
+ { 0, "modules end" },
+#endif
{ 0, "vmalloc() Area" },
{ 0, "vmalloc() End" },
#ifdef CONFIG_PPC64
@@ -100,6 +105,11 @@ static struct addr_marker address_markers[] = {
{ -1, NULL },
};
+static struct ptdump_range ptdump_range[] __ro_after_init = {
+ {TASK_SIZE_MAX, ~0UL},
+ {0, 0}
+};
+
#define pt_dump_seq_printf(m, fmt, args...) \
({ \
if (m) \
@@ -112,6 +122,19 @@ static struct addr_marker address_markers[] = {
seq_putc(m, c); \
})
+void pt_dump_size(struct seq_file *m, unsigned long size)
+{
+ static const char units[] = " KMGTPE";
+ const char *unit = units;
+
+	/* Work out the appropriate unit to use */
+ while (!(size & 1023) && unit[1]) {
+ size >>= 10;
+ unit++;
+ }
+ pt_dump_seq_printf(m, "%9lu%c ", size, *unit);
+}
+
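/*
 * Editorial example, not part of the patch: pt_dump_size() keeps dividing by
 * 1024 while the size remains a 1K multiple, so for instance:
 *
 *	pt_dump_size(m, 4UL << 20);	prints "        4M "
 *	pt_dump_size(m, 12UL << 10);	prints "       12K "
 *	pt_dump_size(m, 4095);		prints "     4095  "  (bytes, blank unit)
 */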
static void dump_flag_info(struct pg_state *st, const struct flag_info
*flag, u64 pte, int num)
{
@@ -146,10 +169,6 @@ static void dump_flag_info(struct pg_state *st, const struct flag_info
static void dump_addr(struct pg_state *st, unsigned long addr)
{
- static const char units[] = "KMGTPE";
- const char *unit = units;
- unsigned long delta;
-
#ifdef CONFIG_PPC64
#define REG "0x%016lx"
#else
@@ -157,27 +176,15 @@ static void dump_addr(struct pg_state *st, unsigned long addr)
#endif
pt_dump_seq_printf(st->seq, REG "-" REG " ", st->start_address, addr - 1);
- if (st->start_pa == st->last_pa && st->start_address + PAGE_SIZE != addr) {
- pt_dump_seq_printf(st->seq, "[" REG "]", st->start_pa);
- delta = PAGE_SIZE >> 10;
- } else {
- pt_dump_seq_printf(st->seq, " " REG " ", st->start_pa);
- delta = (addr - st->start_address) >> 10;
- }
- /* Work out what appropriate unit to use */
- while (!(delta & 1023) && unit[1]) {
- delta >>= 10;
- unit++;
- }
- pt_dump_seq_printf(st->seq, "%9lu%c", delta, *unit);
-
+ pt_dump_seq_printf(st->seq, " " REG " ", st->start_pa);
+ pt_dump_size(st->seq, addr - st->start_address);
}
static void note_prot_wx(struct pg_state *st, unsigned long addr)
{
pte_t pte = __pte(st->current_flags);
- if (!IS_ENABLED(CONFIG_PPC_DEBUG_WX) || !st->check_wx)
+ if (!IS_ENABLED(CONFIG_DEBUG_WX) || !st->check_wx)
return;
if (!pte_write(pte) || !pte_exec(pte))
@@ -189,32 +196,40 @@ static void note_prot_wx(struct pg_state *st, unsigned long addr)
st->wx_pages += (addr - st->start_address) / PAGE_SIZE;
}
-static void note_page(struct pg_state *st, unsigned long addr,
- unsigned int level, u64 val)
+static void note_page_update_state(struct pg_state *st, unsigned long addr, int level, u64 val)
{
- u64 flag = val & pg_level[level].mask;
+ u64 flag = level >= 0 ? val & pg_level[level].mask : 0;
u64 pa = val & PTE_RPN_MASK;
+ st->level = level;
+ st->current_flags = flag;
+ st->start_address = addr;
+ st->start_pa = pa;
+
+ while (addr >= st->marker[1].start_address) {
+ st->marker++;
+ pt_dump_seq_printf(st->seq, "---[ %s ]---\n", st->marker->name);
+ }
+}
+
+static void note_page(struct ptdump_state *pt_st, unsigned long addr, int level, u64 val)
+{
+ u64 flag = level >= 0 ? val & pg_level[level].mask : 0;
+ struct pg_state *st = container_of(pt_st, struct pg_state, ptdump);
+
/* At first no level is set */
- if (!st->level) {
- st->level = level;
- st->current_flags = flag;
- st->start_address = addr;
- st->start_pa = pa;
- st->last_pa = pa;
+ if (st->level == -1) {
pt_dump_seq_printf(st->seq, "---[ %s ]---\n", st->marker->name);
+ note_page_update_state(st, addr, level, val);
/*
* Dump the section of virtual memory when:
* - the PTE flags from one entry to the next differs.
* - we change levels in the tree.
* - the address is in a different section of memory and is thus
* used for a different purpose, regardless of the flags.
- * - the pa of this page is not adjacent to the last inspected page
*/
} else if (flag != st->current_flags || level != st->level ||
- addr >= st->marker[1].start_address ||
- (pa != st->last_pa + PAGE_SIZE &&
- (pa != st->start_pa || st->start_pa != st->last_pa))) {
+ addr >= st->marker[1].start_address) {
/* Check the PTE flags */
if (st->current_flags) {
@@ -234,81 +249,7 @@ static void note_page(struct pg_state *st, unsigned long addr,
* Address indicates we have passed the end of the
* current section of virtual memory
*/
- while (addr >= st->marker[1].start_address) {
- st->marker++;
- pt_dump_seq_printf(st->seq, "---[ %s ]---\n", st->marker->name);
- }
- st->start_address = addr;
- st->start_pa = pa;
- st->last_pa = pa;
- st->current_flags = flag;
- st->level = level;
- } else {
- st->last_pa = pa;
- }
-}
-
-static void walk_pte(struct pg_state *st, pmd_t *pmd, unsigned long start)
-{
- pte_t *pte = pte_offset_kernel(pmd, 0);
- unsigned long addr;
- unsigned int i;
-
- for (i = 0; i < PTRS_PER_PTE; i++, pte++) {
- addr = start + i * PAGE_SIZE;
- note_page(st, addr, 4, pte_val(*pte));
-
- }
-}
-
-static void walk_pmd(struct pg_state *st, pud_t *pud, unsigned long start)
-{
- pmd_t *pmd = pmd_offset(pud, 0);
- unsigned long addr;
- unsigned int i;
-
- for (i = 0; i < PTRS_PER_PMD; i++, pmd++) {
- addr = start + i * PMD_SIZE;
- if (!pmd_none(*pmd) && !pmd_is_leaf(*pmd))
- /* pmd exists */
- walk_pte(st, pmd, addr);
- else
- note_page(st, addr, 3, pmd_val(*pmd));
- }
-}
-
-static void walk_pud(struct pg_state *st, pgd_t *pgd, unsigned long start)
-{
- pud_t *pud = pud_offset(pgd, 0);
- unsigned long addr;
- unsigned int i;
-
- for (i = 0; i < PTRS_PER_PUD; i++, pud++) {
- addr = start + i * PUD_SIZE;
- if (!pud_none(*pud) && !pud_is_leaf(*pud))
- /* pud exists */
- walk_pmd(st, pud, addr);
- else
- note_page(st, addr, 2, pud_val(*pud));
- }
-}
-
-static void walk_pagetables(struct pg_state *st)
-{
- unsigned int i;
- unsigned long addr = st->start_address & PGDIR_MASK;
- pgd_t *pgd = pgd_offset_k(addr);
-
- /*
- * Traverse the linux pagetable structure and dump pages that are in
- * the hash pagetable.
- */
- for (i = pgd_index(addr); i < PTRS_PER_PGD; i++, pgd++, addr += PGDIR_SIZE) {
- if (!pgd_none(*pgd) && !pgd_is_leaf(*pgd))
- /* pgd exists */
- walk_pud(st, pgd, addr);
- else
- note_page(st, addr, 1, pgd_val(*pgd));
+ note_page_update_state(st, addr, level, val);
}
}
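/*
 * Editorial illustration, not part of the patch: note_page() is now the
 * generic ptdump callback. level is 0..4 for pgd..pte and negative when no
 * level applies, which is why st->level became a signed int starting at -1.
 * For two identical 4K PTEs followed by a different one, the calls behave
 * roughly as:
 *
 *	note_page(.., addr,          4, flags A)  -> start a new range
 *	note_page(.., addr + 0x1000, 4, flags A)  -> same flags, range extends
 *	note_page(.., addr + 0x2000, 4, flags B)  -> print the 8K range for A,
 *	                                             restart the state for B
 */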
@@ -316,7 +257,15 @@ static void populate_markers(void)
{
int i = 0;
+#ifdef CONFIG_PPC64
address_markers[i++].start_address = PAGE_OFFSET;
+#else
+ address_markers[i++].start_address = TASK_SIZE;
+#endif
+#ifdef MODULES_VADDR
+ address_markers[i++].start_address = MODULES_VADDR;
+ address_markers[i++].start_address = MODULES_END;
+#endif
address_markers[i++].start_address = VMALLOC_START;
address_markers[i++].start_address = VMALLOC_END;
#ifdef CONFIG_PPC64
@@ -341,11 +290,11 @@ static void populate_markers(void)
#endif
address_markers[i++].start_address = FIXADDR_START;
address_markers[i++].start_address = FIXADDR_TOP;
+#endif /* CONFIG_PPC64 */
#ifdef CONFIG_KASAN
address_markers[i++].start_address = KASAN_SHADOW_START;
address_markers[i++].start_address = KASAN_SHADOW_END;
#endif
-#endif /* CONFIG_PPC64 */
}
static int ptdump_show(struct seq_file *m, void *v)
@@ -353,34 +302,21 @@ static int ptdump_show(struct seq_file *m, void *v)
struct pg_state st = {
.seq = m,
.marker = address_markers,
- .start_address = PAGE_OFFSET,
+ .level = -1,
+ .ptdump = {
+ .note_page = note_page,
+ .range = ptdump_range,
+ }
};
-#ifdef CONFIG_PPC64
- if (!radix_enabled())
- st.start_address = KERN_VIRT_START;
-#endif
-
/* Traverse kernel page tables */
- walk_pagetables(&st);
- note_page(&st, 0, 0, 0);
+ ptdump_walk_pgd(&st.ptdump, &init_mm, NULL);
return 0;
}
+DEFINE_SHOW_ATTRIBUTE(ptdump);
-static int ptdump_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ptdump_show, NULL);
-}
-
-static const struct file_operations ptdump_fops = {
- .open = ptdump_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static void build_pgtable_complete_mask(void)
+static void __init build_pgtable_complete_mask(void)
{
unsigned int i, j;
@@ -390,22 +326,24 @@ static void build_pgtable_complete_mask(void)
pg_level[i].mask |= pg_level[i].flag[j].mask;
}
-#ifdef CONFIG_PPC_DEBUG_WX
+#ifdef CONFIG_DEBUG_WX
void ptdump_check_wx(void)
{
struct pg_state st = {
.seq = NULL,
- .marker = address_markers,
+ .marker = (struct addr_marker[]) {
+ { 0, NULL},
+ { -1, NULL},
+ },
+ .level = -1,
.check_wx = true,
- .start_address = PAGE_OFFSET,
+ .ptdump = {
+ .note_page = note_page,
+ .range = ptdump_range,
+ }
};
-#ifdef CONFIG_PPC64
- if (!radix_enabled())
- st.start_address = KERN_VIRT_START;
-#endif
-
- walk_pagetables(&st);
+ ptdump_walk_pgd(&st.ptdump, &init_mm, NULL);
if (st.wx_pages)
pr_warn("Checked W+X mappings: FAILED, %lu W+X pages found\n",
@@ -415,14 +353,23 @@ void ptdump_check_wx(void)
}
#endif
-static int ptdump_init(void)
+static int __init ptdump_init(void)
{
- struct dentry *debugfs_file;
+#ifdef CONFIG_PPC64
+ if (!radix_enabled())
+ ptdump_range[0].start = KERN_VIRT_START;
+ else
+ ptdump_range[0].start = PAGE_OFFSET;
+
+ ptdump_range[0].end = PAGE_OFFSET + (PGDIR_SIZE * PTRS_PER_PGD);
+#endif
populate_markers();
build_pgtable_complete_mask();
- debugfs_file = debugfs_create_file("kernel_page_tables", 0400, NULL,
- NULL, &ptdump_fops);
- return debugfs_file ? 0 : -ENOMEM;
+
+ if (IS_ENABLED(CONFIG_PTDUMP_DEBUGFS))
+ debugfs_create_file("kernel_page_tables", 0400, NULL, NULL, &ptdump_fops);
+
+ return 0;
}
device_initcall(ptdump_init);
diff --git a/arch/powerpc/mm/ptdump/ptdump.h b/arch/powerpc/mm/ptdump/ptdump.h
index 5d513636de73..154efae96ae0 100644
--- a/arch/powerpc/mm/ptdump/ptdump.h
+++ b/arch/powerpc/mm/ptdump/ptdump.h
@@ -1,5 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/types.h>
+#include <linux/seq_file.h>
struct flag_info {
u64 mask;
@@ -17,3 +18,5 @@ struct pgtable_level {
};
extern struct pgtable_level pg_level[5];
+
+void pt_dump_size(struct seq_file *m, unsigned long delta);
diff --git a/arch/powerpc/mm/ptdump/segment_regs.c b/arch/powerpc/mm/ptdump/segment_regs.c
index 501843664bb9..9df3af8d481f 100644
--- a/arch/powerpc/mm/ptdump/segment_regs.c
+++ b/arch/powerpc/mm/ptdump/segment_regs.c
@@ -6,11 +6,11 @@
* This dumps the content of Segment Registers
*/
-#include <asm/debugfs.h>
+#include <linux/debugfs.h>
static void seg_show(struct seq_file *m, int i)
{
- u32 val = mfsrin(i << 28);
+ u32 val = mfsr(i << 28);
seq_printf(m, "0x%01x0000000-0x%01xfffffff ", i, i);
seq_printf(m, "Kern key %d ", (val >> 30) & 1);
@@ -41,24 +41,12 @@ static int sr_show(struct seq_file *m, void *v)
return 0;
}
-static int sr_open(struct inode *inode, struct file *file)
-{
- return single_open(file, sr_show, NULL);
-}
-
-static const struct file_operations sr_fops = {
- .open = sr_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(sr);
static int __init sr_init(void)
{
- struct dentry *debugfs_file;
-
- debugfs_file = debugfs_create_file("segment_registers", 0400,
- powerpc_debugfs_root, NULL, &sr_fops);
- return debugfs_file ? 0 : -ENOMEM;
+ debugfs_create_file("segment_registers", 0400, arch_debugfs_dir,
+ NULL, &sr_fops);
+ return 0;
}
device_initcall(sr_init);
diff --git a/arch/powerpc/mm/ptdump/shared.c b/arch/powerpc/mm/ptdump/shared.c
index f7ed2f187cb0..f884760ca5cf 100644
--- a/arch/powerpc/mm/ptdump/shared.c
+++ b/arch/powerpc/mm/ptdump/shared.c
@@ -5,7 +5,7 @@
*
*/
#include <linux/kernel.h>
-#include <asm/pgtable.h>
+#include <linux/pgtable.h>
#include "ptdump.h"
@@ -17,9 +17,9 @@ static const struct flag_info flag_array[] = {
.clear = " ",
}, {
.mask = _PAGE_RW,
- .val = _PAGE_RW,
- .set = "rw",
- .clear = "r ",
+ .val = 0,
+ .set = "r ",
+ .clear = "rw",
}, {
.mask = _PAGE_EXEC,
.val = _PAGE_EXEC,
@@ -31,6 +31,11 @@ static const struct flag_info flag_array[] = {
.set = "present",
.clear = " ",
}, {
+ .mask = _PAGE_COHERENT,
+ .val = _PAGE_COHERENT,
+ .set = "coherent",
+ .clear = " ",
+ }, {
.mask = _PAGE_GUARDED,
.val = _PAGE_GUARDED,
.set = "guarded",
@@ -63,8 +68,10 @@ static const struct flag_info flag_array[] = {
};
struct pgtable_level pg_level[5] = {
- {
- }, { /* pgd */
+ { /* pgd */
+ .flag = flag_array,
+ .num = ARRAY_SIZE(flag_array),
+ }, { /* p4d */
.flag = flag_array,
.num = ARRAY_SIZE(flag_array),
}, { /* pud */
diff --git a/arch/powerpc/net/Makefile b/arch/powerpc/net/Makefile
index c2dec3a68d4c..8e60af32e51e 100644
--- a/arch/powerpc/net/Makefile
+++ b/arch/powerpc/net/Makefile
@@ -2,8 +2,4 @@
#
# Arch-specific network modules
#
-ifdef CONFIG_PPC64
-obj-$(CONFIG_BPF_JIT) += bpf_jit_comp64.o
-else
-obj-$(CONFIG_BPF_JIT) += bpf_jit_asm.o bpf_jit_comp.o
-endif
+obj-$(CONFIG_BPF_JIT) += bpf_jit_comp.o bpf_jit_comp$(BITS).o
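# Editorial note, not part of the patch: BITS is defined in
# arch/powerpc/Makefile (64 when CONFIG_PPC64 is set, 32 otherwise), so this
# single rule builds the common bpf_jit_comp.o plus either bpf_jit_comp32.o
# or bpf_jit_comp64.o.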
diff --git a/arch/powerpc/net/bpf_jit.h b/arch/powerpc/net/bpf_jit.h
index 55d4377ccfae..a4f7880f959d 100644
--- a/arch/powerpc/net/bpf_jit.h
+++ b/arch/powerpc/net/bpf_jit.h
@@ -11,225 +11,79 @@
#ifndef __ASSEMBLY__
#include <asm/types.h>
+#include <asm/ppc-opcode.h>
-#ifdef PPC64_ELF_ABI_v1
+#ifdef CONFIG_PPC64_ELF_ABI_V1
#define FUNCTION_DESCR_SIZE 24
#else
#define FUNCTION_DESCR_SIZE 0
#endif
-/*
- * 16-bit immediate helper macros: HA() is for use with sign-extending instrs
- * (e.g. LD, ADDI). If the bottom 16 bits is "-ve", add another bit into the
- * top half to negate the effect (i.e. 0xffff + 1 = 0x(1)0000).
- */
-#define IMM_H(i) ((uintptr_t)(i)>>16)
-#define IMM_HA(i) (((uintptr_t)(i)>>16) + \
- (((uintptr_t)(i) & 0x8000) >> 15))
-#define IMM_L(i) ((uintptr_t)(i) & 0xffff)
-
#define PLANT_INSTR(d, idx, instr) \
do { if (d) { (d)[idx] = instr; } idx++; } while (0)
#define EMIT(instr) PLANT_INSTR(image, ctx->idx, instr)
-#define PPC_NOP() EMIT(PPC_INST_NOP)
-#define PPC_BLR() EMIT(PPC_INST_BLR)
-#define PPC_BLRL() EMIT(PPC_INST_BLRL)
-#define PPC_MTLR(r) EMIT(PPC_INST_MTLR | ___PPC_RT(r))
-#define PPC_BCTR() EMIT(PPC_INST_BCTR)
-#define PPC_MTCTR(r) EMIT(PPC_INST_MTCTR | ___PPC_RT(r))
-#define PPC_ADDI(d, a, i) EMIT(PPC_INST_ADDI | ___PPC_RT(d) | \
- ___PPC_RA(a) | IMM_L(i))
-#define PPC_MR(d, a) PPC_OR(d, a, a)
-#define PPC_LI(r, i) PPC_ADDI(r, 0, i)
-#define PPC_ADDIS(d, a, i) EMIT(PPC_INST_ADDIS | \
- ___PPC_RT(d) | ___PPC_RA(a) | IMM_L(i))
-#define PPC_LIS(r, i) PPC_ADDIS(r, 0, i)
-#define PPC_STD(r, base, i) EMIT(PPC_INST_STD | ___PPC_RS(r) | \
- ___PPC_RA(base) | ((i) & 0xfffc))
-#define PPC_STDX(r, base, b) EMIT(PPC_INST_STDX | ___PPC_RS(r) | \
- ___PPC_RA(base) | ___PPC_RB(b))
-#define PPC_STDU(r, base, i) EMIT(PPC_INST_STDU | ___PPC_RS(r) | \
- ___PPC_RA(base) | ((i) & 0xfffc))
-#define PPC_STW(r, base, i) EMIT(PPC_INST_STW | ___PPC_RS(r) | \
- ___PPC_RA(base) | IMM_L(i))
-#define PPC_STWU(r, base, i) EMIT(PPC_INST_STWU | ___PPC_RS(r) | \
- ___PPC_RA(base) | IMM_L(i))
-#define PPC_STH(r, base, i) EMIT(PPC_INST_STH | ___PPC_RS(r) | \
- ___PPC_RA(base) | IMM_L(i))
-#define PPC_STB(r, base, i) EMIT(PPC_INST_STB | ___PPC_RS(r) | \
- ___PPC_RA(base) | IMM_L(i))
-
-#define PPC_LBZ(r, base, i) EMIT(PPC_INST_LBZ | ___PPC_RT(r) | \
- ___PPC_RA(base) | IMM_L(i))
-#define PPC_LD(r, base, i) EMIT(PPC_INST_LD | ___PPC_RT(r) | \
- ___PPC_RA(base) | ((i) & 0xfffc))
-#define PPC_LDX(r, base, b) EMIT(PPC_INST_LDX | ___PPC_RT(r) | \
- ___PPC_RA(base) | ___PPC_RB(b))
-#define PPC_LWZ(r, base, i) EMIT(PPC_INST_LWZ | ___PPC_RT(r) | \
- ___PPC_RA(base) | IMM_L(i))
-#define PPC_LHZ(r, base, i) EMIT(PPC_INST_LHZ | ___PPC_RT(r) | \
- ___PPC_RA(base) | IMM_L(i))
-#define PPC_LHBRX(r, base, b) EMIT(PPC_INST_LHBRX | ___PPC_RT(r) | \
- ___PPC_RA(base) | ___PPC_RB(b))
-#define PPC_LDBRX(r, base, b) EMIT(PPC_INST_LDBRX | ___PPC_RT(r) | \
- ___PPC_RA(base) | ___PPC_RB(b))
-
-#define PPC_BPF_LDARX(t, a, b, eh) EMIT(PPC_INST_LDARX | ___PPC_RT(t) | \
- ___PPC_RA(a) | ___PPC_RB(b) | \
- __PPC_EH(eh))
-#define PPC_BPF_LWARX(t, a, b, eh) EMIT(PPC_INST_LWARX | ___PPC_RT(t) | \
- ___PPC_RA(a) | ___PPC_RB(b) | \
- __PPC_EH(eh))
-#define PPC_BPF_STWCX(s, a, b) EMIT(PPC_INST_STWCX | ___PPC_RS(s) | \
- ___PPC_RA(a) | ___PPC_RB(b))
-#define PPC_BPF_STDCX(s, a, b) EMIT(PPC_INST_STDCX | ___PPC_RS(s) | \
- ___PPC_RA(a) | ___PPC_RB(b))
-#define PPC_CMPWI(a, i) EMIT(PPC_INST_CMPWI | ___PPC_RA(a) | IMM_L(i))
-#define PPC_CMPDI(a, i) EMIT(PPC_INST_CMPDI | ___PPC_RA(a) | IMM_L(i))
-#define PPC_CMPW(a, b) EMIT(PPC_INST_CMPW | ___PPC_RA(a) | \
- ___PPC_RB(b))
-#define PPC_CMPD(a, b) EMIT(PPC_INST_CMPD | ___PPC_RA(a) | \
- ___PPC_RB(b))
-#define PPC_CMPLWI(a, i) EMIT(PPC_INST_CMPLWI | ___PPC_RA(a) | IMM_L(i))
-#define PPC_CMPLDI(a, i) EMIT(PPC_INST_CMPLDI | ___PPC_RA(a) | IMM_L(i))
-#define PPC_CMPLW(a, b) EMIT(PPC_INST_CMPLW | ___PPC_RA(a) | \
- ___PPC_RB(b))
-#define PPC_CMPLD(a, b) EMIT(PPC_INST_CMPLD | ___PPC_RA(a) | \
- ___PPC_RB(b))
-
-#define PPC_SUB(d, a, b) EMIT(PPC_INST_SUB | ___PPC_RT(d) | \
- ___PPC_RB(a) | ___PPC_RA(b))
-#define PPC_ADD(d, a, b) EMIT(PPC_INST_ADD | ___PPC_RT(d) | \
- ___PPC_RA(a) | ___PPC_RB(b))
-#define PPC_MULD(d, a, b) EMIT(PPC_INST_MULLD | ___PPC_RT(d) | \
- ___PPC_RA(a) | ___PPC_RB(b))
-#define PPC_MULW(d, a, b) EMIT(PPC_INST_MULLW | ___PPC_RT(d) | \
- ___PPC_RA(a) | ___PPC_RB(b))
-#define PPC_MULHWU(d, a, b) EMIT(PPC_INST_MULHWU | ___PPC_RT(d) | \
- ___PPC_RA(a) | ___PPC_RB(b))
-#define PPC_MULI(d, a, i) EMIT(PPC_INST_MULLI | ___PPC_RT(d) | \
- ___PPC_RA(a) | IMM_L(i))
-#define PPC_DIVWU(d, a, b) EMIT(PPC_INST_DIVWU | ___PPC_RT(d) | \
- ___PPC_RA(a) | ___PPC_RB(b))
-#define PPC_DIVDU(d, a, b) EMIT(PPC_INST_DIVDU | ___PPC_RT(d) | \
- ___PPC_RA(a) | ___PPC_RB(b))
-#define PPC_AND(d, a, b) EMIT(PPC_INST_AND | ___PPC_RA(d) | \
- ___PPC_RS(a) | ___PPC_RB(b))
-#define PPC_ANDI(d, a, i) EMIT(PPC_INST_ANDI | ___PPC_RA(d) | \
- ___PPC_RS(a) | IMM_L(i))
-#define PPC_AND_DOT(d, a, b) EMIT(PPC_INST_ANDDOT | ___PPC_RA(d) | \
- ___PPC_RS(a) | ___PPC_RB(b))
-#define PPC_OR(d, a, b) EMIT(PPC_INST_OR | ___PPC_RA(d) | \
- ___PPC_RS(a) | ___PPC_RB(b))
-#define PPC_MR(d, a) PPC_OR(d, a, a)
-#define PPC_ORI(d, a, i) EMIT(PPC_INST_ORI | ___PPC_RA(d) | \
- ___PPC_RS(a) | IMM_L(i))
-#define PPC_ORIS(d, a, i) EMIT(PPC_INST_ORIS | ___PPC_RA(d) | \
- ___PPC_RS(a) | IMM_L(i))
-#define PPC_XOR(d, a, b) EMIT(PPC_INST_XOR | ___PPC_RA(d) | \
- ___PPC_RS(a) | ___PPC_RB(b))
-#define PPC_XORI(d, a, i) EMIT(PPC_INST_XORI | ___PPC_RA(d) | \
- ___PPC_RS(a) | IMM_L(i))
-#define PPC_XORIS(d, a, i) EMIT(PPC_INST_XORIS | ___PPC_RA(d) | \
- ___PPC_RS(a) | IMM_L(i))
-#define PPC_EXTSW(d, a) EMIT(PPC_INST_EXTSW | ___PPC_RA(d) | \
- ___PPC_RS(a))
-#define PPC_SLW(d, a, s) EMIT(PPC_INST_SLW | ___PPC_RA(d) | \
- ___PPC_RS(a) | ___PPC_RB(s))
-#define PPC_SLD(d, a, s) EMIT(PPC_INST_SLD | ___PPC_RA(d) | \
- ___PPC_RS(a) | ___PPC_RB(s))
-#define PPC_SRW(d, a, s) EMIT(PPC_INST_SRW | ___PPC_RA(d) | \
- ___PPC_RS(a) | ___PPC_RB(s))
-#define PPC_SRAW(d, a, s) EMIT(PPC_INST_SRAW | ___PPC_RA(d) | \
- ___PPC_RS(a) | ___PPC_RB(s))
-#define PPC_SRAWI(d, a, i) EMIT(PPC_INST_SRAWI | ___PPC_RA(d) | \
- ___PPC_RS(a) | __PPC_SH(i))
-#define PPC_SRD(d, a, s) EMIT(PPC_INST_SRD | ___PPC_RA(d) | \
- ___PPC_RS(a) | ___PPC_RB(s))
-#define PPC_SRAD(d, a, s) EMIT(PPC_INST_SRAD | ___PPC_RA(d) | \
- ___PPC_RS(a) | ___PPC_RB(s))
-#define PPC_SRADI(d, a, i) EMIT(PPC_INST_SRADI | ___PPC_RA(d) | \
- ___PPC_RS(a) | __PPC_SH64(i))
-#define PPC_RLWINM(d, a, i, mb, me) EMIT(PPC_INST_RLWINM | ___PPC_RA(d) | \
- ___PPC_RS(a) | __PPC_SH(i) | \
- __PPC_MB(mb) | __PPC_ME(me))
-#define PPC_RLWINM_DOT(d, a, i, mb, me) EMIT(PPC_INST_RLWINM_DOT | \
- ___PPC_RA(d) | ___PPC_RS(a) | \
- __PPC_SH(i) | __PPC_MB(mb) | \
- __PPC_ME(me))
-#define PPC_RLWIMI(d, a, i, mb, me) EMIT(PPC_INST_RLWIMI | ___PPC_RA(d) | \
- ___PPC_RS(a) | __PPC_SH(i) | \
- __PPC_MB(mb) | __PPC_ME(me))
-#define PPC_RLDICL(d, a, i, mb) EMIT(PPC_INST_RLDICL | ___PPC_RA(d) | \
- ___PPC_RS(a) | __PPC_SH64(i) | \
- __PPC_MB64(mb))
-#define PPC_RLDICR(d, a, i, me) EMIT(PPC_INST_RLDICR | ___PPC_RA(d) | \
- ___PPC_RS(a) | __PPC_SH64(i) | \
- __PPC_ME64(me))
-
-/* slwi = rlwinm Rx, Ry, n, 0, 31-n */
-#define PPC_SLWI(d, a, i) PPC_RLWINM(d, a, i, 0, 31-(i))
-/* srwi = rlwinm Rx, Ry, 32-n, n, 31 */
-#define PPC_SRWI(d, a, i) PPC_RLWINM(d, a, 32-(i), i, 31)
-/* sldi = rldicr Rx, Ry, n, 63-n */
-#define PPC_SLDI(d, a, i) PPC_RLDICR(d, a, i, 63-(i))
-/* sldi = rldicl Rx, Ry, 64-n, n */
-#define PPC_SRDI(d, a, i) PPC_RLDICL(d, a, 64-(i), i)
-
-#define PPC_NEG(d, a) EMIT(PPC_INST_NEG | ___PPC_RT(d) | ___PPC_RA(a))
-
/* Long jump; (unconditional 'branch') */
-#define PPC_JMP(dest) EMIT(PPC_INST_BRANCH | \
- (((dest) - (ctx->idx * 4)) & 0x03fffffc))
+#define PPC_JMP(dest) \
+ do { \
+ long offset = (long)(dest) - (ctx->idx * 4); \
+ if ((dest) != 0 && !is_offset_in_branch_range(offset)) { \
+ pr_err_ratelimited("Branch offset 0x%lx (@%u) out of range\n", offset, ctx->idx); \
+ return -ERANGE; \
+ } \
+ EMIT(PPC_RAW_BRANCH(offset)); \
+ } while (0)
+
+/* bl (unconditional 'branch' with link) */
+#define PPC_BL(dest) EMIT(PPC_RAW_BL((dest) - (unsigned long)(image + ctx->idx)))
+
/* "cond" here covers BO:BI fields. */
-#define PPC_BCC_SHORT(cond, dest) EMIT(PPC_INST_BRANCH_COND | \
- (((cond) & 0x3ff) << 16) | \
- (((dest) - (ctx->idx * 4)) & \
- 0xfffc))
+#define PPC_BCC_SHORT(cond, dest) \
+ do { \
+ long offset = (long)(dest) - (ctx->idx * 4); \
+ if ((dest) != 0 && !is_offset_in_cond_branch_range(offset)) { \
+ pr_err_ratelimited("Conditional branch offset 0x%lx (@%u) out of range\n", offset, ctx->idx); \
+ return -ERANGE; \
+ } \
+ EMIT(PPC_INST_BRANCH_COND | (((cond) & 0x3ff) << 16) | (offset & 0xfffc)); \
+ } while (0)
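/*
 * Editorial note, not part of the patch: is_offset_in_branch_range() and
 * is_offset_in_cond_branch_range() come from asm/code-patching.h and bound
 * the displacement to what the branch encodings can hold (roughly +/- 32 MB
 * for an unconditional "b", +/- 32 KB for a conditional "bc"), so an
 * unencodable jump now fails the JIT with -ERANGE instead of silently
 * emitting a truncated offset.
 */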
+
/* Sign-extended 32-bit immediate load */
#define PPC_LI32(d, i) do { \
if ((int)(uintptr_t)(i) >= -32768 && \
(int)(uintptr_t)(i) < 32768) \
- PPC_LI(d, i); \
+ EMIT(PPC_RAW_LI(d, i)); \
else { \
- PPC_LIS(d, IMM_H(i)); \
+ EMIT(PPC_RAW_LIS(d, IMM_H(i))); \
if (IMM_L(i)) \
- PPC_ORI(d, d, IMM_L(i)); \
+ EMIT(PPC_RAW_ORI(d, d, IMM_L(i))); \
} } while(0)
+#ifdef CONFIG_PPC64
#define PPC_LI64(d, i) do { \
if ((long)(i) >= -2147483648 && \
(long)(i) < 2147483648) \
PPC_LI32(d, i); \
else { \
if (!((uintptr_t)(i) & 0xffff800000000000ULL)) \
- PPC_LI(d, ((uintptr_t)(i) >> 32) & 0xffff); \
+ EMIT(PPC_RAW_LI(d, ((uintptr_t)(i) >> 32) & \
+ 0xffff)); \
else { \
- PPC_LIS(d, ((uintptr_t)(i) >> 48)); \
+ EMIT(PPC_RAW_LIS(d, ((uintptr_t)(i) >> 48))); \
if ((uintptr_t)(i) & 0x0000ffff00000000ULL) \
- PPC_ORI(d, d, \
- ((uintptr_t)(i) >> 32) & 0xffff); \
+ EMIT(PPC_RAW_ORI(d, d, \
+ ((uintptr_t)(i) >> 32) & 0xffff)); \
} \
- PPC_SLDI(d, d, 32); \
+ EMIT(PPC_RAW_SLDI(d, d, 32)); \
if ((uintptr_t)(i) & 0x00000000ffff0000ULL) \
- PPC_ORIS(d, d, \
- ((uintptr_t)(i) >> 16) & 0xffff); \
+ EMIT(PPC_RAW_ORIS(d, d, \
+ ((uintptr_t)(i) >> 16) & 0xffff)); \
if ((uintptr_t)(i) & 0x000000000000ffffULL) \
- PPC_ORI(d, d, (uintptr_t)(i) & 0xffff); \
+ EMIT(PPC_RAW_ORI(d, d, (uintptr_t)(i) & \
+ 0xffff)); \
} } while (0)
-
-#ifdef CONFIG_PPC64
-#define PPC_FUNC_ADDR(d,i) do { PPC_LI64(d, i); } while(0)
-#else
-#define PPC_FUNC_ADDR(d,i) do { PPC_LI32(d, i); } while(0)
#endif
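/*
 * Editorial example, not part of the patch: for a constant with all four
 * halfwords set, e.g. 0x1234567890abcdef, PPC_LI64 expands to the classic
 * five-instruction sequence:
 *
 *	lis   d, 0x1234
 *	ori   d, d, 0x5678
 *	sldi  d, d, 32
 *	oris  d, d, 0x90ab
 *	ori   d, d, 0xcdef
 *
 * Smaller constants take fewer instructions, since the zero-halfword checks
 * skip the corresponding ori/oris.
 */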
-static inline bool is_nearbranch(int offset)
-{
- return (offset < 32768) && (offset >= -32768);
-}
-
/*
* The fly in the ointment of code size changing from pass to pass is
* avoided by padding the short branch case with a NOP. If code size differs
@@ -238,9 +92,9 @@ static inline bool is_nearbranch(int offset)
* state.
*/
#define PPC_BCC(cond, dest) do { \
- if (is_nearbranch((dest) - (ctx->idx * 4))) { \
+ if (is_offset_in_cond_branch_range((long)(dest) - (ctx->idx * 4))) { \
PPC_BCC_SHORT(cond, dest); \
- PPC_NOP(); \
+ EMIT(PPC_RAW_NOP()); \
} else { \
/* Flip the 'T or F' bit to invert comparison */ \
PPC_BCC_SHORT(cond ^ COND_CMP_TRUE, (ctx->idx+2)*4); \
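/*
 * Editorial illustration, not part of the patch: both arms of PPC_BCC emit
 * exactly two instructions, which is what keeps the generated code size
 * stable from pass to pass:
 *
 *	near target:	bc  <cond>, dest	far target:	bc  <!cond>, .+8
 *			nop					b   dest
 */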
@@ -262,6 +116,68 @@ static inline bool is_nearbranch(int offset)
#define COND_LT (CR0_LT | COND_CMP_TRUE)
#define COND_LE (CR0_GT | COND_CMP_FALSE)
+#define SEEN_FUNC 0x20000000 /* might call external helpers */
+#define SEEN_TAILCALL 0x40000000 /* uses tail calls */
+
+struct codegen_context {
+ /*
+ * This is used to track register usage as well
+ * as calls to external helpers.
+ * - register usage is tracked with corresponding
+ * bits (r3-r31)
+ * - rest of the bits can be used to track other
+ * things -- for now, we use bits 0 to 2
+ * encoded in SEEN_* macros above
+ */
+ unsigned int seen;
+ unsigned int idx;
+ unsigned int stack_size;
+ int b2p[MAX_BPF_JIT_REG + 2];
+ unsigned int exentry_idx;
+ unsigned int alt_exit_addr;
+};
+
+#define bpf_to_ppc(r) (ctx->b2p[r])
+
+#ifdef CONFIG_PPC32
+#define BPF_FIXUP_LEN 3 /* Three instructions => 12 bytes */
+#else
+#define BPF_FIXUP_LEN 2 /* Two instructions => 8 bytes */
+#endif
+
+static inline void bpf_flush_icache(void *start, void *end)
+{
+ smp_wmb(); /* smp write barrier */
+ flush_icache_range((unsigned long)start, (unsigned long)end);
+}
+
+static inline bool bpf_is_seen_register(struct codegen_context *ctx, int i)
+{
+ return ctx->seen & (1 << (31 - i));
+}
+
+static inline void bpf_set_seen_register(struct codegen_context *ctx, int i)
+{
+ ctx->seen |= 1 << (31 - i);
+}
+
+static inline void bpf_clear_seen_register(struct codegen_context *ctx, int i)
+{
+ ctx->seen &= ~(1 << (31 - i));
+}
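/*
 * Editorial illustration, not part of the patch: the register-tracking bits
 * are indexed from the most-significant end, so bpf_set_seen_register(ctx, 3)
 * sets 1 << (31 - 3) = 0x10000000 and r31 maps to 0x00000001. That leaves the
 * top bits 0x80000000/0x40000000/0x20000000 free for flags such as
 * SEEN_TAILCALL and SEEN_FUNC above.
 */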
+
+void bpf_jit_init_reg_mapping(struct codegen_context *ctx);
+int bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx, u64 func);
+int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *ctx,
+ u32 *addrs, int pass);
+void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx);
+void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx);
+void bpf_jit_realloc_regs(struct codegen_context *ctx);
+int bpf_jit_emit_exit_insn(u32 *image, struct codegen_context *ctx, int tmp_reg, long exit_addr);
+
+int bpf_add_extable_entry(struct bpf_prog *fp, u32 *image, int pass, struct codegen_context *ctx,
+ int insn_idx, int jmp_off, int dst_reg);
+
#endif
#endif
diff --git a/arch/powerpc/net/bpf_jit32.h b/arch/powerpc/net/bpf_jit32.h
deleted file mode 100644
index 4ec2a9f14f84..000000000000
--- a/arch/powerpc/net/bpf_jit32.h
+++ /dev/null
@@ -1,139 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * bpf_jit32.h: BPF JIT compiler for PPC
- *
- * Copyright 2011 Matt Evans <matt@ozlabs.org>, IBM Corporation
- *
- * Split from bpf_jit.h
- */
-#ifndef _BPF_JIT32_H
-#define _BPF_JIT32_H
-
-#include <asm/asm-compat.h>
-#include "bpf_jit.h"
-
-#ifdef CONFIG_PPC64
-#define BPF_PPC_STACK_R3_OFF 48
-#define BPF_PPC_STACK_LOCALS 32
-#define BPF_PPC_STACK_BASIC (48+64)
-#define BPF_PPC_STACK_SAVE (18*8)
-#define BPF_PPC_STACKFRAME (BPF_PPC_STACK_BASIC+BPF_PPC_STACK_LOCALS+ \
- BPF_PPC_STACK_SAVE)
-#define BPF_PPC_SLOWPATH_FRAME (48+64)
-#else
-#define BPF_PPC_STACK_R3_OFF 24
-#define BPF_PPC_STACK_LOCALS 16
-#define BPF_PPC_STACK_BASIC (24+32)
-#define BPF_PPC_STACK_SAVE (18*4)
-#define BPF_PPC_STACKFRAME (BPF_PPC_STACK_BASIC+BPF_PPC_STACK_LOCALS+ \
- BPF_PPC_STACK_SAVE)
-#define BPF_PPC_SLOWPATH_FRAME (24+32)
-#endif
-
-#define REG_SZ (BITS_PER_LONG/8)
-
-/*
- * Generated code register usage:
- *
- * As normal PPC C ABI (e.g. r1=sp, r2=TOC), with:
- *
- * skb r3 (Entry parameter)
- * A register r4
- * X register r5
- * addr param r6
- * r7-r10 scratch
- * skb->data r14
- * skb headlen r15 (skb->len - skb->data_len)
- * m[0] r16
- * m[...] ...
- * m[15] r31
- */
-#define r_skb 3
-#define r_ret 3
-#define r_A 4
-#define r_X 5
-#define r_addr 6
-#define r_scratch1 7
-#define r_scratch2 8
-#define r_D 14
-#define r_HL 15
-#define r_M 16
-
-#ifndef __ASSEMBLY__
-
-/*
- * Assembly helpers from arch/powerpc/net/bpf_jit.S:
- */
-#define DECLARE_LOAD_FUNC(func) \
- extern u8 func[], func##_negative_offset[], func##_positive_offset[]
-
-DECLARE_LOAD_FUNC(sk_load_word);
-DECLARE_LOAD_FUNC(sk_load_half);
-DECLARE_LOAD_FUNC(sk_load_byte);
-DECLARE_LOAD_FUNC(sk_load_byte_msh);
-
-#define PPC_LBZ_OFFS(r, base, i) do { if ((i) < 32768) PPC_LBZ(r, base, i); \
- else { PPC_ADDIS(r, base, IMM_HA(i)); \
- PPC_LBZ(r, r, IMM_L(i)); } } while(0)
-
-#define PPC_LD_OFFS(r, base, i) do { if ((i) < 32768) PPC_LD(r, base, i); \
- else { PPC_ADDIS(r, base, IMM_HA(i)); \
- PPC_LD(r, r, IMM_L(i)); } } while(0)
-
-#define PPC_LWZ_OFFS(r, base, i) do { if ((i) < 32768) PPC_LWZ(r, base, i); \
- else { PPC_ADDIS(r, base, IMM_HA(i)); \
- PPC_LWZ(r, r, IMM_L(i)); } } while(0)
-
-#define PPC_LHZ_OFFS(r, base, i) do { if ((i) < 32768) PPC_LHZ(r, base, i); \
- else { PPC_ADDIS(r, base, IMM_HA(i)); \
- PPC_LHZ(r, r, IMM_L(i)); } } while(0)
-
-#ifdef CONFIG_PPC64
-#define PPC_LL_OFFS(r, base, i) do { PPC_LD_OFFS(r, base, i); } while(0)
-#else
-#define PPC_LL_OFFS(r, base, i) do { PPC_LWZ_OFFS(r, base, i); } while(0)
-#endif
-
-#ifdef CONFIG_SMP
-#ifdef CONFIG_PPC64
-#define PPC_BPF_LOAD_CPU(r) \
- do { BUILD_BUG_ON(sizeof_field(struct paca_struct, paca_index) != 2); \
- PPC_LHZ_OFFS(r, 13, offsetof(struct paca_struct, paca_index)); \
- } while (0)
-#else
-#define PPC_BPF_LOAD_CPU(r) \
- do { BUILD_BUG_ON(sizeof_field(struct task_struct, cpu) != 4); \
- PPC_LHZ_OFFS(r, 2, offsetof(struct task_struct, cpu)); \
- } while(0)
-#endif
-#else
-#define PPC_BPF_LOAD_CPU(r) do { PPC_LI(r, 0); } while(0)
-#endif
-
-#define PPC_LHBRX_OFFS(r, base, i) \
- do { PPC_LI32(r, i); PPC_LHBRX(r, r, base); } while(0)
-#ifdef __LITTLE_ENDIAN__
-#define PPC_NTOHS_OFFS(r, base, i) PPC_LHBRX_OFFS(r, base, i)
-#else
-#define PPC_NTOHS_OFFS(r, base, i) PPC_LHZ_OFFS(r, base, i)
-#endif
-
-#define PPC_BPF_LL(r, base, i) do { PPC_LWZ(r, base, i); } while(0)
-#define PPC_BPF_STL(r, base, i) do { PPC_STW(r, base, i); } while(0)
-#define PPC_BPF_STLU(r, base, i) do { PPC_STWU(r, base, i); } while(0)
-
-#define SEEN_DATAREF 0x10000 /* might call external helpers */
-#define SEEN_XREG 0x20000 /* X reg is used */
-#define SEEN_MEM 0x40000 /* SEEN_MEM+(1<<n) = use mem[n] for temporary
- * storage */
-#define SEEN_MEM_MSK 0x0ffff
-
-struct codegen_context {
- unsigned int seen;
- unsigned int idx;
- int pc_ret0; /* bpf index of first RET #0 instruction (if any) */
-};
-
-#endif
-
-#endif
diff --git a/arch/powerpc/net/bpf_jit64.h b/arch/powerpc/net/bpf_jit64.h
deleted file mode 100644
index cf3a7e337f02..000000000000
--- a/arch/powerpc/net/bpf_jit64.h
+++ /dev/null
@@ -1,108 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * bpf_jit64.h: BPF JIT compiler for PPC64
- *
- * Copyright 2016 Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
- * IBM Corporation
- */
-#ifndef _BPF_JIT64_H
-#define _BPF_JIT64_H
-
-#include "bpf_jit.h"
-
-/*
- * Stack layout:
- * Ensure the top half (upto local_tmp_var) stays consistent
- * with our redzone usage.
- *
- * [ prev sp ] <-------------
- * [ nv gpr save area ] 6*8 |
- * [ tail_call_cnt ] 8 |
- * [ local_tmp_var ] 8 |
- * fp (r31) --> [ ebpf stack space ] upto 512 |
- * [ frame header ] 32/112 |
- * sp (r1) ---> [ stack pointer ] --------------
- */
-
-/* for gpr non volatile registers BPG_REG_6 to 10 */
-#define BPF_PPC_STACK_SAVE (6*8)
-/* for bpf JIT code internal usage */
-#define BPF_PPC_STACK_LOCALS 16
-/* stack frame excluding BPF stack, ensure this is quadword aligned */
-#define BPF_PPC_STACKFRAME (STACK_FRAME_MIN_SIZE + \
- BPF_PPC_STACK_LOCALS + BPF_PPC_STACK_SAVE)
-
-#ifndef __ASSEMBLY__
-
-/* BPF register usage */
-#define TMP_REG_1 (MAX_BPF_JIT_REG + 0)
-#define TMP_REG_2 (MAX_BPF_JIT_REG + 1)
-
-/* BPF to ppc register mappings */
-static const int b2p[] = {
- /* function return value */
- [BPF_REG_0] = 8,
- /* function arguments */
- [BPF_REG_1] = 3,
- [BPF_REG_2] = 4,
- [BPF_REG_3] = 5,
- [BPF_REG_4] = 6,
- [BPF_REG_5] = 7,
- /* non volatile registers */
- [BPF_REG_6] = 27,
- [BPF_REG_7] = 28,
- [BPF_REG_8] = 29,
- [BPF_REG_9] = 30,
- /* frame pointer aka BPF_REG_10 */
- [BPF_REG_FP] = 31,
- /* eBPF jit internal registers */
- [BPF_REG_AX] = 2,
- [TMP_REG_1] = 9,
- [TMP_REG_2] = 10
-};
-
-/* PPC NVR range -- update this if we ever use NVRs below r27 */
-#define BPF_PPC_NVR_MIN 27
-
-/*
- * WARNING: These can use TMP_REG_2 if the offset is not at word boundary,
- * so ensure that it isn't in use already.
- */
-#define PPC_BPF_LL(r, base, i) do { \
- if ((i) % 4) { \
- PPC_LI(b2p[TMP_REG_2], (i)); \
- PPC_LDX(r, base, b2p[TMP_REG_2]); \
- } else \
- PPC_LD(r, base, i); \
- } while(0)
-#define PPC_BPF_STL(r, base, i) do { \
- if ((i) % 4) { \
- PPC_LI(b2p[TMP_REG_2], (i)); \
- PPC_STDX(r, base, b2p[TMP_REG_2]); \
- } else \
- PPC_STD(r, base, i); \
- } while(0)
-#define PPC_BPF_STLU(r, base, i) do { PPC_STDU(r, base, i); } while(0)
-
-#define SEEN_FUNC 0x1000 /* might call external helpers */
-#define SEEN_STACK 0x2000 /* uses BPF stack */
-#define SEEN_TAILCALL 0x4000 /* uses tail calls */
-
-struct codegen_context {
- /*
- * This is used to track register usage as well
- * as calls to external helpers.
- * - register usage is tracked with corresponding
- * bits (r3-r10 and r27-r31)
- * - rest of the bits can be used to track other
- * things -- for now, we use bits 16 to 23
- * encoded in SEEN_* macros above
- */
- unsigned int seen;
- unsigned int idx;
- unsigned int stack_size;
-};
-
-#endif /* !__ASSEMBLY__ */
-
-#endif
diff --git a/arch/powerpc/net/bpf_jit_asm.S b/arch/powerpc/net/bpf_jit_asm.S
deleted file mode 100644
index 2f5030d8383f..000000000000
--- a/arch/powerpc/net/bpf_jit_asm.S
+++ /dev/null
@@ -1,226 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/* bpf_jit.S: Packet/header access helper functions
- * for PPC64 BPF compiler.
- *
- * Copyright 2011 Matt Evans <matt@ozlabs.org>, IBM Corporation
- */
-
-#include <asm/ppc_asm.h>
-#include <asm/asm-compat.h>
-#include "bpf_jit32.h"
-
-/*
- * All of these routines are called directly from generated code,
- * whose register usage is:
- *
- * r3 skb
- * r4,r5 A,X
- * r6 *** address parameter to helper ***
- * r7-r10 scratch
- * r14 skb->data
- * r15 skb headlen
- * r16-31 M[]
- */
-
-/*
- * To consider: These helpers are so small it could be better to just
- * generate them inline. Inline code can do the simple headlen check
- * then branch directly to slow_path_XXX if required. (In fact, could
- * load a spare GPR with the address of slow_path_generic and pass size
- * as an argument, making the call site a mtlr, li and bllr.)
- */
- .globl sk_load_word
-sk_load_word:
- PPC_LCMPI r_addr, 0
- blt bpf_slow_path_word_neg
- .globl sk_load_word_positive_offset
-sk_load_word_positive_offset:
- /* Are we accessing past headlen? */
- subi r_scratch1, r_HL, 4
- PPC_LCMP r_scratch1, r_addr
- blt bpf_slow_path_word
- /* Nope, just hitting the header. cr0 here is eq or gt! */
-#ifdef __LITTLE_ENDIAN__
- lwbrx r_A, r_D, r_addr
-#else
- lwzx r_A, r_D, r_addr
-#endif
- blr /* Return success, cr0 != LT */
-
- .globl sk_load_half
-sk_load_half:
- PPC_LCMPI r_addr, 0
- blt bpf_slow_path_half_neg
- .globl sk_load_half_positive_offset
-sk_load_half_positive_offset:
- subi r_scratch1, r_HL, 2
- PPC_LCMP r_scratch1, r_addr
- blt bpf_slow_path_half
-#ifdef __LITTLE_ENDIAN__
- lhbrx r_A, r_D, r_addr
-#else
- lhzx r_A, r_D, r_addr
-#endif
- blr
-
- .globl sk_load_byte
-sk_load_byte:
- PPC_LCMPI r_addr, 0
- blt bpf_slow_path_byte_neg
- .globl sk_load_byte_positive_offset
-sk_load_byte_positive_offset:
- PPC_LCMP r_HL, r_addr
- ble bpf_slow_path_byte
- lbzx r_A, r_D, r_addr
- blr
-
-/*
- * BPF_LDX | BPF_B | BPF_MSH: ldxb 4*([offset]&0xf)
- * r_addr is the offset value
- */
- .globl sk_load_byte_msh
-sk_load_byte_msh:
- PPC_LCMPI r_addr, 0
- blt bpf_slow_path_byte_msh_neg
- .globl sk_load_byte_msh_positive_offset
-sk_load_byte_msh_positive_offset:
- PPC_LCMP r_HL, r_addr
- ble bpf_slow_path_byte_msh
- lbzx r_X, r_D, r_addr
- rlwinm r_X, r_X, 2, 32-4-2, 31-2
- blr
-
-/* Call out to skb_copy_bits:
- * We'll need to back up our volatile regs first; we have
- * local variable space at r1+(BPF_PPC_STACK_BASIC).
- * Allocate a new stack frame here to remain ABI-compliant in
- * stashing LR.
- */
-#define bpf_slow_path_common(SIZE) \
- mflr r0; \
- PPC_STL r0, PPC_LR_STKOFF(r1); \
- /* R3 goes in parameter space of caller's frame */ \
- PPC_STL r_skb, (BPF_PPC_STACKFRAME+BPF_PPC_STACK_R3_OFF)(r1); \
- PPC_STL r_A, (BPF_PPC_STACK_BASIC+(0*REG_SZ))(r1); \
- PPC_STL r_X, (BPF_PPC_STACK_BASIC+(1*REG_SZ))(r1); \
- addi r5, r1, BPF_PPC_STACK_BASIC+(2*REG_SZ); \
- PPC_STLU r1, -BPF_PPC_SLOWPATH_FRAME(r1); \
- /* R3 = r_skb, as passed */ \
- mr r4, r_addr; \
- li r6, SIZE; \
- bl skb_copy_bits; \
- nop; \
- /* R3 = 0 on success */ \
- addi r1, r1, BPF_PPC_SLOWPATH_FRAME; \
- PPC_LL r0, PPC_LR_STKOFF(r1); \
- PPC_LL r_A, (BPF_PPC_STACK_BASIC+(0*REG_SZ))(r1); \
- PPC_LL r_X, (BPF_PPC_STACK_BASIC+(1*REG_SZ))(r1); \
- mtlr r0; \
- PPC_LCMPI r3, 0; \
- blt bpf_error; /* cr0 = LT */ \
- PPC_LL r_skb, (BPF_PPC_STACKFRAME+BPF_PPC_STACK_R3_OFF)(r1); \
- /* Great success! */
-
-bpf_slow_path_word:
- bpf_slow_path_common(4)
- /* Data value is on stack, and cr0 != LT */
- lwz r_A, BPF_PPC_STACK_BASIC+(2*REG_SZ)(r1)
- blr
-
-bpf_slow_path_half:
- bpf_slow_path_common(2)
- lhz r_A, BPF_PPC_STACK_BASIC+(2*8)(r1)
- blr
-
-bpf_slow_path_byte:
- bpf_slow_path_common(1)
- lbz r_A, BPF_PPC_STACK_BASIC+(2*8)(r1)
- blr
-
-bpf_slow_path_byte_msh:
- bpf_slow_path_common(1)
- lbz r_X, BPF_PPC_STACK_BASIC+(2*8)(r1)
- rlwinm r_X, r_X, 2, 32-4-2, 31-2
- blr
-
-/* Call out to bpf_internal_load_pointer_neg_helper:
- * We'll need to back up our volatile regs first; we have
- * local variable space at r1+(BPF_PPC_STACK_BASIC).
- * Allocate a new stack frame here to remain ABI-compliant in
- * stashing LR.
- */
-#define sk_negative_common(SIZE) \
- mflr r0; \
- PPC_STL r0, PPC_LR_STKOFF(r1); \
- /* R3 goes in parameter space of caller's frame */ \
- PPC_STL r_skb, (BPF_PPC_STACKFRAME+BPF_PPC_STACK_R3_OFF)(r1); \
- PPC_STL r_A, (BPF_PPC_STACK_BASIC+(0*REG_SZ))(r1); \
- PPC_STL r_X, (BPF_PPC_STACK_BASIC+(1*REG_SZ))(r1); \
- PPC_STLU r1, -BPF_PPC_SLOWPATH_FRAME(r1); \
- /* R3 = r_skb, as passed */ \
- mr r4, r_addr; \
- li r5, SIZE; \
- bl bpf_internal_load_pointer_neg_helper; \
- nop; \
- /* R3 != 0 on success */ \
- addi r1, r1, BPF_PPC_SLOWPATH_FRAME; \
- PPC_LL r0, PPC_LR_STKOFF(r1); \
- PPC_LL r_A, (BPF_PPC_STACK_BASIC+(0*REG_SZ))(r1); \
- PPC_LL r_X, (BPF_PPC_STACK_BASIC+(1*REG_SZ))(r1); \
- mtlr r0; \
- PPC_LCMPLI r3, 0; \
- beq bpf_error_slow; /* cr0 = EQ */ \
- mr r_addr, r3; \
- PPC_LL r_skb, (BPF_PPC_STACKFRAME+BPF_PPC_STACK_R3_OFF)(r1); \
- /* Great success! */
-
-bpf_slow_path_word_neg:
- lis r_scratch1,-32 /* SKF_LL_OFF */
- PPC_LCMP r_addr, r_scratch1 /* addr < SKF_* */
- blt bpf_error /* cr0 = LT */
- .globl sk_load_word_negative_offset
-sk_load_word_negative_offset:
- sk_negative_common(4)
- lwz r_A, 0(r_addr)
- blr
-
-bpf_slow_path_half_neg:
- lis r_scratch1,-32 /* SKF_LL_OFF */
- PPC_LCMP r_addr, r_scratch1 /* addr < SKF_* */
- blt bpf_error /* cr0 = LT */
- .globl sk_load_half_negative_offset
-sk_load_half_negative_offset:
- sk_negative_common(2)
- lhz r_A, 0(r_addr)
- blr
-
-bpf_slow_path_byte_neg:
- lis r_scratch1,-32 /* SKF_LL_OFF */
- PPC_LCMP r_addr, r_scratch1 /* addr < SKF_* */
- blt bpf_error /* cr0 = LT */
- .globl sk_load_byte_negative_offset
-sk_load_byte_negative_offset:
- sk_negative_common(1)
- lbz r_A, 0(r_addr)
- blr
-
-bpf_slow_path_byte_msh_neg:
- lis r_scratch1,-32 /* SKF_LL_OFF */
- PPC_LCMP r_addr, r_scratch1 /* addr < SKF_* */
- blt bpf_error /* cr0 = LT */
- .globl sk_load_byte_msh_negative_offset
-sk_load_byte_msh_negative_offset:
- sk_negative_common(1)
- lbz r_X, 0(r_addr)
- rlwinm r_X, r_X, 2, 32-4-2, 31-2
- blr
-
-bpf_error_slow:
- /* fabricate a cr0 = lt */
- li r_scratch1, -1
- PPC_LCMPI r_scratch1, 0
-bpf_error:
- /* Entered with cr0 = lt */
- li r3, 0
- /* Generated code will 'blt epilogue', returning 0. */
- blr
diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
index 0acc9d5fb19e..43e634126514 100644
--- a/arch/powerpc/net/bpf_jit_comp.c
+++ b/arch/powerpc/net/bpf_jit_comp.c
@@ -1,10 +1,11 @@
// SPDX-License-Identifier: GPL-2.0-only
-/* bpf_jit_comp.c: BPF JIT compiler
+/*
+ * eBPF JIT compiler
*
- * Copyright 2011 Matt Evans <matt@ozlabs.org>, IBM Corporation
+ * Copyright 2016 Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
+ * IBM Corporation
*
- * Based on the x86 BPF compiler, by Eric Dumazet (eric.dumazet@gmail.com)
- * Ported to ppc32 by Denis Kirjanov <kda@linux-powerpc.org>
+ * Based on the powerpc classic BPF JIT compiler by Matt Evans
*/
#include <linux/moduleloader.h>
#include <asm/cacheflush.h>
@@ -12,640 +13,254 @@
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>
+#include <asm/kprobes.h>
+#include <linux/bpf.h>
-#include "bpf_jit32.h"
+#include "bpf_jit.h"
-static inline void bpf_flush_icache(void *start, void *end)
+static void bpf_jit_fill_ill_insns(void *area, unsigned int size)
{
- smp_wmb();
- flush_icache_range((unsigned long)start, (unsigned long)end);
+ memset32(area, BREAKPOINT_INSTRUCTION, size / 4);
}
-static void bpf_jit_build_prologue(struct bpf_prog *fp, u32 *image,
- struct codegen_context *ctx)
+/* Fix updated addresses (for subprog calls, ldimm64, et al) during extra pass */
+static int bpf_jit_fixup_addresses(struct bpf_prog *fp, u32 *image,
+ struct codegen_context *ctx, u32 *addrs)
{
- int i;
- const struct sock_filter *filter = fp->insns;
-
- if (ctx->seen & (SEEN_MEM | SEEN_DATAREF)) {
- /* Make stackframe */
- if (ctx->seen & SEEN_DATAREF) {
- /* If we call any helpers (for loads), save LR */
- EMIT(PPC_INST_MFLR | __PPC_RT(R0));
- PPC_BPF_STL(0, 1, PPC_LR_STKOFF);
-
- /* Back up non-volatile regs. */
- PPC_BPF_STL(r_D, 1, -(REG_SZ*(32-r_D)));
- PPC_BPF_STL(r_HL, 1, -(REG_SZ*(32-r_HL)));
- }
- if (ctx->seen & SEEN_MEM) {
- /*
- * Conditionally save regs r15-r31 as some will be used
- * for M[] data.
- */
- for (i = r_M; i < (r_M+16); i++) {
- if (ctx->seen & (1 << (i-r_M)))
- PPC_BPF_STL(i, 1, -(REG_SZ*(32-i)));
- }
- }
- PPC_BPF_STLU(1, 1, -BPF_PPC_STACKFRAME);
- }
+ const struct bpf_insn *insn = fp->insnsi;
+ bool func_addr_fixed;
+ u64 func_addr;
+ u32 tmp_idx;
+ int i, j, ret;
- if (ctx->seen & SEEN_DATAREF) {
+ for (i = 0; i < fp->len; i++) {
/*
- * If this filter needs to access skb data,
- * prepare r_D and r_HL:
- * r_HL = skb->len - skb->data_len
- * r_D = skb->data
+ * During the extra pass, only the branch target addresses for
+ * the subprog calls need to be fixed. All other instructions
+		 * can be left untouched.
+ *
+ * The JITed image length does not change because we already
+		 * ensure that the JITed instruction sequences for these calls
+		 * are of fixed length by padding them with NOPs.
*/
- PPC_LWZ_OFFS(r_scratch1, r_skb, offsetof(struct sk_buff,
- data_len));
- PPC_LWZ_OFFS(r_HL, r_skb, offsetof(struct sk_buff, len));
- PPC_SUB(r_HL, r_HL, r_scratch1);
- PPC_LL_OFFS(r_D, r_skb, offsetof(struct sk_buff, data));
- }
+ if (insn[i].code == (BPF_JMP | BPF_CALL) &&
+ insn[i].src_reg == BPF_PSEUDO_CALL) {
+ ret = bpf_jit_get_func_addr(fp, &insn[i], true,
+ &func_addr,
+ &func_addr_fixed);
+ if (ret < 0)
+ return ret;
- if (ctx->seen & SEEN_XREG) {
- /*
- * TODO: Could also detect whether first instr. sets X and
- * avoid this (as below, with A).
- */
- PPC_LI(r_X, 0);
+ /*
+ * Save ctx->idx as this would currently point to the
+ * end of the JITed image and set it to the offset of
+ * the instruction sequence corresponding to the
+ * subprog call temporarily.
+ */
+ tmp_idx = ctx->idx;
+ ctx->idx = addrs[i] / 4;
+ ret = bpf_jit_emit_func_call_rel(image, ctx, func_addr);
+ if (ret)
+ return ret;
+
+ /*
+ * Restore ctx->idx here. This is safe as the length
+ * of the JITed sequence remains unchanged.
+ */
+ ctx->idx = tmp_idx;
+ } else if (insn[i].code == (BPF_LD | BPF_IMM | BPF_DW)) {
+ tmp_idx = ctx->idx;
+ ctx->idx = addrs[i] / 4;
+#ifdef CONFIG_PPC32
+ PPC_LI32(bpf_to_ppc(insn[i].dst_reg) - 1, (u32)insn[i + 1].imm);
+ PPC_LI32(bpf_to_ppc(insn[i].dst_reg), (u32)insn[i].imm);
+ for (j = ctx->idx - addrs[i] / 4; j < 4; j++)
+ EMIT(PPC_RAW_NOP());
+#else
+ func_addr = ((u64)(u32)insn[i].imm) | (((u64)(u32)insn[i + 1].imm) << 32);
+ PPC_LI64(bpf_to_ppc(insn[i].dst_reg), func_addr);
+ /* overwrite rest with nops */
+ for (j = ctx->idx - addrs[i] / 4; j < 5; j++)
+ EMIT(PPC_RAW_NOP());
+#endif
+ ctx->idx = tmp_idx;
+ i++;
+ }
}
- /* make sure we dont leak kernel information to user */
- if (bpf_needs_clear_a(&filter[0]))
- PPC_LI(r_A, 0);
+ return 0;
}
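/*
 * Editorial note, not part of the patch: because the initial JIT pass pads
 * every ldimm64 and subprog-call sequence with NOPs to a fixed length (five
 * instructions on ppc64, four on ppc32, as the loops above show), the extra
 * pass can re-emit those sequences in place at addrs[i] / 4 without moving
 * any other instruction.
 */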
-static void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
+int bpf_jit_emit_exit_insn(u32 *image, struct codegen_context *ctx, int tmp_reg, long exit_addr)
{
- int i;
-
- if (ctx->seen & (SEEN_MEM | SEEN_DATAREF)) {
- PPC_ADDI(1, 1, BPF_PPC_STACKFRAME);
- if (ctx->seen & SEEN_DATAREF) {
- PPC_BPF_LL(0, 1, PPC_LR_STKOFF);
- PPC_MTLR(0);
- PPC_BPF_LL(r_D, 1, -(REG_SZ*(32-r_D)));
- PPC_BPF_LL(r_HL, 1, -(REG_SZ*(32-r_HL)));
- }
- if (ctx->seen & SEEN_MEM) {
- /* Restore any saved non-vol registers */
- for (i = r_M; i < (r_M+16); i++) {
- if (ctx->seen & (1 << (i-r_M)))
- PPC_BPF_LL(i, 1, -(REG_SZ*(32-i)));
- }
- }
+ if (!exit_addr || is_offset_in_branch_range(exit_addr - (ctx->idx * 4))) {
+ PPC_JMP(exit_addr);
+ } else if (ctx->alt_exit_addr) {
+ if (WARN_ON(!is_offset_in_branch_range((long)ctx->alt_exit_addr - (ctx->idx * 4))))
+ return -1;
+ PPC_JMP(ctx->alt_exit_addr);
+ } else {
+ ctx->alt_exit_addr = ctx->idx * 4;
+ bpf_jit_build_epilogue(image, ctx);
}
- /* The RETs have left a return value in R3. */
- PPC_BLR();
+ return 0;
}
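/*
 * Editorial note, not part of the patch: when the real epilogue is out of
 * direct-branch range, the first such exit emits a spare copy of the
 * epilogue at the current position and records its offset in alt_exit_addr;
 * subsequent far exits then branch to that nearby copy instead.
 */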
-#define CHOOSE_LOAD_FUNC(K, func) \
- ((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
+struct powerpc64_jit_data {
+ struct bpf_binary_header *header;
+ u32 *addrs;
+ u8 *image;
+ u32 proglen;
+ struct codegen_context ctx;
+};
-/* Assemble the body code between the prologue & epilogue. */
-static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
- struct codegen_context *ctx,
- unsigned int *addrs)
+bool bpf_jit_needs_zext(void)
{
- const struct sock_filter *filter = fp->insns;
- int flen = fp->len;
- u8 *func;
- unsigned int true_cond;
- int i;
-
- /* Start of epilogue code */
- unsigned int exit_addr = addrs[flen];
-
- for (i = 0; i < flen; i++) {
- unsigned int K = filter[i].k;
- u16 code = bpf_anc_helper(&filter[i]);
-
- /*
- * addrs[] maps a BPF bytecode address into a real offset from
- * the start of the body code.
- */
- addrs[i] = ctx->idx * 4;
-
- switch (code) {
- /*** ALU ops ***/
- case BPF_ALU | BPF_ADD | BPF_X: /* A += X; */
- ctx->seen |= SEEN_XREG;
- PPC_ADD(r_A, r_A, r_X);
- break;
- case BPF_ALU | BPF_ADD | BPF_K: /* A += K; */
- if (!K)
- break;
- PPC_ADDI(r_A, r_A, IMM_L(K));
- if (K >= 32768)
- PPC_ADDIS(r_A, r_A, IMM_HA(K));
- break;
- case BPF_ALU | BPF_SUB | BPF_X: /* A -= X; */
- ctx->seen |= SEEN_XREG;
- PPC_SUB(r_A, r_A, r_X);
- break;
- case BPF_ALU | BPF_SUB | BPF_K: /* A -= K */
- if (!K)
- break;
- PPC_ADDI(r_A, r_A, IMM_L(-K));
- if (K >= 32768)
- PPC_ADDIS(r_A, r_A, IMM_HA(-K));
- break;
- case BPF_ALU | BPF_MUL | BPF_X: /* A *= X; */
- ctx->seen |= SEEN_XREG;
- PPC_MULW(r_A, r_A, r_X);
- break;
- case BPF_ALU | BPF_MUL | BPF_K: /* A *= K */
- if (K < 32768)
- PPC_MULI(r_A, r_A, K);
- else {
- PPC_LI32(r_scratch1, K);
- PPC_MULW(r_A, r_A, r_scratch1);
- }
- break;
- case BPF_ALU | BPF_MOD | BPF_X: /* A %= X; */
- case BPF_ALU | BPF_DIV | BPF_X: /* A /= X; */
- ctx->seen |= SEEN_XREG;
- PPC_CMPWI(r_X, 0);
- if (ctx->pc_ret0 != -1) {
- PPC_BCC(COND_EQ, addrs[ctx->pc_ret0]);
- } else {
- PPC_BCC_SHORT(COND_NE, (ctx->idx*4)+12);
- PPC_LI(r_ret, 0);
- PPC_JMP(exit_addr);
- }
- if (code == (BPF_ALU | BPF_MOD | BPF_X)) {
- PPC_DIVWU(r_scratch1, r_A, r_X);
- PPC_MULW(r_scratch1, r_X, r_scratch1);
- PPC_SUB(r_A, r_A, r_scratch1);
- } else {
- PPC_DIVWU(r_A, r_A, r_X);
- }
- break;
- case BPF_ALU | BPF_MOD | BPF_K: /* A %= K; */
- PPC_LI32(r_scratch2, K);
- PPC_DIVWU(r_scratch1, r_A, r_scratch2);
- PPC_MULW(r_scratch1, r_scratch2, r_scratch1);
- PPC_SUB(r_A, r_A, r_scratch1);
- break;
- case BPF_ALU | BPF_DIV | BPF_K: /* A /= K */
- if (K == 1)
- break;
- PPC_LI32(r_scratch1, K);
- PPC_DIVWU(r_A, r_A, r_scratch1);
- break;
- case BPF_ALU | BPF_AND | BPF_X:
- ctx->seen |= SEEN_XREG;
- PPC_AND(r_A, r_A, r_X);
- break;
- case BPF_ALU | BPF_AND | BPF_K:
- if (!IMM_H(K))
- PPC_ANDI(r_A, r_A, K);
- else {
- PPC_LI32(r_scratch1, K);
- PPC_AND(r_A, r_A, r_scratch1);
- }
- break;
- case BPF_ALU | BPF_OR | BPF_X:
- ctx->seen |= SEEN_XREG;
- PPC_OR(r_A, r_A, r_X);
- break;
- case BPF_ALU | BPF_OR | BPF_K:
- if (IMM_L(K))
- PPC_ORI(r_A, r_A, IMM_L(K));
- if (K >= 65536)
- PPC_ORIS(r_A, r_A, IMM_H(K));
- break;
- case BPF_ANC | SKF_AD_ALU_XOR_X:
- case BPF_ALU | BPF_XOR | BPF_X: /* A ^= X */
- ctx->seen |= SEEN_XREG;
- PPC_XOR(r_A, r_A, r_X);
- break;
- case BPF_ALU | BPF_XOR | BPF_K: /* A ^= K */
- if (IMM_L(K))
- PPC_XORI(r_A, r_A, IMM_L(K));
- if (K >= 65536)
- PPC_XORIS(r_A, r_A, IMM_H(K));
- break;
- case BPF_ALU | BPF_LSH | BPF_X: /* A <<= X; */
- ctx->seen |= SEEN_XREG;
- PPC_SLW(r_A, r_A, r_X);
- break;
- case BPF_ALU | BPF_LSH | BPF_K:
- if (K == 0)
- break;
- else
- PPC_SLWI(r_A, r_A, K);
- break;
- case BPF_ALU | BPF_RSH | BPF_X: /* A >>= X; */
- ctx->seen |= SEEN_XREG;
- PPC_SRW(r_A, r_A, r_X);
- break;
- case BPF_ALU | BPF_RSH | BPF_K: /* A >>= K; */
- if (K == 0)
- break;
- else
- PPC_SRWI(r_A, r_A, K);
- break;
- case BPF_ALU | BPF_NEG:
- PPC_NEG(r_A, r_A);
- break;
- case BPF_RET | BPF_K:
- PPC_LI32(r_ret, K);
- if (!K) {
- if (ctx->pc_ret0 == -1)
- ctx->pc_ret0 = i;
- }
- /*
- * If this isn't the very last instruction, branch to
- * the epilogue if we've stuff to clean up. Otherwise,
- * if there's nothing to tidy, just return. If we /are/
- * the last instruction, we're about to fall through to
- * the epilogue to return.
- */
- if (i != flen - 1) {
- /*
- * Note: 'seen' is properly valid only on pass
- * #2. Both parts of this conditional are the
- * same instruction size though, meaning the
- * first pass will still correctly determine the
- * code size/addresses.
- */
- if (ctx->seen)
- PPC_JMP(exit_addr);
- else
- PPC_BLR();
- }
- break;
- case BPF_RET | BPF_A:
- PPC_MR(r_ret, r_A);
- if (i != flen - 1) {
- if (ctx->seen)
- PPC_JMP(exit_addr);
- else
- PPC_BLR();
- }
- break;
- case BPF_MISC | BPF_TAX: /* X = A */
- PPC_MR(r_X, r_A);
- break;
- case BPF_MISC | BPF_TXA: /* A = X */
- ctx->seen |= SEEN_XREG;
- PPC_MR(r_A, r_X);
- break;
-
- /*** Constant loads/M[] access ***/
- case BPF_LD | BPF_IMM: /* A = K */
- PPC_LI32(r_A, K);
- break;
- case BPF_LDX | BPF_IMM: /* X = K */
- PPC_LI32(r_X, K);
- break;
- case BPF_LD | BPF_MEM: /* A = mem[K] */
- PPC_MR(r_A, r_M + (K & 0xf));
- ctx->seen |= SEEN_MEM | (1<<(K & 0xf));
- break;
- case BPF_LDX | BPF_MEM: /* X = mem[K] */
- PPC_MR(r_X, r_M + (K & 0xf));
- ctx->seen |= SEEN_MEM | (1<<(K & 0xf));
- break;
- case BPF_ST: /* mem[K] = A */
- PPC_MR(r_M + (K & 0xf), r_A);
- ctx->seen |= SEEN_MEM | (1<<(K & 0xf));
- break;
- case BPF_STX: /* mem[K] = X */
- PPC_MR(r_M + (K & 0xf), r_X);
- ctx->seen |= SEEN_XREG | SEEN_MEM | (1<<(K & 0xf));
- break;
- case BPF_LD | BPF_W | BPF_LEN: /* A = skb->len; */
- BUILD_BUG_ON(sizeof_field(struct sk_buff, len) != 4);
- PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff, len));
- break;
- case BPF_LDX | BPF_W | BPF_ABS: /* A = *((u32 *)(seccomp_data + K)); */
- PPC_LWZ_OFFS(r_A, r_skb, K);
- break;
- case BPF_LDX | BPF_W | BPF_LEN: /* X = skb->len; */
- PPC_LWZ_OFFS(r_X, r_skb, offsetof(struct sk_buff, len));
- break;
-
- /*** Ancillary info loads ***/
- case BPF_ANC | SKF_AD_PROTOCOL: /* A = ntohs(skb->protocol); */
- BUILD_BUG_ON(sizeof_field(struct sk_buff,
- protocol) != 2);
- PPC_NTOHS_OFFS(r_A, r_skb, offsetof(struct sk_buff,
- protocol));
- break;
- case BPF_ANC | SKF_AD_IFINDEX:
- case BPF_ANC | SKF_AD_HATYPE:
- BUILD_BUG_ON(sizeof_field(struct net_device,
- ifindex) != 4);
- BUILD_BUG_ON(sizeof_field(struct net_device,
- type) != 2);
- PPC_LL_OFFS(r_scratch1, r_skb, offsetof(struct sk_buff,
- dev));
- PPC_CMPDI(r_scratch1, 0);
- if (ctx->pc_ret0 != -1) {
- PPC_BCC(COND_EQ, addrs[ctx->pc_ret0]);
- } else {
- /* Exit, returning 0; first pass hits here. */
- PPC_BCC_SHORT(COND_NE, ctx->idx * 4 + 12);
- PPC_LI(r_ret, 0);
- PPC_JMP(exit_addr);
- }
- if (code == (BPF_ANC | SKF_AD_IFINDEX)) {
- PPC_LWZ_OFFS(r_A, r_scratch1,
- offsetof(struct net_device, ifindex));
- } else {
- PPC_LHZ_OFFS(r_A, r_scratch1,
- offsetof(struct net_device, type));
- }
-
- break;
- case BPF_ANC | SKF_AD_MARK:
- BUILD_BUG_ON(sizeof_field(struct sk_buff, mark) != 4);
- PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
- mark));
- break;
- case BPF_ANC | SKF_AD_RXHASH:
- BUILD_BUG_ON(sizeof_field(struct sk_buff, hash) != 4);
- PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
- hash));
- break;
- case BPF_ANC | SKF_AD_VLAN_TAG:
- BUILD_BUG_ON(sizeof_field(struct sk_buff, vlan_tci) != 2);
-
- PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
- vlan_tci));
- break;
- case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT:
- PPC_LBZ_OFFS(r_A, r_skb, PKT_VLAN_PRESENT_OFFSET());
- if (PKT_VLAN_PRESENT_BIT)
- PPC_SRWI(r_A, r_A, PKT_VLAN_PRESENT_BIT);
- if (PKT_VLAN_PRESENT_BIT < 7)
- PPC_ANDI(r_A, r_A, 1);
- break;
- case BPF_ANC | SKF_AD_QUEUE:
- BUILD_BUG_ON(sizeof_field(struct sk_buff,
- queue_mapping) != 2);
- PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
- queue_mapping));
- break;
- case BPF_ANC | SKF_AD_PKTTYPE:
- PPC_LBZ_OFFS(r_A, r_skb, PKT_TYPE_OFFSET());
- PPC_ANDI(r_A, r_A, PKT_TYPE_MAX);
- PPC_SRWI(r_A, r_A, 5);
- break;
- case BPF_ANC | SKF_AD_CPU:
- PPC_BPF_LOAD_CPU(r_A);
- break;
- /*** Absolute loads from packet header/data ***/
- case BPF_LD | BPF_W | BPF_ABS:
- func = CHOOSE_LOAD_FUNC(K, sk_load_word);
- goto common_load;
- case BPF_LD | BPF_H | BPF_ABS:
- func = CHOOSE_LOAD_FUNC(K, sk_load_half);
- goto common_load;
- case BPF_LD | BPF_B | BPF_ABS:
- func = CHOOSE_LOAD_FUNC(K, sk_load_byte);
- common_load:
- /* Load from [K]. */
- ctx->seen |= SEEN_DATAREF;
- PPC_FUNC_ADDR(r_scratch1, func);
- PPC_MTLR(r_scratch1);
- PPC_LI32(r_addr, K);
- PPC_BLRL();
- /*
- * Helper returns 'lt' condition on error, and an
- * appropriate return value in r3
- */
- PPC_BCC(COND_LT, exit_addr);
- break;
-
- /*** Indirect loads from packet header/data ***/
- case BPF_LD | BPF_W | BPF_IND:
- func = sk_load_word;
- goto common_load_ind;
- case BPF_LD | BPF_H | BPF_IND:
- func = sk_load_half;
- goto common_load_ind;
- case BPF_LD | BPF_B | BPF_IND:
- func = sk_load_byte;
- common_load_ind:
- /*
- * Load from [X + K]. Negative offsets are tested for
- * in the helper functions.
- */
- ctx->seen |= SEEN_DATAREF | SEEN_XREG;
- PPC_FUNC_ADDR(r_scratch1, func);
- PPC_MTLR(r_scratch1);
- PPC_ADDI(r_addr, r_X, IMM_L(K));
- if (K >= 32768)
- PPC_ADDIS(r_addr, r_addr, IMM_HA(K));
- PPC_BLRL();
- /* If error, cr0.LT set */
- PPC_BCC(COND_LT, exit_addr);
- break;
-
- case BPF_LDX | BPF_B | BPF_MSH:
- func = CHOOSE_LOAD_FUNC(K, sk_load_byte_msh);
- goto common_load;
- break;
-
- /*** Jump and branches ***/
- case BPF_JMP | BPF_JA:
- if (K != 0)
- PPC_JMP(addrs[i + 1 + K]);
- break;
-
- case BPF_JMP | BPF_JGT | BPF_K:
- case BPF_JMP | BPF_JGT | BPF_X:
- true_cond = COND_GT;
- goto cond_branch;
- case BPF_JMP | BPF_JGE | BPF_K:
- case BPF_JMP | BPF_JGE | BPF_X:
- true_cond = COND_GE;
- goto cond_branch;
- case BPF_JMP | BPF_JEQ | BPF_K:
- case BPF_JMP | BPF_JEQ | BPF_X:
- true_cond = COND_EQ;
- goto cond_branch;
- case BPF_JMP | BPF_JSET | BPF_K:
- case BPF_JMP | BPF_JSET | BPF_X:
- true_cond = COND_NE;
- /* Fall through */
- cond_branch:
- /* same targets, can avoid doing the test :) */
- if (filter[i].jt == filter[i].jf) {
- if (filter[i].jt > 0)
- PPC_JMP(addrs[i + 1 + filter[i].jt]);
- break;
- }
-
- switch (code) {
- case BPF_JMP | BPF_JGT | BPF_X:
- case BPF_JMP | BPF_JGE | BPF_X:
- case BPF_JMP | BPF_JEQ | BPF_X:
- ctx->seen |= SEEN_XREG;
- PPC_CMPLW(r_A, r_X);
- break;
- case BPF_JMP | BPF_JSET | BPF_X:
- ctx->seen |= SEEN_XREG;
- PPC_AND_DOT(r_scratch1, r_A, r_X);
- break;
- case BPF_JMP | BPF_JEQ | BPF_K:
- case BPF_JMP | BPF_JGT | BPF_K:
- case BPF_JMP | BPF_JGE | BPF_K:
- if (K < 32768)
- PPC_CMPLWI(r_A, K);
- else {
- PPC_LI32(r_scratch1, K);
- PPC_CMPLW(r_A, r_scratch1);
- }
- break;
- case BPF_JMP | BPF_JSET | BPF_K:
- if (K < 32768)
- /* PPC_ANDI is /only/ dot-form */
- PPC_ANDI(r_scratch1, r_A, K);
- else {
- PPC_LI32(r_scratch1, K);
- PPC_AND_DOT(r_scratch1, r_A,
- r_scratch1);
- }
- break;
- }
- /* Sometimes branches are constructed "backward", with
- * the false path being the branch and true path being
- * a fallthrough to the next instruction.
- */
- if (filter[i].jt == 0)
- /* Swap the sense of the branch */
- PPC_BCC(true_cond ^ COND_CMP_TRUE,
- addrs[i + 1 + filter[i].jf]);
- else {
- PPC_BCC(true_cond, addrs[i + 1 + filter[i].jt]);
- if (filter[i].jf != 0)
- PPC_JMP(addrs[i + 1 + filter[i].jf]);
- }
- break;
- default:
- /* The filter contains something cruel & unusual.
- * We don't handle it, but also there shouldn't be
- * anything missing from our list.
- */
- if (printk_ratelimit())
- pr_err("BPF filter opcode %04x (@%d) unsupported\n",
- filter[i].code, i);
- return -ENOTSUPP;
- }
-
- }
- /* Set end-of-body-code address for exit. */
- addrs[i] = ctx->idx * 4;
-
- return 0;
+ return true;
}
-void bpf_jit_compile(struct bpf_prog *fp)
+struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
{
- unsigned int proglen;
- unsigned int alloclen;
- u32 *image = NULL;
+ u32 proglen;
+ u32 alloclen;
+ u8 *image = NULL;
u32 *code_base;
- unsigned int *addrs;
+ u32 *addrs;
+ struct powerpc64_jit_data *jit_data;
struct codegen_context cgctx;
int pass;
- int flen = fp->len;
+ int flen;
+ struct bpf_binary_header *bpf_hdr;
+ struct bpf_prog *org_fp = fp;
+ struct bpf_prog *tmp_fp;
+ bool bpf_blinded = false;
+ bool extra_pass = false;
+ u32 extable_len;
+ u32 fixup_len;
+
+ if (!fp->jit_requested)
+ return org_fp;
+
+ tmp_fp = bpf_jit_blind_constants(org_fp);
+ if (IS_ERR(tmp_fp))
+ return org_fp;
+
+ if (tmp_fp != org_fp) {
+ bpf_blinded = true;
+ fp = tmp_fp;
+ }
+
+ jit_data = fp->aux->jit_data;
+ if (!jit_data) {
+ jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
+ if (!jit_data) {
+ fp = org_fp;
+ goto out;
+ }
+ fp->aux->jit_data = jit_data;
+ }
- if (!bpf_jit_enable)
- return;
+ flen = fp->len;
+ addrs = jit_data->addrs;
+ if (addrs) {
+ cgctx = jit_data->ctx;
+ image = jit_data->image;
+ bpf_hdr = jit_data->header;
+ proglen = jit_data->proglen;
+ extra_pass = true;
+ goto skip_init_ctx;
+ }
addrs = kcalloc(flen + 1, sizeof(*addrs), GFP_KERNEL);
- if (addrs == NULL)
- return;
+ if (addrs == NULL) {
+ fp = org_fp;
+ goto out_addrs;
+ }
- /*
- * There are multiple assembly passes as the generated code will change
- * size as it settles down, figuring out the max branch offsets/exit
- * paths required.
- *
- * The range of standard conditional branches is +/- 32Kbytes. Since
- * BPF_MAXINSNS = 4096, we can only jump from (worst case) start to
- * finish with 8 bytes/instruction. Not feasible, so long jumps are
- * used, distinct from short branches.
- *
- * Current:
- *
- * For now, both branch types assemble to 2 words (short branches padded
- * with a NOP); this is less efficient, but assembly will always complete
- * after exactly 3 passes:
- *
- * First pass: No code buffer; Program is "faux-generated" -- no code
- * emitted but maximum size of output determined (and addrs[] filled
- * in). Also, we note whether we use M[], whether we use skb data, etc.
- * All generation choices assumed to be 'worst-case', e.g. branches all
- * far (2 instructions), return path code reduction not available, etc.
- *
- * Second pass: Code buffer allocated with size determined previously.
- * Prologue generated to support features we have seen used. Exit paths
- * determined and addrs[] is filled in again, as code may be slightly
- * smaller as a result.
- *
- * Third pass: Code generated 'for real', and branch destinations
- * determined from now-accurate addrs[] map.
- *
- * Ideal:
- *
- * If we optimise this, near branches will be shorter. On the
- * first assembly pass, we should err on the side of caution and
- * generate the biggest code. On subsequent passes, branches will be
- * generated short or long and code size will reduce. With smaller
- * code, more branches may fall into the short category, and code will
- * reduce more.
- *
- * Finally, if we see one pass generate code the same size as the
- * previous pass we have converged and should now generate code for
- * real. Allocating at the end will also save the memory that would
- * otherwise be wasted by the (small) current code shrinkage.
- * Preferably, we should do a small number of passes (e.g. 5) and if we
- * haven't converged by then, get impatient and force code to generate
- * as-is, even if the odd branch would be left long. The chances of a
- * long jump are tiny with all but the most enormous of BPF filter
- * inputs, so we should usually converge on the third pass.
- */
+ memset(&cgctx, 0, sizeof(struct codegen_context));
+ bpf_jit_init_reg_mapping(&cgctx);
+
+ /* Make sure that the stack is quadword aligned. */
+ cgctx.stack_size = round_up(fp->aux->stack_depth, 16);
- cgctx.idx = 0;
- cgctx.seen = 0;
- cgctx.pc_ret0 = -1;
/* Scouting faux-generate pass 0 */
- if (bpf_jit_build_body(fp, 0, &cgctx, addrs))
+ if (bpf_jit_build_body(fp, 0, &cgctx, addrs, 0)) {
/* We hit something illegal or unsupported. */
- goto out;
+ fp = org_fp;
+ goto out_addrs;
+ }
+
+ /*
+ * If we have seen a tail call, we need a second pass.
+ * This is because bpf_jit_emit_common_epilogue() is called
+ * from bpf_jit_emit_tail_call() before ctx->seen is stable.
+ * We also need a second pass if we ended up with too large
+ * a program so as to ensure BPF_EXIT branches are in range.
+ */
+ if (cgctx.seen & SEEN_TAILCALL || !is_offset_in_branch_range((long)cgctx.idx * 4)) {
+ cgctx.idx = 0;
+ if (bpf_jit_build_body(fp, 0, &cgctx, addrs, 0)) {
+ fp = org_fp;
+ goto out_addrs;
+ }
+ }
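The faux pass above, and its conditional repeat, is the usual size-convergence trick: count instructions without emitting anything, and only rescan when an early decision such as SEEN_TAILCALL or branch range could change the count. A hedged, userspace-only sketch of that pattern follows; every name in it (sizing_ctx, emit_all, MAX_PASSES) is illustrative, not a kernel API.

#include <stdio.h>

#define MAX_PASSES 4

struct sizing_ctx {
	unsigned int idx;	/* number of 32-bit words "emitted" so far */
	unsigned int seen;	/* feature bits discovered while scanning  */
};

/* Stand-in for the real body generator: pretends the program needs one
 * extra setup instruction once a feature has been seen, so the second
 * scan comes out one word longer than the first. */
static int emit_all(unsigned int *image, struct sizing_ctx *ctx)
{
	(void)image;
	ctx->idx += 10;			/* the program body itself */
	if (ctx->seen)
		ctx->idx += 1;		/* feature-dependent setup  */
	ctx->seen = 1;			/* discovered on the first scan */
	return 0;
}

int main(void)
{
	struct sizing_ctx ctx = { 0, 0 };
	unsigned int prev = ~0u;
	int pass;

	for (pass = 0; pass < MAX_PASSES; pass++) {
		ctx.idx = 0;
		if (emit_all(NULL, &ctx))
			return 1;
		if (ctx.idx == prev)	/* size stopped changing: converged */
			break;
		prev = ctx.idx;
	}
	printf("converged after pass %d, body = %u bytes\n", pass, ctx.idx * 4);
	return 0;
}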
+ bpf_jit_realloc_regs(&cgctx);
/*
* Pretend to build prologue, given the features we've seen. This will
 * update cgctx.idx as it pretends to output instructions, then we can
* calculate total size from idx.
*/
- bpf_jit_build_prologue(fp, 0, &cgctx);
+ bpf_jit_build_prologue(0, &cgctx);
+ addrs[fp->len] = cgctx.idx * 4;
bpf_jit_build_epilogue(0, &cgctx);
+ fixup_len = fp->aux->num_exentries * BPF_FIXUP_LEN * 4;
+ extable_len = fp->aux->num_exentries * sizeof(struct exception_table_entry);
+
proglen = cgctx.idx * 4;
- alloclen = proglen + FUNCTION_DESCR_SIZE;
- image = module_alloc(alloclen);
- if (!image)
- goto out;
+ alloclen = proglen + FUNCTION_DESCR_SIZE + fixup_len + extable_len;
- code_base = image + (FUNCTION_DESCR_SIZE/4);
+ bpf_hdr = bpf_jit_binary_alloc(alloclen, &image, 4, bpf_jit_fill_ill_insns);
+ if (!bpf_hdr) {
+ fp = org_fp;
+ goto out_addrs;
+ }
+
+ if (extable_len)
+ fp->aux->extable = (void *)image + FUNCTION_DESCR_SIZE + proglen + fixup_len;
+
+skip_init_ctx:
+ code_base = (u32 *)(image + FUNCTION_DESCR_SIZE);
+
+ if (extra_pass) {
+ /*
+ * Do not touch the prologue and epilogue as they will remain
+ * unchanged. Only fix the branch target address for subprog
+ * calls in the body, and ldimm64 instructions.
+ *
+ * This does not change the offsets and lengths of the subprog
+ * call instruction sequences and hence, the size of the JITed
+ * image as well.
+ */
+ bpf_jit_fixup_addresses(fp, code_base, &cgctx, addrs);
+
+ /* There is no need to perform the usual passes. */
+ goto skip_codegen_passes;
+ }
/* Code generation passes 1-2 */
for (pass = 1; pass < 3; pass++) {
/* Now build the prologue, body code & epilogue for real. */
cgctx.idx = 0;
- bpf_jit_build_prologue(fp, code_base, &cgctx);
- bpf_jit_build_body(fp, code_base, &cgctx, addrs);
+ cgctx.alt_exit_addr = 0;
+ bpf_jit_build_prologue(code_base, &cgctx);
+ if (bpf_jit_build_body(fp, code_base, &cgctx, addrs, pass)) {
+ bpf_jit_binary_free(bpf_hdr);
+ fp = org_fp;
+ goto out_addrs;
+ }
bpf_jit_build_epilogue(code_base, &cgctx);
if (bpf_jit_enable > 1)
@@ -653,15 +268,15 @@ void bpf_jit_compile(struct bpf_prog *fp)
proglen - (cgctx.idx * 4), cgctx.seen);
}
+skip_codegen_passes:
if (bpf_jit_enable > 1)
- /* Note that we output the base address of the code_base
+ /*
+ * Note that we output the base address of the code_base
* rather than image, since opcodes are in code_base.
*/
bpf_jit_dump(flen, proglen, pass, code_base);
- bpf_flush_icache(code_base, code_base + (proglen/4));
-
-#ifdef CONFIG_PPC64
+#ifdef CONFIG_PPC64_ELF_ABI_V1
/* Function descriptor nastiness: Address + TOC */
((u64 *)image)[0] = (u64)code_base;
((u64 *)image)[1] = local_paca->kernel_toc;
@@ -669,16 +284,76 @@ void bpf_jit_compile(struct bpf_prog *fp)
fp->bpf_func = (void *)image;
fp->jited = 1;
+ fp->jited_len = proglen + FUNCTION_DESCR_SIZE;
+
+ bpf_flush_icache(bpf_hdr, (u8 *)bpf_hdr + bpf_hdr->size);
+ if (!fp->is_func || extra_pass) {
+ bpf_jit_binary_lock_ro(bpf_hdr);
+ bpf_prog_fill_jited_linfo(fp, addrs);
+out_addrs:
+ kfree(addrs);
+ kfree(jit_data);
+ fp->aux->jit_data = NULL;
+ } else {
+ jit_data->addrs = addrs;
+ jit_data->ctx = cgctx;
+ jit_data->proglen = proglen;
+ jit_data->image = image;
+ jit_data->header = bpf_hdr;
+ }
out:
- kfree(addrs);
- return;
+ if (bpf_blinded)
+ bpf_jit_prog_release_other(fp, fp == org_fp ? tmp_fp : org_fp);
+
+ return fp;
}
-void bpf_jit_free(struct bpf_prog *fp)
+/*
+ * The caller should check for (BPF_MODE(code) == BPF_PROBE_MEM) before calling
+ * this function, as this only applies to BPF_PROBE_MEM, for now.
+ */
+int bpf_add_extable_entry(struct bpf_prog *fp, u32 *image, int pass, struct codegen_context *ctx,
+ int insn_idx, int jmp_off, int dst_reg)
{
- if (fp->jited)
- module_memfree(fp->bpf_func);
+ off_t offset;
+ unsigned long pc;
+ struct exception_table_entry *ex;
+ u32 *fixup;
+
+ /* Populate extable entries only in the last pass */
+ if (pass != 2)
+ return 0;
- bpf_prog_unlock_free(fp);
+ if (!fp->aux->extable ||
+ WARN_ON_ONCE(ctx->exentry_idx >= fp->aux->num_exentries))
+ return -EINVAL;
+
+ pc = (unsigned long)&image[insn_idx];
+
+ fixup = (void *)fp->aux->extable -
+ (fp->aux->num_exentries * BPF_FIXUP_LEN * 4) +
+ (ctx->exentry_idx * BPF_FIXUP_LEN * 4);
+
+ fixup[0] = PPC_RAW_LI(dst_reg, 0);
+ if (IS_ENABLED(CONFIG_PPC32))
+ fixup[1] = PPC_RAW_LI(dst_reg - 1, 0); /* clear higher 32-bit register too */
+
+ fixup[BPF_FIXUP_LEN - 1] =
+ PPC_RAW_BRANCH((long)(pc + jmp_off) - (long)&fixup[BPF_FIXUP_LEN - 1]);
+
+ ex = &fp->aux->extable[ctx->exentry_idx];
+
+ offset = pc - (long)&ex->insn;
+ if (WARN_ON_ONCE(offset >= 0 || offset < INT_MIN))
+ return -ERANGE;
+ ex->insn = offset;
+
+ offset = (long)fixup - (long)&ex->fixup;
+ if (WARN_ON_ONCE(offset >= 0 || offset < INT_MIN))
+ return -ERANGE;
+ ex->fixup = offset;
+
+ ctx->exentry_idx++;
+ return 0;
}
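Both offsets stored above are relative to the address of the field itself, which is what keeps the entries valid wherever the JIT image ends up mapped. A hedged, userspace-only model of that encoding; the struct and helper names are illustrative, not the kernel's.

#include <stdint.h>
#include <stdio.h>

struct rel_extable_entry {
	int32_t insn;	/* faulting instruction, relative to &insn  */
	int32_t fixup;	/* fixup code,           relative to &fixup */
};

static void set_entry(struct rel_extable_entry *ex, void *insn, void *fixup)
{
	ex->insn  = (int32_t)((intptr_t)insn  - (intptr_t)&ex->insn);
	ex->fixup = (int32_t)((intptr_t)fixup - (intptr_t)&ex->fixup);
}

static void *entry_insn(struct rel_extable_entry *ex)
{
	return (char *)&ex->insn + ex->insn;	/* recover the absolute address */
}

int main(void)
{
	uint32_t code[4];
	struct rel_extable_entry ex;

	set_entry(&ex, &code[2], &code[3]);
	printf("%s\n", entry_insn(&ex) == (void *)&code[2] ? "round-trips" : "broken");
	return 0;
}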
diff --git a/arch/powerpc/net/bpf_jit_comp32.c b/arch/powerpc/net/bpf_jit_comp32.c
new file mode 100644
index 000000000000..43f1c76d48ce
--- /dev/null
+++ b/arch/powerpc/net/bpf_jit_comp32.c
@@ -0,0 +1,1257 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * eBPF JIT compiler for PPC32
+ *
+ * Copyright 2020 Christophe Leroy <christophe.leroy@csgroup.eu>
+ * CS GROUP France
+ *
+ * Based on PPC64 eBPF JIT compiler by Naveen N. Rao
+ */
+#include <linux/moduleloader.h>
+#include <asm/cacheflush.h>
+#include <asm/asm-compat.h>
+#include <linux/netdevice.h>
+#include <linux/filter.h>
+#include <linux/if_vlan.h>
+#include <asm/kprobes.h>
+#include <linux/bpf.h>
+
+#include "bpf_jit.h"
+
+/*
+ * Stack layout:
+ *
+ * [ prev sp ] <-------------
+ * [ nv gpr save area ] 16 * 4 |
+ * fp (r31) --> [ ebpf stack space ] up to 512 |
+ * [ frame header ] 16 |
+ * sp (r1) ---> [ stack pointer ] --------------
+ */
+
+/* for gpr non volatile registers r17 to r31 (15) + tail call */
+#define BPF_PPC_STACK_SAVE (15 * 4 + 4)
+/* stack frame, ensure this is quadword aligned */
+#define BPF_PPC_STACKFRAME(ctx) (STACK_FRAME_MIN_SIZE + BPF_PPC_STACK_SAVE + (ctx)->stack_size)
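To make the layout above concrete, here is a hedged userspace computation of the resulting frame for a program that asked for 24 bytes of BPF stack. The rounding mirrors the round_up(.., 16) done at JIT entry; the STACK_FRAME_MIN_SIZE value of 16 for PPC32 is an assumption in this sketch.

#include <stdio.h>

#define STACK_FRAME_MIN_SIZE	16		/* assumed PPC32 value */
#define BPF_PPC_STACK_SAVE	(15 * 4 + 4)	/* r17..r31 + tail_call_cnt */

int main(void)
{
	unsigned int stack_depth = 24;				/* bytes the BPF prog asked for */
	unsigned int stack_size = (stack_depth + 15) & ~15U;	/* round_up(.., 16) */
	unsigned int frame = STACK_FRAME_MIN_SIZE + BPF_PPC_STACK_SAVE + stack_size;

	printf("stack_size=%u frame=%u (quadword aligned: %s)\n",
	       stack_size, frame, (frame % 16) ? "no" : "yes");
	return 0;
}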
+
+#define PPC_EX32(r, i) EMIT(PPC_RAW_LI((r), (i) < 0 ? -1 : 0))
+
+/* PPC NVR range -- update this if we ever use NVRs below r17 */
+#define BPF_PPC_NVR_MIN _R17
+#define BPF_PPC_TC _R16
+
+/* BPF register usage */
+#define TMP_REG (MAX_BPF_JIT_REG + 0)
+
+/* BPF to ppc register mappings */
+void bpf_jit_init_reg_mapping(struct codegen_context *ctx)
+{
+ /* function return value */
+ ctx->b2p[BPF_REG_0] = _R12;
+ /* function arguments */
+ ctx->b2p[BPF_REG_1] = _R4;
+ ctx->b2p[BPF_REG_2] = _R6;
+ ctx->b2p[BPF_REG_3] = _R8;
+ ctx->b2p[BPF_REG_4] = _R10;
+ ctx->b2p[BPF_REG_5] = _R22;
+ /* non volatile registers */
+ ctx->b2p[BPF_REG_6] = _R24;
+ ctx->b2p[BPF_REG_7] = _R26;
+ ctx->b2p[BPF_REG_8] = _R28;
+ ctx->b2p[BPF_REG_9] = _R30;
+ /* frame pointer aka BPF_REG_10 */
+ ctx->b2p[BPF_REG_FP] = _R18;
+ /* eBPF jit internal registers */
+ ctx->b2p[BPF_REG_AX] = _R20;
+ ctx->b2p[TMP_REG] = _R31; /* 32 bits */
+}
+
+static int bpf_jit_stack_offsetof(struct codegen_context *ctx, int reg)
+{
+ if ((reg >= BPF_PPC_NVR_MIN && reg < 32) || reg == BPF_PPC_TC)
+ return BPF_PPC_STACKFRAME(ctx) - 4 * (32 - reg);
+
+ WARN(true, "BPF JIT is asking about unknown registers, will crash the stack");
+ /* Use the hole we have left for alignment */
+ return BPF_PPC_STACKFRAME(ctx) - 4;
+}
+
+#define SEEN_VREG_MASK 0x1ff80000 /* Volatile registers r3-r12 */
+#define SEEN_NVREG_FULL_MASK 0x0003ffff /* Non volatile registers r14-r31 */
+#define SEEN_NVREG_TEMP_MASK 0x00001e01 /* BPF_REG_5, BPF_REG_AX, TMP_REG */
+
+void bpf_jit_realloc_regs(struct codegen_context *ctx)
+{
+ unsigned int nvreg_mask;
+
+ if (ctx->seen & SEEN_FUNC)
+ nvreg_mask = SEEN_NVREG_TEMP_MASK;
+ else
+ nvreg_mask = SEEN_NVREG_FULL_MASK;
+
+ while (ctx->seen & nvreg_mask &&
+ (ctx->seen & SEEN_VREG_MASK) != SEEN_VREG_MASK) {
+ int old = 32 - fls(ctx->seen & (nvreg_mask & 0xaaaaaaab));
+ int new = 32 - fls(~ctx->seen & (SEEN_VREG_MASK & 0xaaaaaaaa));
+ int i;
+
+ for (i = BPF_REG_0; i <= TMP_REG; i++) {
+ if (ctx->b2p[i] != old)
+ continue;
+ ctx->b2p[i] = new;
+ bpf_set_seen_register(ctx, new);
+ bpf_clear_seen_register(ctx, old);
+ if (i != TMP_REG) {
+ bpf_set_seen_register(ctx, new - 1);
+ bpf_clear_seen_register(ctx, old - 1);
+ }
+ break;
+ }
+ }
+}
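bpf_jit_realloc_regs() works purely on the ctx->seen bitmap, where register r is tracked as bit (31 - r); 32 - fls(mask) therefore recovers the lowest-numbered register recorded in a mask. A hedged userspace illustration of that bookkeeping; fls32 is open-coded here and is not the kernel helper.

#include <stdio.h>

static int fls32(unsigned int x)
{
	return x ? 32 - __builtin_clz(x) : 0;
}

static unsigned int seen_bit(int reg)
{
	return 1u << (31 - reg);	/* how bpf_set_seen_register() records reg */
}

int main(void)
{
	unsigned int seen = seen_bit(31) | seen_bit(24) | seen_bit(17);

	/* the highest set bit corresponds to the lowest-numbered register */
	printf("lowest-numbered seen register: r%d\n", 32 - fls32(seen));	/* r17 */
	return 0;
}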
+
+void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
+{
+ int i;
+
+ /* First arg comes in as a 32-bit pointer. */
+ EMIT(PPC_RAW_MR(bpf_to_ppc(BPF_REG_1), _R3));
+ EMIT(PPC_RAW_LI(bpf_to_ppc(BPF_REG_1) - 1, 0));
+ EMIT(PPC_RAW_STWU(_R1, _R1, -BPF_PPC_STACKFRAME(ctx)));
+
+ /*
+ * Initialize tail_call_cnt in stack frame if we do tail calls.
+ * Otherwise, put in NOPs so that it can be skipped when we are
+ * invoked through a tail call.
+ */
+ if (ctx->seen & SEEN_TAILCALL)
+ EMIT(PPC_RAW_STW(bpf_to_ppc(BPF_REG_1) - 1, _R1,
+ bpf_jit_stack_offsetof(ctx, BPF_PPC_TC)));
+ else
+ EMIT(PPC_RAW_NOP());
+
+#define BPF_TAILCALL_PROLOGUE_SIZE 16
+
+ /*
+ * We need a stack frame, but we don't necessarily need to
+ * save/restore LR unless we call other functions
+ */
+ if (ctx->seen & SEEN_FUNC)
+ EMIT(PPC_RAW_MFLR(_R0));
+
+ /*
+ * Back up non-volatile regs -- registers r17-r31
+ */
+ for (i = BPF_PPC_NVR_MIN; i <= 31; i++)
+ if (bpf_is_seen_register(ctx, i))
+ EMIT(PPC_RAW_STW(i, _R1, bpf_jit_stack_offsetof(ctx, i)));
+
+ /* If needed, retrieve arguments 9 and 10, i.e. the 5th 64-bit arg. */
+ if (bpf_is_seen_register(ctx, bpf_to_ppc(BPF_REG_5))) {
+ EMIT(PPC_RAW_LWZ(bpf_to_ppc(BPF_REG_5) - 1, _R1, BPF_PPC_STACKFRAME(ctx) + 8));
+ EMIT(PPC_RAW_LWZ(bpf_to_ppc(BPF_REG_5), _R1, BPF_PPC_STACKFRAME(ctx) + 12));
+ }
+
+ /* Setup frame pointer to point to the bpf stack area */
+ if (bpf_is_seen_register(ctx, bpf_to_ppc(BPF_REG_FP))) {
+ EMIT(PPC_RAW_LI(bpf_to_ppc(BPF_REG_FP) - 1, 0));
+ EMIT(PPC_RAW_ADDI(bpf_to_ppc(BPF_REG_FP), _R1,
+ STACK_FRAME_MIN_SIZE + ctx->stack_size));
+ }
+
+ if (ctx->seen & SEEN_FUNC)
+ EMIT(PPC_RAW_STW(_R0, _R1, BPF_PPC_STACKFRAME(ctx) + PPC_LR_STKOFF));
+}
+
+static void bpf_jit_emit_common_epilogue(u32 *image, struct codegen_context *ctx)
+{
+ int i;
+
+ /* Restore NVRs */
+ for (i = BPF_PPC_NVR_MIN; i <= 31; i++)
+ if (bpf_is_seen_register(ctx, i))
+ EMIT(PPC_RAW_LWZ(i, _R1, bpf_jit_stack_offsetof(ctx, i)));
+}
+
+void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
+{
+ EMIT(PPC_RAW_MR(_R3, bpf_to_ppc(BPF_REG_0)));
+
+ bpf_jit_emit_common_epilogue(image, ctx);
+
+ /* Tear down our stack frame */
+
+ if (ctx->seen & SEEN_FUNC)
+ EMIT(PPC_RAW_LWZ(_R0, _R1, BPF_PPC_STACKFRAME(ctx) + PPC_LR_STKOFF));
+
+ EMIT(PPC_RAW_ADDI(_R1, _R1, BPF_PPC_STACKFRAME(ctx)));
+
+ if (ctx->seen & SEEN_FUNC)
+ EMIT(PPC_RAW_MTLR(_R0));
+
+ EMIT(PPC_RAW_BLR());
+}
+
+int bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx, u64 func)
+{
+ s32 rel = (s32)func - (s32)(image + ctx->idx);
+
+ if (image && rel < 0x2000000 && rel >= -0x2000000) {
+ PPC_BL(func);
+ EMIT(PPC_RAW_NOP());
+ EMIT(PPC_RAW_NOP());
+ EMIT(PPC_RAW_NOP());
+ } else {
+ /* Load function address into r0 */
+ EMIT(PPC_RAW_LIS(_R0, IMM_H(func)));
+ EMIT(PPC_RAW_ORI(_R0, _R0, IMM_L(func)));
+ EMIT(PPC_RAW_MTCTR(_R0));
+ EMIT(PPC_RAW_BCTRL());
+ }
+
+ return 0;
+}
+
+static int bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out)
+{
+ /*
+ * By now, the eBPF program has already setup parameters in r3-r6
+ * r3-r4/BPF_REG_1 - pointer to ctx -- passed as is to the next bpf program
+ * r5-r6/BPF_REG_2 - pointer to bpf_array
+ * r7-r8/BPF_REG_3 - index in bpf_array
+ */
+ int b2p_bpf_array = bpf_to_ppc(BPF_REG_2);
+ int b2p_index = bpf_to_ppc(BPF_REG_3);
+
+ /*
+ * if (index >= array->map.max_entries)
+ * goto out;
+ */
+ EMIT(PPC_RAW_LWZ(_R0, b2p_bpf_array, offsetof(struct bpf_array, map.max_entries)));
+ EMIT(PPC_RAW_CMPLW(b2p_index, _R0));
+ EMIT(PPC_RAW_LWZ(_R0, _R1, bpf_jit_stack_offsetof(ctx, BPF_PPC_TC)));
+ PPC_BCC_SHORT(COND_GE, out);
+
+ /*
+ * if (tail_call_cnt >= MAX_TAIL_CALL_CNT)
+ * goto out;
+ */
+ EMIT(PPC_RAW_CMPLWI(_R0, MAX_TAIL_CALL_CNT));
+ /* tail_call_cnt++; */
+ EMIT(PPC_RAW_ADDIC(_R0, _R0, 1));
+ PPC_BCC_SHORT(COND_GE, out);
+
+ /* prog = array->ptrs[index]; */
+ EMIT(PPC_RAW_RLWINM(_R3, b2p_index, 2, 0, 29));
+ EMIT(PPC_RAW_ADD(_R3, _R3, b2p_bpf_array));
+ EMIT(PPC_RAW_LWZ(_R3, _R3, offsetof(struct bpf_array, ptrs)));
+ EMIT(PPC_RAW_STW(_R0, _R1, bpf_jit_stack_offsetof(ctx, BPF_PPC_TC)));
+
+ /*
+ * if (prog == NULL)
+ * goto out;
+ */
+ EMIT(PPC_RAW_CMPLWI(_R3, 0));
+ PPC_BCC_SHORT(COND_EQ, out);
+
+ /* goto *(prog->bpf_func + prologue_size); */
+ EMIT(PPC_RAW_LWZ(_R3, _R3, offsetof(struct bpf_prog, bpf_func)));
+
+ if (ctx->seen & SEEN_FUNC)
+ EMIT(PPC_RAW_LWZ(_R0, _R1, BPF_PPC_STACKFRAME(ctx) + PPC_LR_STKOFF));
+
+ EMIT(PPC_RAW_ADDIC(_R3, _R3, BPF_TAILCALL_PROLOGUE_SIZE));
+
+ if (ctx->seen & SEEN_FUNC)
+ EMIT(PPC_RAW_MTLR(_R0));
+
+ EMIT(PPC_RAW_MTCTR(_R3));
+
+ EMIT(PPC_RAW_MR(_R3, bpf_to_ppc(BPF_REG_1)));
+
+ /* restore NVRs, ... */
+ bpf_jit_emit_common_epilogue(image, ctx);
+
+ EMIT(PPC_RAW_BCTR());
+
+ /* out: */
+ return 0;
+}
+
+/* Assemble the body code between the prologue & epilogue */
+int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *ctx,
+ u32 *addrs, int pass)
+{
+ const struct bpf_insn *insn = fp->insnsi;
+ int flen = fp->len;
+ int i, ret;
+
+ /* Start of epilogue code - will only be valid 2nd pass onwards */
+ u32 exit_addr = addrs[flen];
+
+ for (i = 0; i < flen; i++) {
+ u32 code = insn[i].code;
+ u32 dst_reg = bpf_to_ppc(insn[i].dst_reg);
+ u32 dst_reg_h = dst_reg - 1;
+ u32 src_reg = bpf_to_ppc(insn[i].src_reg);
+ u32 src_reg_h = src_reg - 1;
+ u32 ax_reg = bpf_to_ppc(BPF_REG_AX);
+ u32 tmp_reg = bpf_to_ppc(TMP_REG);
+ u32 size = BPF_SIZE(code);
+ u32 save_reg, ret_reg;
+ s16 off = insn[i].off;
+ s32 imm = insn[i].imm;
+ bool func_addr_fixed;
+ u64 func_addr;
+ u32 true_cond;
+ u32 tmp_idx;
+ int j;
+
+ /*
+ * addrs[] maps a BPF bytecode address into a real offset from
+ * the start of the body code.
+ */
+ addrs[i] = ctx->idx * 4;
+
+ /*
+ * As an optimization, we note down which registers
+ * are used so that we can only save/restore those in our
+ * prologue and epilogue. We do this here regardless of whether
+ * the actual BPF instruction uses src/dst registers or not
+ * (for instance, BPF_CALL does not use them). The expectation
+ * is that those instructions will have src_reg/dst_reg set to
+ * 0. Even otherwise, we just lose some prologue/epilogue
+ * optimization but everything else should work without
+ * any issues.
+ */
+ if (dst_reg >= 3 && dst_reg < 32) {
+ bpf_set_seen_register(ctx, dst_reg);
+ bpf_set_seen_register(ctx, dst_reg_h);
+ }
+
+ if (src_reg >= 3 && src_reg < 32) {
+ bpf_set_seen_register(ctx, src_reg);
+ bpf_set_seen_register(ctx, src_reg_h);
+ }
+
+ switch (code) {
+ /*
+ * Arithmetic operations: ADD/SUB/MUL/DIV/MOD/NEG
+ */
+ case BPF_ALU | BPF_ADD | BPF_X: /* (u32) dst += (u32) src */
+ EMIT(PPC_RAW_ADD(dst_reg, dst_reg, src_reg));
+ break;
+ case BPF_ALU64 | BPF_ADD | BPF_X: /* dst += src */
+ EMIT(PPC_RAW_ADDC(dst_reg, dst_reg, src_reg));
+ EMIT(PPC_RAW_ADDE(dst_reg_h, dst_reg_h, src_reg_h));
+ break;
+ case BPF_ALU | BPF_SUB | BPF_X: /* (u32) dst -= (u32) src */
+ EMIT(PPC_RAW_SUB(dst_reg, dst_reg, src_reg));
+ break;
+ case BPF_ALU64 | BPF_SUB | BPF_X: /* dst -= src */
+ EMIT(PPC_RAW_SUBFC(dst_reg, src_reg, dst_reg));
+ EMIT(PPC_RAW_SUBFE(dst_reg_h, src_reg_h, dst_reg_h));
+ break;
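On PPC32 every eBPF 64-bit value lives in a (reg, reg-1) pair, so the ALU64 add and sub above chain the carry across the halves with addc/adde (subfc/subfe). A hedged userspace model of the same split, using a comparison in place of the carry flag:

#include <stdint.h>
#include <stdio.h>

/* 64-bit add on {hi,lo} 32-bit halves, as the addc/adde pair does. */
static void add64(uint32_t *hi, uint32_t *lo, uint32_t shi, uint32_t slo)
{
	uint32_t old = *lo;

	*lo += slo;			/* addc: add low words, note the carry */
	*hi += shi + (*lo < old);	/* adde: add high words plus carry-in   */
}

int main(void)
{
	uint32_t hi = 0x00000001, lo = 0xffffffff;

	add64(&hi, &lo, 0, 1);		/* 0x1ffffffff + 1 */
	printf("%08x%08x\n", hi, lo);	/* 0000000200000000 */
	return 0;
}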
+ case BPF_ALU | BPF_SUB | BPF_K: /* (u32) dst -= (u32) imm */
+ imm = -imm;
+ fallthrough;
+ case BPF_ALU | BPF_ADD | BPF_K: /* (u32) dst += (u32) imm */
+ if (IMM_HA(imm) & 0xffff)
+ EMIT(PPC_RAW_ADDIS(dst_reg, dst_reg, IMM_HA(imm)));
+ if (IMM_L(imm))
+ EMIT(PPC_RAW_ADDI(dst_reg, dst_reg, IMM_L(imm)));
+ break;
+ case BPF_ALU64 | BPF_SUB | BPF_K: /* dst -= imm */
+ imm = -imm;
+ fallthrough;
+ case BPF_ALU64 | BPF_ADD | BPF_K: /* dst += imm */
+ if (!imm)
+ break;
+
+ if (imm >= -32768 && imm < 32768) {
+ EMIT(PPC_RAW_ADDIC(dst_reg, dst_reg, imm));
+ } else {
+ PPC_LI32(_R0, imm);
+ EMIT(PPC_RAW_ADDC(dst_reg, dst_reg, _R0));
+ }
+ if (imm >= 0 || (BPF_OP(code) == BPF_SUB && imm == 0x80000000))
+ EMIT(PPC_RAW_ADDZE(dst_reg_h, dst_reg_h));
+ else
+ EMIT(PPC_RAW_ADDME(dst_reg_h, dst_reg_h));
+ break;
+ case BPF_ALU64 | BPF_MUL | BPF_X: /* dst *= src */
+ bpf_set_seen_register(ctx, tmp_reg);
+ EMIT(PPC_RAW_MULW(_R0, dst_reg, src_reg_h));
+ EMIT(PPC_RAW_MULW(dst_reg_h, dst_reg_h, src_reg));
+ EMIT(PPC_RAW_MULHWU(tmp_reg, dst_reg, src_reg));
+ EMIT(PPC_RAW_MULW(dst_reg, dst_reg, src_reg));
+ EMIT(PPC_RAW_ADD(dst_reg_h, dst_reg_h, _R0));
+ EMIT(PPC_RAW_ADD(dst_reg_h, dst_reg_h, tmp_reg));
+ break;
+ case BPF_ALU | BPF_MUL | BPF_X: /* (u32) dst *= (u32) src */
+ EMIT(PPC_RAW_MULW(dst_reg, dst_reg, src_reg));
+ break;
+ case BPF_ALU | BPF_MUL | BPF_K: /* (u32) dst *= (u32) imm */
+ if (imm >= -32768 && imm < 32768) {
+ EMIT(PPC_RAW_MULI(dst_reg, dst_reg, imm));
+ } else {
+ PPC_LI32(_R0, imm);
+ EMIT(PPC_RAW_MULW(dst_reg, dst_reg, _R0));
+ }
+ break;
+ case BPF_ALU64 | BPF_MUL | BPF_K: /* dst *= imm */
+ if (!imm) {
+ PPC_LI32(dst_reg, 0);
+ PPC_LI32(dst_reg_h, 0);
+ break;
+ }
+ if (imm == 1)
+ break;
+ if (imm == -1) {
+ EMIT(PPC_RAW_SUBFIC(dst_reg, dst_reg, 0));
+ EMIT(PPC_RAW_SUBFZE(dst_reg_h, dst_reg_h));
+ break;
+ }
+ bpf_set_seen_register(ctx, tmp_reg);
+ PPC_LI32(tmp_reg, imm);
+ EMIT(PPC_RAW_MULW(dst_reg_h, dst_reg_h, tmp_reg));
+ if (imm < 0)
+ EMIT(PPC_RAW_SUB(dst_reg_h, dst_reg_h, dst_reg));
+ EMIT(PPC_RAW_MULHWU(_R0, dst_reg, tmp_reg));
+ EMIT(PPC_RAW_MULW(dst_reg, dst_reg, tmp_reg));
+ EMIT(PPC_RAW_ADD(dst_reg_h, dst_reg_h, _R0));
+ break;
+ case BPF_ALU | BPF_DIV | BPF_X: /* (u32) dst /= (u32) src */
+ EMIT(PPC_RAW_DIVWU(dst_reg, dst_reg, src_reg));
+ break;
+ case BPF_ALU | BPF_MOD | BPF_X: /* (u32) dst %= (u32) src */
+ EMIT(PPC_RAW_DIVWU(_R0, dst_reg, src_reg));
+ EMIT(PPC_RAW_MULW(_R0, src_reg, _R0));
+ EMIT(PPC_RAW_SUB(dst_reg, dst_reg, _R0));
+ break;
+ case BPF_ALU64 | BPF_DIV | BPF_X: /* dst /= src */
+ return -EOPNOTSUPP;
+ case BPF_ALU64 | BPF_MOD | BPF_X: /* dst %= src */
+ return -EOPNOTSUPP;
+ case BPF_ALU | BPF_DIV | BPF_K: /* (u32) dst /= (u32) imm */
+ if (!imm)
+ return -EINVAL;
+ if (imm == 1)
+ break;
+
+ PPC_LI32(_R0, imm);
+ EMIT(PPC_RAW_DIVWU(dst_reg, dst_reg, _R0));
+ break;
+ case BPF_ALU | BPF_MOD | BPF_K: /* (u32) dst %= (u32) imm */
+ if (!imm)
+ return -EINVAL;
+
+ if (!is_power_of_2((u32)imm)) {
+ bpf_set_seen_register(ctx, tmp_reg);
+ PPC_LI32(tmp_reg, imm);
+ EMIT(PPC_RAW_DIVWU(_R0, dst_reg, tmp_reg));
+ EMIT(PPC_RAW_MULW(_R0, tmp_reg, _R0));
+ EMIT(PPC_RAW_SUB(dst_reg, dst_reg, _R0));
+ break;
+ }
+ if (imm == 1)
+ EMIT(PPC_RAW_LI(dst_reg, 0));
+ else
+ EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 0, 32 - ilog2((u32)imm), 31));
+
+ break;
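For a power-of-two divisor the rlwinm above simply keeps the low ilog2(imm) bits, i.e. it is a mask. A hedged one-line equivalent:

#include <stdint.h>

/* What rlwinm dst,dst,0,32-ilog2(imm),31 computes when imm is a power of two. */
static uint32_t mod_pow2(uint32_t v, uint32_t imm)
{
	return v & (imm - 1);	/* remainder of v / imm */
}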
+ case BPF_ALU64 | BPF_MOD | BPF_K: /* dst %= imm */
+ if (!imm)
+ return -EINVAL;
+ if (imm < 0)
+ imm = -imm;
+ if (!is_power_of_2(imm))
+ return -EOPNOTSUPP;
+ if (imm == 1)
+ EMIT(PPC_RAW_LI(dst_reg, 0));
+ else
+ EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 0, 32 - ilog2(imm), 31));
+ EMIT(PPC_RAW_LI(dst_reg_h, 0));
+ break;
+ case BPF_ALU64 | BPF_DIV | BPF_K: /* dst /= imm */
+ if (!imm)
+ return -EINVAL;
+ if (!is_power_of_2(abs(imm)))
+ return -EOPNOTSUPP;
+
+ if (imm < 0) {
+ EMIT(PPC_RAW_SUBFIC(dst_reg, dst_reg, 0));
+ EMIT(PPC_RAW_SUBFZE(dst_reg_h, dst_reg_h));
+ imm = -imm;
+ }
+ if (imm == 1)
+ break;
+ imm = ilog2(imm);
+ EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 32 - imm, imm, 31));
+ EMIT(PPC_RAW_RLWIMI(dst_reg, dst_reg_h, 32 - imm, 0, imm - 1));
+ EMIT(PPC_RAW_SRAWI(dst_reg_h, dst_reg_h, imm));
+ break;
+ case BPF_ALU | BPF_NEG: /* (u32) dst = -dst */
+ EMIT(PPC_RAW_NEG(dst_reg, dst_reg));
+ break;
+ case BPF_ALU64 | BPF_NEG: /* dst = -dst */
+ EMIT(PPC_RAW_SUBFIC(dst_reg, dst_reg, 0));
+ EMIT(PPC_RAW_SUBFZE(dst_reg_h, dst_reg_h));
+ break;
+
+ /*
+ * Logical operations: AND/OR/XOR/[A]LSH/[A]RSH
+ */
+ case BPF_ALU64 | BPF_AND | BPF_X: /* dst = dst & src */
+ EMIT(PPC_RAW_AND(dst_reg, dst_reg, src_reg));
+ EMIT(PPC_RAW_AND(dst_reg_h, dst_reg_h, src_reg_h));
+ break;
+ case BPF_ALU | BPF_AND | BPF_X: /* (u32) dst = dst & src */
+ EMIT(PPC_RAW_AND(dst_reg, dst_reg, src_reg));
+ break;
+ case BPF_ALU64 | BPF_AND | BPF_K: /* dst = dst & imm */
+ if (imm >= 0)
+ EMIT(PPC_RAW_LI(dst_reg_h, 0));
+ fallthrough;
+ case BPF_ALU | BPF_AND | BPF_K: /* (u32) dst = dst & imm */
+ if (!IMM_H(imm)) {
+ EMIT(PPC_RAW_ANDI(dst_reg, dst_reg, IMM_L(imm)));
+ } else if (!IMM_L(imm)) {
+ EMIT(PPC_RAW_ANDIS(dst_reg, dst_reg, IMM_H(imm)));
+ } else if (imm == (((1 << fls(imm)) - 1) ^ ((1 << (ffs(imm) - 1)) - 1))) {
+ EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 0,
+ 32 - fls(imm), 32 - ffs(imm)));
+ } else {
+ PPC_LI32(_R0, imm);
+ EMIT(PPC_RAW_AND(dst_reg, dst_reg, _R0));
+ }
+ break;
+ case BPF_ALU64 | BPF_OR | BPF_X: /* dst = dst | src */
+ EMIT(PPC_RAW_OR(dst_reg, dst_reg, src_reg));
+ EMIT(PPC_RAW_OR(dst_reg_h, dst_reg_h, src_reg_h));
+ break;
+ case BPF_ALU | BPF_OR | BPF_X: /* dst = (u32) dst | (u32) src */
+ EMIT(PPC_RAW_OR(dst_reg, dst_reg, src_reg));
+ break;
+ case BPF_ALU64 | BPF_OR | BPF_K:/* dst = dst | imm */
+ /* Sign-extended */
+ if (imm < 0)
+ EMIT(PPC_RAW_LI(dst_reg_h, -1));
+ fallthrough;
+ case BPF_ALU | BPF_OR | BPF_K:/* dst = (u32) dst | (u32) imm */
+ if (IMM_L(imm))
+ EMIT(PPC_RAW_ORI(dst_reg, dst_reg, IMM_L(imm)));
+ if (IMM_H(imm))
+ EMIT(PPC_RAW_ORIS(dst_reg, dst_reg, IMM_H(imm)));
+ break;
+ case BPF_ALU64 | BPF_XOR | BPF_X: /* dst ^= src */
+ if (dst_reg == src_reg) {
+ EMIT(PPC_RAW_LI(dst_reg, 0));
+ EMIT(PPC_RAW_LI(dst_reg_h, 0));
+ } else {
+ EMIT(PPC_RAW_XOR(dst_reg, dst_reg, src_reg));
+ EMIT(PPC_RAW_XOR(dst_reg_h, dst_reg_h, src_reg_h));
+ }
+ break;
+ case BPF_ALU | BPF_XOR | BPF_X: /* (u32) dst ^= src */
+ if (dst_reg == src_reg)
+ EMIT(PPC_RAW_LI(dst_reg, 0));
+ else
+ EMIT(PPC_RAW_XOR(dst_reg, dst_reg, src_reg));
+ break;
+ case BPF_ALU64 | BPF_XOR | BPF_K: /* dst ^= imm */
+ if (imm < 0)
+ EMIT(PPC_RAW_NOR(dst_reg_h, dst_reg_h, dst_reg_h));
+ fallthrough;
+ case BPF_ALU | BPF_XOR | BPF_K: /* (u32) dst ^= (u32) imm */
+ if (IMM_L(imm))
+ EMIT(PPC_RAW_XORI(dst_reg, dst_reg, IMM_L(imm)));
+ if (IMM_H(imm))
+ EMIT(PPC_RAW_XORIS(dst_reg, dst_reg, IMM_H(imm)));
+ break;
+ case BPF_ALU | BPF_LSH | BPF_X: /* (u32) dst <<= (u32) src */
+ EMIT(PPC_RAW_SLW(dst_reg, dst_reg, src_reg));
+ break;
+ case BPF_ALU64 | BPF_LSH | BPF_X: /* dst <<= src; */
+ bpf_set_seen_register(ctx, tmp_reg);
+ EMIT(PPC_RAW_SUBFIC(_R0, src_reg, 32));
+ EMIT(PPC_RAW_SLW(dst_reg_h, dst_reg_h, src_reg));
+ EMIT(PPC_RAW_ADDI(tmp_reg, src_reg, 32));
+ EMIT(PPC_RAW_SRW(_R0, dst_reg, _R0));
+ EMIT(PPC_RAW_SLW(tmp_reg, dst_reg, tmp_reg));
+ EMIT(PPC_RAW_OR(dst_reg_h, dst_reg_h, _R0));
+ EMIT(PPC_RAW_SLW(dst_reg, dst_reg, src_reg));
+ EMIT(PPC_RAW_OR(dst_reg_h, dst_reg_h, tmp_reg));
+ break;
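The branch-free ALU64 shift sequences above lean on the PowerPC rule that slw/srw take the shift count modulo 64 and produce 0 once the count reaches 32. A hedged userspace model of the left-shift case built from the same two primitives:

#include <stdint.h>
#include <stdio.h>

/* slw/srw semantics: count taken modulo 64, result 0 for counts of 32..63 */
static uint32_t slw(uint32_t v, uint32_t n) { n &= 63; return n < 32 ? v << n : 0; }
static uint32_t srw(uint32_t v, uint32_t n) { n &= 63; return n < 32 ? v >> n : 0; }

/* 64-bit left shift of a {hi,lo} pair by n (0..63), mirroring the emitted code */
static void shl64(uint32_t *hi, uint32_t *lo, uint32_t n)
{
	uint32_t spill = srw(*lo, 32 - n);	/* low bits that move into hi (n < 32) */
	uint32_t wrap  = slw(*lo, n + 32);	/* relocated low word when n >= 32      */

	*hi = slw(*hi, n) | spill | wrap;
	*lo = slw(*lo, n);
}

int main(void)
{
	uint32_t hi = 0x00000001, lo = 0x80000001;

	shl64(&hi, &lo, 33);
	printf("%08x%08x\n", hi, lo);	/* 0000000200000000 */
	return 0;
}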
+ case BPF_ALU | BPF_LSH | BPF_K: /* (u32) dst <<= (u32) imm */
+ if (!imm)
+ break;
+ EMIT(PPC_RAW_SLWI(dst_reg, dst_reg, imm));
+ break;
+ case BPF_ALU64 | BPF_LSH | BPF_K: /* dst <<= imm */
+ if (imm < 0)
+ return -EINVAL;
+ if (!imm)
+ break;
+ if (imm < 32) {
+ EMIT(PPC_RAW_RLWINM(dst_reg_h, dst_reg_h, imm, 0, 31 - imm));
+ EMIT(PPC_RAW_RLWIMI(dst_reg_h, dst_reg, imm, 32 - imm, 31));
+ EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, imm, 0, 31 - imm));
+ break;
+ }
+ if (imm < 64)
+ EMIT(PPC_RAW_RLWINM(dst_reg_h, dst_reg, imm, 0, 31 - imm));
+ else
+ EMIT(PPC_RAW_LI(dst_reg_h, 0));
+ EMIT(PPC_RAW_LI(dst_reg, 0));
+ break;
+ case BPF_ALU | BPF_RSH | BPF_X: /* (u32) dst >>= (u32) src */
+ EMIT(PPC_RAW_SRW(dst_reg, dst_reg, src_reg));
+ break;
+ case BPF_ALU64 | BPF_RSH | BPF_X: /* dst >>= src */
+ bpf_set_seen_register(ctx, tmp_reg);
+ EMIT(PPC_RAW_SUBFIC(_R0, src_reg, 32));
+ EMIT(PPC_RAW_SRW(dst_reg, dst_reg, src_reg));
+ EMIT(PPC_RAW_ADDI(tmp_reg, src_reg, 32));
+ EMIT(PPC_RAW_SLW(_R0, dst_reg_h, _R0));
+ EMIT(PPC_RAW_SRW(tmp_reg, dst_reg_h, tmp_reg));
+ EMIT(PPC_RAW_OR(dst_reg, dst_reg, _R0));
+ EMIT(PPC_RAW_SRW(dst_reg_h, dst_reg_h, src_reg));
+ EMIT(PPC_RAW_OR(dst_reg, dst_reg, tmp_reg));
+ break;
+ case BPF_ALU | BPF_RSH | BPF_K: /* (u32) dst >>= (u32) imm */
+ if (!imm)
+ break;
+ EMIT(PPC_RAW_SRWI(dst_reg, dst_reg, imm));
+ break;
+ case BPF_ALU64 | BPF_RSH | BPF_K: /* dst >>= imm */
+ if (imm < 0)
+ return -EINVAL;
+ if (!imm)
+ break;
+ if (imm < 32) {
+ EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 32 - imm, imm, 31));
+ EMIT(PPC_RAW_RLWIMI(dst_reg, dst_reg_h, 32 - imm, 0, imm - 1));
+ EMIT(PPC_RAW_RLWINM(dst_reg_h, dst_reg_h, 32 - imm, imm, 31));
+ break;
+ }
+ if (imm < 64)
+ EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg_h, 64 - imm, imm - 32, 31));
+ else
+ EMIT(PPC_RAW_LI(dst_reg, 0));
+ EMIT(PPC_RAW_LI(dst_reg_h, 0));
+ break;
+ case BPF_ALU | BPF_ARSH | BPF_X: /* (s32) dst >>= src */
+ EMIT(PPC_RAW_SRAW(dst_reg, dst_reg, src_reg));
+ break;
+ case BPF_ALU64 | BPF_ARSH | BPF_X: /* (s64) dst >>= src */
+ bpf_set_seen_register(ctx, tmp_reg);
+ EMIT(PPC_RAW_SUBFIC(_R0, src_reg, 32));
+ EMIT(PPC_RAW_SRW(dst_reg, dst_reg, src_reg));
+ EMIT(PPC_RAW_SLW(_R0, dst_reg_h, _R0));
+ EMIT(PPC_RAW_ADDI(tmp_reg, src_reg, 32));
+ EMIT(PPC_RAW_OR(dst_reg, dst_reg, _R0));
+ EMIT(PPC_RAW_RLWINM(_R0, tmp_reg, 0, 26, 26));
+ EMIT(PPC_RAW_SRAW(tmp_reg, dst_reg_h, tmp_reg));
+ EMIT(PPC_RAW_SRAW(dst_reg_h, dst_reg_h, src_reg));
+ EMIT(PPC_RAW_SLW(tmp_reg, tmp_reg, _R0));
+ EMIT(PPC_RAW_OR(dst_reg, dst_reg, tmp_reg));
+ break;
+ case BPF_ALU | BPF_ARSH | BPF_K: /* (s32) dst >>= imm */
+ if (!imm)
+ break;
+ EMIT(PPC_RAW_SRAWI(dst_reg, dst_reg, imm));
+ break;
+ case BPF_ALU64 | BPF_ARSH | BPF_K: /* (s64) dst >>= imm */
+ if (imm < 0)
+ return -EINVAL;
+ if (!imm)
+ break;
+ if (imm < 32) {
+ EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 32 - imm, imm, 31));
+ EMIT(PPC_RAW_RLWIMI(dst_reg, dst_reg_h, 32 - imm, 0, imm - 1));
+ EMIT(PPC_RAW_SRAWI(dst_reg_h, dst_reg_h, imm));
+ break;
+ }
+ if (imm < 64)
+ EMIT(PPC_RAW_SRAWI(dst_reg, dst_reg_h, imm - 32));
+ else
+ EMIT(PPC_RAW_SRAWI(dst_reg, dst_reg_h, 31));
+ EMIT(PPC_RAW_SRAWI(dst_reg_h, dst_reg_h, 31));
+ break;
+
+ /*
+ * MOV
+ */
+ case BPF_ALU64 | BPF_MOV | BPF_X: /* dst = src */
+ if (dst_reg == src_reg)
+ break;
+ EMIT(PPC_RAW_MR(dst_reg, src_reg));
+ EMIT(PPC_RAW_MR(dst_reg_h, src_reg_h));
+ break;
+ case BPF_ALU | BPF_MOV | BPF_X: /* (u32) dst = src */
+ /* special mov32 for zext */
+ if (imm == 1)
+ EMIT(PPC_RAW_LI(dst_reg_h, 0));
+ else if (dst_reg != src_reg)
+ EMIT(PPC_RAW_MR(dst_reg, src_reg));
+ break;
+ case BPF_ALU64 | BPF_MOV | BPF_K: /* dst = (s64) imm */
+ PPC_LI32(dst_reg, imm);
+ PPC_EX32(dst_reg_h, imm);
+ break;
+ case BPF_ALU | BPF_MOV | BPF_K: /* (u32) dst = imm */
+ PPC_LI32(dst_reg, imm);
+ break;
+
+ /*
+ * BPF_FROM_BE/LE
+ */
+ case BPF_ALU | BPF_END | BPF_FROM_LE:
+ switch (imm) {
+ case 16:
+ /* Copy 16 bits to upper part */
+ EMIT(PPC_RAW_RLWIMI(dst_reg, dst_reg, 16, 0, 15));
+ /* Rotate 8 bits right & mask */
+ EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 24, 16, 31));
+ break;
+ case 32:
+ /*
+ * Rotate word left by 8 bits:
+ * 2 bytes are already in their final position
+ * -- bytes 2 and 4 (of bytes 1, 2, 3 and 4)
+ */
+ EMIT(PPC_RAW_RLWINM(_R0, dst_reg, 8, 0, 31));
+ /* Rotate 24 bits and insert byte 1 */
+ EMIT(PPC_RAW_RLWIMI(_R0, dst_reg, 24, 0, 7));
+ /* Rotate 24 bits and insert byte 3 */
+ EMIT(PPC_RAW_RLWIMI(_R0, dst_reg, 24, 16, 23));
+ EMIT(PPC_RAW_MR(dst_reg, _R0));
+ break;
+ case 64:
+ bpf_set_seen_register(ctx, tmp_reg);
+ EMIT(PPC_RAW_RLWINM(tmp_reg, dst_reg, 8, 0, 31));
+ EMIT(PPC_RAW_RLWINM(_R0, dst_reg_h, 8, 0, 31));
+ /* Rotate 24 bits and insert byte 1 */
+ EMIT(PPC_RAW_RLWIMI(tmp_reg, dst_reg, 24, 0, 7));
+ EMIT(PPC_RAW_RLWIMI(_R0, dst_reg_h, 24, 0, 7));
+ /* Rotate 24 bits and insert byte 3 */
+ EMIT(PPC_RAW_RLWIMI(tmp_reg, dst_reg, 24, 16, 23));
+ EMIT(PPC_RAW_RLWIMI(_R0, dst_reg_h, 24, 16, 23));
+ EMIT(PPC_RAW_MR(dst_reg, _R0));
+ EMIT(PPC_RAW_MR(dst_reg_h, tmp_reg));
+ break;
+ }
+ break;
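The 32-bit BPF_FROM_LE case above is the classic PowerPC byte swap done with one rotate plus two rotate-and-insert operations. A hedged userspace model, where rotl32 stands in for the rotation performed by rlwinm/rlwimi:

#include <stdint.h>
#include <stdio.h>

static uint32_t rotl32(uint32_t v, unsigned int n)
{
	return (v << n) | (v >> (32 - n));	/* n is 8 or 24 here, never 0 */
}

static uint32_t bswap32_rot(uint32_t v)
{
	uint32_t r = rotl32(v, 8);					/* rlwinm r0,v,8,0,31   */
	r = (r & ~0xff000000u) | (rotl32(v, 24) & 0xff000000u);		/* rlwimi r0,v,24,0,7   */
	r = (r & ~0x0000ff00u) | (rotl32(v, 24) & 0x0000ff00u);		/* rlwimi r0,v,24,16,23 */
	return r;
}

int main(void)
{
	printf("%08x\n", bswap32_rot(0x11223344));	/* 44332211 */
	return 0;
}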
+ case BPF_ALU | BPF_END | BPF_FROM_BE:
+ switch (imm) {
+ case 16:
+ /* zero-extend 16 bits into 32 bits */
+ EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 0, 16, 31));
+ break;
+ case 32:
+ case 64:
+ /* nop */
+ break;
+ }
+ break;
+
+ /*
+ * BPF_ST NOSPEC (speculation barrier)
+ */
+ case BPF_ST | BPF_NOSPEC:
+ break;
+
+ /*
+ * BPF_ST(X)
+ */
+ case BPF_STX | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = src */
+ EMIT(PPC_RAW_STB(src_reg, dst_reg, off));
+ break;
+ case BPF_ST | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = imm */
+ PPC_LI32(_R0, imm);
+ EMIT(PPC_RAW_STB(_R0, dst_reg, off));
+ break;
+ case BPF_STX | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = src */
+ EMIT(PPC_RAW_STH(src_reg, dst_reg, off));
+ break;
+ case BPF_ST | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = imm */
+ PPC_LI32(_R0, imm);
+ EMIT(PPC_RAW_STH(_R0, dst_reg, off));
+ break;
+ case BPF_STX | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = src */
+ EMIT(PPC_RAW_STW(src_reg, dst_reg, off));
+ break;
+ case BPF_ST | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = imm */
+ PPC_LI32(_R0, imm);
+ EMIT(PPC_RAW_STW(_R0, dst_reg, off));
+ break;
+ case BPF_STX | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = src */
+ EMIT(PPC_RAW_STW(src_reg_h, dst_reg, off));
+ EMIT(PPC_RAW_STW(src_reg, dst_reg, off + 4));
+ break;
+ case BPF_ST | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = imm */
+ PPC_LI32(_R0, imm);
+ EMIT(PPC_RAW_STW(_R0, dst_reg, off + 4));
+ PPC_EX32(_R0, imm);
+ EMIT(PPC_RAW_STW(_R0, dst_reg, off));
+ break;
+
+ /*
+ * BPF_STX ATOMIC (atomic ops)
+ */
+ case BPF_STX | BPF_ATOMIC | BPF_W:
+ save_reg = _R0;
+ ret_reg = src_reg;
+
+ bpf_set_seen_register(ctx, tmp_reg);
+ bpf_set_seen_register(ctx, ax_reg);
+
+ /* Get offset into TMP_REG */
+ EMIT(PPC_RAW_LI(tmp_reg, off));
+ tmp_idx = ctx->idx * 4;
+ /* load value from memory into r0 */
+ EMIT(PPC_RAW_LWARX(_R0, tmp_reg, dst_reg, 0));
+
+ /* Save old value in BPF_REG_AX */
+ if (imm & BPF_FETCH)
+ EMIT(PPC_RAW_MR(ax_reg, _R0));
+
+ switch (imm) {
+ case BPF_ADD:
+ case BPF_ADD | BPF_FETCH:
+ EMIT(PPC_RAW_ADD(_R0, _R0, src_reg));
+ break;
+ case BPF_AND:
+ case BPF_AND | BPF_FETCH:
+ EMIT(PPC_RAW_AND(_R0, _R0, src_reg));
+ break;
+ case BPF_OR:
+ case BPF_OR | BPF_FETCH:
+ EMIT(PPC_RAW_OR(_R0, _R0, src_reg));
+ break;
+ case BPF_XOR:
+ case BPF_XOR | BPF_FETCH:
+ EMIT(PPC_RAW_XOR(_R0, _R0, src_reg));
+ break;
+ case BPF_CMPXCHG:
+ /*
+ * Return old value in BPF_REG_0 for BPF_CMPXCHG &
+ * in src_reg for other cases.
+ */
+ ret_reg = bpf_to_ppc(BPF_REG_0);
+
+ /* Compare with old value in BPF_REG_0 */
+ EMIT(PPC_RAW_CMPW(bpf_to_ppc(BPF_REG_0), _R0));
+ /* Don't set if different from old value */
+ PPC_BCC_SHORT(COND_NE, (ctx->idx + 3) * 4);
+ fallthrough;
+ case BPF_XCHG:
+ save_reg = src_reg;
+ break;
+ default:
+ pr_err_ratelimited("eBPF filter atomic op code %02x (@%d) unsupported\n",
+ code, i);
+ return -EOPNOTSUPP;
+ }
+
+ /* store new value */
+ EMIT(PPC_RAW_STWCX(save_reg, tmp_reg, dst_reg));
+ /* we're done if this succeeded */
+ PPC_BCC_SHORT(COND_NE, tmp_idx);
+
+ /* For the BPF_FETCH variant, get old data into src_reg */
+ if (imm & BPF_FETCH) {
+ EMIT(PPC_RAW_MR(ret_reg, ax_reg));
+ if (!fp->aux->verifier_zext)
+ EMIT(PPC_RAW_LI(ret_reg - 1, 0)); /* higher 32-bit */
+ }
+ break;
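The lwarx/stwcx. pair above forms a load-reserve/store-conditional retry loop: a failed conditional store branches back to tmp_idx and reloads. For reference, a hedged expression of the BPF_ADD | BPF_FETCH semantics using a compiler builtin instead of reservations:

#include <stdint.h>
#include <stdio.h>

/* Equivalent of the atomic BPF_ADD | BPF_FETCH case: add src to *addr and
 * return the old value. The JIT open-codes this as a lwarx/stwcx. loop. */
static uint32_t atomic_fetch_add32(uint32_t *addr, uint32_t src)
{
	return __atomic_fetch_add(addr, src, __ATOMIC_RELAXED);
}

int main(void)
{
	uint32_t v = 40;
	uint32_t old = atomic_fetch_add32(&v, 2);

	printf("old=%u new=%u\n", old, v);	/* old=40 new=42 */
	return 0;
}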
+
+ case BPF_STX | BPF_ATOMIC | BPF_DW: /* *(u64 *)(dst + off) += src */
+ return -EOPNOTSUPP;
+
+ /*
+ * BPF_LDX
+ */
+ case BPF_LDX | BPF_MEM | BPF_B: /* dst = *(u8 *)(ul) (src + off) */
+ case BPF_LDX | BPF_PROBE_MEM | BPF_B:
+ case BPF_LDX | BPF_MEM | BPF_H: /* dst = *(u16 *)(ul) (src + off) */
+ case BPF_LDX | BPF_PROBE_MEM | BPF_H:
+ case BPF_LDX | BPF_MEM | BPF_W: /* dst = *(u32 *)(ul) (src + off) */
+ case BPF_LDX | BPF_PROBE_MEM | BPF_W:
+ case BPF_LDX | BPF_MEM | BPF_DW: /* dst = *(u64 *)(ul) (src + off) */
+ case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
+ /*
+ * As PTR_TO_BTF_ID that uses BPF_PROBE_MEM mode could either be a valid
+ * kernel pointer or NULL but not a userspace address, execute BPF_PROBE_MEM
+ * load only if addr is kernel address (see is_kernel_addr()), otherwise
+ * set dst_reg=0 and move on.
+ */
+ if (BPF_MODE(code) == BPF_PROBE_MEM) {
+ PPC_LI32(_R0, TASK_SIZE - off);
+ EMIT(PPC_RAW_CMPLW(src_reg, _R0));
+ PPC_BCC_SHORT(COND_GT, (ctx->idx + 4) * 4);
+ EMIT(PPC_RAW_LI(dst_reg, 0));
+ /*
+ * For BPF_DW case, "li reg_h,0" would be needed when
+ * !fp->aux->verifier_zext. Emit NOP otherwise.
+ *
+ * Note that "li reg_h,0" is emitted for BPF_B/H/W case,
+ * if necessary. So, jump there instead of emitting an
+ * additional "li reg_h,0" instruction.
+ */
+ if (size == BPF_DW && !fp->aux->verifier_zext)
+ EMIT(PPC_RAW_LI(dst_reg_h, 0));
+ else
+ EMIT(PPC_RAW_NOP());
+ /*
+ * Need to jump two instructions instead of one for BPF_DW case
+ * as there are two load instructions for dst_reg_h & dst_reg
+ * respectively.
+ */
+ if (size == BPF_DW)
+ PPC_JMP((ctx->idx + 3) * 4);
+ else
+ PPC_JMP((ctx->idx + 2) * 4);
+ }
+
+ switch (size) {
+ case BPF_B:
+ EMIT(PPC_RAW_LBZ(dst_reg, src_reg, off));
+ break;
+ case BPF_H:
+ EMIT(PPC_RAW_LHZ(dst_reg, src_reg, off));
+ break;
+ case BPF_W:
+ EMIT(PPC_RAW_LWZ(dst_reg, src_reg, off));
+ break;
+ case BPF_DW:
+ EMIT(PPC_RAW_LWZ(dst_reg_h, src_reg, off));
+ EMIT(PPC_RAW_LWZ(dst_reg, src_reg, off + 4));
+ break;
+ }
+
+ if (size != BPF_DW && !fp->aux->verifier_zext)
+ EMIT(PPC_RAW_LI(dst_reg_h, 0));
+
+ if (BPF_MODE(code) == BPF_PROBE_MEM) {
+ int insn_idx = ctx->idx - 1;
+ int jmp_off = 4;
+
+ /*
+ * In case of BPF_DW, two lwz instructions are emitted, one
+ * for higher 32-bit and another for lower 32-bit. So, set
+ * ex->insn to the first of the two and jump over both
+ * instructions in fixup.
+ *
+ * Similarly, with !verifier_zext, two instructions are
+ * emitted for BPF_B/H/W case. So, set ex->insn to the
+ * instruction that could fault and skip over both
+ * instructions.
+ */
+ if (size == BPF_DW || !fp->aux->verifier_zext) {
+ insn_idx -= 1;
+ jmp_off += 4;
+ }
+
+ ret = bpf_add_extable_entry(fp, image, pass, ctx, insn_idx,
+ jmp_off, dst_reg);
+ if (ret)
+ return ret;
+ }
+ break;
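The guard emitted for BPF_PROBE_MEM above only lets the load run when src + off falls above TASK_SIZE, i.e. when it is a kernel address; otherwise the destination is zeroed and the load skipped. A hedged sketch of that decision, with TASK_SIZE_EXAMPLE a stand-in constant rather than the real per-platform value:

#include <stdbool.h>
#include <stdint.h>

#define TASK_SIZE_EXAMPLE	0xc0000000u	/* illustrative 32-bit user/kernel split */

/* Mirrors: cmplw src, (TASK_SIZE - off); perform the load only on "greater than". */
static bool probe_mem_may_load(uint32_t src, int16_t off)
{
	return src > TASK_SIZE_EXAMPLE - off;
}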
+
+ /*
+ * Doubleword load
+ * 16 byte instruction that uses two 'struct bpf_insn'
+ */
+ case BPF_LD | BPF_IMM | BPF_DW: /* dst = (u64) imm */
+ tmp_idx = ctx->idx;
+ PPC_LI32(dst_reg_h, (u32)insn[i + 1].imm);
+ PPC_LI32(dst_reg, (u32)insn[i].imm);
+ /* padding to allow full 4 instructions for later patching */
+ for (j = ctx->idx - tmp_idx; j < 4; j++)
+ EMIT(PPC_RAW_NOP());
+ /* Adjust for two bpf instructions */
+ addrs[++i] = ctx->idx * 4;
+ break;
+
+ /*
+ * Return/Exit
+ */
+ case BPF_JMP | BPF_EXIT:
+ /*
+ * If this isn't the very last instruction, branch to
+ * the epilogue. If we _are_ the last instruction,
+ * we'll just fall through to the epilogue.
+ */
+ if (i != flen - 1) {
+ ret = bpf_jit_emit_exit_insn(image, ctx, _R0, exit_addr);
+ if (ret)
+ return ret;
+ }
+ /* else fall through to the epilogue */
+ break;
+
+ /*
+ * Call kernel helper or bpf function
+ */
+ case BPF_JMP | BPF_CALL:
+ ctx->seen |= SEEN_FUNC;
+
+ ret = bpf_jit_get_func_addr(fp, &insn[i], false,
+ &func_addr, &func_addr_fixed);
+ if (ret < 0)
+ return ret;
+
+ if (bpf_is_seen_register(ctx, bpf_to_ppc(BPF_REG_5))) {
+ EMIT(PPC_RAW_STW(bpf_to_ppc(BPF_REG_5) - 1, _R1, 8));
+ EMIT(PPC_RAW_STW(bpf_to_ppc(BPF_REG_5), _R1, 12));
+ }
+
+ ret = bpf_jit_emit_func_call_rel(image, ctx, func_addr);
+ if (ret)
+ return ret;
+
+ EMIT(PPC_RAW_MR(bpf_to_ppc(BPF_REG_0) - 1, _R3));
+ EMIT(PPC_RAW_MR(bpf_to_ppc(BPF_REG_0), _R4));
+ break;
+
+ /*
+ * Jumps and branches
+ */
+ case BPF_JMP | BPF_JA:
+ PPC_JMP(addrs[i + 1 + off]);
+ break;
+
+ case BPF_JMP | BPF_JGT | BPF_K:
+ case BPF_JMP | BPF_JGT | BPF_X:
+ case BPF_JMP | BPF_JSGT | BPF_K:
+ case BPF_JMP | BPF_JSGT | BPF_X:
+ case BPF_JMP32 | BPF_JGT | BPF_K:
+ case BPF_JMP32 | BPF_JGT | BPF_X:
+ case BPF_JMP32 | BPF_JSGT | BPF_K:
+ case BPF_JMP32 | BPF_JSGT | BPF_X:
+ true_cond = COND_GT;
+ goto cond_branch;
+ case BPF_JMP | BPF_JLT | BPF_K:
+ case BPF_JMP | BPF_JLT | BPF_X:
+ case BPF_JMP | BPF_JSLT | BPF_K:
+ case BPF_JMP | BPF_JSLT | BPF_X:
+ case BPF_JMP32 | BPF_JLT | BPF_K:
+ case BPF_JMP32 | BPF_JLT | BPF_X:
+ case BPF_JMP32 | BPF_JSLT | BPF_K:
+ case BPF_JMP32 | BPF_JSLT | BPF_X:
+ true_cond = COND_LT;
+ goto cond_branch;
+ case BPF_JMP | BPF_JGE | BPF_K:
+ case BPF_JMP | BPF_JGE | BPF_X:
+ case BPF_JMP | BPF_JSGE | BPF_K:
+ case BPF_JMP | BPF_JSGE | BPF_X:
+ case BPF_JMP32 | BPF_JGE | BPF_K:
+ case BPF_JMP32 | BPF_JGE | BPF_X:
+ case BPF_JMP32 | BPF_JSGE | BPF_K:
+ case BPF_JMP32 | BPF_JSGE | BPF_X:
+ true_cond = COND_GE;
+ goto cond_branch;
+ case BPF_JMP | BPF_JLE | BPF_K:
+ case BPF_JMP | BPF_JLE | BPF_X:
+ case BPF_JMP | BPF_JSLE | BPF_K:
+ case BPF_JMP | BPF_JSLE | BPF_X:
+ case BPF_JMP32 | BPF_JLE | BPF_K:
+ case BPF_JMP32 | BPF_JLE | BPF_X:
+ case BPF_JMP32 | BPF_JSLE | BPF_K:
+ case BPF_JMP32 | BPF_JSLE | BPF_X:
+ true_cond = COND_LE;
+ goto cond_branch;
+ case BPF_JMP | BPF_JEQ | BPF_K:
+ case BPF_JMP | BPF_JEQ | BPF_X:
+ case BPF_JMP32 | BPF_JEQ | BPF_K:
+ case BPF_JMP32 | BPF_JEQ | BPF_X:
+ true_cond = COND_EQ;
+ goto cond_branch;
+ case BPF_JMP | BPF_JNE | BPF_K:
+ case BPF_JMP | BPF_JNE | BPF_X:
+ case BPF_JMP32 | BPF_JNE | BPF_K:
+ case BPF_JMP32 | BPF_JNE | BPF_X:
+ true_cond = COND_NE;
+ goto cond_branch;
+ case BPF_JMP | BPF_JSET | BPF_K:
+ case BPF_JMP | BPF_JSET | BPF_X:
+ case BPF_JMP32 | BPF_JSET | BPF_K:
+ case BPF_JMP32 | BPF_JSET | BPF_X:
+ true_cond = COND_NE;
+ /* fallthrough; */
+
+cond_branch:
+ switch (code) {
+ case BPF_JMP | BPF_JGT | BPF_X:
+ case BPF_JMP | BPF_JLT | BPF_X:
+ case BPF_JMP | BPF_JGE | BPF_X:
+ case BPF_JMP | BPF_JLE | BPF_X:
+ case BPF_JMP | BPF_JEQ | BPF_X:
+ case BPF_JMP | BPF_JNE | BPF_X:
+ /* unsigned comparison */
+ EMIT(PPC_RAW_CMPLW(dst_reg_h, src_reg_h));
+ PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4);
+ EMIT(PPC_RAW_CMPLW(dst_reg, src_reg));
+ break;
+ case BPF_JMP32 | BPF_JGT | BPF_X:
+ case BPF_JMP32 | BPF_JLT | BPF_X:
+ case BPF_JMP32 | BPF_JGE | BPF_X:
+ case BPF_JMP32 | BPF_JLE | BPF_X:
+ case BPF_JMP32 | BPF_JEQ | BPF_X:
+ case BPF_JMP32 | BPF_JNE | BPF_X:
+ /* unsigned comparison */
+ EMIT(PPC_RAW_CMPLW(dst_reg, src_reg));
+ break;
+ case BPF_JMP | BPF_JSGT | BPF_X:
+ case BPF_JMP | BPF_JSLT | BPF_X:
+ case BPF_JMP | BPF_JSGE | BPF_X:
+ case BPF_JMP | BPF_JSLE | BPF_X:
+ /* signed comparison */
+ EMIT(PPC_RAW_CMPW(dst_reg_h, src_reg_h));
+ PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4);
+ EMIT(PPC_RAW_CMPLW(dst_reg, src_reg));
+ break;
+ case BPF_JMP32 | BPF_JSGT | BPF_X:
+ case BPF_JMP32 | BPF_JSLT | BPF_X:
+ case BPF_JMP32 | BPF_JSGE | BPF_X:
+ case BPF_JMP32 | BPF_JSLE | BPF_X:
+ /* signed comparison */
+ EMIT(PPC_RAW_CMPW(dst_reg, src_reg));
+ break;
+ case BPF_JMP | BPF_JSET | BPF_X:
+ EMIT(PPC_RAW_AND_DOT(_R0, dst_reg_h, src_reg_h));
+ PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4);
+ EMIT(PPC_RAW_AND_DOT(_R0, dst_reg, src_reg));
+ break;
+ case BPF_JMP32 | BPF_JSET | BPF_X:
+ EMIT(PPC_RAW_AND_DOT(_R0, dst_reg, src_reg));
+ break;
+ case BPF_JMP | BPF_JNE | BPF_K:
+ case BPF_JMP | BPF_JEQ | BPF_K:
+ case BPF_JMP | BPF_JGT | BPF_K:
+ case BPF_JMP | BPF_JLT | BPF_K:
+ case BPF_JMP | BPF_JGE | BPF_K:
+ case BPF_JMP | BPF_JLE | BPF_K:
+ /*
+ * Need sign-extended load, so only positive
+ * values can be used as imm in cmplwi
+ */
+ if (imm >= 0 && imm < 32768) {
+ EMIT(PPC_RAW_CMPLWI(dst_reg_h, 0));
+ PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4);
+ EMIT(PPC_RAW_CMPLWI(dst_reg, imm));
+ } else {
+ /* sign-extending load ... but unsigned comparison */
+ PPC_EX32(_R0, imm);
+ EMIT(PPC_RAW_CMPLW(dst_reg_h, _R0));
+ PPC_LI32(_R0, imm);
+ PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4);
+ EMIT(PPC_RAW_CMPLW(dst_reg, _R0));
+ }
+ break;
+ case BPF_JMP32 | BPF_JNE | BPF_K:
+ case BPF_JMP32 | BPF_JEQ | BPF_K:
+ case BPF_JMP32 | BPF_JGT | BPF_K:
+ case BPF_JMP32 | BPF_JLT | BPF_K:
+ case BPF_JMP32 | BPF_JGE | BPF_K:
+ case BPF_JMP32 | BPF_JLE | BPF_K:
+ if (imm >= 0 && imm < 65536) {
+ EMIT(PPC_RAW_CMPLWI(dst_reg, imm));
+ } else {
+ PPC_LI32(_R0, imm);
+ EMIT(PPC_RAW_CMPLW(dst_reg, _R0));
+ }
+ break;
+ case BPF_JMP | BPF_JSGT | BPF_K:
+ case BPF_JMP | BPF_JSLT | BPF_K:
+ case BPF_JMP | BPF_JSGE | BPF_K:
+ case BPF_JMP | BPF_JSLE | BPF_K:
+ if (imm >= 0 && imm < 65536) {
+ EMIT(PPC_RAW_CMPWI(dst_reg_h, imm < 0 ? -1 : 0));
+ PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4);
+ EMIT(PPC_RAW_CMPLWI(dst_reg, imm));
+ } else {
+ /* sign-extending load */
+ EMIT(PPC_RAW_CMPWI(dst_reg_h, imm < 0 ? -1 : 0));
+ PPC_LI32(_R0, imm);
+ PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4);
+ EMIT(PPC_RAW_CMPLW(dst_reg, _R0));
+ }
+ break;
+ case BPF_JMP32 | BPF_JSGT | BPF_K:
+ case BPF_JMP32 | BPF_JSLT | BPF_K:
+ case BPF_JMP32 | BPF_JSGE | BPF_K:
+ case BPF_JMP32 | BPF_JSLE | BPF_K:
+ /*
+ * signed comparison, so any 16-bit value
+ * can be used in cmpwi
+ */
+ if (imm >= -32768 && imm < 32768) {
+ EMIT(PPC_RAW_CMPWI(dst_reg, imm));
+ } else {
+ /* sign-extending load */
+ PPC_LI32(_R0, imm);
+ EMIT(PPC_RAW_CMPW(dst_reg, _R0));
+ }
+ break;
+ case BPF_JMP | BPF_JSET | BPF_K:
+ /* andi does not sign-extend the immediate */
+ if (imm >= 0 && imm < 32768) {
+ /* PPC_ANDI is _only/always_ dot-form */
+ EMIT(PPC_RAW_ANDI(_R0, dst_reg, imm));
+ } else {
+ PPC_LI32(_R0, imm);
+ if (imm < 0) {
+ EMIT(PPC_RAW_CMPWI(dst_reg_h, 0));
+ PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4);
+ }
+ EMIT(PPC_RAW_AND_DOT(_R0, dst_reg, _R0));
+ }
+ break;
+ case BPF_JMP32 | BPF_JSET | BPF_K:
+ /* andi does not sign-extend the immediate */
+ if (imm >= 0 && imm < 32768) {
+ /* PPC_ANDI is _only/always_ dot-form */
+ EMIT(PPC_RAW_ANDI(_R0, dst_reg, imm));
+ } else {
+ PPC_LI32(_R0, imm);
+ EMIT(PPC_RAW_AND_DOT(_R0, dst_reg, _R0));
+ }
+ break;
+ }
+ PPC_BCC(true_cond, addrs[i + 1 + off]);
+ break;
+
+ /*
+ * Tail call
+ */
+ case BPF_JMP | BPF_TAIL_CALL:
+ ctx->seen |= SEEN_TAILCALL;
+ ret = bpf_jit_emit_tail_call(image, ctx, addrs[i + 1]);
+ if (ret < 0)
+ return ret;
+ break;
+
+ default:
+ /*
+ * The filter contains something cruel & unusual.
+ * We don't handle it, but also there shouldn't be
+ * anything missing from our list.
+ */
+ pr_err_ratelimited("eBPF filter opcode %04x (@%d) unsupported\n", code, i);
+ return -EOPNOTSUPP;
+ }
+ if (BPF_CLASS(code) == BPF_ALU && !fp->aux->verifier_zext &&
+ !insn_is_zext(&insn[i + 1]) && !(BPF_OP(code) == BPF_END && imm == 64))
+ EMIT(PPC_RAW_LI(dst_reg_h, 0));
+ }
+
+ /* Set end-of-body-code address for exit. */
+ addrs[i] = ctx->idx * 4;
+
+ return 0;
+}
diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
index be3517ef0574..29ee306d6302 100644
--- a/arch/powerpc/net/bpf_jit_comp64.c
+++ b/arch/powerpc/net/bpf_jit_comp64.c
@@ -15,30 +15,63 @@
#include <linux/if_vlan.h>
#include <asm/kprobes.h>
#include <linux/bpf.h>
+#include <asm/security_features.h>
-#include "bpf_jit64.h"
+#include "bpf_jit.h"
-static void bpf_jit_fill_ill_insns(void *area, unsigned int size)
-{
- memset32(area, BREAKPOINT_INSTRUCTION, size/4);
-}
+/*
+ * Stack layout:
+ * Ensure the top half (up to local_tmp_var) stays consistent
+ * with our redzone usage.
+ *
+ * [ prev sp ] <-------------
+ * [ nv gpr save area ] 5*8 |
+ * [ tail_call_cnt ] 8 |
+ * [ local_tmp_var ] 16 |
+ * fp (r31) --> [ ebpf stack space ] up to 512 |
+ * [ frame header ] 32/112 |
+ * sp (r1) ---> [ stack pointer ] --------------
+ */
-static inline void bpf_flush_icache(void *start, void *end)
-{
- smp_wmb();
- flush_icache_range((unsigned long)start, (unsigned long)end);
-}
+/* for gpr non volatile registers BPF_REG_6 to BPF_REG_10 */
+#define BPF_PPC_STACK_SAVE (5*8)
+/* for bpf JIT code internal usage */
+#define BPF_PPC_STACK_LOCALS 24
+/* stack frame excluding BPF stack, ensure this is quadword aligned */
+#define BPF_PPC_STACKFRAME (STACK_FRAME_MIN_SIZE + \
+ BPF_PPC_STACK_LOCALS + BPF_PPC_STACK_SAVE)
-static inline bool bpf_is_seen_register(struct codegen_context *ctx, int i)
-{
- return (ctx->seen & (1 << (31 - b2p[i])));
-}
+/* BPF register usage */
+#define TMP_REG_1 (MAX_BPF_JIT_REG + 0)
+#define TMP_REG_2 (MAX_BPF_JIT_REG + 1)
-static inline void bpf_set_seen_register(struct codegen_context *ctx, int i)
+/* BPF to ppc register mappings */
+void bpf_jit_init_reg_mapping(struct codegen_context *ctx)
{
- ctx->seen |= (1 << (31 - b2p[i]));
+ /* function return value */
+ ctx->b2p[BPF_REG_0] = _R8;
+ /* function arguments */
+ ctx->b2p[BPF_REG_1] = _R3;
+ ctx->b2p[BPF_REG_2] = _R4;
+ ctx->b2p[BPF_REG_3] = _R5;
+ ctx->b2p[BPF_REG_4] = _R6;
+ ctx->b2p[BPF_REG_5] = _R7;
+ /* non volatile registers */
+ ctx->b2p[BPF_REG_6] = _R27;
+ ctx->b2p[BPF_REG_7] = _R28;
+ ctx->b2p[BPF_REG_8] = _R29;
+ ctx->b2p[BPF_REG_9] = _R30;
+ /* frame pointer aka BPF_REG_10 */
+ ctx->b2p[BPF_REG_FP] = _R31;
+ /* eBPF jit internal registers */
+ ctx->b2p[BPF_REG_AX] = _R12;
+ ctx->b2p[TMP_REG_1] = _R9;
+ ctx->b2p[TMP_REG_2] = _R10;
}
+/* PPC NVR range -- update this if we ever use NVRs below r27 */
+#define BPF_PPC_NVR_MIN _R27
+
static inline bool bpf_has_stack_frame(struct codegen_context *ctx)
{
/*
@@ -47,7 +80,7 @@ static inline bool bpf_has_stack_frame(struct codegen_context *ctx)
* - the bpf program uses its stack area
* The latter condition is deduced from the usage of BPF_REG_FP
*/
- return ctx->seen & SEEN_FUNC || bpf_is_seen_register(ctx, BPF_REG_FP);
+ return ctx->seen & SEEN_FUNC || bpf_is_seen_register(ctx, bpf_to_ppc(BPF_REG_FP));
}
/*
@@ -56,9 +89,9 @@ static inline bool bpf_has_stack_frame(struct codegen_context *ctx)
* [ prev sp ] <-------------
* [ ... ] |
* sp (r1) ---> [ stack pointer ] --------------
- * [ nv gpr save area ] 6*8
+ * [ nv gpr save area ] 5*8
* [ tail_call_cnt ] 8
- * [ local_tmp_var ] 8
+ * [ local_tmp_var ] 16
* [ unused red zone ] 208 bytes protected
*/
static int bpf_jit_stack_local(struct codegen_context *ctx)
@@ -66,12 +99,12 @@ static int bpf_jit_stack_local(struct codegen_context *ctx)
if (bpf_has_stack_frame(ctx))
return STACK_FRAME_MIN_SIZE + ctx->stack_size;
else
- return -(BPF_PPC_STACK_SAVE + 16);
+ return -(BPF_PPC_STACK_SAVE + 24);
}
static int bpf_jit_stack_tailcallcnt(struct codegen_context *ctx)
{
- return bpf_jit_stack_local(ctx) + 8;
+ return bpf_jit_stack_local(ctx) + 16;
}
static int bpf_jit_stack_offsetof(struct codegen_context *ctx, int reg)
@@ -85,37 +118,42 @@ static int bpf_jit_stack_offsetof(struct codegen_context *ctx, int reg)
BUG();
}
-static void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
+void bpf_jit_realloc_regs(struct codegen_context *ctx)
+{
+}
+
+void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
{
int i;
+ if (IS_ENABLED(CONFIG_PPC64_ELF_ABI_V2))
+ EMIT(PPC_RAW_LD(_R2, _R13, offsetof(struct paca_struct, kernel_toc)));
+
/*
* Initialize tail_call_cnt if we do tail calls.
* Otherwise, put in NOPs so that it can be skipped when we are
* invoked through a tail call.
*/
if (ctx->seen & SEEN_TAILCALL) {
- PPC_LI(b2p[TMP_REG_1], 0);
+ EMIT(PPC_RAW_LI(bpf_to_ppc(TMP_REG_1), 0));
/* this goes in the redzone */
- PPC_BPF_STL(b2p[TMP_REG_1], 1, -(BPF_PPC_STACK_SAVE + 8));
+ EMIT(PPC_RAW_STD(bpf_to_ppc(TMP_REG_1), _R1, -(BPF_PPC_STACK_SAVE + 8)));
} else {
- PPC_NOP();
- PPC_NOP();
+ EMIT(PPC_RAW_NOP());
+ EMIT(PPC_RAW_NOP());
}
-#define BPF_TAILCALL_PROLOGUE_SIZE 8
-
if (bpf_has_stack_frame(ctx)) {
/*
* We need a stack frame, but we don't necessarily need to
* save/restore LR unless we call other functions
*/
if (ctx->seen & SEEN_FUNC) {
- EMIT(PPC_INST_MFLR | __PPC_RT(R0));
- PPC_BPF_STL(0, 1, PPC_LR_STKOFF);
+ EMIT(PPC_RAW_MFLR(_R0));
+ EMIT(PPC_RAW_STD(_R0, _R1, PPC_LR_STKOFF));
}
- PPC_BPF_STLU(1, 1, -(BPF_PPC_STACKFRAME + ctx->stack_size));
+ EMIT(PPC_RAW_STDU(_R1, _R1, -(BPF_PPC_STACKFRAME + ctx->stack_size)));
}
/*
@@ -124,13 +162,13 @@ static void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
* in the protected zone below the previous stack frame
*/
for (i = BPF_REG_6; i <= BPF_REG_10; i++)
- if (bpf_is_seen_register(ctx, i))
- PPC_BPF_STL(b2p[i], 1, bpf_jit_stack_offsetof(ctx, b2p[i]));
+ if (bpf_is_seen_register(ctx, bpf_to_ppc(i)))
+ EMIT(PPC_RAW_STD(bpf_to_ppc(i), _R1, bpf_jit_stack_offsetof(ctx, bpf_to_ppc(i))));
/* Setup frame pointer to point to the bpf stack area */
- if (bpf_is_seen_register(ctx, BPF_REG_FP))
- PPC_ADDI(b2p[BPF_REG_FP], 1,
- STACK_FRAME_MIN_SIZE + ctx->stack_size);
+ if (bpf_is_seen_register(ctx, bpf_to_ppc(BPF_REG_FP)))
+ EMIT(PPC_RAW_ADDI(bpf_to_ppc(BPF_REG_FP), _R1,
+ STACK_FRAME_MIN_SIZE + ctx->stack_size));
}
static void bpf_jit_emit_common_epilogue(u32 *image, struct codegen_context *ctx)
@@ -139,61 +177,63 @@ static void bpf_jit_emit_common_epilogue(u32 *image, struct codegen_context *ctx
/* Restore NVRs */
for (i = BPF_REG_6; i <= BPF_REG_10; i++)
- if (bpf_is_seen_register(ctx, i))
- PPC_BPF_LL(b2p[i], 1, bpf_jit_stack_offsetof(ctx, b2p[i]));
+ if (bpf_is_seen_register(ctx, bpf_to_ppc(i)))
+ EMIT(PPC_RAW_LD(bpf_to_ppc(i), _R1, bpf_jit_stack_offsetof(ctx, bpf_to_ppc(i))));
/* Tear down our stack frame */
if (bpf_has_stack_frame(ctx)) {
- PPC_ADDI(1, 1, BPF_PPC_STACKFRAME + ctx->stack_size);
+ EMIT(PPC_RAW_ADDI(_R1, _R1, BPF_PPC_STACKFRAME + ctx->stack_size));
if (ctx->seen & SEEN_FUNC) {
- PPC_BPF_LL(0, 1, PPC_LR_STKOFF);
- PPC_MTLR(0);
+ EMIT(PPC_RAW_LD(_R0, _R1, PPC_LR_STKOFF));
+ EMIT(PPC_RAW_MTLR(_R0));
}
}
}
-static void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
+void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
{
bpf_jit_emit_common_epilogue(image, ctx);
/* Move result to r3 */
- PPC_MR(3, b2p[BPF_REG_0]);
+ EMIT(PPC_RAW_MR(_R3, bpf_to_ppc(BPF_REG_0)));
- PPC_BLR();
+ EMIT(PPC_RAW_BLR());
}
-static void bpf_jit_emit_func_call_hlp(u32 *image, struct codegen_context *ctx,
- u64 func)
+static int bpf_jit_emit_func_call_hlp(u32 *image, struct codegen_context *ctx, u64 func)
{
-#ifdef PPC64_ELF_ABI_v1
- /* func points to the function descriptor */
- PPC_LI64(b2p[TMP_REG_2], func);
- /* Load actual entry point from function descriptor */
- PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_2], 0);
- /* ... and move it to LR */
- PPC_MTLR(b2p[TMP_REG_1]);
- /*
- * Load TOC from function descriptor at offset 8.
- * We can clobber r2 since we get called through a
- * function pointer (so caller will save/restore r2)
- * and since we don't use a TOC ourself.
- */
- PPC_BPF_LL(2, b2p[TMP_REG_2], 8);
-#else
- /* We can clobber r12 */
- PPC_FUNC_ADDR(12, func);
- PPC_MTLR(12);
-#endif
- PPC_BLRL();
+ unsigned long func_addr = func ? ppc_function_entry((void *)func) : 0;
+ long reladdr;
+
+ if (WARN_ON_ONCE(!core_kernel_text(func_addr)))
+ return -EINVAL;
+
+ reladdr = func_addr - kernel_toc_addr();
+ if (reladdr > 0x7FFFFFFF || reladdr < -(0x80000000L)) {
+ pr_err("eBPF: address of %ps out of range of kernel_toc.\n", (void *)func);
+ return -ERANGE;
+ }
+
+ EMIT(PPC_RAW_ADDIS(_R12, _R2, PPC_HA(reladdr)));
+ EMIT(PPC_RAW_ADDI(_R12, _R12, PPC_LO(reladdr)));
+ EMIT(PPC_RAW_MTCTR(_R12));
+ EMIT(PPC_RAW_BCTRL());
+
+ return 0;
}
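The addis/addi pair above rebuilds a 32-bit TOC-relative displacement from two 16-bit halves, where the high half is pre-adjusted because addi sign-extends its immediate. A hedged userspace check that the PPC_HA/PPC_LO split round-trips; the macros are copied in their usual form but should be treated as illustrative here.

#include <stdint.h>
#include <stdio.h>

#define PPC_LO(v)	((v) & 0xffff)
#define PPC_HI(v)	(((v) >> 16) & 0xffff)
#define PPC_HA(v)	PPC_HI((v) + 0x8000)

int main(void)
{
	long reladdr = -0x12348000L;	/* sample displacement from the TOC base */
	long rebuilt = (long)(int16_t)PPC_HA(reladdr) * 0x10000L +	/* addis */
		       (long)(int16_t)PPC_LO(reladdr);			/* addi  */

	printf("%lx %lx -> %s\n", reladdr, rebuilt,
	       reladdr == rebuilt ? "round-trips" : "broken");
	return 0;
}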
-static void bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx,
- u64 func)
+int bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx, u64 func)
{
unsigned int i, ctx_idx = ctx->idx;
+ if (WARN_ON_ONCE(func && is_module_text_address(func)))
+ return -EINVAL;
+
+ /* skip past descriptor if elf v1 */
+ func += FUNCTION_DESCR_SIZE;
+
/* Load function address into r12 */
- PPC_LI64(12, func);
+ PPC_LI64(_R12, func);
/* For bpf-to-bpf function calls, the callee's address is unknown
* until the last extra pass. As seen above, we use PPC_LI64() to
@@ -206,25 +246,15 @@ static void bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx,
* that PPC_LI64() can emit.
*/
for (i = ctx->idx - ctx_idx; i < 5; i++)
- PPC_NOP();
+ EMIT(PPC_RAW_NOP());
-#ifdef PPC64_ELF_ABI_v1
- /*
- * Load TOC from function descriptor at offset 8.
- * We can clobber r2 since we get called through a
- * function pointer (so caller will save/restore r2)
- * and since we don't use a TOC ourself.
- */
- PPC_BPF_LL(2, 12, 8);
- /* Load actual entry point from function descriptor */
- PPC_BPF_LL(12, 12, 0);
-#endif
+ EMIT(PPC_RAW_MTCTR(_R12));
+ EMIT(PPC_RAW_BCTRL());
- PPC_MTLR(12);
- PPC_BLRL();
+ return 0;
}
-static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out)
+static int bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out)
{
/*
* By now, the eBPF program has already setup parameters in r3, r4 and r5
@@ -232,67 +262,90 @@ static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32
* r4/BPF_REG_2 - pointer to bpf_array
* r5/BPF_REG_3 - index in bpf_array
*/
- int b2p_bpf_array = b2p[BPF_REG_2];
- int b2p_index = b2p[BPF_REG_3];
+ int b2p_bpf_array = bpf_to_ppc(BPF_REG_2);
+ int b2p_index = bpf_to_ppc(BPF_REG_3);
+ int bpf_tailcall_prologue_size = 8;
+
+ if (IS_ENABLED(CONFIG_PPC64_ELF_ABI_V2))
+ bpf_tailcall_prologue_size += 4; /* skip past the toc load */
/*
* if (index >= array->map.max_entries)
* goto out;
*/
- PPC_LWZ(b2p[TMP_REG_1], b2p_bpf_array, offsetof(struct bpf_array, map.max_entries));
- PPC_RLWINM(b2p_index, b2p_index, 0, 0, 31);
- PPC_CMPLW(b2p_index, b2p[TMP_REG_1]);
- PPC_BCC(COND_GE, out);
+ EMIT(PPC_RAW_LWZ(bpf_to_ppc(TMP_REG_1), b2p_bpf_array, offsetof(struct bpf_array, map.max_entries)));
+ EMIT(PPC_RAW_RLWINM(b2p_index, b2p_index, 0, 0, 31));
+ EMIT(PPC_RAW_CMPLW(b2p_index, bpf_to_ppc(TMP_REG_1)));
+ PPC_BCC_SHORT(COND_GE, out);
/*
- * if (tail_call_cnt > MAX_TAIL_CALL_CNT)
+ * if (tail_call_cnt >= MAX_TAIL_CALL_CNT)
* goto out;
*/
- PPC_BPF_LL(b2p[TMP_REG_1], 1, bpf_jit_stack_tailcallcnt(ctx));
- PPC_CMPLWI(b2p[TMP_REG_1], MAX_TAIL_CALL_CNT);
- PPC_BCC(COND_GT, out);
+ EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_1), _R1, bpf_jit_stack_tailcallcnt(ctx)));
+ EMIT(PPC_RAW_CMPLWI(bpf_to_ppc(TMP_REG_1), MAX_TAIL_CALL_CNT));
+ PPC_BCC_SHORT(COND_GE, out);
/*
* tail_call_cnt++;
*/
- PPC_ADDI(b2p[TMP_REG_1], b2p[TMP_REG_1], 1);
- PPC_BPF_STL(b2p[TMP_REG_1], 1, bpf_jit_stack_tailcallcnt(ctx));
+ EMIT(PPC_RAW_ADDI(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), 1));
+ EMIT(PPC_RAW_STD(bpf_to_ppc(TMP_REG_1), _R1, bpf_jit_stack_tailcallcnt(ctx)));
/* prog = array->ptrs[index]; */
- PPC_MULI(b2p[TMP_REG_1], b2p_index, 8);
- PPC_ADD(b2p[TMP_REG_1], b2p[TMP_REG_1], b2p_bpf_array);
- PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_array, ptrs));
+ EMIT(PPC_RAW_MULI(bpf_to_ppc(TMP_REG_1), b2p_index, 8));
+ EMIT(PPC_RAW_ADD(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), b2p_bpf_array));
+ EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), offsetof(struct bpf_array, ptrs)));
/*
* if (prog == NULL)
* goto out;
*/
- PPC_CMPLDI(b2p[TMP_REG_1], 0);
- PPC_BCC(COND_EQ, out);
+ EMIT(PPC_RAW_CMPLDI(bpf_to_ppc(TMP_REG_1), 0));
+ PPC_BCC_SHORT(COND_EQ, out);
/* goto *(prog->bpf_func + prologue_size); */
- PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_prog, bpf_func));
-#ifdef PPC64_ELF_ABI_v1
- /* skip past the function descriptor */
- PPC_ADDI(b2p[TMP_REG_1], b2p[TMP_REG_1],
- FUNCTION_DESCR_SIZE + BPF_TAILCALL_PROLOGUE_SIZE);
-#else
- PPC_ADDI(b2p[TMP_REG_1], b2p[TMP_REG_1], BPF_TAILCALL_PROLOGUE_SIZE);
-#endif
- PPC_MTCTR(b2p[TMP_REG_1]);
+ EMIT(PPC_RAW_LD(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1), offsetof(struct bpf_prog, bpf_func)));
+ EMIT(PPC_RAW_ADDI(bpf_to_ppc(TMP_REG_1), bpf_to_ppc(TMP_REG_1),
+ FUNCTION_DESCR_SIZE + bpf_tailcall_prologue_size));
+ EMIT(PPC_RAW_MTCTR(bpf_to_ppc(TMP_REG_1)));
/* tear down stack, restore NVRs, ... */
bpf_jit_emit_common_epilogue(image, ctx);
- PPC_BCTR();
+ EMIT(PPC_RAW_BCTR());
+
/* out: */
+ return 0;
}
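The sequence above is the JITed form of the tail-call contract: bounds-check the index against max_entries, enforce the tail-call limit (now a >= check against MAX_TAIL_CALL_CNT), bump the counter, and jump into the callee via CTR, entering just past the few prologue instructions that would otherwise re-initialise that counter. A C-level model of the control flow, with generic names that are not the kernel's (the real jump is a bctr that never returns to this point):

struct prog_model { void (*bpf_func)(void); };
struct array_model {
	unsigned int max_entries;
	struct prog_model *ptrs[];
};

/* Returns 0 if the tail call was taken, -1 to fall through to "out:". */
int tail_call_model(struct array_model *arr, unsigned int index,
		    unsigned int *tail_call_cnt, unsigned int max_cnt)
{
	if (index >= arr->max_entries)
		return -1;
	if (*tail_call_cnt >= max_cnt)	/* bound the chain of tail calls */
		return -1;
	(*tail_call_cnt)++;
	if (!arr->ptrs[index])
		return -1;
	/* current frame is torn down first; the counter carries across */
	arr->ptrs[index]->bpf_func();
	return 0;
}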
+/*
+ * We spill into the redzone always, even if the bpf program has its own stackframe.
+ * Offsets hardcoded based on BPF_PPC_STACK_SAVE -- see bpf_jit_stack_local()
+ */
+void bpf_stf_barrier(void);
+
+asm (
+" .global bpf_stf_barrier ;"
+" bpf_stf_barrier: ;"
+" std 21,-64(1) ;"
+" std 22,-56(1) ;"
+" sync ;"
+" ld 21,-64(1) ;"
+" ld 22,-56(1) ;"
+" ori 31,31,0 ;"
+" .rept 14 ;"
+" b 1f ;"
+" 1: ;"
+" .endr ;"
+" blr ;"
+);
+
/* Assemble the body code between the prologue & epilogue */
-static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
- struct codegen_context *ctx,
- u32 *addrs, bool extra_pass)
+int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *ctx,
+ u32 *addrs, int pass)
{
+ enum stf_barrier_type stf_barrier = stf_barrier_type_get();
const struct bpf_insn *insn = fp->insnsi;
int flen = fp->len;
int i, ret;
@@ -302,8 +355,12 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
for (i = 0; i < flen; i++) {
u32 code = insn[i].code;
- u32 dst_reg = b2p[insn[i].dst_reg];
- u32 src_reg = b2p[insn[i].src_reg];
+ u32 dst_reg = bpf_to_ppc(insn[i].dst_reg);
+ u32 src_reg = bpf_to_ppc(insn[i].src_reg);
+ u32 size = BPF_SIZE(code);
+ u32 tmp1_reg = bpf_to_ppc(TMP_REG_1);
+ u32 tmp2_reg = bpf_to_ppc(TMP_REG_2);
+ u32 save_reg, ret_reg;
s16 off = insn[i].off;
s32 imm = insn[i].imm;
bool func_addr_fixed;
@@ -311,6 +368,7 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
u64 imm64;
u32 true_cond;
u32 tmp_idx;
+ int j;
/*
* addrs[] maps a BPF bytecode address into a real offset from
@@ -330,9 +388,9 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
* any issues.
*/
if (dst_reg >= BPF_PPC_NVR_MIN && dst_reg < 32)
- bpf_set_seen_register(ctx, insn[i].dst_reg);
+ bpf_set_seen_register(ctx, dst_reg);
if (src_reg >= BPF_PPC_NVR_MIN && src_reg < 32)
- bpf_set_seen_register(ctx, insn[i].src_reg);
+ bpf_set_seen_register(ctx, src_reg);
switch (code) {
/*
@@ -340,67 +398,70 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
*/
case BPF_ALU | BPF_ADD | BPF_X: /* (u32) dst += (u32) src */
case BPF_ALU64 | BPF_ADD | BPF_X: /* dst += src */
- PPC_ADD(dst_reg, dst_reg, src_reg);
+ EMIT(PPC_RAW_ADD(dst_reg, dst_reg, src_reg));
goto bpf_alu32_trunc;
case BPF_ALU | BPF_SUB | BPF_X: /* (u32) dst -= (u32) src */
case BPF_ALU64 | BPF_SUB | BPF_X: /* dst -= src */
- PPC_SUB(dst_reg, dst_reg, src_reg);
+ EMIT(PPC_RAW_SUB(dst_reg, dst_reg, src_reg));
goto bpf_alu32_trunc;
case BPF_ALU | BPF_ADD | BPF_K: /* (u32) dst += (u32) imm */
- case BPF_ALU | BPF_SUB | BPF_K: /* (u32) dst -= (u32) imm */
case BPF_ALU64 | BPF_ADD | BPF_K: /* dst += imm */
+ if (!imm) {
+ goto bpf_alu32_trunc;
+ } else if (imm >= -32768 && imm < 32768) {
+ EMIT(PPC_RAW_ADDI(dst_reg, dst_reg, IMM_L(imm)));
+ } else {
+ PPC_LI32(tmp1_reg, imm);
+ EMIT(PPC_RAW_ADD(dst_reg, dst_reg, tmp1_reg));
+ }
+ goto bpf_alu32_trunc;
+ case BPF_ALU | BPF_SUB | BPF_K: /* (u32) dst -= (u32) imm */
case BPF_ALU64 | BPF_SUB | BPF_K: /* dst -= imm */
- if (BPF_OP(code) == BPF_SUB)
- imm = -imm;
- if (imm) {
- if (imm >= -32768 && imm < 32768)
- PPC_ADDI(dst_reg, dst_reg, IMM_L(imm));
- else {
- PPC_LI32(b2p[TMP_REG_1], imm);
- PPC_ADD(dst_reg, dst_reg, b2p[TMP_REG_1]);
- }
+ if (!imm) {
+ goto bpf_alu32_trunc;
+ } else if (imm > -32768 && imm <= 32768) {
+ EMIT(PPC_RAW_ADDI(dst_reg, dst_reg, IMM_L(-imm)));
+ } else {
+ PPC_LI32(tmp1_reg, imm);
+ EMIT(PPC_RAW_SUB(dst_reg, dst_reg, tmp1_reg));
}
goto bpf_alu32_trunc;
case BPF_ALU | BPF_MUL | BPF_X: /* (u32) dst *= (u32) src */
case BPF_ALU64 | BPF_MUL | BPF_X: /* dst *= src */
if (BPF_CLASS(code) == BPF_ALU)
- PPC_MULW(dst_reg, dst_reg, src_reg);
+ EMIT(PPC_RAW_MULW(dst_reg, dst_reg, src_reg));
else
- PPC_MULD(dst_reg, dst_reg, src_reg);
+ EMIT(PPC_RAW_MULD(dst_reg, dst_reg, src_reg));
goto bpf_alu32_trunc;
case BPF_ALU | BPF_MUL | BPF_K: /* (u32) dst *= (u32) imm */
case BPF_ALU64 | BPF_MUL | BPF_K: /* dst *= imm */
if (imm >= -32768 && imm < 32768)
- PPC_MULI(dst_reg, dst_reg, IMM_L(imm));
+ EMIT(PPC_RAW_MULI(dst_reg, dst_reg, IMM_L(imm)));
else {
- PPC_LI32(b2p[TMP_REG_1], imm);
+ PPC_LI32(tmp1_reg, imm);
if (BPF_CLASS(code) == BPF_ALU)
- PPC_MULW(dst_reg, dst_reg,
- b2p[TMP_REG_1]);
+ EMIT(PPC_RAW_MULW(dst_reg, dst_reg, tmp1_reg));
else
- PPC_MULD(dst_reg, dst_reg,
- b2p[TMP_REG_1]);
+ EMIT(PPC_RAW_MULD(dst_reg, dst_reg, tmp1_reg));
}
goto bpf_alu32_trunc;
case BPF_ALU | BPF_DIV | BPF_X: /* (u32) dst /= (u32) src */
case BPF_ALU | BPF_MOD | BPF_X: /* (u32) dst %= (u32) src */
if (BPF_OP(code) == BPF_MOD) {
- PPC_DIVWU(b2p[TMP_REG_1], dst_reg, src_reg);
- PPC_MULW(b2p[TMP_REG_1], src_reg,
- b2p[TMP_REG_1]);
- PPC_SUB(dst_reg, dst_reg, b2p[TMP_REG_1]);
+ EMIT(PPC_RAW_DIVWU(tmp1_reg, dst_reg, src_reg));
+ EMIT(PPC_RAW_MULW(tmp1_reg, src_reg, tmp1_reg));
+ EMIT(PPC_RAW_SUB(dst_reg, dst_reg, tmp1_reg));
} else
- PPC_DIVWU(dst_reg, dst_reg, src_reg);
+ EMIT(PPC_RAW_DIVWU(dst_reg, dst_reg, src_reg));
goto bpf_alu32_trunc;
case BPF_ALU64 | BPF_DIV | BPF_X: /* dst /= src */
case BPF_ALU64 | BPF_MOD | BPF_X: /* dst %= src */
if (BPF_OP(code) == BPF_MOD) {
- PPC_DIVDU(b2p[TMP_REG_1], dst_reg, src_reg);
- PPC_MULD(b2p[TMP_REG_1], src_reg,
- b2p[TMP_REG_1]);
- PPC_SUB(dst_reg, dst_reg, b2p[TMP_REG_1]);
+ EMIT(PPC_RAW_DIVDU(tmp1_reg, dst_reg, src_reg));
+ EMIT(PPC_RAW_MULD(tmp1_reg, src_reg, tmp1_reg));
+ EMIT(PPC_RAW_SUB(dst_reg, dst_reg, tmp1_reg));
} else
- PPC_DIVDU(dst_reg, dst_reg, src_reg);
+ EMIT(PPC_RAW_DIVDU(dst_reg, dst_reg, src_reg));
break;
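The JIT does not use a hardware remainder instruction here; BPF_MOD is derived from the quotient as rem = dst - (dst / src) * src, i.e. divwu/divdu followed by a multiply and subtract. A quick user-space check of that identity for the unsigned case (sketch only):

#include <assert.h>
#include <stdint.h>

static uint32_t mod_via_div(uint32_t dst, uint32_t src)
{
	uint32_t q = dst / src;		/* divwu */
	return dst - q * src;		/* mul + sub */
}

int main(void)
{
	assert(mod_via_div(17, 5) == 2);
	assert(mod_via_div(42, 7) == 0);
	assert(mod_via_div(3, 10) == 3);
	return 0;
}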
case BPF_ALU | BPF_MOD | BPF_K: /* (u32) dst %= (u32) imm */
case BPF_ALU | BPF_DIV | BPF_K: /* (u32) dst /= (u32) imm */
@@ -408,42 +469,38 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
case BPF_ALU64 | BPF_DIV | BPF_K: /* dst /= imm */
if (imm == 0)
return -EINVAL;
- else if (imm == 1)
- goto bpf_alu32_trunc;
+ if (imm == 1) {
+ if (BPF_OP(code) == BPF_DIV) {
+ goto bpf_alu32_trunc;
+ } else {
+ EMIT(PPC_RAW_LI(dst_reg, 0));
+ break;
+ }
+ }
- PPC_LI32(b2p[TMP_REG_1], imm);
+ PPC_LI32(tmp1_reg, imm);
switch (BPF_CLASS(code)) {
case BPF_ALU:
if (BPF_OP(code) == BPF_MOD) {
- PPC_DIVWU(b2p[TMP_REG_2], dst_reg,
- b2p[TMP_REG_1]);
- PPC_MULW(b2p[TMP_REG_1],
- b2p[TMP_REG_1],
- b2p[TMP_REG_2]);
- PPC_SUB(dst_reg, dst_reg,
- b2p[TMP_REG_1]);
+ EMIT(PPC_RAW_DIVWU(tmp2_reg, dst_reg, tmp1_reg));
+ EMIT(PPC_RAW_MULW(tmp1_reg, tmp1_reg, tmp2_reg));
+ EMIT(PPC_RAW_SUB(dst_reg, dst_reg, tmp1_reg));
} else
- PPC_DIVWU(dst_reg, dst_reg,
- b2p[TMP_REG_1]);
+ EMIT(PPC_RAW_DIVWU(dst_reg, dst_reg, tmp1_reg));
break;
case BPF_ALU64:
if (BPF_OP(code) == BPF_MOD) {
- PPC_DIVDU(b2p[TMP_REG_2], dst_reg,
- b2p[TMP_REG_1]);
- PPC_MULD(b2p[TMP_REG_1],
- b2p[TMP_REG_1],
- b2p[TMP_REG_2]);
- PPC_SUB(dst_reg, dst_reg,
- b2p[TMP_REG_1]);
+ EMIT(PPC_RAW_DIVDU(tmp2_reg, dst_reg, tmp1_reg));
+ EMIT(PPC_RAW_MULD(tmp1_reg, tmp1_reg, tmp2_reg));
+ EMIT(PPC_RAW_SUB(dst_reg, dst_reg, tmp1_reg));
} else
- PPC_DIVDU(dst_reg, dst_reg,
- b2p[TMP_REG_1]);
+ EMIT(PPC_RAW_DIVDU(dst_reg, dst_reg, tmp1_reg));
break;
}
goto bpf_alu32_trunc;
case BPF_ALU | BPF_NEG: /* (u32) dst = -dst */
case BPF_ALU64 | BPF_NEG: /* dst = -dst */
- PPC_NEG(dst_reg, dst_reg);
+ EMIT(PPC_RAW_NEG(dst_reg, dst_reg));
goto bpf_alu32_trunc;
/*
@@ -451,101 +508,101 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
*/
case BPF_ALU | BPF_AND | BPF_X: /* (u32) dst = dst & src */
case BPF_ALU64 | BPF_AND | BPF_X: /* dst = dst & src */
- PPC_AND(dst_reg, dst_reg, src_reg);
+ EMIT(PPC_RAW_AND(dst_reg, dst_reg, src_reg));
goto bpf_alu32_trunc;
case BPF_ALU | BPF_AND | BPF_K: /* (u32) dst = dst & imm */
case BPF_ALU64 | BPF_AND | BPF_K: /* dst = dst & imm */
if (!IMM_H(imm))
- PPC_ANDI(dst_reg, dst_reg, IMM_L(imm));
+ EMIT(PPC_RAW_ANDI(dst_reg, dst_reg, IMM_L(imm)));
else {
/* Sign-extended */
- PPC_LI32(b2p[TMP_REG_1], imm);
- PPC_AND(dst_reg, dst_reg, b2p[TMP_REG_1]);
+ PPC_LI32(tmp1_reg, imm);
+ EMIT(PPC_RAW_AND(dst_reg, dst_reg, tmp1_reg));
}
goto bpf_alu32_trunc;
case BPF_ALU | BPF_OR | BPF_X: /* dst = (u32) dst | (u32) src */
case BPF_ALU64 | BPF_OR | BPF_X: /* dst = dst | src */
- PPC_OR(dst_reg, dst_reg, src_reg);
+ EMIT(PPC_RAW_OR(dst_reg, dst_reg, src_reg));
goto bpf_alu32_trunc;
case BPF_ALU | BPF_OR | BPF_K:/* dst = (u32) dst | (u32) imm */
case BPF_ALU64 | BPF_OR | BPF_K:/* dst = dst | imm */
if (imm < 0 && BPF_CLASS(code) == BPF_ALU64) {
/* Sign-extended */
- PPC_LI32(b2p[TMP_REG_1], imm);
- PPC_OR(dst_reg, dst_reg, b2p[TMP_REG_1]);
+ PPC_LI32(tmp1_reg, imm);
+ EMIT(PPC_RAW_OR(dst_reg, dst_reg, tmp1_reg));
} else {
if (IMM_L(imm))
- PPC_ORI(dst_reg, dst_reg, IMM_L(imm));
+ EMIT(PPC_RAW_ORI(dst_reg, dst_reg, IMM_L(imm)));
if (IMM_H(imm))
- PPC_ORIS(dst_reg, dst_reg, IMM_H(imm));
+ EMIT(PPC_RAW_ORIS(dst_reg, dst_reg, IMM_H(imm)));
}
goto bpf_alu32_trunc;
case BPF_ALU | BPF_XOR | BPF_X: /* (u32) dst ^= src */
case BPF_ALU64 | BPF_XOR | BPF_X: /* dst ^= src */
- PPC_XOR(dst_reg, dst_reg, src_reg);
+ EMIT(PPC_RAW_XOR(dst_reg, dst_reg, src_reg));
goto bpf_alu32_trunc;
case BPF_ALU | BPF_XOR | BPF_K: /* (u32) dst ^= (u32) imm */
case BPF_ALU64 | BPF_XOR | BPF_K: /* dst ^= imm */
if (imm < 0 && BPF_CLASS(code) == BPF_ALU64) {
/* Sign-extended */
- PPC_LI32(b2p[TMP_REG_1], imm);
- PPC_XOR(dst_reg, dst_reg, b2p[TMP_REG_1]);
+ PPC_LI32(tmp1_reg, imm);
+ EMIT(PPC_RAW_XOR(dst_reg, dst_reg, tmp1_reg));
} else {
if (IMM_L(imm))
- PPC_XORI(dst_reg, dst_reg, IMM_L(imm));
+ EMIT(PPC_RAW_XORI(dst_reg, dst_reg, IMM_L(imm)));
if (IMM_H(imm))
- PPC_XORIS(dst_reg, dst_reg, IMM_H(imm));
+ EMIT(PPC_RAW_XORIS(dst_reg, dst_reg, IMM_H(imm)));
}
goto bpf_alu32_trunc;
case BPF_ALU | BPF_LSH | BPF_X: /* (u32) dst <<= (u32) src */
/* slw clears top 32 bits */
- PPC_SLW(dst_reg, dst_reg, src_reg);
+ EMIT(PPC_RAW_SLW(dst_reg, dst_reg, src_reg));
/* skip zero extension move, but set address map. */
if (insn_is_zext(&insn[i + 1]))
addrs[++i] = ctx->idx * 4;
break;
case BPF_ALU64 | BPF_LSH | BPF_X: /* dst <<= src; */
- PPC_SLD(dst_reg, dst_reg, src_reg);
+ EMIT(PPC_RAW_SLD(dst_reg, dst_reg, src_reg));
break;
case BPF_ALU | BPF_LSH | BPF_K: /* (u32) dst <<== (u32) imm */
/* with imm 0, we still need to clear top 32 bits */
- PPC_SLWI(dst_reg, dst_reg, imm);
+ EMIT(PPC_RAW_SLWI(dst_reg, dst_reg, imm));
if (insn_is_zext(&insn[i + 1]))
addrs[++i] = ctx->idx * 4;
break;
case BPF_ALU64 | BPF_LSH | BPF_K: /* dst <<== imm */
if (imm != 0)
- PPC_SLDI(dst_reg, dst_reg, imm);
+ EMIT(PPC_RAW_SLDI(dst_reg, dst_reg, imm));
break;
case BPF_ALU | BPF_RSH | BPF_X: /* (u32) dst >>= (u32) src */
- PPC_SRW(dst_reg, dst_reg, src_reg);
+ EMIT(PPC_RAW_SRW(dst_reg, dst_reg, src_reg));
if (insn_is_zext(&insn[i + 1]))
addrs[++i] = ctx->idx * 4;
break;
case BPF_ALU64 | BPF_RSH | BPF_X: /* dst >>= src */
- PPC_SRD(dst_reg, dst_reg, src_reg);
+ EMIT(PPC_RAW_SRD(dst_reg, dst_reg, src_reg));
break;
case BPF_ALU | BPF_RSH | BPF_K: /* (u32) dst >>= (u32) imm */
- PPC_SRWI(dst_reg, dst_reg, imm);
+ EMIT(PPC_RAW_SRWI(dst_reg, dst_reg, imm));
if (insn_is_zext(&insn[i + 1]))
addrs[++i] = ctx->idx * 4;
break;
case BPF_ALU64 | BPF_RSH | BPF_K: /* dst >>= imm */
if (imm != 0)
- PPC_SRDI(dst_reg, dst_reg, imm);
+ EMIT(PPC_RAW_SRDI(dst_reg, dst_reg, imm));
break;
case BPF_ALU | BPF_ARSH | BPF_X: /* (s32) dst >>= src */
- PPC_SRAW(dst_reg, dst_reg, src_reg);
+ EMIT(PPC_RAW_SRAW(dst_reg, dst_reg, src_reg));
goto bpf_alu32_trunc;
case BPF_ALU64 | BPF_ARSH | BPF_X: /* (s64) dst >>= src */
- PPC_SRAD(dst_reg, dst_reg, src_reg);
+ EMIT(PPC_RAW_SRAD(dst_reg, dst_reg, src_reg));
break;
case BPF_ALU | BPF_ARSH | BPF_K: /* (s32) dst >>= imm */
- PPC_SRAWI(dst_reg, dst_reg, imm);
+ EMIT(PPC_RAW_SRAWI(dst_reg, dst_reg, imm));
goto bpf_alu32_trunc;
case BPF_ALU64 | BPF_ARSH | BPF_K: /* (s64) dst >>= imm */
if (imm != 0)
- PPC_SRADI(dst_reg, dst_reg, imm);
+ EMIT(PPC_RAW_SRADI(dst_reg, dst_reg, imm));
break;
/*
@@ -555,10 +612,10 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
case BPF_ALU64 | BPF_MOV | BPF_X: /* dst = src */
if (imm == 1) {
/* special mov32 for zext */
- PPC_RLWINM(dst_reg, dst_reg, 0, 0, 31);
+ EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 0, 0, 31));
break;
}
- PPC_MR(dst_reg, src_reg);
+ EMIT(PPC_RAW_MR(dst_reg, src_reg));
goto bpf_alu32_trunc;
case BPF_ALU | BPF_MOV | BPF_K: /* (u32) dst = imm */
case BPF_ALU64 | BPF_MOV | BPF_K: /* dst = (s64) imm */
@@ -572,7 +629,7 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
bpf_alu32_trunc:
/* Truncate to 32-bits */
if (BPF_CLASS(code) == BPF_ALU && !fp->aux->verifier_zext)
- PPC_RLWINM(dst_reg, dst_reg, 0, 0, 31);
+ EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 0, 0, 31));
break;
/*
@@ -590,11 +647,11 @@ bpf_alu32_trunc:
switch (imm) {
case 16:
/* Rotate 8 bits left & mask with 0x0000ff00 */
- PPC_RLWINM(b2p[TMP_REG_1], dst_reg, 8, 16, 23);
+ EMIT(PPC_RAW_RLWINM(tmp1_reg, dst_reg, 8, 16, 23));
/* Rotate 8 bits right & insert LSB to reg */
- PPC_RLWIMI(b2p[TMP_REG_1], dst_reg, 24, 24, 31);
+ EMIT(PPC_RAW_RLWIMI(tmp1_reg, dst_reg, 24, 24, 31));
/* Move result back to dst_reg */
- PPC_MR(dst_reg, b2p[TMP_REG_1]);
+ EMIT(PPC_RAW_MR(dst_reg, tmp1_reg));
break;
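The 16-bit swap is done with one rotate-and-mask (rlwinm) plus one rotate-and-insert (rlwimi), building the result in a temporary before copying it back. The same permutation in plain C, as a sketch of what those two rotates achieve:

#include <assert.h>
#include <stdint.h>

static uint32_t swap16_low(uint32_t v)
{
	/* rotate left 8 keeping bits 16..23, then rotate 24 inserting 24..31 */
	return ((v << 8) & 0xff00) | ((v >> 8) & 0x00ff);
}

int main(void)
{
	assert(swap16_low(0x1234) == 0x3412);
	return 0;
}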
case 32:
/*
@@ -602,25 +659,29 @@ bpf_alu32_trunc:
* 2 bytes are already in their final position
* -- byte 2 and 4 (of bytes 1, 2, 3 and 4)
*/
- PPC_RLWINM(b2p[TMP_REG_1], dst_reg, 8, 0, 31);
+ EMIT(PPC_RAW_RLWINM(tmp1_reg, dst_reg, 8, 0, 31));
/* Rotate 24 bits and insert byte 1 */
- PPC_RLWIMI(b2p[TMP_REG_1], dst_reg, 24, 0, 7);
+ EMIT(PPC_RAW_RLWIMI(tmp1_reg, dst_reg, 24, 0, 7));
/* Rotate 24 bits and insert byte 3 */
- PPC_RLWIMI(b2p[TMP_REG_1], dst_reg, 24, 16, 23);
- PPC_MR(dst_reg, b2p[TMP_REG_1]);
+ EMIT(PPC_RAW_RLWIMI(tmp1_reg, dst_reg, 24, 16, 23));
+ EMIT(PPC_RAW_MR(dst_reg, tmp1_reg));
break;
case 64:
- /*
- * Way easier and faster(?) to store the value
- * into stack and then use ldbrx
- *
- * ctx->seen will be reliable in pass2, but
- * the instructions generated will remain the
- * same across all passes
- */
- PPC_BPF_STL(dst_reg, 1, bpf_jit_stack_local(ctx));
- PPC_ADDI(b2p[TMP_REG_1], 1, bpf_jit_stack_local(ctx));
- PPC_LDBRX(dst_reg, 0, b2p[TMP_REG_1]);
+ /* Store the value to stack and then use byte-reverse loads */
+ EMIT(PPC_RAW_STD(dst_reg, _R1, bpf_jit_stack_local(ctx)));
+ EMIT(PPC_RAW_ADDI(tmp1_reg, _R1, bpf_jit_stack_local(ctx)));
+ if (cpu_has_feature(CPU_FTR_ARCH_206)) {
+ EMIT(PPC_RAW_LDBRX(dst_reg, 0, tmp1_reg));
+ } else {
+ EMIT(PPC_RAW_LWBRX(dst_reg, 0, tmp1_reg));
+ if (IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN))
+ EMIT(PPC_RAW_SLDI(dst_reg, dst_reg, 32));
+ EMIT(PPC_RAW_LI(tmp2_reg, 4));
+ EMIT(PPC_RAW_LWBRX(tmp2_reg, tmp2_reg, tmp1_reg));
+ if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
+ EMIT(PPC_RAW_SLDI(tmp2_reg, tmp2_reg, 32));
+ EMIT(PPC_RAW_OR(dst_reg, dst_reg, tmp2_reg));
+ }
break;
}
break;
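On ISA 2.06+ CPUs the 64-bit swap is a single ldbrx of the value just spilled to the stack; the fallback composes it from two byte-reversed 32-bit loads whose halves are then swapped into position with sldi/or. A user-space sketch of that composition (illustrative only):

#include <assert.h>
#include <stdint.h>

static uint32_t bswap32(uint32_t v)
{
	return (v >> 24) | ((v >> 8) & 0x0000ff00) |
	       ((v << 8) & 0x00ff0000) | (v << 24);
}

static uint64_t bswap64_from_words(uint64_t v)
{
	uint64_t lo = bswap32((uint32_t)v);		/* first lwbrx */
	uint64_t hi = bswap32((uint32_t)(v >> 32));	/* second lwbrx */

	/* the reversed low word becomes the high half and vice versa */
	return (lo << 32) | hi;
}

int main(void)
{
	assert(bswap64_from_words(0x0102030405060708ull) == 0x0807060504030201ull);
	return 0;
}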
@@ -629,14 +690,14 @@ emit_clear:
switch (imm) {
case 16:
/* zero-extend 16 bits into 64 bits */
- PPC_RLDICL(dst_reg, dst_reg, 0, 48);
+ EMIT(PPC_RAW_RLDICL(dst_reg, dst_reg, 0, 48));
if (insn_is_zext(&insn[i + 1]))
addrs[++i] = ctx->idx * 4;
break;
case 32:
if (!fp->aux->verifier_zext)
/* zero-extend 32 bits into 64 bits */
- PPC_RLDICL(dst_reg, dst_reg, 0, 32);
+ EMIT(PPC_RAW_RLDICL(dst_reg, dst_reg, 0, 32));
break;
case 64:
/* nop */
@@ -645,66 +706,155 @@ emit_clear:
break;
/*
+ * BPF_ST NOSPEC (speculation barrier)
+ */
+ case BPF_ST | BPF_NOSPEC:
+ if (!security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) ||
+ !security_ftr_enabled(SEC_FTR_STF_BARRIER))
+ break;
+
+ switch (stf_barrier) {
+ case STF_BARRIER_EIEIO:
+ EMIT(PPC_RAW_EIEIO() | 0x02000000);
+ break;
+ case STF_BARRIER_SYNC_ORI:
+ EMIT(PPC_RAW_SYNC());
+ EMIT(PPC_RAW_LD(tmp1_reg, _R13, 0));
+ EMIT(PPC_RAW_ORI(_R31, _R31, 0));
+ break;
+ case STF_BARRIER_FALLBACK:
+ ctx->seen |= SEEN_FUNC;
+ PPC_LI64(_R12, dereference_kernel_function_descriptor(bpf_stf_barrier));
+ EMIT(PPC_RAW_MTCTR(_R12));
+ EMIT(PPC_RAW_BCTRL());
+ break;
+ case STF_BARRIER_NONE:
+ break;
+ }
+ break;
+
+ /*
* BPF_ST(X)
*/
case BPF_STX | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = src */
case BPF_ST | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = imm */
if (BPF_CLASS(code) == BPF_ST) {
- PPC_LI(b2p[TMP_REG_1], imm);
- src_reg = b2p[TMP_REG_1];
+ EMIT(PPC_RAW_LI(tmp1_reg, imm));
+ src_reg = tmp1_reg;
}
- PPC_STB(src_reg, dst_reg, off);
+ EMIT(PPC_RAW_STB(src_reg, dst_reg, off));
break;
case BPF_STX | BPF_MEM | BPF_H: /* (u16 *)(dst + off) = src */
case BPF_ST | BPF_MEM | BPF_H: /* (u16 *)(dst + off) = imm */
if (BPF_CLASS(code) == BPF_ST) {
- PPC_LI(b2p[TMP_REG_1], imm);
- src_reg = b2p[TMP_REG_1];
+ EMIT(PPC_RAW_LI(tmp1_reg, imm));
+ src_reg = tmp1_reg;
}
- PPC_STH(src_reg, dst_reg, off);
+ EMIT(PPC_RAW_STH(src_reg, dst_reg, off));
break;
case BPF_STX | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = src */
case BPF_ST | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = imm */
if (BPF_CLASS(code) == BPF_ST) {
- PPC_LI32(b2p[TMP_REG_1], imm);
- src_reg = b2p[TMP_REG_1];
+ PPC_LI32(tmp1_reg, imm);
+ src_reg = tmp1_reg;
}
- PPC_STW(src_reg, dst_reg, off);
+ EMIT(PPC_RAW_STW(src_reg, dst_reg, off));
break;
case BPF_STX | BPF_MEM | BPF_DW: /* (u64 *)(dst + off) = src */
case BPF_ST | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = imm */
if (BPF_CLASS(code) == BPF_ST) {
- PPC_LI32(b2p[TMP_REG_1], imm);
- src_reg = b2p[TMP_REG_1];
+ PPC_LI32(tmp1_reg, imm);
+ src_reg = tmp1_reg;
+ }
+ if (off % 4) {
+ EMIT(PPC_RAW_LI(tmp2_reg, off));
+ EMIT(PPC_RAW_STDX(src_reg, dst_reg, tmp2_reg));
+ } else {
+ EMIT(PPC_RAW_STD(src_reg, dst_reg, off));
}
- PPC_BPF_STL(src_reg, dst_reg, off);
break;
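The off % 4 test exists because std and ld are DS-form instructions: the displacement is encoded divided by four, so only offsets that are multiples of four (within the signed 16-bit range) fit the immediate form, and anything else falls back to the indexed stdx/ldx with the offset in a register. A small sketch of that encodability check (assuming the standard DS-form range):

#include <assert.h>
#include <stdint.h>

static int ds_form_ok(int32_t off)
{
	return off >= -32768 && off <= 32764 && (off & 3) == 0;
}

int main(void)
{
	assert(ds_form_ok(-64));	/* e.g. a red-zone slot */
	assert(!ds_form_ok(6));		/* JIT emits li tmp, 6; stdx/ldx instead */
	return 0;
}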
/*
- * BPF_STX XADD (atomic_add)
+ * BPF_STX ATOMIC (atomic ops)
*/
- /* *(u32 *)(dst + off) += src */
- case BPF_STX | BPF_XADD | BPF_W:
- /* Get EA into TMP_REG_1 */
- PPC_ADDI(b2p[TMP_REG_1], dst_reg, off);
+ case BPF_STX | BPF_ATOMIC | BPF_W:
+ case BPF_STX | BPF_ATOMIC | BPF_DW:
+ save_reg = tmp2_reg;
+ ret_reg = src_reg;
+
+ /* Get offset into TMP_REG_1 */
+ EMIT(PPC_RAW_LI(tmp1_reg, off));
tmp_idx = ctx->idx * 4;
/* load value from memory into TMP_REG_2 */
- PPC_BPF_LWARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
- /* add value from src_reg into this */
- PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg);
- /* store result back */
- PPC_BPF_STWCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
+ if (size == BPF_DW)
+ EMIT(PPC_RAW_LDARX(tmp2_reg, tmp1_reg, dst_reg, 0));
+ else
+ EMIT(PPC_RAW_LWARX(tmp2_reg, tmp1_reg, dst_reg, 0));
+
+ /* Save old value in _R0 */
+ if (imm & BPF_FETCH)
+ EMIT(PPC_RAW_MR(_R0, tmp2_reg));
+
+ switch (imm) {
+ case BPF_ADD:
+ case BPF_ADD | BPF_FETCH:
+ EMIT(PPC_RAW_ADD(tmp2_reg, tmp2_reg, src_reg));
+ break;
+ case BPF_AND:
+ case BPF_AND | BPF_FETCH:
+ EMIT(PPC_RAW_AND(tmp2_reg, tmp2_reg, src_reg));
+ break;
+ case BPF_OR:
+ case BPF_OR | BPF_FETCH:
+ EMIT(PPC_RAW_OR(tmp2_reg, tmp2_reg, src_reg));
+ break;
+ case BPF_XOR:
+ case BPF_XOR | BPF_FETCH:
+ EMIT(PPC_RAW_XOR(tmp2_reg, tmp2_reg, src_reg));
+ break;
+ case BPF_CMPXCHG:
+ /*
+ * Return old value in BPF_REG_0 for BPF_CMPXCHG &
+ * in src_reg for other cases.
+ */
+ ret_reg = bpf_to_ppc(BPF_REG_0);
+
+ /* Compare with old value in BPF_R0 */
+ if (size == BPF_DW)
+ EMIT(PPC_RAW_CMPD(bpf_to_ppc(BPF_REG_0), tmp2_reg));
+ else
+ EMIT(PPC_RAW_CMPW(bpf_to_ppc(BPF_REG_0), tmp2_reg));
+ /* Don't set if different from old value */
+ PPC_BCC_SHORT(COND_NE, (ctx->idx + 3) * 4);
+ fallthrough;
+ case BPF_XCHG:
+ save_reg = src_reg;
+ break;
+ default:
+ pr_err_ratelimited(
+ "eBPF filter atomic op code %02x (@%d) unsupported\n",
+ code, i);
+ return -EOPNOTSUPP;
+ }
+
+ /* store new value */
+ if (size == BPF_DW)
+ EMIT(PPC_RAW_STDCX(save_reg, tmp1_reg, dst_reg));
+ else
+ EMIT(PPC_RAW_STWCX(save_reg, tmp1_reg, dst_reg));
/* we're done if this succeeded */
PPC_BCC_SHORT(COND_NE, tmp_idx);
- break;
- /* *(u64 *)(dst + off) += src */
- case BPF_STX | BPF_XADD | BPF_DW:
- PPC_ADDI(b2p[TMP_REG_1], dst_reg, off);
- tmp_idx = ctx->idx * 4;
- PPC_BPF_LDARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
- PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg);
- PPC_BPF_STDCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
- PPC_BCC_SHORT(COND_NE, tmp_idx);
+
+ if (imm & BPF_FETCH) {
+ EMIT(PPC_RAW_MR(ret_reg, _R0));
+ /*
+ * Skip unnecessary zero-extension for 32-bit cmpxchg.
+ * For context, see commit 39491867ace5.
+ */
+ if (size != BPF_DW && imm == BPF_CMPXCHG &&
+ insn_is_zext(&insn[i + 1]))
+ addrs[++i] = ctx->idx * 4;
+ }
break;
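Every atomic flavour shares one load-reserve/store-conditional loop: l[dw]arx into a temporary, apply the operation (cmpxchg bails out early when the comparison with BPF_R0 fails, xchg simply stores the source), then st[dw]cx. and branch back to the reservation if it was lost; BPF_FETCH additionally hands back the pre-update value parked in r0. A C-level model of the fetch-add flavour, using a compare-exchange loop in place of the hardware reservation (sketch only, not the emitted code):

#include <assert.h>
#include <stdatomic.h>
#include <stdint.h>

static uint64_t fetch_add_model(_Atomic uint64_t *addr, uint64_t inc)
{
	uint64_t old = atomic_load(addr);

	/* stands in for the ldarx ... stdcx. retry loop */
	while (!atomic_compare_exchange_weak(addr, &old, old + inc))
		;
	return old;	/* the pre-update value, as with BPF_ADD | BPF_FETCH */
}

int main(void)
{
	_Atomic uint64_t v = 40;

	assert(fetch_add_model(&v, 2) == 40);
	assert(atomic_load(&v) == 42);
	return 0;
}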
/*
@@ -712,25 +862,70 @@ emit_clear:
*/
/* dst = *(u8 *)(ul) (src + off) */
case BPF_LDX | BPF_MEM | BPF_B:
- PPC_LBZ(dst_reg, src_reg, off);
- if (insn_is_zext(&insn[i + 1]))
- addrs[++i] = ctx->idx * 4;
- break;
+ case BPF_LDX | BPF_PROBE_MEM | BPF_B:
/* dst = *(u16 *)(ul) (src + off) */
case BPF_LDX | BPF_MEM | BPF_H:
- PPC_LHZ(dst_reg, src_reg, off);
- if (insn_is_zext(&insn[i + 1]))
- addrs[++i] = ctx->idx * 4;
- break;
+ case BPF_LDX | BPF_PROBE_MEM | BPF_H:
/* dst = *(u32 *)(ul) (src + off) */
case BPF_LDX | BPF_MEM | BPF_W:
- PPC_LWZ(dst_reg, src_reg, off);
- if (insn_is_zext(&insn[i + 1]))
- addrs[++i] = ctx->idx * 4;
- break;
+ case BPF_LDX | BPF_PROBE_MEM | BPF_W:
/* dst = *(u64 *)(ul) (src + off) */
case BPF_LDX | BPF_MEM | BPF_DW:
- PPC_BPF_LL(dst_reg, src_reg, off);
+ case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
+ /*
+ * As PTR_TO_BTF_ID that uses BPF_PROBE_MEM mode could either be a valid
+ * kernel pointer or NULL but not a userspace address, execute BPF_PROBE_MEM
+ * load only if addr is kernel address (see is_kernel_addr()), otherwise
+ * set dst_reg=0 and move on.
+ */
+ if (BPF_MODE(code) == BPF_PROBE_MEM) {
+ EMIT(PPC_RAW_ADDI(tmp1_reg, src_reg, off));
+ if (IS_ENABLED(CONFIG_PPC_BOOK3E_64))
+ PPC_LI64(tmp2_reg, 0x8000000000000000ul);
+ else /* BOOK3S_64 */
+ PPC_LI64(tmp2_reg, PAGE_OFFSET);
+ EMIT(PPC_RAW_CMPLD(tmp1_reg, tmp2_reg));
+ PPC_BCC_SHORT(COND_GT, (ctx->idx + 3) * 4);
+ EMIT(PPC_RAW_LI(dst_reg, 0));
+ /*
+ * Check if 'off' is word aligned for BPF_DW, because
+ * we might generate two instructions.
+ */
+ if (BPF_SIZE(code) == BPF_DW && (off & 3))
+ PPC_JMP((ctx->idx + 3) * 4);
+ else
+ PPC_JMP((ctx->idx + 2) * 4);
+ }
+
+ switch (size) {
+ case BPF_B:
+ EMIT(PPC_RAW_LBZ(dst_reg, src_reg, off));
+ break;
+ case BPF_H:
+ EMIT(PPC_RAW_LHZ(dst_reg, src_reg, off));
+ break;
+ case BPF_W:
+ EMIT(PPC_RAW_LWZ(dst_reg, src_reg, off));
+ break;
+ case BPF_DW:
+ if (off % 4) {
+ EMIT(PPC_RAW_LI(tmp1_reg, off));
+ EMIT(PPC_RAW_LDX(dst_reg, src_reg, tmp1_reg));
+ } else {
+ EMIT(PPC_RAW_LD(dst_reg, src_reg, off));
+ }
+ break;
+ }
+
+ if (size != BPF_DW && insn_is_zext(&insn[i + 1]))
+ addrs[++i] = ctx->idx * 4;
+
+ if (BPF_MODE(code) == BPF_PROBE_MEM) {
+ ret = bpf_add_extable_entry(fp, image, pass, ctx, ctx->idx - 1,
+ 4, dst_reg);
+ if (ret)
+ return ret;
+ }
break;
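A BPF_PROBE_MEM load gets two layers of protection: the inline range check above zeroes the destination and skips the access when the effective address is not a kernel address, and bpf_add_extable_entry() covers the load itself so a fault on a bad kernel address also resolves to dst = 0 instead of an oops. A hedged sketch of the range-check half (KERNEL_BOUNDARY is an assumed stand-in for PAGE_OFFSET or the Book3E split used above):

#include <assert.h>
#include <stdint.h>

#define KERNEL_BOUNDARY 0xc000000000000000ull	/* assumption for the sketch */

static uint64_t probe_mem_load_model(const void *src, int16_t off)
{
	uintptr_t ea = (uintptr_t)src + off;

	if (ea <= KERNEL_BOUNDARY)
		return 0;	/* not a kernel address: dst = 0, load skipped */

	/* in the JIT this load is the one covered by the extable entry */
	return *(const uint64_t *)ea;
}

int main(void)
{
	uint64_t on_stack = 42;

	/* user-space addresses sit below the boundary, so the model
	 * always takes the "zero and skip" path here */
	assert(probe_mem_load_model(&on_stack, 0) == 0);
	return 0;
}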
/*
@@ -740,9 +935,13 @@ emit_clear:
case BPF_LD | BPF_IMM | BPF_DW: /* dst = (u64) imm */
imm64 = ((u64)(u32) insn[i].imm) |
(((u64)(u32) insn[i+1].imm) << 32);
+ tmp_idx = ctx->idx;
+ PPC_LI64(dst_reg, imm64);
+ /* padding to allow full 5 instructions for later patching */
+ for (j = ctx->idx - tmp_idx; j < 5; j++)
+ EMIT(PPC_RAW_NOP());
/* Adjust for two bpf instructions */
addrs[++i] = ctx->idx * 4;
- PPC_LI64(dst_reg, imm64);
break;
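PPC_LI64() can emit anywhere from one to five instructions depending on the constant, so the sequence is padded with NOPs up to the full five; that keeps addrs[] and the image size identical across passes when the immediate is re-patched later. A sketch of why five steps can be needed, modelling the usual lis / ori / sldi 32 / oris / ori construction from 16-bit pieces (not the kernel macro itself):

#include <assert.h>
#include <stdint.h>

static uint64_t build_imm64(uint64_t imm)
{
	uint64_t r;

	r = (uint64_t)(int64_t)(int16_t)(imm >> 48);	/* lis: top 16 bits, sign-extended */
	r = (r << 16) | ((imm >> 32) & 0xffff);		/* ori: next 16 bits */
	r <<= 32;					/* sldi: move the high half into place */
	r |= ((imm >> 16) & 0xffff) << 16;		/* oris */
	r |= imm & 0xffff;				/* ori */
	return r;
}

int main(void)
{
	assert(build_imm64(0x123456789abcdef0ull) == 0x123456789abcdef0ull);
	assert(build_imm64(0xfffffffffffffffeull) == 0xfffffffffffffffeull);
	return 0;
}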
/*
@@ -754,8 +953,11 @@ emit_clear:
* the epilogue. If we _are_ the last instruction,
* we'll just fall through to the epilogue.
*/
- if (i != flen - 1)
- PPC_JMP(exit_addr);
+ if (i != flen - 1) {
+ ret = bpf_jit_emit_exit_insn(image, ctx, tmp1_reg, exit_addr);
+ if (ret)
+ return ret;
+ }
/* else fall through to the epilogue */
break;
@@ -765,17 +967,21 @@ emit_clear:
case BPF_JMP | BPF_CALL:
ctx->seen |= SEEN_FUNC;
- ret = bpf_jit_get_func_addr(fp, &insn[i], extra_pass,
+ ret = bpf_jit_get_func_addr(fp, &insn[i], false,
&func_addr, &func_addr_fixed);
if (ret < 0)
return ret;
if (func_addr_fixed)
- bpf_jit_emit_func_call_hlp(image, ctx, func_addr);
+ ret = bpf_jit_emit_func_call_hlp(image, ctx, func_addr);
else
- bpf_jit_emit_func_call_rel(image, ctx, func_addr);
+ ret = bpf_jit_emit_func_call_rel(image, ctx, func_addr);
+
+ if (ret)
+ return ret;
+
/* move return value from r3 to BPF_REG_0 */
- PPC_MR(b2p[BPF_REG_0], 3);
+ EMIT(PPC_RAW_MR(bpf_to_ppc(BPF_REG_0), _R3));
break;
/*
@@ -860,9 +1066,9 @@ cond_branch:
case BPF_JMP32 | BPF_JNE | BPF_X:
/* unsigned comparison */
if (BPF_CLASS(code) == BPF_JMP32)
- PPC_CMPLW(dst_reg, src_reg);
+ EMIT(PPC_RAW_CMPLW(dst_reg, src_reg));
else
- PPC_CMPLD(dst_reg, src_reg);
+ EMIT(PPC_RAW_CMPLD(dst_reg, src_reg));
break;
case BPF_JMP | BPF_JSGT | BPF_X:
case BPF_JMP | BPF_JSLT | BPF_X:
@@ -874,21 +1080,17 @@ cond_branch:
case BPF_JMP32 | BPF_JSLE | BPF_X:
/* signed comparison */
if (BPF_CLASS(code) == BPF_JMP32)
- PPC_CMPW(dst_reg, src_reg);
+ EMIT(PPC_RAW_CMPW(dst_reg, src_reg));
else
- PPC_CMPD(dst_reg, src_reg);
+ EMIT(PPC_RAW_CMPD(dst_reg, src_reg));
break;
case BPF_JMP | BPF_JSET | BPF_X:
case BPF_JMP32 | BPF_JSET | BPF_X:
if (BPF_CLASS(code) == BPF_JMP) {
- PPC_AND_DOT(b2p[TMP_REG_1], dst_reg,
- src_reg);
+ EMIT(PPC_RAW_AND_DOT(tmp1_reg, dst_reg, src_reg));
} else {
- int tmp_reg = b2p[TMP_REG_1];
-
- PPC_AND(tmp_reg, dst_reg, src_reg);
- PPC_RLWINM_DOT(tmp_reg, tmp_reg, 0, 0,
- 31);
+ EMIT(PPC_RAW_AND(tmp1_reg, dst_reg, src_reg));
+ EMIT(PPC_RAW_RLWINM_DOT(tmp1_reg, tmp1_reg, 0, 0, 31));
}
break;
case BPF_JMP | BPF_JNE | BPF_K:
@@ -912,19 +1114,17 @@ cond_branch:
*/
if (imm >= 0 && imm < 32768) {
if (is_jmp32)
- PPC_CMPLWI(dst_reg, imm);
+ EMIT(PPC_RAW_CMPLWI(dst_reg, imm));
else
- PPC_CMPLDI(dst_reg, imm);
+ EMIT(PPC_RAW_CMPLDI(dst_reg, imm));
} else {
/* sign-extending load */
- PPC_LI32(b2p[TMP_REG_1], imm);
+ PPC_LI32(tmp1_reg, imm);
/* ... but unsigned comparison */
if (is_jmp32)
- PPC_CMPLW(dst_reg,
- b2p[TMP_REG_1]);
+ EMIT(PPC_RAW_CMPLW(dst_reg, tmp1_reg));
else
- PPC_CMPLD(dst_reg,
- b2p[TMP_REG_1]);
+ EMIT(PPC_RAW_CMPLD(dst_reg, tmp1_reg));
}
break;
}
@@ -945,17 +1145,15 @@ cond_branch:
*/
if (imm >= -32768 && imm < 32768) {
if (is_jmp32)
- PPC_CMPWI(dst_reg, imm);
+ EMIT(PPC_RAW_CMPWI(dst_reg, imm));
else
- PPC_CMPDI(dst_reg, imm);
+ EMIT(PPC_RAW_CMPDI(dst_reg, imm));
} else {
- PPC_LI32(b2p[TMP_REG_1], imm);
+ PPC_LI32(tmp1_reg, imm);
if (is_jmp32)
- PPC_CMPW(dst_reg,
- b2p[TMP_REG_1]);
+ EMIT(PPC_RAW_CMPW(dst_reg, tmp1_reg));
else
- PPC_CMPD(dst_reg,
- b2p[TMP_REG_1]);
+ EMIT(PPC_RAW_CMPD(dst_reg, tmp1_reg));
}
break;
}
@@ -964,19 +1162,16 @@ cond_branch:
/* andi does not sign-extend the immediate */
if (imm >= 0 && imm < 32768)
/* PPC_ANDI is _only/always_ dot-form */
- PPC_ANDI(b2p[TMP_REG_1], dst_reg, imm);
+ EMIT(PPC_RAW_ANDI(tmp1_reg, dst_reg, imm));
else {
- int tmp_reg = b2p[TMP_REG_1];
-
- PPC_LI32(tmp_reg, imm);
+ PPC_LI32(tmp1_reg, imm);
if (BPF_CLASS(code) == BPF_JMP) {
- PPC_AND_DOT(tmp_reg, dst_reg,
- tmp_reg);
+ EMIT(PPC_RAW_AND_DOT(tmp1_reg, dst_reg,
+ tmp1_reg));
} else {
- PPC_AND(tmp_reg, dst_reg,
- tmp_reg);
- PPC_RLWINM_DOT(tmp_reg, tmp_reg,
- 0, 0, 31);
+ EMIT(PPC_RAW_AND(tmp1_reg, dst_reg, tmp1_reg));
+ EMIT(PPC_RAW_RLWINM_DOT(tmp1_reg, tmp1_reg,
+ 0, 0, 31));
}
}
break;
@@ -989,7 +1184,9 @@ cond_branch:
*/
case BPF_JMP | BPF_TAIL_CALL:
ctx->seen |= SEEN_TAILCALL;
- bpf_jit_emit_tail_call(image, ctx, addrs[i + 1]);
+ ret = bpf_jit_emit_tail_call(image, ctx, addrs[i + 1]);
+ if (ret < 0)
+ return ret;
break;
default:
@@ -1009,249 +1206,3 @@ cond_branch:
return 0;
}
-
-/* Fix the branch target addresses for subprog calls */
-static int bpf_jit_fixup_subprog_calls(struct bpf_prog *fp, u32 *image,
- struct codegen_context *ctx, u32 *addrs)
-{
- const struct bpf_insn *insn = fp->insnsi;
- bool func_addr_fixed;
- u64 func_addr;
- u32 tmp_idx;
- int i, ret;
-
- for (i = 0; i < fp->len; i++) {
- /*
- * During the extra pass, only the branch target addresses for
- * the subprog calls need to be fixed. All other instructions
- * can be left untouched.
- *
- * The JITed image length does not change because we already
- * ensure that the JITed instruction sequence for these calls
- * are of fixed length by padding them with NOPs.
- */
- if (insn[i].code == (BPF_JMP | BPF_CALL) &&
- insn[i].src_reg == BPF_PSEUDO_CALL) {
- ret = bpf_jit_get_func_addr(fp, &insn[i], true,
- &func_addr,
- &func_addr_fixed);
- if (ret < 0)
- return ret;
-
- /*
- * Save ctx->idx as this would currently point to the
- * end of the JITed image and set it to the offset of
- * the instruction sequence corresponding to the
- * subprog call temporarily.
- */
- tmp_idx = ctx->idx;
- ctx->idx = addrs[i] / 4;
- bpf_jit_emit_func_call_rel(image, ctx, func_addr);
-
- /*
- * Restore ctx->idx here. This is safe as the length
- * of the JITed sequence remains unchanged.
- */
- ctx->idx = tmp_idx;
- }
- }
-
- return 0;
-}
-
-struct powerpc64_jit_data {
- struct bpf_binary_header *header;
- u32 *addrs;
- u8 *image;
- u32 proglen;
- struct codegen_context ctx;
-};
-
-bool bpf_jit_needs_zext(void)
-{
- return true;
-}
-
-struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
-{
- u32 proglen;
- u32 alloclen;
- u8 *image = NULL;
- u32 *code_base;
- u32 *addrs;
- struct powerpc64_jit_data *jit_data;
- struct codegen_context cgctx;
- int pass;
- int flen;
- struct bpf_binary_header *bpf_hdr;
- struct bpf_prog *org_fp = fp;
- struct bpf_prog *tmp_fp;
- bool bpf_blinded = false;
- bool extra_pass = false;
-
- if (!fp->jit_requested)
- return org_fp;
-
- tmp_fp = bpf_jit_blind_constants(org_fp);
- if (IS_ERR(tmp_fp))
- return org_fp;
-
- if (tmp_fp != org_fp) {
- bpf_blinded = true;
- fp = tmp_fp;
- }
-
- jit_data = fp->aux->jit_data;
- if (!jit_data) {
- jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
- if (!jit_data) {
- fp = org_fp;
- goto out;
- }
- fp->aux->jit_data = jit_data;
- }
-
- flen = fp->len;
- addrs = jit_data->addrs;
- if (addrs) {
- cgctx = jit_data->ctx;
- image = jit_data->image;
- bpf_hdr = jit_data->header;
- proglen = jit_data->proglen;
- alloclen = proglen + FUNCTION_DESCR_SIZE;
- extra_pass = true;
- goto skip_init_ctx;
- }
-
- addrs = kcalloc(flen + 1, sizeof(*addrs), GFP_KERNEL);
- if (addrs == NULL) {
- fp = org_fp;
- goto out_addrs;
- }
-
- memset(&cgctx, 0, sizeof(struct codegen_context));
-
- /* Make sure that the stack is quadword aligned. */
- cgctx.stack_size = round_up(fp->aux->stack_depth, 16);
-
- /* Scouting faux-generate pass 0 */
- if (bpf_jit_build_body(fp, 0, &cgctx, addrs, false)) {
- /* We hit something illegal or unsupported. */
- fp = org_fp;
- goto out_addrs;
- }
-
- /*
- * If we have seen a tail call, we need a second pass.
- * This is because bpf_jit_emit_common_epilogue() is called
- * from bpf_jit_emit_tail_call() with a not yet stable ctx->seen.
- */
- if (cgctx.seen & SEEN_TAILCALL) {
- cgctx.idx = 0;
- if (bpf_jit_build_body(fp, 0, &cgctx, addrs, false)) {
- fp = org_fp;
- goto out_addrs;
- }
- }
-
- /*
- * Pretend to build prologue, given the features we've seen. This will
- * update cgctx.idx as it pretends to output instructions, then we can
- * calculate total size from idx.
- */
- bpf_jit_build_prologue(0, &cgctx);
- bpf_jit_build_epilogue(0, &cgctx);
-
- proglen = cgctx.idx * 4;
- alloclen = proglen + FUNCTION_DESCR_SIZE;
-
- bpf_hdr = bpf_jit_binary_alloc(alloclen, &image, 4,
- bpf_jit_fill_ill_insns);
- if (!bpf_hdr) {
- fp = org_fp;
- goto out_addrs;
- }
-
-skip_init_ctx:
- code_base = (u32 *)(image + FUNCTION_DESCR_SIZE);
-
- if (extra_pass) {
- /*
- * Do not touch the prologue and epilogue as they will remain
- * unchanged. Only fix the branch target address for subprog
- * calls in the body.
- *
- * This does not change the offsets and lengths of the subprog
- * call instruction sequences and hence, the size of the JITed
- * image as well.
- */
- bpf_jit_fixup_subprog_calls(fp, code_base, &cgctx, addrs);
-
- /* There is no need to perform the usual passes. */
- goto skip_codegen_passes;
- }
-
- /* Code generation passes 1-2 */
- for (pass = 1; pass < 3; pass++) {
- /* Now build the prologue, body code & epilogue for real. */
- cgctx.idx = 0;
- bpf_jit_build_prologue(code_base, &cgctx);
- bpf_jit_build_body(fp, code_base, &cgctx, addrs, extra_pass);
- bpf_jit_build_epilogue(code_base, &cgctx);
-
- if (bpf_jit_enable > 1)
- pr_info("Pass %d: shrink = %d, seen = 0x%x\n", pass,
- proglen - (cgctx.idx * 4), cgctx.seen);
- }
-
-skip_codegen_passes:
- if (bpf_jit_enable > 1)
- /*
- * Note that we output the base address of the code_base
- * rather than image, since opcodes are in code_base.
- */
- bpf_jit_dump(flen, proglen, pass, code_base);
-
-#ifdef PPC64_ELF_ABI_v1
- /* Function descriptor nastiness: Address + TOC */
- ((u64 *)image)[0] = (u64)code_base;
- ((u64 *)image)[1] = local_paca->kernel_toc;
-#endif
-
- fp->bpf_func = (void *)image;
- fp->jited = 1;
- fp->jited_len = alloclen;
-
- bpf_flush_icache(bpf_hdr, (u8 *)bpf_hdr + (bpf_hdr->pages * PAGE_SIZE));
- if (!fp->is_func || extra_pass) {
- bpf_prog_fill_jited_linfo(fp, addrs);
-out_addrs:
- kfree(addrs);
- kfree(jit_data);
- fp->aux->jit_data = NULL;
- } else {
- jit_data->addrs = addrs;
- jit_data->ctx = cgctx;
- jit_data->proglen = proglen;
- jit_data->image = image;
- jit_data->header = bpf_hdr;
- }
-
-out:
- if (bpf_blinded)
- bpf_jit_prog_release_other(fp, fp == org_fp ? tmp_fp : org_fp);
-
- return fp;
-}
-
-/* Overriding bpf_jit_free() as we don't set images read-only. */
-void bpf_jit_free(struct bpf_prog *fp)
-{
- unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
- struct bpf_binary_header *bpf_hdr = (void *)addr;
-
- if (fp->jited)
- bpf_jit_binary_free(bpf_hdr);
-
- bpf_prog_unlock_free(fp);
-}
diff --git a/arch/powerpc/oprofile/Makefile b/arch/powerpc/oprofile/Makefile
deleted file mode 100644
index bb2d94c8cbe6..000000000000
--- a/arch/powerpc/oprofile/Makefile
+++ /dev/null
@@ -1,19 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-
-ccflags-$(CONFIG_PPC64) := $(NO_MINIMAL_TOC)
-
-obj-$(CONFIG_OPROFILE) += oprofile.o
-
-DRIVER_OBJS := $(addprefix ../../../drivers/oprofile/, \
- oprof.o cpu_buffer.o buffer_sync.o \
- event_buffer.o oprofile_files.o \
- oprofilefs.o oprofile_stats.o \
- timer_int.o )
-
-oprofile-y := $(DRIVER_OBJS) common.o backtrace.o
-oprofile-$(CONFIG_OPROFILE_CELL) += op_model_cell.o \
- cell/spu_profiler.o cell/vma_map.o \
- cell/spu_task_sync.o
-oprofile-$(CONFIG_PPC_BOOK3S_64) += op_model_power4.o op_model_pa6t.o
-oprofile-$(CONFIG_FSL_EMB_PERFMON) += op_model_fsl_emb.o
-oprofile-$(CONFIG_PPC_BOOK3S_32) += op_model_7450.o
diff --git a/arch/powerpc/oprofile/backtrace.c b/arch/powerpc/oprofile/backtrace.c
deleted file mode 100644
index 6f347fa29f41..000000000000
--- a/arch/powerpc/oprofile/backtrace.c
+++ /dev/null
@@ -1,118 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/**
- * Copyright (C) 2005 Brian Rogan <bcr6@cornell.edu>, IBM
- *
-**/
-
-#include <linux/time.h>
-#include <linux/oprofile.h>
-#include <linux/sched.h>
-#include <asm/processor.h>
-#include <linux/uaccess.h>
-#include <linux/compat.h>
-#include <asm/oprofile_impl.h>
-
-#define STACK_SP(STACK) *(STACK)
-
-#define STACK_LR64(STACK) *((unsigned long *)(STACK) + 2)
-#define STACK_LR32(STACK) *((unsigned int *)(STACK) + 1)
-
-#ifdef CONFIG_PPC64
-#define STACK_LR(STACK) STACK_LR64(STACK)
-#else
-#define STACK_LR(STACK) STACK_LR32(STACK)
-#endif
-
-static unsigned int user_getsp32(unsigned int sp, int is_first)
-{
- unsigned int stack_frame[2];
- void __user *p = compat_ptr(sp);
-
- /*
- * The most likely reason for this is that we returned -EFAULT,
- * which means that we've done all that we can do from
- * interrupt context.
- */
- if (probe_user_read(stack_frame, (void __user *)p, sizeof(stack_frame)))
- return 0;
-
- if (!is_first)
- oprofile_add_trace(STACK_LR32(stack_frame));
-
- /*
- * We do not enforce increasing stack addresses here because
- * we may transition to a different stack, eg a signal handler.
- */
- return STACK_SP(stack_frame);
-}
-
-#ifdef CONFIG_PPC64
-static unsigned long user_getsp64(unsigned long sp, int is_first)
-{
- unsigned long stack_frame[3];
-
- if (probe_user_read(stack_frame, (void __user *)sp, sizeof(stack_frame)))
- return 0;
-
- if (!is_first)
- oprofile_add_trace(STACK_LR64(stack_frame));
-
- return STACK_SP(stack_frame);
-}
-#endif
-
-static unsigned long kernel_getsp(unsigned long sp, int is_first)
-{
- unsigned long *stack_frame = (unsigned long *)sp;
-
- if (!validate_sp(sp, current, STACK_FRAME_OVERHEAD))
- return 0;
-
- if (!is_first)
- oprofile_add_trace(STACK_LR(stack_frame));
-
- /*
- * We do not enforce increasing stack addresses here because
- * we might be transitioning from an interrupt stack to a kernel
- * stack. validate_sp() is designed to understand this, so just
- * use it.
- */
- return STACK_SP(stack_frame);
-}
-
-void op_powerpc_backtrace(struct pt_regs * const regs, unsigned int depth)
-{
- unsigned long sp = regs->gpr[1];
- int first_frame = 1;
-
- /* We ditch the top stackframe so need to loop through an extra time */
- depth += 1;
-
- if (!user_mode(regs)) {
- while (depth--) {
- sp = kernel_getsp(sp, first_frame);
- if (!sp)
- break;
- first_frame = 0;
- }
- } else {
-#ifdef CONFIG_PPC64
- if (!is_32bit_task()) {
- while (depth--) {
- sp = user_getsp64(sp, first_frame);
- if (!sp)
- break;
- first_frame = 0;
- }
- return;
- }
-#endif
-
- while (depth--) {
- sp = user_getsp32(sp, first_frame);
- if (!sp)
- break;
- first_frame = 0;
- }
- }
-}
diff --git a/arch/powerpc/oprofile/cell/pr_util.h b/arch/powerpc/oprofile/cell/pr_util.h
deleted file mode 100644
index e198efa9113a..000000000000
--- a/arch/powerpc/oprofile/cell/pr_util.h
+++ /dev/null
@@ -1,110 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
- /*
- * Cell Broadband Engine OProfile Support
- *
- * (C) Copyright IBM Corporation 2006
- *
- * Author: Maynard Johnson <maynardj@us.ibm.com>
- */
-
-#ifndef PR_UTIL_H
-#define PR_UTIL_H
-
-#include <linux/cpumask.h>
-#include <linux/oprofile.h>
-#include <asm/cell-pmu.h>
-#include <asm/cell-regs.h>
-#include <asm/spu.h>
-
-/* Defines used for sync_start */
-#define SKIP_GENERIC_SYNC 0
-#define SYNC_START_ERROR -1
-#define DO_GENERIC_SYNC 1
-#define SPUS_PER_NODE 8
-#define DEFAULT_TIMER_EXPIRE (HZ / 10)
-
-extern struct delayed_work spu_work;
-extern int spu_prof_running;
-
-#define TRACE_ARRAY_SIZE 1024
-
-extern spinlock_t oprof_spu_smpl_arry_lck;
-
-struct spu_overlay_info { /* map of sections within an SPU overlay */
- unsigned int vma; /* SPU virtual memory address from elf */
- unsigned int size; /* size of section from elf */
- unsigned int offset; /* offset of section into elf file */
- unsigned int buf;
-};
-
-struct vma_to_fileoffset_map { /* map of sections within an SPU program */
- struct vma_to_fileoffset_map *next; /* list pointer */
- unsigned int vma; /* SPU virtual memory address from elf */
- unsigned int size; /* size of section from elf */
- unsigned int offset; /* offset of section into elf file */
- unsigned int guard_ptr;
- unsigned int guard_val;
- /*
- * The guard pointer is an entry in the _ovly_buf_table,
- * computed using ovly.buf as the index into the table. Since
- * ovly.buf values begin at '1' to reference the first (or 0th)
- * entry in the _ovly_buf_table, the computation subtracts 1
- * from ovly.buf.
- * The guard value is stored in the _ovly_buf_table entry and
- * is an index (starting at 1) back to the _ovly_table entry
- * that is pointing at this _ovly_buf_table entry. So, for
- * example, for an overlay scenario with one overlay segment
- * and two overlay sections:
- * - Section 1 points to the first entry of the
- * _ovly_buf_table, which contains a guard value
- * of '1', referencing the first (index=0) entry of
- * _ovly_table.
- * - Section 2 points to the second entry of the
- * _ovly_buf_table, which contains a guard value
- * of '2', referencing the second (index=1) entry of
- * _ovly_table.
- */
-
-};
-
-struct spu_buffer {
- int last_guard_val;
- int ctx_sw_seen;
- unsigned long *buff;
- unsigned int head, tail;
-};
-
-
-/* The three functions below are for maintaining and accessing
- * the vma-to-fileoffset map.
- */
-struct vma_to_fileoffset_map *create_vma_map(const struct spu *spu,
- unsigned long objectid);
-unsigned int vma_map_lookup(struct vma_to_fileoffset_map *map,
- unsigned int vma, const struct spu *aSpu,
- int *grd_val);
-void vma_map_free(struct vma_to_fileoffset_map *map);
-
-/*
- * Entry point for SPU profiling.
- * cycles_reset is the SPU_CYCLES count value specified by the user.
- */
-int start_spu_profiling_cycles(unsigned int cycles_reset);
-void start_spu_profiling_events(void);
-
-void stop_spu_profiling_cycles(void);
-void stop_spu_profiling_events(void);
-
-/* add the necessary profiling hooks */
-int spu_sync_start(void);
-
-/* remove the hooks */
-int spu_sync_stop(void);
-
-/* Record SPU program counter samples to the oprofile event buffer. */
-void spu_sync_buffer(int spu_num, unsigned int *samples,
- int num_samples);
-
-void set_spu_profiling_frequency(unsigned int freq_khz, unsigned int cycles_reset);
-
-#endif /* PR_UTIL_H */
diff --git a/arch/powerpc/oprofile/cell/spu_profiler.c b/arch/powerpc/oprofile/cell/spu_profiler.c
deleted file mode 100644
index cdf883445a9f..000000000000
--- a/arch/powerpc/oprofile/cell/spu_profiler.c
+++ /dev/null
@@ -1,248 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Cell Broadband Engine OProfile Support
- *
- * (C) Copyright IBM Corporation 2006
- *
- * Authors: Maynard Johnson <maynardj@us.ibm.com>
- * Carl Love <carll@us.ibm.com>
- */
-
-#include <linux/hrtimer.h>
-#include <linux/smp.h>
-#include <linux/slab.h>
-#include <asm/cell-pmu.h>
-#include <asm/time.h>
-#include "pr_util.h"
-
-#define SCALE_SHIFT 14
-
-static u32 *samples;
-
-/* spu_prof_running is a flag used to indicate if spu profiling is enabled
- * or not. It is set by the routines start_spu_profiling_cycles() and
- * start_spu_profiling_events(). The flag is cleared by the routines
- * stop_spu_profiling_cycles() and stop_spu_profiling_events(). These
- * routines are called via global_start() and global_stop() which are called in
- * op_powerpc_start() and op_powerpc_stop(). These routines are called once
- * per system as a result of the user starting/stopping oprofile. Hence, only
- * one CPU per user at a time will be changing the value of spu_prof_running.
- * In general, OProfile does not protect against multiple users trying to run
- * OProfile at a time.
- */
-int spu_prof_running;
-static unsigned int profiling_interval;
-
-#define NUM_SPU_BITS_TRBUF 16
-#define SPUS_PER_TB_ENTRY 4
-
-#define SPU_PC_MASK 0xFFFF
-
-DEFINE_SPINLOCK(oprof_spu_smpl_arry_lck);
-static unsigned long oprof_spu_smpl_arry_lck_flags;
-
-void set_spu_profiling_frequency(unsigned int freq_khz, unsigned int cycles_reset)
-{
- unsigned long ns_per_cyc;
-
- if (!freq_khz)
- freq_khz = ppc_proc_freq/1000;
-
- /* To calculate a timeout in nanoseconds, the basic
- * formula is ns = cycles_reset * (NSEC_PER_SEC / cpu frequency).
- * To avoid floating point math, we use the scale math
- * technique as described in linux/jiffies.h. We use
- * a scale factor of SCALE_SHIFT, which provides 4 decimal places
- * of precision. This is close enough for the purpose at hand.
- *
- * The value of the timeout should be small enough that the hw
- * trace buffer will not get more than about 1/3 full for the
- * maximum user specified (the LFSR value) hw sampling frequency.
- * This is to ensure the trace buffer will never fill even if the
- * kernel thread scheduling varies under a heavy system load.
- */
-
- ns_per_cyc = (USEC_PER_SEC << SCALE_SHIFT)/freq_khz;
- profiling_interval = (ns_per_cyc * cycles_reset) >> SCALE_SHIFT;
-
-}
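The comment's fixed-point scheme boils down to pre-scaling the nanoseconds-per-cycle value by 2^SCALE_SHIFT, multiplying by the user's cycle count, and shifting back down. A worked user-space sketch with assumed example numbers (3.2 GHz core, cycles_reset = 100000, which comes out at about 31 us):

#include <stdio.h>

#define SCALE_SHIFT 14

int main(void)
{
	unsigned long long freq_khz = 3200000;		/* assumed core frequency */
	unsigned long long cycles_reset = 100000;	/* assumed user reset count */
	unsigned long long ns_per_cyc, interval;

	ns_per_cyc = (1000000ULL << SCALE_SHIFT) / freq_khz;	/* USEC_PER_SEC scaled */
	interval = (ns_per_cyc * cycles_reset) >> SCALE_SHIFT;
	printf("profiling interval ~ %llu ns\n", interval);	/* ~31250 ns */
	return 0;
}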
-
-/*
- * Extract SPU PC from trace buffer entry
- */
-static void spu_pc_extract(int cpu, int entry)
-{
- /* the trace buffer is 128 bits */
- u64 trace_buffer[2];
- u64 spu_mask;
- int spu;
-
- spu_mask = SPU_PC_MASK;
-
- /* Each SPU PC is 16 bits; hence, four spus in each of
- * the two 64-bit buffer entries that make up the
- * 128-bit trace_buffer entry. Process two 64-bit values
- * simultaneously.
- * trace[0] SPU PC contents are: 0 1 2 3
- * trace[1] SPU PC contents are: 4 5 6 7
- */
-
- cbe_read_trace_buffer(cpu, trace_buffer);
-
- for (spu = SPUS_PER_TB_ENTRY-1; spu >= 0; spu--) {
- /* spu PC trace entry is upper 16 bits of the
- * 18 bit SPU program counter
- */
- samples[spu * TRACE_ARRAY_SIZE + entry]
- = (spu_mask & trace_buffer[0]) << 2;
- samples[(spu + SPUS_PER_TB_ENTRY) * TRACE_ARRAY_SIZE + entry]
- = (spu_mask & trace_buffer[1]) << 2;
-
- trace_buffer[0] = trace_buffer[0] >> NUM_SPU_BITS_TRBUF;
- trace_buffer[1] = trace_buffer[1] >> NUM_SPU_BITS_TRBUF;
- }
-}
-
-static int cell_spu_pc_collection(int cpu)
-{
- u32 trace_addr;
- int entry;
-
- /* process the collected SPU PC for the node */
-
- entry = 0;
-
- trace_addr = cbe_read_pm(cpu, trace_address);
- while (!(trace_addr & CBE_PM_TRACE_BUF_EMPTY)) {
- /* there is data in the trace buffer to process */
- spu_pc_extract(cpu, entry);
-
- entry++;
-
- if (entry >= TRACE_ARRAY_SIZE)
- /* spu_samples is full */
- break;
-
- trace_addr = cbe_read_pm(cpu, trace_address);
- }
-
- return entry;
-}
-
-
-static enum hrtimer_restart profile_spus(struct hrtimer *timer)
-{
- ktime_t kt;
- int cpu, node, k, num_samples, spu_num;
-
- if (!spu_prof_running)
- goto stop;
-
- for_each_online_cpu(cpu) {
- if (cbe_get_hw_thread_id(cpu))
- continue;
-
- node = cbe_cpu_to_node(cpu);
-
- /* There should only be one kernel thread at a time processing
- * the samples. In the very unlikely case that the processing
- * is taking a very long time and multiple kernel threads are
- * started to process the samples, make sure only one kernel
- * thread is working on the samples array at a time. The
- * sample array must be loaded and then processed for a given
- * cpu. The sample array is not per cpu.
- */
- spin_lock_irqsave(&oprof_spu_smpl_arry_lck,
- oprof_spu_smpl_arry_lck_flags);
- num_samples = cell_spu_pc_collection(cpu);
-
- if (num_samples == 0) {
- spin_unlock_irqrestore(&oprof_spu_smpl_arry_lck,
- oprof_spu_smpl_arry_lck_flags);
- continue;
- }
-
- for (k = 0; k < SPUS_PER_NODE; k++) {
- spu_num = k + (node * SPUS_PER_NODE);
- spu_sync_buffer(spu_num,
- samples + (k * TRACE_ARRAY_SIZE),
- num_samples);
- }
-
- spin_unlock_irqrestore(&oprof_spu_smpl_arry_lck,
- oprof_spu_smpl_arry_lck_flags);
-
- }
- smp_wmb(); /* ensure spu event buffer updates are written */
- /* don't want events intermingled... */
-
- kt = profiling_interval;
- if (!spu_prof_running)
- goto stop;
- hrtimer_forward(timer, timer->base->get_time(), kt);
- return HRTIMER_RESTART;
-
- stop:
- printk(KERN_INFO "SPU_PROF: spu-prof timer ending\n");
- return HRTIMER_NORESTART;
-}
-
-static struct hrtimer timer;
-/*
- * Entry point for SPU cycle profiling.
- * NOTE: SPU profiling is done system-wide, not per-CPU.
- *
- * cycles_reset is the count value specified by the user when
- * setting up OProfile to count SPU_CYCLES.
- */
-int start_spu_profiling_cycles(unsigned int cycles_reset)
-{
- ktime_t kt;
-
- pr_debug("timer resolution: %lu\n", TICK_NSEC);
- kt = profiling_interval;
- hrtimer_init(&timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- hrtimer_set_expires(&timer, kt);
- timer.function = profile_spus;
-
- /* Allocate arrays for collecting SPU PC samples */
- samples = kcalloc(SPUS_PER_NODE * TRACE_ARRAY_SIZE, sizeof(u32),
- GFP_KERNEL);
-
- if (!samples)
- return -ENOMEM;
-
- spu_prof_running = 1;
- hrtimer_start(&timer, kt, HRTIMER_MODE_REL);
- schedule_delayed_work(&spu_work, DEFAULT_TIMER_EXPIRE);
-
- return 0;
-}
-
-/*
- * Entry point for SPU event profiling.
- * NOTE: SPU profiling is done system-wide, not per-CPU.
- *
- * cycles_reset is the count value specified by the user when
- * setting up OProfile to count SPU_CYCLES.
- */
-void start_spu_profiling_events(void)
-{
- spu_prof_running = 1;
- schedule_delayed_work(&spu_work, DEFAULT_TIMER_EXPIRE);
-
- return;
-}
-
-void stop_spu_profiling_cycles(void)
-{
- spu_prof_running = 0;
- hrtimer_cancel(&timer);
- kfree(samples);
- pr_debug("SPU_PROF: stop_spu_profiling_cycles issued\n");
-}
-
-void stop_spu_profiling_events(void)
-{
- spu_prof_running = 0;
-}
diff --git a/arch/powerpc/oprofile/cell/spu_task_sync.c b/arch/powerpc/oprofile/cell/spu_task_sync.c
deleted file mode 100644
index 0caec3d8d436..000000000000
--- a/arch/powerpc/oprofile/cell/spu_task_sync.c
+++ /dev/null
@@ -1,657 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Cell Broadband Engine OProfile Support
- *
- * (C) Copyright IBM Corporation 2006
- *
- * Author: Maynard Johnson <maynardj@us.ibm.com>
- */
-
-/* The purpose of this file is to handle SPU event task switching
- * and to record SPU context information into the OProfile
- * event buffer.
- *
- * Additionally, the spu_sync_buffer function is provided as a helper
- * for recording actual SPU program counter samples to the event buffer.
- */
-#include <linux/dcookies.h>
-#include <linux/kref.h>
-#include <linux/mm.h>
-#include <linux/fs.h>
-#include <linux/file.h>
-#include <linux/module.h>
-#include <linux/notifier.h>
-#include <linux/numa.h>
-#include <linux/oprofile.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include "pr_util.h"
-
-#define RELEASE_ALL 9999
-
-static DEFINE_SPINLOCK(buffer_lock);
-static DEFINE_SPINLOCK(cache_lock);
-static int num_spu_nodes;
-static int spu_prof_num_nodes;
-
-struct spu_buffer spu_buff[MAX_NUMNODES * SPUS_PER_NODE];
-struct delayed_work spu_work;
-static unsigned max_spu_buff;
-
-static void spu_buff_add(unsigned long int value, int spu)
-{
- /* spu buff is a circular buffer. Add entries to the
- * head. Head is the index to store the next value.
- * The buffer is full when there is one available entry
- * in the queue, i.e. head and tail can't be equal.
- * That way we can tell the difference between the
- * buffer being full versus empty.
- *
- * ASSUMPTION: the buffer_lock is held when this function
- * is called to lock the buffer, head and tail.
- */
- int full = 1;
-
- if (spu_buff[spu].head >= spu_buff[spu].tail) {
- if ((spu_buff[spu].head - spu_buff[spu].tail)
- < (max_spu_buff - 1))
- full = 0;
-
- } else if (spu_buff[spu].tail > spu_buff[spu].head) {
- if ((spu_buff[spu].tail - spu_buff[spu].head)
- > 1)
- full = 0;
- }
-
- if (!full) {
- spu_buff[spu].buff[spu_buff[spu].head] = value;
- spu_buff[spu].head++;
-
- if (spu_buff[spu].head >= max_spu_buff)
- spu_buff[spu].head = 0;
- } else {
- /* From the user's perspective make the SPU buffer
- * size management/overflow look like we are using
- * per cpu buffers. The user uses the same
- * per cpu parameter to adjust the SPU buffer size.
- * Increment the sample_lost_overflow to inform
- * the user the buffer size needs to be increased.
- */
- oprofile_cpu_buffer_inc_smpl_lost();
- }
-}
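spu_buff_add() keeps one slot permanently unused so that head == tail can only mean empty; the buffer counts as full once head sits one step behind tail, and overflow is reported to the user as a lost sample. A compact model of that invariant (the deleted code expresses the same check as head/tail distances rather than a modulo step):

#include <assert.h>

#define BUF_SIZE 8	/* illustrative size */

struct ring_model {
	unsigned int head, tail;
	unsigned long slot[BUF_SIZE];
};

static int ring_add(struct ring_model *r, unsigned long value)
{
	unsigned int next = (r->head + 1) % BUF_SIZE;

	if (next == r->tail)
		return -1;	/* full: caller accounts a lost sample */
	r->slot[r->head] = value;
	r->head = next;
	return 0;
}

int main(void)
{
	struct ring_model r = { 0, 0, { 0 } };
	int stored = 0;

	while (ring_add(&r, stored) == 0)
		stored++;
	assert(stored == BUF_SIZE - 1);	/* capacity is size - 1 by design */
	return 0;
}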
-
-/* This function copies the per SPU buffers to the
- * OProfile kernel buffer.
- */
-static void sync_spu_buff(void)
-{
- int spu;
- unsigned long flags;
- int curr_head;
-
- for (spu = 0; spu < num_spu_nodes; spu++) {
- /* In case there was an issue and the buffer didn't
- * get created skip it.
- */
- if (spu_buff[spu].buff == NULL)
- continue;
-
- /* Hold the lock to make sure the head/tail
- * doesn't change while spu_buff_add() is
- * deciding if the buffer is full or not.
- * Being a little paranoid.
- */
- spin_lock_irqsave(&buffer_lock, flags);
- curr_head = spu_buff[spu].head;
- spin_unlock_irqrestore(&buffer_lock, flags);
-
- /* Transfer the current contents to the kernel buffer.
- * data can still be added to the head of the buffer.
- */
- oprofile_put_buff(spu_buff[spu].buff,
- spu_buff[spu].tail,
- curr_head, max_spu_buff);
-
- spin_lock_irqsave(&buffer_lock, flags);
- spu_buff[spu].tail = curr_head;
- spin_unlock_irqrestore(&buffer_lock, flags);
- }
-
-}
-
-static void wq_sync_spu_buff(struct work_struct *work)
-{
- /* move data from spu buffers to kernel buffer */
- sync_spu_buff();
-
- /* only reschedule if profiling is not done */
- if (spu_prof_running)
- schedule_delayed_work(&spu_work, DEFAULT_TIMER_EXPIRE);
-}
-
-/* Container for caching information about an active SPU task. */
-struct cached_info {
- struct vma_to_fileoffset_map *map;
- struct spu *the_spu; /* needed to access pointer to local_store */
- struct kref cache_ref;
-};
-
-static struct cached_info *spu_info[MAX_NUMNODES * 8];
-
-static void destroy_cached_info(struct kref *kref)
-{
- struct cached_info *info;
-
- info = container_of(kref, struct cached_info, cache_ref);
- vma_map_free(info->map);
- kfree(info);
- module_put(THIS_MODULE);
-}
-
-/* Return the cached_info for the passed SPU number.
- * ATTENTION: Callers are responsible for obtaining the
- * cache_lock if needed prior to invoking this function.
- */
-static struct cached_info *get_cached_info(struct spu *the_spu, int spu_num)
-{
- struct kref *ref;
- struct cached_info *ret_info;
-
- if (spu_num >= num_spu_nodes) {
- printk(KERN_ERR "SPU_PROF: "
- "%s, line %d: Invalid index %d into spu info cache\n",
- __func__, __LINE__, spu_num);
- ret_info = NULL;
- goto out;
- }
- if (!spu_info[spu_num] && the_spu) {
- ref = spu_get_profile_private_kref(the_spu->ctx);
- if (ref) {
- spu_info[spu_num] = container_of(ref, struct cached_info, cache_ref);
- kref_get(&spu_info[spu_num]->cache_ref);
- }
- }
-
- ret_info = spu_info[spu_num];
- out:
- return ret_info;
-}
-
-
-/* Looks for cached info for the passed spu. If not found, the
- * cached info is created for the passed spu.
- * Returns 0 for success; otherwise, -1 for error.
- */
-static int
-prepare_cached_spu_info(struct spu *spu, unsigned long objectId)
-{
- unsigned long flags;
- struct vma_to_fileoffset_map *new_map;
- int retval = 0;
- struct cached_info *info;
-
- /* We won't bother getting cache_lock here since
-	 * we don't do anything with the cached_info that's returned.
- */
- info = get_cached_info(spu, spu->number);
-
- if (info) {
- pr_debug("Found cached SPU info.\n");
- goto out;
- }
-
- /* Create cached_info and set spu_info[spu->number] to point to it.
- * spu->number is a system-wide value, not a per-node value.
- */
- info = kzalloc(sizeof(*info), GFP_KERNEL);
- if (!info) {
- printk(KERN_ERR "SPU_PROF: "
- "%s, line %d: create vma_map failed\n",
- __func__, __LINE__);
- retval = -ENOMEM;
- goto err_alloc;
- }
- new_map = create_vma_map(spu, objectId);
- if (!new_map) {
- printk(KERN_ERR "SPU_PROF: "
- "%s, line %d: create vma_map failed\n",
- __func__, __LINE__);
- retval = -ENOMEM;
- goto err_alloc;
- }
-
- pr_debug("Created vma_map\n");
- info->map = new_map;
- info->the_spu = spu;
- kref_init(&info->cache_ref);
- spin_lock_irqsave(&cache_lock, flags);
- spu_info[spu->number] = info;
- /* Increment count before passing off ref to SPUFS. */
- kref_get(&info->cache_ref);
-
- /* We increment the module refcount here since SPUFS is
- * responsible for the final destruction of the cached_info,
- * and it must be able to access the destroy_cached_info()
- * function defined in the OProfile module. We decrement
- * the module refcount in destroy_cached_info.
- */
- try_module_get(THIS_MODULE);
- spu_set_profile_private_kref(spu->ctx, &info->cache_ref,
- destroy_cached_info);
- spin_unlock_irqrestore(&cache_lock, flags);
- goto out;
-
-err_alloc:
- kfree(info);
-out:
- return retval;
-}
-
-/*
- * NOTE: The caller is responsible for locking the
- * cache_lock prior to calling this function.
- */
-static int release_cached_info(int spu_index)
-{
- int index, end;
-
- if (spu_index == RELEASE_ALL) {
- end = num_spu_nodes;
- index = 0;
- } else {
- if (spu_index >= num_spu_nodes) {
- printk(KERN_ERR "SPU_PROF: "
- "%s, line %d: "
- "Invalid index %d into spu info cache\n",
- __func__, __LINE__, spu_index);
- goto out;
- }
- end = spu_index + 1;
- index = spu_index;
- }
- for (; index < end; index++) {
- if (spu_info[index]) {
- kref_put(&spu_info[index]->cache_ref,
- destroy_cached_info);
- spu_info[index] = NULL;
- }
- }
-
-out:
- return 0;
-}
-
-/* The source code for fast_get_dcookie was "borrowed"
- * from drivers/oprofile/buffer_sync.c.
- */
-
-/* Optimisation. We can manage without taking the dcookie sem
- * because we cannot reach this code without at least one
- * dcookie user still being registered (namely, the reader
- * of the event buffer).
- */
-static inline unsigned long fast_get_dcookie(const struct path *path)
-{
- unsigned long cookie;
-
- if (path->dentry->d_flags & DCACHE_COOKIE)
- return (unsigned long)path->dentry;
- get_dcookie(path, &cookie);
- return cookie;
-}
-
-/* Look up the dcookie for the task's mm->exe_file,
- * which corresponds loosely to "application name". Also, determine
- * the offset for the SPU ELF object. If computed offset is
- * non-zero, it implies an embedded SPU object; otherwise, it's a
- * separate SPU binary, in which case we retrieve its dcookie.
- * For the embedded case, we must determine if SPU ELF is embedded
- * in the executable application or another file (i.e., shared lib).
- * If embedded in a shared lib, we must get the dcookie and return
- * that to the caller.
- */
-static unsigned long
-get_exec_dcookie_and_offset(struct spu *spu, unsigned int *offsetp,
- unsigned long *spu_bin_dcookie,
- unsigned long spu_ref)
-{
- unsigned long app_cookie = 0;
- unsigned int my_offset = 0;
- struct vm_area_struct *vma;
- struct file *exe_file;
- struct mm_struct *mm = spu->mm;
-
- if (!mm)
- goto out;
-
- exe_file = get_mm_exe_file(mm);
- if (exe_file) {
- app_cookie = fast_get_dcookie(&exe_file->f_path);
- pr_debug("got dcookie for %pD\n", exe_file);
- fput(exe_file);
- }
-
- down_read(&mm->mmap_sem);
- for (vma = mm->mmap; vma; vma = vma->vm_next) {
- if (vma->vm_start > spu_ref || vma->vm_end <= spu_ref)
- continue;
- my_offset = spu_ref - vma->vm_start;
- if (!vma->vm_file)
- goto fail_no_image_cookie;
-
- pr_debug("Found spu ELF at %X(object-id:%lx) for file %pD\n",
- my_offset, spu_ref, vma->vm_file);
- *offsetp = my_offset;
- break;
- }
-
- *spu_bin_dcookie = fast_get_dcookie(&vma->vm_file->f_path);
- pr_debug("got dcookie for %pD\n", vma->vm_file);
-
- up_read(&mm->mmap_sem);
-
-out:
- return app_cookie;
-
-fail_no_image_cookie:
- up_read(&mm->mmap_sem);
-
- printk(KERN_ERR "SPU_PROF: "
- "%s, line %d: Cannot find dcookie for SPU binary\n",
- __func__, __LINE__);
- goto out;
-}
-
-
-
-/* This function finds or creates cached context information for the
- * passed SPU and records SPU context information into the OProfile
- * event buffer.
- */
-static int process_context_switch(struct spu *spu, unsigned long objectId)
-{
- unsigned long flags;
- int retval;
- unsigned int offset = 0;
- unsigned long spu_cookie = 0, app_dcookie;
-
- retval = prepare_cached_spu_info(spu, objectId);
- if (retval)
- goto out;
-
- /* Get dcookie first because a mutex_lock is taken in that
- * code path, so interrupts must not be disabled.
- */
- app_dcookie = get_exec_dcookie_and_offset(spu, &offset, &spu_cookie, objectId);
- if (!app_dcookie || !spu_cookie) {
- retval = -ENOENT;
- goto out;
- }
-
- /* Record context info in event buffer */
- spin_lock_irqsave(&buffer_lock, flags);
- spu_buff_add(ESCAPE_CODE, spu->number);
- spu_buff_add(SPU_CTX_SWITCH_CODE, spu->number);
- spu_buff_add(spu->number, spu->number);
- spu_buff_add(spu->pid, spu->number);
- spu_buff_add(spu->tgid, spu->number);
- spu_buff_add(app_dcookie, spu->number);
- spu_buff_add(spu_cookie, spu->number);
- spu_buff_add(offset, spu->number);
-
- /* Set flag to indicate SPU PC data can now be written out. If
- * the SPU program counter data is seen before an SPU context
- * record is seen, the postprocessing will fail.
- */
- spu_buff[spu->number].ctx_sw_seen = 1;
-
- spin_unlock_irqrestore(&buffer_lock, flags);
-	smp_wmb(); /* ensure SPU event buffer updates are written */
- /* don't want entries intermingled... */
-out:
- return retval;
-}
-
-/*
- * This function is invoked on either a bind_context or unbind_context.
- * If called for an unbind_context, the val arg is 0; otherwise,
- * it is the object-id value for the spu context.
- * The data arg is of type 'struct spu *'.
- */
-static int spu_active_notify(struct notifier_block *self, unsigned long val,
- void *data)
-{
- int retval;
- unsigned long flags;
- struct spu *the_spu = data;
-
- pr_debug("SPU event notification arrived\n");
- if (!val) {
- spin_lock_irqsave(&cache_lock, flags);
- retval = release_cached_info(the_spu->number);
- spin_unlock_irqrestore(&cache_lock, flags);
- } else {
- retval = process_context_switch(the_spu, val);
- }
- return retval;
-}
-
-static struct notifier_block spu_active = {
- .notifier_call = spu_active_notify,
-};
-
-static int number_of_online_nodes(void)
-{
- u32 cpu; u32 tmp;
- int nodes = 0;
- for_each_online_cpu(cpu) {
- tmp = cbe_cpu_to_node(cpu) + 1;
- if (tmp > nodes)
- nodes++;
- }
- return nodes;
-}
-
-static int oprofile_spu_buff_create(void)
-{
- int spu;
-
- max_spu_buff = oprofile_get_cpu_buffer_size();
-
- for (spu = 0; spu < num_spu_nodes; spu++) {
- /* create circular buffers to store the data in.
- * use locks to manage accessing the buffers
- */
- spu_buff[spu].head = 0;
- spu_buff[spu].tail = 0;
-
- /*
-		 * Create a buffer for each SPU.  A single buffer for
-		 * all SPUs can't be allocated reliably because enough
-		 * contiguous kernel memory may not be available.
- */
-
- spu_buff[spu].buff = kzalloc((max_spu_buff
- * sizeof(unsigned long)),
- GFP_KERNEL);
-
- if (!spu_buff[spu].buff) {
- printk(KERN_ERR "SPU_PROF: "
- "%s, line %d: oprofile_spu_buff_create "
- "failed to allocate spu buffer %d.\n",
- __func__, __LINE__, spu);
-
- /* release the spu buffers that have been allocated */
- while (spu >= 0) {
- kfree(spu_buff[spu].buff);
- spu_buff[spu].buff = 0;
- spu--;
- }
- return -ENOMEM;
- }
- }
- return 0;
-}
-
-/* The main purpose of this function is to synchronize
- * OProfile with SPUFS by registering to be notified of
- * SPU task switches.
- *
- * NOTE: When profiling SPUs, we must ensure that only
- * spu_sync_start is invoked and not the generic sync_start
- * in drivers/oprofile/oprof.c. A return value of
- * SKIP_GENERIC_SYNC or SYNC_START_ERROR will
- * accomplish this.
- */
-int spu_sync_start(void)
-{
- int spu;
- int ret = SKIP_GENERIC_SYNC;
- int register_ret;
- unsigned long flags = 0;
-
- spu_prof_num_nodes = number_of_online_nodes();
- num_spu_nodes = spu_prof_num_nodes * 8;
- INIT_DELAYED_WORK(&spu_work, wq_sync_spu_buff);
-
- /* create buffer for storing the SPU data to put in
- * the kernel buffer.
- */
- ret = oprofile_spu_buff_create();
- if (ret)
- goto out;
-
- spin_lock_irqsave(&buffer_lock, flags);
- for (spu = 0; spu < num_spu_nodes; spu++) {
- spu_buff_add(ESCAPE_CODE, spu);
- spu_buff_add(SPU_PROFILING_CODE, spu);
- spu_buff_add(num_spu_nodes, spu);
- }
- spin_unlock_irqrestore(&buffer_lock, flags);
-
- for (spu = 0; spu < num_spu_nodes; spu++) {
- spu_buff[spu].ctx_sw_seen = 0;
- spu_buff[spu].last_guard_val = 0;
- }
-
- /* Register for SPU events */
- register_ret = spu_switch_event_register(&spu_active);
- if (register_ret) {
- ret = SYNC_START_ERROR;
- goto out;
- }
-
- pr_debug("spu_sync_start -- running.\n");
-out:
- return ret;
-}
-
-/* Record SPU program counter samples to the oprofile event buffer. */
-void spu_sync_buffer(int spu_num, unsigned int *samples,
- int num_samples)
-{
- unsigned long long file_offset;
- unsigned long flags;
- int i;
- struct vma_to_fileoffset_map *map;
- struct spu *the_spu;
- unsigned long long spu_num_ll = spu_num;
- unsigned long long spu_num_shifted = spu_num_ll << 32;
- struct cached_info *c_info;
-
- /* We need to obtain the cache_lock here because it's
- * possible that after getting the cached_info, the SPU job
- * corresponding to this cached_info may end, thus resulting
- * in the destruction of the cached_info.
- */
- spin_lock_irqsave(&cache_lock, flags);
- c_info = get_cached_info(NULL, spu_num);
- if (!c_info) {
- /* This legitimately happens when the SPU task ends before all
- * samples are recorded.
- * No big deal -- so we just drop a few samples.
- */
-		pr_debug("SPU_PROF: No cached SPU context "
- "for SPU #%d. Dropping samples.\n", spu_num);
- goto out;
- }
-
- map = c_info->map;
- the_spu = c_info->the_spu;
- spin_lock(&buffer_lock);
- for (i = 0; i < num_samples; i++) {
- unsigned int sample = *(samples+i);
- int grd_val = 0;
- file_offset = 0;
- if (sample == 0)
- continue;
- file_offset = vma_map_lookup( map, sample, the_spu, &grd_val);
-
- /* If overlays are used by this SPU application, the guard
- * value is non-zero, indicating which overlay section is in
- * use. We need to discard samples taken during the time
-		 * period in which an overlay switch occurs (i.e., the guard value changes).
- */
- if (grd_val && grd_val != spu_buff[spu_num].last_guard_val) {
- spu_buff[spu_num].last_guard_val = grd_val;
- /* Drop the rest of the samples. */
- break;
- }
-
- /* We must ensure that the SPU context switch has been written
- * out before samples for the SPU. Otherwise, the SPU context
- * information is not available and the postprocessing of the
- * SPU PC will fail with no available anonymous map information.
- */
- if (spu_buff[spu_num].ctx_sw_seen)
- spu_buff_add((file_offset | spu_num_shifted),
- spu_num);
- }
- spin_unlock(&buffer_lock);
-out:
- spin_unlock_irqrestore(&cache_lock, flags);
-}
-
-
-int spu_sync_stop(void)
-{
- unsigned long flags = 0;
- int ret;
- int k;
-
- ret = spu_switch_event_unregister(&spu_active);
-
- if (ret)
- printk(KERN_ERR "SPU_PROF: "
- "%s, line %d: spu_switch_event_unregister " \
- "returned %d\n",
- __func__, __LINE__, ret);
-
- /* flush any remaining data in the per SPU buffers */
- sync_spu_buff();
-
- spin_lock_irqsave(&cache_lock, flags);
- ret = release_cached_info(RELEASE_ALL);
- spin_unlock_irqrestore(&cache_lock, flags);
-
-	/* remove the scheduled work queue item rather than waiting
- * for every queued entry to execute. Then flush pending
- * system wide buffer to event buffer.
- */
- cancel_delayed_work(&spu_work);
-
- for (k = 0; k < num_spu_nodes; k++) {
- spu_buff[k].ctx_sw_seen = 0;
-
- /*
-		 * spu_buff[k].buff will be NULL if there was a problem
- * allocating the buffer. Only delete if it exists.
- */
- kfree(spu_buff[k].buff);
- spu_buff[k].buff = 0;
- }
- pr_debug("spu_sync_stop -- done.\n");
- return ret;
-}
-
diff --git a/arch/powerpc/oprofile/cell/vma_map.c b/arch/powerpc/oprofile/cell/vma_map.c
deleted file mode 100644
index 7c4b19cfde88..000000000000
--- a/arch/powerpc/oprofile/cell/vma_map.c
+++ /dev/null
@@ -1,279 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Cell Broadband Engine OProfile Support
- *
- * (C) Copyright IBM Corporation 2006
- *
- * Author: Maynard Johnson <maynardj@us.ibm.com>
- */
-
-/* The code in this source file is responsible for generating
- * vma-to-fileOffset maps for both overlay and non-overlay SPU
- * applications.
- */
-
-#include <linux/mm.h>
-#include <linux/string.h>
-#include <linux/uaccess.h>
-#include <linux/elf.h>
-#include <linux/slab.h>
-#include "pr_util.h"
-
-
-void vma_map_free(struct vma_to_fileoffset_map *map)
-{
- while (map) {
- struct vma_to_fileoffset_map *next = map->next;
- kfree(map);
- map = next;
- }
-}
-
-unsigned int
-vma_map_lookup(struct vma_to_fileoffset_map *map, unsigned int vma,
- const struct spu *aSpu, int *grd_val)
-{
- /*
- * Default the offset to the physical address + a flag value.
- * Addresses of dynamically generated code can't be found in the vma
- * map. For those addresses the flagged value will be sent on to
- * the user space tools so they can be reported rather than just
- * thrown away.
- */
- u32 offset = 0x10000000 + vma;
- u32 ovly_grd;
-
- for (; map; map = map->next) {
- if (vma < map->vma || vma >= map->vma + map->size)
- continue;
-
- if (map->guard_ptr) {
- ovly_grd = *(u32 *)(aSpu->local_store + map->guard_ptr);
- if (ovly_grd != map->guard_val)
- continue;
- *grd_val = ovly_grd;
- }
- offset = vma - map->vma + map->offset;
- break;
- }
-
- return offset;
-}
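vma_map_lookup() returns either a file offset or the flagged value 0x10000000 + vma when no map entry covers the sample. A simplified, self-contained version of the walk, without the overlay-guard check and with an invented struct layout purely for illustration:

#include <stdio.h>

struct simple_map {
	unsigned int vma;	/* start of the mapped SPU address range */
	unsigned int size;	/* length of the range */
	unsigned int offset;	/* corresponding offset in the ELF file */
	struct simple_map *next;
};

static unsigned int simple_lookup(const struct simple_map *m, unsigned int vma)
{
	unsigned int off = 0x10000000 + vma;	/* flag value for unmapped code */

	for (; m; m = m->next) {
		if (vma < m->vma || vma >= m->vma + m->size)
			continue;
		off = vma - m->vma + m->offset;
		break;
	}
	return off;
}

int main(void)
{
	struct simple_map text = { .vma = 0x100, .size = 0x200, .offset = 0x34, .next = NULL };

	printf("0x%x\n", simple_lookup(&text, 0x180));	/* 0x180 - 0x100 + 0x34 = 0xb4 */
	printf("0x%x\n", simple_lookup(&text, 0x1000));	/* falls back to 0x10001000 */
	return 0;
}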
-
-static struct vma_to_fileoffset_map *
-vma_map_add(struct vma_to_fileoffset_map *map, unsigned int vma,
- unsigned int size, unsigned int offset, unsigned int guard_ptr,
- unsigned int guard_val)
-{
- struct vma_to_fileoffset_map *new = kzalloc(sizeof(*new), GFP_KERNEL);
-
- if (!new) {
- printk(KERN_ERR "SPU_PROF: %s, line %d: malloc failed\n",
- __func__, __LINE__);
- vma_map_free(map);
- return NULL;
- }
-
- new->next = map;
- new->vma = vma;
- new->size = size;
- new->offset = offset;
- new->guard_ptr = guard_ptr;
- new->guard_val = guard_val;
-
- return new;
-}
-
-
-/* Parse SPE ELF header and generate a list of vma_maps.
- * A pointer to the first vma_map in the generated list
- * of vma_maps is returned. */
-struct vma_to_fileoffset_map *create_vma_map(const struct spu *aSpu,
- unsigned long __spu_elf_start)
-{
- static const unsigned char expected[EI_PAD] = {
- [EI_MAG0] = ELFMAG0,
- [EI_MAG1] = ELFMAG1,
- [EI_MAG2] = ELFMAG2,
- [EI_MAG3] = ELFMAG3,
- [EI_CLASS] = ELFCLASS32,
- [EI_DATA] = ELFDATA2MSB,
- [EI_VERSION] = EV_CURRENT,
- [EI_OSABI] = ELFOSABI_NONE
- };
-
- int grd_val;
- struct vma_to_fileoffset_map *map = NULL;
- void __user *spu_elf_start = (void __user *)__spu_elf_start;
- struct spu_overlay_info ovly;
- unsigned int overlay_tbl_offset = -1;
- Elf32_Phdr __user *phdr_start;
- Elf32_Shdr __user *shdr_start;
- Elf32_Ehdr ehdr;
- Elf32_Phdr phdr;
- Elf32_Shdr shdr, shdr_str;
- Elf32_Sym sym;
- int i, j;
- char name[32];
-
- unsigned int ovly_table_sym = 0;
- unsigned int ovly_buf_table_sym = 0;
- unsigned int ovly_table_end_sym = 0;
- unsigned int ovly_buf_table_end_sym = 0;
- struct spu_overlay_info __user *ovly_table;
- unsigned int n_ovlys;
-
- /* Get and validate ELF header. */
-
- if (copy_from_user(&ehdr, spu_elf_start, sizeof (ehdr)))
- goto fail;
-
- if (memcmp(ehdr.e_ident, expected, EI_PAD) != 0) {
- printk(KERN_ERR "SPU_PROF: "
- "%s, line %d: Unexpected e_ident parsing SPU ELF\n",
- __func__, __LINE__);
- goto fail;
- }
- if (ehdr.e_machine != EM_SPU) {
- printk(KERN_ERR "SPU_PROF: "
- "%s, line %d: Unexpected e_machine parsing SPU ELF\n",
- __func__, __LINE__);
- goto fail;
- }
- if (ehdr.e_type != ET_EXEC) {
- printk(KERN_ERR "SPU_PROF: "
- "%s, line %d: Unexpected e_type parsing SPU ELF\n",
- __func__, __LINE__);
- goto fail;
- }
- phdr_start = spu_elf_start + ehdr.e_phoff;
- shdr_start = spu_elf_start + ehdr.e_shoff;
-
- /* Traverse program headers. */
- for (i = 0; i < ehdr.e_phnum; i++) {
- if (copy_from_user(&phdr, phdr_start + i, sizeof(phdr)))
- goto fail;
-
- if (phdr.p_type != PT_LOAD)
- continue;
- if (phdr.p_flags & (1 << 27))
- continue;
-
- map = vma_map_add(map, phdr.p_vaddr, phdr.p_memsz,
- phdr.p_offset, 0, 0);
- if (!map)
- goto fail;
- }
-
- pr_debug("SPU_PROF: Created non-overlay maps\n");
- /* Traverse section table and search for overlay-related symbols. */
- for (i = 0; i < ehdr.e_shnum; i++) {
- if (copy_from_user(&shdr, shdr_start + i, sizeof(shdr)))
- goto fail;
-
- if (shdr.sh_type != SHT_SYMTAB)
- continue;
- if (shdr.sh_entsize != sizeof (sym))
- continue;
-
- if (copy_from_user(&shdr_str,
- shdr_start + shdr.sh_link,
- sizeof(shdr)))
- goto fail;
-
- if (shdr_str.sh_type != SHT_STRTAB)
- goto fail;
-
- for (j = 0; j < shdr.sh_size / sizeof (sym); j++) {
- if (copy_from_user(&sym, spu_elf_start +
- shdr.sh_offset +
- j * sizeof (sym),
- sizeof (sym)))
- goto fail;
-
- if (copy_from_user(name,
- spu_elf_start + shdr_str.sh_offset +
- sym.st_name,
- 20))
- goto fail;
-
- if (memcmp(name, "_ovly_table", 12) == 0)
- ovly_table_sym = sym.st_value;
- if (memcmp(name, "_ovly_buf_table", 16) == 0)
- ovly_buf_table_sym = sym.st_value;
- if (memcmp(name, "_ovly_table_end", 16) == 0)
- ovly_table_end_sym = sym.st_value;
- if (memcmp(name, "_ovly_buf_table_end", 20) == 0)
- ovly_buf_table_end_sym = sym.st_value;
- }
- }
-
- /* If we don't have overlays, we're done. */
- if (ovly_table_sym == 0 || ovly_buf_table_sym == 0
- || ovly_table_end_sym == 0 || ovly_buf_table_end_sym == 0) {
- pr_debug("SPU_PROF: No overlay table found\n");
- goto out;
- } else {
- pr_debug("SPU_PROF: Overlay table found\n");
- }
-
- /* The _ovly_table symbol represents a table with one entry
- * per overlay section. The _ovly_buf_table symbol represents
- * a table with one entry per overlay region.
- * The struct spu_overlay_info gives the structure of the _ovly_table
- * entries.  The structure of _ovly_buf_table is simply one
- * u32 word per entry.
- */
- overlay_tbl_offset = vma_map_lookup(map, ovly_table_sym,
- aSpu, &grd_val);
- if (overlay_tbl_offset > 0x10000000) {
- printk(KERN_ERR "SPU_PROF: "
- "%s, line %d: Error finding SPU overlay table\n",
- __func__, __LINE__);
- goto fail;
- }
- ovly_table = spu_elf_start + overlay_tbl_offset;
-
- n_ovlys = (ovly_table_end_sym -
- ovly_table_sym) / sizeof (ovly);
-
- /* Traverse overlay table. */
- for (i = 0; i < n_ovlys; i++) {
- if (copy_from_user(&ovly, ovly_table + i, sizeof (ovly)))
- goto fail;
-
- /* The ovly.vma/size/offset arguments are analogous to the same
- * arguments used above for non-overlay maps. The final two
- * args are referred to as the guard pointer and the guard
- * value.
- * The guard pointer is an entry in the _ovly_buf_table,
- * computed using ovly.buf as the index into the table. Since
- * ovly.buf values begin at '1' to reference the first (or 0th)
- * entry in the _ovly_buf_table, the computation subtracts 1
- * from ovly.buf.
- * The guard value is stored in the _ovly_buf_table entry and
- * is an index (starting at 1) back to the _ovly_table entry
- * that is pointing at this _ovly_buf_table entry. So, for
- * example, for an overlay scenario with one overlay segment
- * and two overlay sections:
- * - Section 1 points to the first entry of the
- * _ovly_buf_table, which contains a guard value
- * of '1', referencing the first (index=0) entry of
- * _ovly_table.
- * - Section 2 points to the second entry of the
- * _ovly_buf_table, which contains a guard value
- * of '2', referencing the second (index=1) entry of
- * _ovly_table.
- */
- map = vma_map_add(map, ovly.vma, ovly.size, ovly.offset,
- ovly_buf_table_sym + (ovly.buf-1) * 4, i+1);
- if (!map)
- goto fail;
- }
- goto out;
-
- fail:
- map = NULL;
- out:
- return map;
-}
diff --git a/arch/powerpc/oprofile/common.c b/arch/powerpc/oprofile/common.c
deleted file mode 100644
index 0fb528c2b3a1..000000000000
--- a/arch/powerpc/oprofile/common.c
+++ /dev/null
@@ -1,243 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * PPC 64 oprofile support:
- * Copyright (C) 2004 Anton Blanchard <anton@au.ibm.com>, IBM
- * PPC 32 oprofile support: (based on PPC 64 support)
- * Copyright (C) Freescale Semiconductor, Inc 2004
- * Author: Andy Fleming
- *
- * Based on alpha version.
- */
-
-#include <linux/oprofile.h>
-#include <linux/init.h>
-#include <linux/smp.h>
-#include <linux/errno.h>
-#include <asm/ptrace.h>
-#include <asm/pmc.h>
-#include <asm/cputable.h>
-#include <asm/oprofile_impl.h>
-#include <asm/firmware.h>
-
-static struct op_powerpc_model *model;
-
-static struct op_counter_config ctr[OP_MAX_COUNTER];
-static struct op_system_config sys;
-
-static int op_per_cpu_rc;
-
-static void op_handle_interrupt(struct pt_regs *regs)
-{
- model->handle_interrupt(regs, ctr);
-}
-
-static void op_powerpc_cpu_setup(void *dummy)
-{
- int ret;
-
- ret = model->cpu_setup(ctr);
-
- if (ret != 0)
- op_per_cpu_rc = ret;
-}
-
-static int op_powerpc_setup(void)
-{
- int err;
-
- op_per_cpu_rc = 0;
-
- /* Grab the hardware */
- err = reserve_pmc_hardware(op_handle_interrupt);
- if (err)
- return err;
-
- /* Pre-compute the values to stuff in the hardware registers. */
- op_per_cpu_rc = model->reg_setup(ctr, &sys, model->num_counters);
-
- if (op_per_cpu_rc)
- goto out;
-
- /* Configure the registers on all cpus. If an error occurs on one
- * of the cpus, op_per_cpu_rc will be set to the error */
- on_each_cpu(op_powerpc_cpu_setup, NULL, 1);
-
-out: if (op_per_cpu_rc) {
-		/* error on setup; release the performance counter hardware */
- release_pmc_hardware();
- }
-
- return op_per_cpu_rc;
-}
-
-static void op_powerpc_shutdown(void)
-{
- release_pmc_hardware();
-}
-
-static void op_powerpc_cpu_start(void *dummy)
-{
-	/* If any of the cpus have returned an error, set the
- * global flag to the error so it can be returned
- * to the generic OProfile caller.
- */
- int ret;
-
- ret = model->start(ctr);
- if (ret != 0)
- op_per_cpu_rc = ret;
-}
-
-static int op_powerpc_start(void)
-{
- op_per_cpu_rc = 0;
-
- if (model->global_start)
- return model->global_start(ctr);
- if (model->start) {
- on_each_cpu(op_powerpc_cpu_start, NULL, 1);
- return op_per_cpu_rc;
- }
- return -EIO; /* No start function is defined for this
- power architecture */
-}
-
-static inline void op_powerpc_cpu_stop(void *dummy)
-{
- model->stop();
-}
-
-static void op_powerpc_stop(void)
-{
- if (model->stop)
- on_each_cpu(op_powerpc_cpu_stop, NULL, 1);
- if (model->global_stop)
- model->global_stop();
-}
-
-static int op_powerpc_create_files(struct dentry *root)
-{
- int i;
-
-#ifdef CONFIG_PPC64
- /*
- * There is one mmcr0, mmcr1 and mmcra for setting the events for
- * all of the counters.
- */
- oprofilefs_create_ulong(root, "mmcr0", &sys.mmcr0);
- oprofilefs_create_ulong(root, "mmcr1", &sys.mmcr1);
- oprofilefs_create_ulong(root, "mmcra", &sys.mmcra);
-#ifdef CONFIG_OPROFILE_CELL
- /* create a file the user tool can check to see what level of profiling
-	 * support exists in this kernel. Initialize the bit mask to indicate
- * what support the kernel has:
- * bit 0 - Supports SPU event profiling in addition to PPU
- * event and cycles; and SPU cycle profiling
- * bits 1-31 - Currently unused.
- *
- * If the file does not exist, then the kernel only supports SPU
- * cycle profiling, PPU event and cycle profiling.
- */
- oprofilefs_create_ulong(root, "cell_support", &sys.cell_support);
- sys.cell_support = 0x1; /* Note, the user OProfile tool must check
- * that this bit is set before attempting to
-				 * use SPU event profiling.  Older kernels
- * will not have this file, hence the user
- * tool is not allowed to do SPU event
- * profiling on older kernels. Older kernels
- * will accept SPU events but collected data
- * is garbage.
- */
-#endif
-#endif
-
- for (i = 0; i < model->num_counters; ++i) {
- struct dentry *dir;
- char buf[4];
-
- snprintf(buf, sizeof buf, "%d", i);
- dir = oprofilefs_mkdir(root, buf);
-
- oprofilefs_create_ulong(dir, "enabled", &ctr[i].enabled);
- oprofilefs_create_ulong(dir, "event", &ctr[i].event);
- oprofilefs_create_ulong(dir, "count", &ctr[i].count);
-
- /*
- * Classic PowerPC doesn't support per-counter
- * control like this, but the options are
- * expected, so they remain. For Freescale
- * Book-E style performance monitors, we do
- * support them.
- */
- oprofilefs_create_ulong(dir, "kernel", &ctr[i].kernel);
- oprofilefs_create_ulong(dir, "user", &ctr[i].user);
-
- oprofilefs_create_ulong(dir, "unit_mask", &ctr[i].unit_mask);
- }
-
- oprofilefs_create_ulong(root, "enable_kernel", &sys.enable_kernel);
- oprofilefs_create_ulong(root, "enable_user", &sys.enable_user);
-
- /* Default to tracing both kernel and user */
- sys.enable_kernel = 1;
- sys.enable_user = 1;
-
- return 0;
-}
-
-int __init oprofile_arch_init(struct oprofile_operations *ops)
-{
- if (!cur_cpu_spec->oprofile_cpu_type)
- return -ENODEV;
-
- switch (cur_cpu_spec->oprofile_type) {
-#ifdef CONFIG_PPC_BOOK3S_64
-#ifdef CONFIG_OPROFILE_CELL
- case PPC_OPROFILE_CELL:
- if (firmware_has_feature(FW_FEATURE_LPAR))
- return -ENODEV;
- model = &op_model_cell;
- ops->sync_start = model->sync_start;
- ops->sync_stop = model->sync_stop;
- break;
-#endif
- case PPC_OPROFILE_POWER4:
- model = &op_model_power4;
- break;
- case PPC_OPROFILE_PA6T:
- model = &op_model_pa6t;
- break;
-#endif
-#ifdef CONFIG_PPC_BOOK3S_32
- case PPC_OPROFILE_G4:
- model = &op_model_7450;
- break;
-#endif
-#if defined(CONFIG_FSL_EMB_PERFMON)
- case PPC_OPROFILE_FSL_EMB:
- model = &op_model_fsl_emb;
- break;
-#endif
- default:
- return -ENODEV;
- }
-
- model->num_counters = cur_cpu_spec->num_pmcs;
-
- ops->cpu_type = cur_cpu_spec->oprofile_cpu_type;
- ops->create_files = op_powerpc_create_files;
- ops->setup = op_powerpc_setup;
- ops->shutdown = op_powerpc_shutdown;
- ops->start = op_powerpc_start;
- ops->stop = op_powerpc_stop;
- ops->backtrace = op_powerpc_backtrace;
-
- printk(KERN_DEBUG "oprofile: using %s performance monitoring.\n",
- ops->cpu_type);
-
- return 0;
-}
-
-void oprofile_arch_exit(void)
-{
-}
diff --git a/arch/powerpc/oprofile/op_model_7450.c b/arch/powerpc/oprofile/op_model_7450.c
deleted file mode 100644
index 5ebc25188a72..000000000000
--- a/arch/powerpc/oprofile/op_model_7450.c
+++ /dev/null
@@ -1,207 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * arch/powerpc/oprofile/op_model_7450.c
- *
- * Freescale 745x/744x oprofile support, based on fsl_booke support
- * Copyright (C) 2004 Anton Blanchard <anton@au.ibm.com>, IBM
- *
- * Copyright (c) 2004 Freescale Semiconductor, Inc
- *
- * Author: Andy Fleming
- * Maintainer: Kumar Gala <galak@kernel.crashing.org>
- */
-
-#include <linux/oprofile.h>
-#include <linux/smp.h>
-#include <asm/ptrace.h>
-#include <asm/processor.h>
-#include <asm/cputable.h>
-#include <asm/page.h>
-#include <asm/pmc.h>
-#include <asm/oprofile_impl.h>
-
-static unsigned long reset_value[OP_MAX_COUNTER];
-
-static int oprofile_running;
-static u32 mmcr0_val, mmcr1_val, mmcr2_val, num_pmcs;
-
-#define MMCR0_PMC1_SHIFT 6
-#define MMCR0_PMC2_SHIFT 0
-#define MMCR1_PMC3_SHIFT 27
-#define MMCR1_PMC4_SHIFT 22
-#define MMCR1_PMC5_SHIFT 17
-#define MMCR1_PMC6_SHIFT 11
-
-#define mmcr0_event1(event) \
- ((event << MMCR0_PMC1_SHIFT) & MMCR0_PMC1SEL)
-#define mmcr0_event2(event) \
- ((event << MMCR0_PMC2_SHIFT) & MMCR0_PMC2SEL)
-
-#define mmcr1_event3(event) \
- ((event << MMCR1_PMC3_SHIFT) & MMCR1_PMC3SEL)
-#define mmcr1_event4(event) \
- ((event << MMCR1_PMC4_SHIFT) & MMCR1_PMC4SEL)
-#define mmcr1_event5(event) \
- ((event << MMCR1_PMC5_SHIFT) & MMCR1_PMC5SEL)
-#define mmcr1_event6(event) \
- ((event << MMCR1_PMC6_SHIFT) & MMCR1_PMC6SEL)
-
-#define MMCR0_INIT (MMCR0_FC | MMCR0_FCS | MMCR0_FCP | MMCR0_FCM1 | MMCR0_FCM0)
-
-/* Unfreezes the counters on this CPU, enables the interrupt,
- * enables the counters to trigger the interrupt, and sets the
- * counters to only count when the mark bit is not set.
- */
-static void pmc_start_ctrs(void)
-{
- u32 mmcr0 = mfspr(SPRN_MMCR0);
-
- mmcr0 &= ~(MMCR0_FC | MMCR0_FCM0);
- mmcr0 |= (MMCR0_FCECE | MMCR0_PMC1CE | MMCR0_PMCnCE | MMCR0_PMXE);
-
- mtspr(SPRN_MMCR0, mmcr0);
-}
-
-/* Disables the counters on this CPU, and freezes them */
-static void pmc_stop_ctrs(void)
-{
- u32 mmcr0 = mfspr(SPRN_MMCR0);
-
- mmcr0 |= MMCR0_FC;
- mmcr0 &= ~(MMCR0_FCECE | MMCR0_PMC1CE | MMCR0_PMCnCE | MMCR0_PMXE);
-
- mtspr(SPRN_MMCR0, mmcr0);
-}
-
-/* Configures the counters on this CPU based on the global
- * settings */
-static int fsl7450_cpu_setup(struct op_counter_config *ctr)
-{
- /* freeze all counters */
- pmc_stop_ctrs();
-
- mtspr(SPRN_MMCR0, mmcr0_val);
- mtspr(SPRN_MMCR1, mmcr1_val);
- if (num_pmcs > 4)
- mtspr(SPRN_MMCR2, mmcr2_val);
-
- return 0;
-}
-
-/* Configures the global settings for the counters on all CPUs. */
-static int fsl7450_reg_setup(struct op_counter_config *ctr,
- struct op_system_config *sys,
- int num_ctrs)
-{
- int i;
-
- num_pmcs = num_ctrs;
-	/* Our counters count up, and "count" refers to how many
-	 * events occur before the next interrupt; we interrupt
-	 * on overflow.  So we calculate the starting value
-	 * which will give us "count" events until overflow.
- * Then we set the events on the enabled counters */
- for (i = 0; i < num_ctrs; ++i)
- reset_value[i] = 0x80000000UL - ctr[i].count;
-
- /* Set events for Counters 1 & 2 */
- mmcr0_val = MMCR0_INIT | mmcr0_event1(ctr[0].event)
- | mmcr0_event2(ctr[1].event);
-
- /* Setup user/kernel bits */
- if (sys->enable_kernel)
- mmcr0_val &= ~(MMCR0_FCS);
-
- if (sys->enable_user)
- mmcr0_val &= ~(MMCR0_FCP);
-
- /* Set events for Counters 3-6 */
- mmcr1_val = mmcr1_event3(ctr[2].event)
- | mmcr1_event4(ctr[3].event);
- if (num_ctrs > 4)
- mmcr1_val |= mmcr1_event5(ctr[4].event)
- | mmcr1_event6(ctr[5].event);
-
- mmcr2_val = 0;
-
- return 0;
-}
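To make the reset_value arithmetic above concrete: the classic PMCs raise the performance-monitor exception once their most-significant bit becomes set (the interrupt handler below checks classic_ctr_read() < 0), so seeding a counter with 0x80000000 - count means exactly count events elapse before the interrupt. A quick host-side check, purely illustrative:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t count = 100000;	/* samples requested by the user */
	uint32_t reset_value = 0x80000000UL - count;
	uint32_t after = reset_value + count;	/* counter value when the interrupt fires */

	printf("start 0x%08x, after %u events 0x%08x (MSB %s)\n",
	       (unsigned)reset_value, (unsigned)count, (unsigned)after,
	       (after & 0x80000000UL) ? "set" : "clear");
	return 0;
}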
-
-/* Sets the counters on this CPU to the chosen values, and starts them */
-static int fsl7450_start(struct op_counter_config *ctr)
-{
- int i;
-
- mtmsr(mfmsr() | MSR_PMM);
-
- for (i = 0; i < num_pmcs; ++i) {
- if (ctr[i].enabled)
- classic_ctr_write(i, reset_value[i]);
- else
- classic_ctr_write(i, 0);
- }
-
- /* Clear the freeze bit, and enable the interrupt.
- * The counters won't actually start until the rfi clears
- * the PMM bit */
- pmc_start_ctrs();
-
- oprofile_running = 1;
-
- return 0;
-}
-
-/* Stop the counters on this CPU */
-static void fsl7450_stop(void)
-{
- /* freeze counters */
- pmc_stop_ctrs();
-
- oprofile_running = 0;
-
- mb();
-}
-
-
-/* Handle the interrupt on this CPU, and log a sample for each
- * event that triggered the interrupt */
-static void fsl7450_handle_interrupt(struct pt_regs *regs,
- struct op_counter_config *ctr)
-{
- unsigned long pc;
- int is_kernel;
- int val;
- int i;
-
- /* set the PMM bit (see comment below) */
- mtmsr(mfmsr() | MSR_PMM);
-
- pc = mfspr(SPRN_SIAR);
- is_kernel = is_kernel_addr(pc);
-
- for (i = 0; i < num_pmcs; ++i) {
- val = classic_ctr_read(i);
- if (val < 0) {
- if (oprofile_running && ctr[i].enabled) {
- oprofile_add_ext_sample(pc, regs, i, is_kernel);
- classic_ctr_write(i, reset_value[i]);
- } else {
- classic_ctr_write(i, 0);
- }
- }
- }
-
- /* The freeze bit was set by the interrupt. */
- /* Clear the freeze bit, and reenable the interrupt.
- * The counters won't actually start until the rfi clears
-	 * the PMM bit */
- pmc_start_ctrs();
-}
-
-struct op_powerpc_model op_model_7450= {
- .reg_setup = fsl7450_reg_setup,
- .cpu_setup = fsl7450_cpu_setup,
- .start = fsl7450_start,
- .stop = fsl7450_stop,
- .handle_interrupt = fsl7450_handle_interrupt,
-};
diff --git a/arch/powerpc/oprofile/op_model_cell.c b/arch/powerpc/oprofile/op_model_cell.c
deleted file mode 100644
index 7eb73070b7be..000000000000
--- a/arch/powerpc/oprofile/op_model_cell.c
+++ /dev/null
@@ -1,1709 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Cell Broadband Engine OProfile Support
- *
- * (C) Copyright IBM Corporation 2006
- *
- * Author: David Erb (djerb@us.ibm.com)
- * Modifications:
- * Carl Love <carll@us.ibm.com>
- * Maynard Johnson <maynardj@us.ibm.com>
- */
-
-#include <linux/cpufreq.h>
-#include <linux/delay.h>
-#include <linux/jiffies.h>
-#include <linux/kthread.h>
-#include <linux/oprofile.h>
-#include <linux/percpu.h>
-#include <linux/smp.h>
-#include <linux/spinlock.h>
-#include <linux/timer.h>
-#include <asm/cell-pmu.h>
-#include <asm/cputable.h>
-#include <asm/firmware.h>
-#include <asm/io.h>
-#include <asm/oprofile_impl.h>
-#include <asm/processor.h>
-#include <asm/prom.h>
-#include <asm/ptrace.h>
-#include <asm/reg.h>
-#include <asm/rtas.h>
-#include <asm/cell-regs.h>
-
-#include "../platforms/cell/interrupt.h"
-#include "cell/pr_util.h"
-
-#define PPU_PROFILING 0
-#define SPU_PROFILING_CYCLES 1
-#define SPU_PROFILING_EVENTS 2
-
-#define SPU_EVENT_NUM_START 4100
-#define SPU_EVENT_NUM_STOP 4399
-#define SPU_PROFILE_EVENT_ADDR 4363 /* spu, address trace, decimal */
-#define SPU_PROFILE_EVENT_ADDR_MASK_A 0x146 /* sub unit set to zero */
-#define SPU_PROFILE_EVENT_ADDR_MASK_B 0x186 /* sub unit set to zero */
-
-#define NUM_SPUS_PER_NODE 8
-#define SPU_CYCLES_EVENT_NUM 2 /* event number for SPU_CYCLES */
-
-#define PPU_CYCLES_EVENT_NUM 1 /* event number for CYCLES */
-#define PPU_CYCLES_GRP_NUM 1 /* special group number for identifying
- * PPU_CYCLES event
- */
-#define CBE_COUNT_ALL_CYCLES 0x42800000 /* PPU cycle event specifier */
-
-#define NUM_THREADS 2 /* number of physical threads in
- * physical processor
- */
-#define NUM_DEBUG_BUS_WORDS 4
-#define NUM_INPUT_BUS_WORDS 2
-
-#define MAX_SPU_COUNT 0xFFFFFF /* maximum 24 bit LFSR value */
-
-/* Minimum HW interval timer setting to send a value to the trace buffer is 10 cycles.
- * To configure counter to send value every N cycles set counter to
- * 2^32 - 1 - N.
- */
-#define NUM_INTERVAL_CYC 0xFFFFFFFF - 10
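For the minimum supported interval of 10 cycles, the value programmed into pm_interval works out to 0xFFFFFFF5. A trivial host-side check of the 2^32 - 1 - N formula (illustration only):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t n = 10;	/* send a value to the trace buffer every N cycles */
	uint32_t pm_interval = 0xFFFFFFFFUL - n;	/* 2^32 - 1 - N */

	printf("pm_interval = 0x%08X\n", (unsigned)pm_interval);	/* 0xFFFFFFF5 */
	return 0;
}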
-
-/*
- * spu_cycle_reset is the number of cycles between samples.
- * This variable is used for SPU profiling and should ONLY be set
- * at the beginning of cell_reg_setup; otherwise, it's read-only.
- */
-static unsigned int spu_cycle_reset;
-static unsigned int profiling_mode;
-static int spu_evnt_phys_spu_indx;
-
-struct pmc_cntrl_data {
- unsigned long vcntr;
- unsigned long evnts;
- unsigned long masks;
- unsigned long enabled;
-};
-
-/*
- * ibm,cbe-perftools rtas parameters
- */
-struct pm_signal {
- u16 cpu; /* Processor to modify */
- u16 sub_unit; /* hw subunit this applies to (if applicable)*/
- short int signal_group; /* Signal Group to Enable/Disable */
- u8 bus_word; /* Enable/Disable on this Trace/Trigger/Event
- * Bus Word(s) (bitmask)
- */
- u8 bit; /* Trigger/Event bit (if applicable) */
-};
-
-/*
- * rtas call arguments
- */
-enum {
- SUBFUNC_RESET = 1,
- SUBFUNC_ACTIVATE = 2,
- SUBFUNC_DEACTIVATE = 3,
-
- PASSTHRU_IGNORE = 0,
- PASSTHRU_ENABLE = 1,
- PASSTHRU_DISABLE = 2,
-};
-
-struct pm_cntrl {
- u16 enable;
- u16 stop_at_max;
- u16 trace_mode;
- u16 freeze;
- u16 count_mode;
- u16 spu_addr_trace;
- u8 trace_buf_ovflw;
-};
-
-static struct {
- u32 group_control;
- u32 debug_bus_control;
- struct pm_cntrl pm_cntrl;
- u32 pm07_cntrl[NR_PHYS_CTRS];
-} pm_regs;
-
-#define GET_SUB_UNIT(x) ((x & 0x0000f000) >> 12)
-#define GET_BUS_WORD(x) ((x & 0x000000f0) >> 4)
-#define GET_BUS_TYPE(x) ((x & 0x00000300) >> 8)
-#define GET_POLARITY(x) ((x & 0x00000002) >> 1)
-#define GET_COUNT_CYCLES(x) (x & 0x00000001)
-#define GET_INPUT_CONTROL(x) ((x & 0x00000004) >> 2)
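The unit_mask handed down from user space is a packed field; the macros above slice out the sub unit, bus word, bus type, polarity, count-cycles and input-control bits. A standalone decode of one made-up mask value (the mask itself is hypothetical, not taken from a real event file):

#include <stdio.h>

#define GET_SUB_UNIT(x)		((x & 0x0000f000) >> 12)
#define GET_BUS_WORD(x)		((x & 0x000000f0) >> 4)
#define GET_BUS_TYPE(x)		((x & 0x00000300) >> 8)
#define GET_POLARITY(x)		((x & 0x00000002) >> 1)
#define GET_COUNT_CYCLES(x)	(x & 0x00000001)
#define GET_INPUT_CONTROL(x)	((x & 0x00000004) >> 2)

int main(void)
{
	unsigned int unit_mask = 0x2146;	/* hypothetical mask for illustration */

	printf("sub_unit=%u bus_word=%u bus_type=%u polarity=%u count_cycles=%u input_control=%u\n",
	       GET_SUB_UNIT(unit_mask), GET_BUS_WORD(unit_mask), GET_BUS_TYPE(unit_mask),
	       GET_POLARITY(unit_mask), GET_COUNT_CYCLES(unit_mask), GET_INPUT_CONTROL(unit_mask));
	return 0;
}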
-
-static DEFINE_PER_CPU(unsigned long[NR_PHYS_CTRS], pmc_values);
-static unsigned long spu_pm_cnt[MAX_NUMNODES * NUM_SPUS_PER_NODE];
-static struct pmc_cntrl_data pmc_cntrl[NUM_THREADS][NR_PHYS_CTRS];
-
-/*
- * The CELL profiling code makes rtas calls to setup the debug bus to
- * route the performance signals. Additionally, SPU profiling requires
- * a second rtas call to setup the hardware to capture the SPU PCs.
- * The EIO error value is returned if the token lookups or the rtas
- * call fail. The EIO error number is the best choice of the existing
- * error numbers.  The probability of an rtas-related error is very low.  But
- * by returning EIO and printing additional information to dmesg, the user
- * will know that OProfile did not start, and dmesg will tell them why.
- * OProfile does not support returning errors on Stop.  Not a huge issue
- * since failure to reset the debug bus or stop the SPU PC collection is
- * not a fatal issue.  Chances are if Stop failed, Start doesn't work
- * either.
- */
-
-/*
- * Interpretation of hdw_thread:
- * 0 - even virtual cpus 0, 2, 4,...
- * 1 - odd virtual cpus 1, 3, 5, ...
- *
- * FIXME: this is strictly wrong, we need to clean this up in a number
- * of places. It works for now. -arnd
- */
-static u32 hdw_thread;
-
-static u32 virt_cntr_inter_mask;
-static struct timer_list timer_virt_cntr;
-static struct timer_list timer_spu_event_swap;
-
-/*
- * pm_signal needs to be global since it is initialized in
- * cell_reg_setup at the time when the necessary information
- * is available.
- */
-static struct pm_signal pm_signal[NR_PHYS_CTRS];
-static int pm_rtas_token; /* token for debug bus setup call */
-static int spu_rtas_token; /* token for SPU cycle profiling */
-
-static u32 reset_value[NR_PHYS_CTRS];
-static int num_counters;
-static int oprofile_running;
-static DEFINE_SPINLOCK(cntr_lock);
-
-static u32 ctr_enabled;
-
-static unsigned char input_bus[NUM_INPUT_BUS_WORDS];
-
-/*
- * Firmware interface functions
- */
-static int
-rtas_ibm_cbe_perftools(int subfunc, int passthru,
- void *address, unsigned long length)
-{
- u64 paddr = __pa(address);
-
- return rtas_call(pm_rtas_token, 5, 1, NULL, subfunc,
- passthru, paddr >> 32, paddr & 0xffffffff, length);
-}
-
-static void pm_rtas_reset_signals(u32 node)
-{
- int ret;
- struct pm_signal pm_signal_local;
-
- /*
- * The debug bus is being set to the passthru disable state.
- * However, the FW still expects at least one legal signal routing
- * entry or it will return an error on the arguments. If we don't
- * supply a valid entry, we must ignore all return values. Ignoring
- * all return values means we might miss an error we should be
- * concerned about.
- */
-
- /* fw expects physical cpu #. */
- pm_signal_local.cpu = node;
- pm_signal_local.signal_group = 21;
- pm_signal_local.bus_word = 1;
- pm_signal_local.sub_unit = 0;
- pm_signal_local.bit = 0;
-
- ret = rtas_ibm_cbe_perftools(SUBFUNC_RESET, PASSTHRU_DISABLE,
- &pm_signal_local,
- sizeof(struct pm_signal));
-
- if (unlikely(ret))
- /*
- * Not a fatal error. For Oprofile stop, the oprofile
- * functions do not support returning an error for
- * failure to stop OProfile.
- */
- printk(KERN_WARNING "%s: rtas returned: %d\n",
- __func__, ret);
-}
-
-static int pm_rtas_activate_signals(u32 node, u32 count)
-{
- int ret;
- int i, j;
- struct pm_signal pm_signal_local[NR_PHYS_CTRS];
-
- /*
- * There is no debug setup required for the cycles event.
- * Note that only events in the same group can be used.
- * Otherwise, there will be conflicts in correctly routing
- * the signals on the debug bus. It is the responsibility
- * of the OProfile user tool to check the events are in
- * the same group.
- */
- i = 0;
- for (j = 0; j < count; j++) {
- if (pm_signal[j].signal_group != PPU_CYCLES_GRP_NUM) {
-
- /* fw expects physical cpu # */
- pm_signal_local[i].cpu = node;
- pm_signal_local[i].signal_group
- = pm_signal[j].signal_group;
- pm_signal_local[i].bus_word = pm_signal[j].bus_word;
- pm_signal_local[i].sub_unit = pm_signal[j].sub_unit;
- pm_signal_local[i].bit = pm_signal[j].bit;
- i++;
- }
- }
-
- if (i != 0) {
- ret = rtas_ibm_cbe_perftools(SUBFUNC_ACTIVATE, PASSTHRU_ENABLE,
- pm_signal_local,
- i * sizeof(struct pm_signal));
-
- if (unlikely(ret)) {
- printk(KERN_WARNING "%s: rtas returned: %d\n",
- __func__, ret);
- return -EIO;
- }
- }
-
- return 0;
-}
-
-/*
- * PM Signal functions
- */
-static void set_pm_event(u32 ctr, int event, u32 unit_mask)
-{
- struct pm_signal *p;
- u32 signal_bit;
- u32 bus_word, bus_type, count_cycles, polarity, input_control;
- int j, i;
-
- if (event == PPU_CYCLES_EVENT_NUM) {
- /* Special Event: Count all cpu cycles */
- pm_regs.pm07_cntrl[ctr] = CBE_COUNT_ALL_CYCLES;
- p = &(pm_signal[ctr]);
- p->signal_group = PPU_CYCLES_GRP_NUM;
- p->bus_word = 1;
- p->sub_unit = 0;
- p->bit = 0;
- goto out;
- } else {
- pm_regs.pm07_cntrl[ctr] = 0;
- }
-
- bus_word = GET_BUS_WORD(unit_mask);
- bus_type = GET_BUS_TYPE(unit_mask);
- count_cycles = GET_COUNT_CYCLES(unit_mask);
- polarity = GET_POLARITY(unit_mask);
- input_control = GET_INPUT_CONTROL(unit_mask);
- signal_bit = (event % 100);
-
- p = &(pm_signal[ctr]);
-
- p->signal_group = event / 100;
- p->bus_word = bus_word;
- p->sub_unit = GET_SUB_UNIT(unit_mask);
-
- pm_regs.pm07_cntrl[ctr] = 0;
- pm_regs.pm07_cntrl[ctr] |= PM07_CTR_COUNT_CYCLES(count_cycles);
- pm_regs.pm07_cntrl[ctr] |= PM07_CTR_POLARITY(polarity);
- pm_regs.pm07_cntrl[ctr] |= PM07_CTR_INPUT_CONTROL(input_control);
-
- /*
-	 * Some of the islands' signal selection is based on 64-bit words.
- * The debug bus words are 32 bits, the input words to the performance
- * counters are defined as 32 bits. Need to convert the 64 bit island
- * specification to the appropriate 32 input bit and bus word for the
- * performance counter event selection. See the CELL Performance
- * monitoring signals manual and the Perf cntr hardware descriptions
- * for the details.
- */
- if (input_control == 0) {
- if (signal_bit > 31) {
- signal_bit -= 32;
- if (bus_word == 0x3)
- bus_word = 0x2;
- else if (bus_word == 0xc)
- bus_word = 0x8;
- }
-
- if ((bus_type == 0) && p->signal_group >= 60)
- bus_type = 2;
- if ((bus_type == 1) && p->signal_group >= 50)
- bus_type = 0;
-
- pm_regs.pm07_cntrl[ctr] |= PM07_CTR_INPUT_MUX(signal_bit);
- } else {
- pm_regs.pm07_cntrl[ctr] = 0;
- p->bit = signal_bit;
- }
-
- for (i = 0; i < NUM_DEBUG_BUS_WORDS; i++) {
- if (bus_word & (1 << i)) {
- pm_regs.debug_bus_control |=
- (bus_type << (30 - (2 * i)));
-
- for (j = 0; j < NUM_INPUT_BUS_WORDS; j++) {
- if (input_bus[j] == 0xff) {
- input_bus[j] = i;
- pm_regs.group_control |=
- (i << (30 - (2 * j)));
-
- break;
- }
- }
- }
- }
-out:
- ;
-}
-
-static void write_pm_cntrl(int cpu)
-{
- /*
- * Oprofile will use 32 bit counters, set bits 7:10 to 0
- * pmregs.pm_cntrl is a global
- */
-
- u32 val = 0;
- if (pm_regs.pm_cntrl.enable == 1)
- val |= CBE_PM_ENABLE_PERF_MON;
-
- if (pm_regs.pm_cntrl.stop_at_max == 1)
- val |= CBE_PM_STOP_AT_MAX;
-
- if (pm_regs.pm_cntrl.trace_mode != 0)
- val |= CBE_PM_TRACE_MODE_SET(pm_regs.pm_cntrl.trace_mode);
-
- if (pm_regs.pm_cntrl.trace_buf_ovflw == 1)
- val |= CBE_PM_TRACE_BUF_OVFLW(pm_regs.pm_cntrl.trace_buf_ovflw);
- if (pm_regs.pm_cntrl.freeze == 1)
- val |= CBE_PM_FREEZE_ALL_CTRS;
-
- val |= CBE_PM_SPU_ADDR_TRACE_SET(pm_regs.pm_cntrl.spu_addr_trace);
-
- /*
- * Routine set_count_mode must be called previously to set
- * the count mode based on the user selection of user and kernel.
- */
- val |= CBE_PM_COUNT_MODE_SET(pm_regs.pm_cntrl.count_mode);
- cbe_write_pm(cpu, pm_control, val);
-}
-
-static inline void
-set_count_mode(u32 kernel, u32 user)
-{
- /*
- * The user must specify user and kernel if they want them. If
- * neither is specified, OProfile will count in hypervisor mode.
- * pm_regs.pm_cntrl is a global
- */
- if (kernel) {
- if (user)
- pm_regs.pm_cntrl.count_mode = CBE_COUNT_ALL_MODES;
- else
- pm_regs.pm_cntrl.count_mode =
- CBE_COUNT_SUPERVISOR_MODE;
- } else {
- if (user)
- pm_regs.pm_cntrl.count_mode = CBE_COUNT_PROBLEM_MODE;
- else
- pm_regs.pm_cntrl.count_mode =
- CBE_COUNT_HYPERVISOR_MODE;
- }
-}
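The mapping from the user's kernel/user selections to the hardware count mode in set_count_mode() above is a four-way choice. A compact restatement in plain C, with placeholder constants standing in for the CBE_COUNT_* values:

#include <stdio.h>

enum count_mode { ALL_MODES, SUPERVISOR_MODE, PROBLEM_MODE, HYPERVISOR_MODE };

/* Placeholder for the CBE_COUNT_* selection done in set_count_mode(). */
static enum count_mode pick_mode(int kernel, int user)
{
	if (kernel)
		return user ? ALL_MODES : SUPERVISOR_MODE;
	return user ? PROBLEM_MODE : HYPERVISOR_MODE;
}

int main(void)
{
	static const char *names[] = { "all", "supervisor", "problem", "hypervisor" };
	int k, u;

	for (k = 0; k <= 1; k++)
		for (u = 0; u <= 1; u++)
			printf("kernel=%d user=%d -> %s mode\n", k, u, names[pick_mode(k, u)]);
	return 0;
}

If neither kernel nor user counting is requested, the counters are left counting hypervisor mode only, which is what the comment in set_count_mode() warns about.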
-
-static inline void enable_ctr(u32 cpu, u32 ctr, u32 *pm07_cntrl)
-{
-
- pm07_cntrl[ctr] |= CBE_PM_CTR_ENABLE;
- cbe_write_pm07_control(cpu, ctr, pm07_cntrl[ctr]);
-}
-
-/*
- * Oprofile is expected to collect data on all CPUs simultaneously.
- * However, there is one set of performance counters per node. There are
- * two hardware threads or virtual CPUs on each node. Hence, OProfile must
- * multiplex in time the performance counter collection on the two virtual
- * CPUs. The multiplexing of the performance counters is done by this
- * virtual counter routine.
- *
- * The pmc_values used below is defined as 'per-cpu' but its use is
- * more akin to 'per-node'. We need to store two sets of counter
- * values per node -- one for the previous run and one for the next.
- * The per-cpu[NR_PHYS_CTRS] gives us the storage we need. Each odd/even
- * pair of per-cpu arrays is used for storing the previous and next
- * pmc values for a given node.
- * NOTE: We use the per-cpu variable to improve cache performance.
- *
- * This routine will alternate loading the virtual counters for
- * virtual CPUs
- */
-static void cell_virtual_cntr(struct timer_list *unused)
-{
- int i, prev_hdw_thread, next_hdw_thread;
- u32 cpu;
- unsigned long flags;
-
- /*
-	 * Make sure that the interrupt handler and the virt counter are
- * not both playing with the counters on the same node.
- */
-
- spin_lock_irqsave(&cntr_lock, flags);
-
- prev_hdw_thread = hdw_thread;
-
- /* switch the cpu handling the interrupts */
- hdw_thread = 1 ^ hdw_thread;
- next_hdw_thread = hdw_thread;
-
- pm_regs.group_control = 0;
- pm_regs.debug_bus_control = 0;
-
- for (i = 0; i < NUM_INPUT_BUS_WORDS; i++)
- input_bus[i] = 0xff;
-
- /*
-	 * There are some per-thread events.  Must do the
-	 * set event for the thread that is being started.
- */
- for (i = 0; i < num_counters; i++)
- set_pm_event(i,
- pmc_cntrl[next_hdw_thread][i].evnts,
- pmc_cntrl[next_hdw_thread][i].masks);
-
- /*
- * The following is done only once per each node, but
- * we need cpu #, not node #, to pass to the cbe_xxx functions.
- */
- for_each_online_cpu(cpu) {
- if (cbe_get_hw_thread_id(cpu))
- continue;
-
- /*
- * stop counters, save counter values, restore counts
- * for previous thread
- */
- cbe_disable_pm(cpu);
- cbe_disable_pm_interrupts(cpu);
- for (i = 0; i < num_counters; i++) {
- per_cpu(pmc_values, cpu + prev_hdw_thread)[i]
- = cbe_read_ctr(cpu, i);
-
- if (per_cpu(pmc_values, cpu + next_hdw_thread)[i]
- == 0xFFFFFFFF)
- /* If the cntr value is 0xffffffff, we must
- * reset that to 0xfffffff0 when the current
- * thread is restarted. This will generate a
- * new interrupt and make sure that we never
- * restore the counters to the max value. If
- * the counters were restored to the max value,
- * they do not increment and no interrupts are
- * generated. Hence no more samples will be
- * collected on that cpu.
- */
- cbe_write_ctr(cpu, i, 0xFFFFFFF0);
- else
- cbe_write_ctr(cpu, i,
- per_cpu(pmc_values,
- cpu +
- next_hdw_thread)[i]);
- }
-
- /*
- * Switch to the other thread. Change the interrupt
- * and control regs to be scheduled on the CPU
- * corresponding to the thread to execute.
- */
- for (i = 0; i < num_counters; i++) {
- if (pmc_cntrl[next_hdw_thread][i].enabled) {
- /*
- * There are some per thread events.
- * Must do the set event, enable_cntr
- * for each cpu.
- */
- enable_ctr(cpu, i,
- pm_regs.pm07_cntrl);
- } else {
- cbe_write_pm07_control(cpu, i, 0);
- }
- }
-
- /* Enable interrupts on the CPU thread that is starting */
- cbe_enable_pm_interrupts(cpu, next_hdw_thread,
- virt_cntr_inter_mask);
- cbe_enable_pm(cpu);
- }
-
- spin_unlock_irqrestore(&cntr_lock, flags);
-
- mod_timer(&timer_virt_cntr, jiffies + HZ / 10);
-}
-
-static void start_virt_cntrs(void)
-{
- timer_setup(&timer_virt_cntr, cell_virtual_cntr, 0);
- timer_virt_cntr.expires = jiffies + HZ / 10;
- add_timer(&timer_virt_cntr);
-}
-
-static int cell_reg_setup_spu_cycles(struct op_counter_config *ctr,
- struct op_system_config *sys, int num_ctrs)
-{
- spu_cycle_reset = ctr[0].count;
-
- /*
- * Each node will need to make the rtas call to start
- * and stop SPU profiling. Get the token once and store it.
- */
- spu_rtas_token = rtas_token("ibm,cbe-spu-perftools");
-
- if (unlikely(spu_rtas_token == RTAS_UNKNOWN_SERVICE)) {
- printk(KERN_ERR
- "%s: rtas token ibm,cbe-spu-perftools unknown\n",
- __func__);
- return -EIO;
- }
- return 0;
-}
-
-/* Unfortunately, the hardware will only support event profiling
- * on one SPU per node at a time. Therefore, we must time slice
- * the profiling across all SPUs in the node. Note, we do this
- * in parallel for each node. The following routine is called
- * periodically, based on a kernel timer, to switch which SPU is
- * being monitored in a round-robin fashion.
- */
-static void spu_evnt_swap(struct timer_list *unused)
-{
- int node;
- int cur_phys_spu, nxt_phys_spu, cur_spu_evnt_phys_spu_indx;
- unsigned long flags;
- int cpu;
- int ret;
- u32 interrupt_mask;
-
-
- /* enable interrupts on cntr 0 */
- interrupt_mask = CBE_PM_CTR_OVERFLOW_INTR(0);
-
- hdw_thread = 0;
-
- /* Make sure spu event interrupt handler and spu event swap
- * don't access the counters simultaneously.
- */
- spin_lock_irqsave(&cntr_lock, flags);
-
- cur_spu_evnt_phys_spu_indx = spu_evnt_phys_spu_indx;
-
- if (++(spu_evnt_phys_spu_indx) == NUM_SPUS_PER_NODE)
- spu_evnt_phys_spu_indx = 0;
-
- pm_signal[0].sub_unit = spu_evnt_phys_spu_indx;
- pm_signal[1].sub_unit = spu_evnt_phys_spu_indx;
- pm_signal[2].sub_unit = spu_evnt_phys_spu_indx;
-
- /* switch the SPU being profiled on each node */
- for_each_online_cpu(cpu) {
- if (cbe_get_hw_thread_id(cpu))
- continue;
-
- node = cbe_cpu_to_node(cpu);
- cur_phys_spu = (node * NUM_SPUS_PER_NODE)
- + cur_spu_evnt_phys_spu_indx;
- nxt_phys_spu = (node * NUM_SPUS_PER_NODE)
- + spu_evnt_phys_spu_indx;
-
- /*
- * stop counters, save counter values, restore counts
- * for previous physical SPU
- */
- cbe_disable_pm(cpu);
- cbe_disable_pm_interrupts(cpu);
-
- spu_pm_cnt[cur_phys_spu]
- = cbe_read_ctr(cpu, 0);
-
- /* restore previous count for the next spu to sample */
- /* NOTE, hardware issue, counter will not start if the
- * counter value is at max (0xFFFFFFFF).
- */
- if (spu_pm_cnt[nxt_phys_spu] >= 0xFFFFFFFF)
- cbe_write_ctr(cpu, 0, 0xFFFFFFF0);
- else
- cbe_write_ctr(cpu, 0, spu_pm_cnt[nxt_phys_spu]);
-
- pm_rtas_reset_signals(cbe_cpu_to_node(cpu));
-
-		/* setup the debug bus to measure the one event and
-		 * the two events that route the next SPU's PC onto
- * the debug bus
- */
- ret = pm_rtas_activate_signals(cbe_cpu_to_node(cpu), 3);
- if (ret)
- printk(KERN_ERR "%s: pm_rtas_activate_signals failed, "
- "SPU event swap\n", __func__);
-
- /* clear the trace buffer, don't want to take PC for
- * previous SPU*/
- cbe_write_pm(cpu, trace_address, 0);
-
- enable_ctr(cpu, 0, pm_regs.pm07_cntrl);
-
- /* Enable interrupts on the CPU thread that is starting */
- cbe_enable_pm_interrupts(cpu, hdw_thread,
- interrupt_mask);
- cbe_enable_pm(cpu);
- }
-
- spin_unlock_irqrestore(&cntr_lock, flags);
-
-	/* swap again in HZ/25 jiffies (roughly every 40 ms) */
- mod_timer(&timer_spu_event_swap, jiffies + HZ / 25);
-}
-
-static void start_spu_event_swap(void)
-{
- timer_setup(&timer_spu_event_swap, spu_evnt_swap, 0);
- timer_spu_event_swap.expires = jiffies + HZ / 25;
- add_timer(&timer_spu_event_swap);
-}
-
-static int cell_reg_setup_spu_events(struct op_counter_config *ctr,
- struct op_system_config *sys, int num_ctrs)
-{
- int i;
-
- /* routine is called once for all nodes */
-
- spu_evnt_phys_spu_indx = 0;
- /*
- * For all events except PPU CYCLEs, each node will need to make
- * the rtas cbe-perftools call to setup and reset the debug bus.
- * Make the token lookup call once and store it in the global
- * variable pm_rtas_token.
- */
- pm_rtas_token = rtas_token("ibm,cbe-perftools");
-
- if (unlikely(pm_rtas_token == RTAS_UNKNOWN_SERVICE)) {
- printk(KERN_ERR
- "%s: rtas token ibm,cbe-perftools unknown\n",
- __func__);
- return -EIO;
- }
-
- /* setup the pm_control register settings,
- * settings will be written per node by the
- * cell_cpu_setup() function.
- */
- pm_regs.pm_cntrl.trace_buf_ovflw = 1;
-
- /* Use the occurrence trace mode to have SPU PC saved
- * to the trace buffer. Occurrence data in trace buffer
- * is not used. Bit 2 must be set to store SPU addresses.
- */
- pm_regs.pm_cntrl.trace_mode = 2;
-
- pm_regs.pm_cntrl.spu_addr_trace = 0x1; /* using debug bus
- event 2 & 3 */
-
- /* setup the debug bus event array with the SPU PC routing events.
- * Note, pm_signal[0] will be filled in by set_pm_event() call below.
- */
- pm_signal[1].signal_group = SPU_PROFILE_EVENT_ADDR / 100;
- pm_signal[1].bus_word = GET_BUS_WORD(SPU_PROFILE_EVENT_ADDR_MASK_A);
- pm_signal[1].bit = SPU_PROFILE_EVENT_ADDR % 100;
- pm_signal[1].sub_unit = spu_evnt_phys_spu_indx;
-
- pm_signal[2].signal_group = SPU_PROFILE_EVENT_ADDR / 100;
- pm_signal[2].bus_word = GET_BUS_WORD(SPU_PROFILE_EVENT_ADDR_MASK_B);
- pm_signal[2].bit = SPU_PROFILE_EVENT_ADDR % 100;
- pm_signal[2].sub_unit = spu_evnt_phys_spu_indx;
-
- /* Set the user selected spu event to profile on,
- * note, only one SPU profiling event is supported
- */
- num_counters = 1; /* Only support one SPU event at a time */
- set_pm_event(0, ctr[0].event, ctr[0].unit_mask);
-
- reset_value[0] = 0xFFFFFFFF - ctr[0].count;
-
- /* global, used by cell_cpu_setup */
- ctr_enabled |= 1;
-
- /* Initialize the count for each SPU to the reset value */
- for (i=0; i < MAX_NUMNODES * NUM_SPUS_PER_NODE; i++)
- spu_pm_cnt[i] = reset_value[0];
-
- return 0;
-}
-
-static int cell_reg_setup_ppu(struct op_counter_config *ctr,
- struct op_system_config *sys, int num_ctrs)
-{
- /* routine is called once for all nodes */
- int i, j, cpu;
-
- num_counters = num_ctrs;
-
- if (unlikely(num_ctrs > NR_PHYS_CTRS)) {
- printk(KERN_ERR
- "%s: Oprofile, number of specified events " \
- "exceeds number of physical counters\n",
- __func__);
- return -EIO;
- }
-
- set_count_mode(sys->enable_kernel, sys->enable_user);
-
- /* Setup the thread 0 events */
- for (i = 0; i < num_ctrs; ++i) {
-
- pmc_cntrl[0][i].evnts = ctr[i].event;
- pmc_cntrl[0][i].masks = ctr[i].unit_mask;
- pmc_cntrl[0][i].enabled = ctr[i].enabled;
- pmc_cntrl[0][i].vcntr = i;
-
- for_each_possible_cpu(j)
- per_cpu(pmc_values, j)[i] = 0;
- }
-
- /*
- * Setup the thread 1 events, map the thread 0 event to the
- * equivalent thread 1 event.
- */
- for (i = 0; i < num_ctrs; ++i) {
- if ((ctr[i].event >= 2100) && (ctr[i].event <= 2111))
- pmc_cntrl[1][i].evnts = ctr[i].event + 19;
- else if (ctr[i].event == 2203)
- pmc_cntrl[1][i].evnts = ctr[i].event;
- else if ((ctr[i].event >= 2200) && (ctr[i].event <= 2215))
- pmc_cntrl[1][i].evnts = ctr[i].event + 16;
- else
- pmc_cntrl[1][i].evnts = ctr[i].event;
-
- pmc_cntrl[1][i].masks = ctr[i].unit_mask;
- pmc_cntrl[1][i].enabled = ctr[i].enabled;
- pmc_cntrl[1][i].vcntr = i;
- }
-
- for (i = 0; i < NUM_INPUT_BUS_WORDS; i++)
- input_bus[i] = 0xff;
-
- /*
-	 * Our counters count up, and "count" refers to how many
-	 * events occur before the next interrupt; we interrupt
-	 * on overflow.  So we calculate the starting value
-	 * which will give us "count" events until overflow.
- * Then we set the events on the enabled counters.
- */
- for (i = 0; i < num_counters; ++i) {
- /* start with virtual counter set 0 */
- if (pmc_cntrl[0][i].enabled) {
- /* Using 32bit counters, reset max - count */
- reset_value[i] = 0xFFFFFFFF - ctr[i].count;
- set_pm_event(i,
- pmc_cntrl[0][i].evnts,
- pmc_cntrl[0][i].masks);
-
- /* global, used by cell_cpu_setup */
- ctr_enabled |= (1 << i);
- }
- }
-
- /* initialize the previous counts for the virtual cntrs */
- for_each_online_cpu(cpu)
- for (i = 0; i < num_counters; ++i) {
- per_cpu(pmc_values, cpu)[i] = reset_value[i];
- }
-
- return 0;
-}
-
-
-/* This function is called once for all cpus combined */
-static int cell_reg_setup(struct op_counter_config *ctr,
- struct op_system_config *sys, int num_ctrs)
-{
- int ret=0;
- spu_cycle_reset = 0;
-
-	/* initialize the spu_addr_trace value, will be reset if
- * doing spu event profiling.
- */
- pm_regs.group_control = 0;
- pm_regs.debug_bus_control = 0;
- pm_regs.pm_cntrl.stop_at_max = 1;
- pm_regs.pm_cntrl.trace_mode = 0;
- pm_regs.pm_cntrl.freeze = 1;
- pm_regs.pm_cntrl.trace_buf_ovflw = 0;
- pm_regs.pm_cntrl.spu_addr_trace = 0;
-
- /*
- * For all events except PPU CYCLEs, each node will need to make
- * the rtas cbe-perftools call to setup and reset the debug bus.
- * Make the token lookup call once and store it in the global
- * variable pm_rtas_token.
- */
- pm_rtas_token = rtas_token("ibm,cbe-perftools");
-
- if (unlikely(pm_rtas_token == RTAS_UNKNOWN_SERVICE)) {
- printk(KERN_ERR
- "%s: rtas token ibm,cbe-perftools unknown\n",
- __func__);
- return -EIO;
- }
-
- if (ctr[0].event == SPU_CYCLES_EVENT_NUM) {
- profiling_mode = SPU_PROFILING_CYCLES;
- ret = cell_reg_setup_spu_cycles(ctr, sys, num_ctrs);
- } else if ((ctr[0].event >= SPU_EVENT_NUM_START) &&
- (ctr[0].event <= SPU_EVENT_NUM_STOP)) {
- profiling_mode = SPU_PROFILING_EVENTS;
- spu_cycle_reset = ctr[0].count;
-
- /* for SPU event profiling, need to setup the
- * pm_signal array with the events to route the
- * SPU PC before making the FW call. Note, only
- * one SPU event for profiling can be specified
- * at a time.
- */
- cell_reg_setup_spu_events(ctr, sys, num_ctrs);
- } else {
- profiling_mode = PPU_PROFILING;
- ret = cell_reg_setup_ppu(ctr, sys, num_ctrs);
- }
-
- return ret;
-}
-
-
-
-/* This function is called once for each cpu */
-static int cell_cpu_setup(struct op_counter_config *cntr)
-{
- u32 cpu = smp_processor_id();
- u32 num_enabled = 0;
- int i;
- int ret;
-
- /* Cycle based SPU profiling does not use the performance
- * counters. The trace array is configured to collect
- * the data.
- */
- if (profiling_mode == SPU_PROFILING_CYCLES)
- return 0;
-
- /* There is one performance monitor per processor chip (i.e. node),
- * so we only need to perform this function once per node.
- */
- if (cbe_get_hw_thread_id(cpu))
- return 0;
-
- /* Stop all counters */
- cbe_disable_pm(cpu);
- cbe_disable_pm_interrupts(cpu);
-
- cbe_write_pm(cpu, pm_start_stop, 0);
- cbe_write_pm(cpu, group_control, pm_regs.group_control);
- cbe_write_pm(cpu, debug_bus_control, pm_regs.debug_bus_control);
- write_pm_cntrl(cpu);
-
- for (i = 0; i < num_counters; ++i) {
- if (ctr_enabled & (1 << i)) {
- pm_signal[num_enabled].cpu = cbe_cpu_to_node(cpu);
- num_enabled++;
- }
- }
-
- /*
- * The pm_rtas_activate_signals will return -EIO if the FW
- * call failed.
- */
- if (profiling_mode == SPU_PROFILING_EVENTS) {
- /* For SPU event profiling also need to setup the
- * pm interval timer
- */
- ret = pm_rtas_activate_signals(cbe_cpu_to_node(cpu),
- num_enabled+2);
- /* store PC from debug bus to Trace buffer as often
- * as possible (every 10 cycles)
- */
- cbe_write_pm(cpu, pm_interval, NUM_INTERVAL_CYC);
- return ret;
- } else
- return pm_rtas_activate_signals(cbe_cpu_to_node(cpu),
- num_enabled);
-}
-
-#define ENTRIES 303
-#define MAXLFSR 0xFFFFFF
-
-/* precomputed table of 24 bit LFSR values */
-static int initial_lfsr[] = {
- 8221349, 12579195, 5379618, 10097839, 7512963, 7519310, 3955098, 10753424,
- 15507573, 7458917, 285419, 2641121, 9780088, 3915503, 6668768, 1548716,
- 4885000, 8774424, 9650099, 2044357, 2304411, 9326253, 10332526, 4421547,
- 3440748, 10179459, 13332843, 10375561, 1313462, 8375100, 5198480, 6071392,
- 9341783, 1526887, 3985002, 1439429, 13923762, 7010104, 11969769, 4547026,
- 2040072, 4025602, 3437678, 7939992, 11444177, 4496094, 9803157, 10745556,
- 3671780, 4257846, 5662259, 13196905, 3237343, 12077182, 16222879, 7587769,
- 14706824, 2184640, 12591135, 10420257, 7406075, 3648978, 11042541, 15906893,
- 11914928, 4732944, 10695697, 12928164, 11980531, 4430912, 11939291, 2917017,
- 6119256, 4172004, 9373765, 8410071, 14788383, 5047459, 5474428, 1737756,
- 15967514, 13351758, 6691285, 8034329, 2856544, 14394753, 11310160, 12149558,
- 7487528, 7542781, 15668898, 12525138, 12790975, 3707933, 9106617, 1965401,
- 16219109, 12801644, 2443203, 4909502, 8762329, 3120803, 6360315, 9309720,
- 15164599, 10844842, 4456529, 6667610, 14924259, 884312, 6234963, 3326042,
- 15973422, 13919464, 5272099, 6414643, 3909029, 2764324, 5237926, 4774955,
- 10445906, 4955302, 5203726, 10798229, 11443419, 2303395, 333836, 9646934,
- 3464726, 4159182, 568492, 995747, 10318756, 13299332, 4836017, 8237783,
- 3878992, 2581665, 11394667, 5672745, 14412947, 3159169, 9094251, 16467278,
- 8671392, 15230076, 4843545, 7009238, 15504095, 1494895, 9627886, 14485051,
- 8304291, 252817, 12421642, 16085736, 4774072, 2456177, 4160695, 15409741,
- 4902868, 5793091, 13162925, 16039714, 782255, 11347835, 14884586, 366972,
- 16308990, 11913488, 13390465, 2958444, 10340278, 1177858, 1319431, 10426302,
- 2868597, 126119, 5784857, 5245324, 10903900, 16436004, 3389013, 1742384,
- 14674502, 10279218, 8536112, 10364279, 6877778, 14051163, 1025130, 6072469,
- 1988305, 8354440, 8216060, 16342977, 13112639, 3976679, 5913576, 8816697,
- 6879995, 14043764, 3339515, 9364420, 15808858, 12261651, 2141560, 5636398,
- 10345425, 10414756, 781725, 6155650, 4746914, 5078683, 7469001, 6799140,
- 10156444, 9667150, 10116470, 4133858, 2121972, 1124204, 1003577, 1611214,
- 14304602, 16221850, 13878465, 13577744, 3629235, 8772583, 10881308, 2410386,
- 7300044, 5378855, 9301235, 12755149, 4977682, 8083074, 10327581, 6395087,
- 9155434, 15501696, 7514362, 14520507, 15808945, 3244584, 4741962, 9658130,
- 14336147, 8654727, 7969093, 15759799, 14029445, 5038459, 9894848, 8659300,
- 13699287, 8834306, 10712885, 14753895, 10410465, 3373251, 309501, 9561475,
- 5526688, 14647426, 14209836, 5339224, 207299, 14069911, 8722990, 2290950,
- 3258216, 12505185, 6007317, 9218111, 14661019, 10537428, 11731949, 9027003,
- 6641507, 9490160, 200241, 9720425, 16277895, 10816638, 1554761, 10431375,
- 7467528, 6790302, 3429078, 14633753, 14428997, 11463204, 3576212, 2003426,
- 6123687, 820520, 9992513, 15784513, 5778891, 6428165, 8388607
-};
-
-/*
- * The hardware uses an LFSR counting sequence to determine when to capture
- * the SPU PCs. An LFSR sequence is like a pseudo-random number sequence
- * where each number occurs once in the sequence but the sequence is not in
- * numerical order. The SPU PC capture is done when the LFSR sequence reaches
- * the last value in the sequence. Hence the user specified value N
- * corresponds to the LFSR number that is N from the end of the sequence.
- *
- * To avoid the time to compute the LFSR, a lookup table is used. The 24 bit
- * LFSR sequence is broken into four ranges. The spacing of the precomputed
- * values is adjusted in each range so the error between the user specified
- * number (N) of events between samples and the actual number of events based
- * on the precomputed value will be less than about 6.2%. Note, if the user
- * specifies N < 2^16, the LFSR value that is 2^16 from the end will be used.
- * This is to prevent the loss of samples because the trace buffer is full.
- *
- * User specified N Step between Index in
- * precomputed values precomputed
- * table
- * 0 to 2^16-1 ---- 0
- * 2^16 to 2^16+2^19-1 2^12 1 to 128
- * 2^16+2^19 to 2^16+2^19+2^22-1 2^15 129 to 256
- * 2^16+2^19+2^22 to 2^24-1 2^18 257 to 302
- *
- *
- * For example, the LFSR values in the second range are computed for 2^16,
- * 2^16+2^12, ... , 2^19-2^16, 2^19 and stored in the table at indices
- * 1, 2,..., 127, 128.
- *
- * The 24 bit LFSR value for the nth number in the sequence can be
- * calculated using the following code:
- *
- * #define size 24
- * int calculate_lfsr(int n)
- * {
- * int i;
- * unsigned int newlfsr0;
- * unsigned int lfsr = 0xFFFFFF;
- * unsigned int howmany = n;
- *
- * for (i = 2; i < howmany + 2; i++) {
- * newlfsr0 = (((lfsr >> (size - 1 - 0)) & 1) ^
- * ((lfsr >> (size - 1 - 1)) & 1) ^
- * (((lfsr >> (size - 1 - 6)) & 1) ^
- * ((lfsr >> (size - 1 - 23)) & 1)));
- *
- * lfsr >>= 1;
- * lfsr = lfsr | (newlfsr0 << (size - 1));
- * }
- * return lfsr;
- * }
- */
-
-#define V2_16 (0x1 << 16)
-#define V2_19 (0x1 << 19)
-#define V2_22 (0x1 << 22)
-
-static int calculate_lfsr(int n)
-{
- /*
- * The ranges and steps are in powers of 2 so the calculations
- * can be done using shifts rather than divide.
- */
- int index;
-
- if ((n >> 16) == 0)
- index = 0;
- else if (((n - V2_16) >> 19) == 0)
- index = ((n - V2_16) >> 12) + 1;
- else if (((n - V2_16 - V2_19) >> 22) == 0)
- index = ((n - V2_16 - V2_19) >> 15 ) + 1 + 128;
- else if (((n - V2_16 - V2_19 - V2_22) >> 24) == 0)
- index = ((n - V2_16 - V2_19 - V2_22) >> 18 ) + 1 + 256;
- else
- index = ENTRIES-1;
-
- /* make sure index is valid */
- if ((index >= ENTRIES) || (index < 0))
- index = ENTRIES-1;
-
- return initial_lfsr[index];
-}
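As an aside, the index arithmetic above can be exercised on its own. The following hypothetical host-side sketch (lfsr_table_index() is an illustrative name, not a kernel symbol) reproduces the range and step mapping documented in the comment before calculate_lfsr().

#include <stdio.h>

#define ENTRIES 303
#define V2_16 (0x1 << 16)
#define V2_19 (0x1 << 19)
#define V2_22 (0x1 << 22)

static int lfsr_table_index(int n)
{
	if ((n >> 16) == 0)
		return 0;                                 /* N < 2^16: index 0 */
	if (((n - V2_16) >> 19) == 0)
		return ((n - V2_16) >> 12) + 1;           /* step 2^12: 1..128 */
	if (((n - V2_16 - V2_19) >> 22) == 0)
		return ((n - V2_16 - V2_19) >> 15) + 1 + 128;         /* step 2^15: 129..256 */
	if (((n - V2_16 - V2_19 - V2_22) >> 24) == 0)
		return ((n - V2_16 - V2_19 - V2_22) >> 18) + 1 + 256; /* step 2^18: 257..302 */
	return ENTRIES - 1;
}

int main(void)
{
	/* N = 2^16 + 5*2^12 lies in the second range and maps to index 6 */
	printf("%d\n", lfsr_table_index((0x1 << 16) + (5 << 12)));
	return 0;
}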
-
-static int pm_rtas_activate_spu_profiling(u32 node)
-{
- int ret, i;
- struct pm_signal pm_signal_local[NUM_SPUS_PER_NODE];
-
- /*
- * Set up the rtas call to configure the debug bus to
- * route the SPU PCs. Setup the pm_signal for each SPU
- */
- for (i = 0; i < ARRAY_SIZE(pm_signal_local); i++) {
- pm_signal_local[i].cpu = node;
- pm_signal_local[i].signal_group = 41;
- /* spu i on word (i/2) */
- pm_signal_local[i].bus_word = 1 << i / 2;
- /* spu i */
- pm_signal_local[i].sub_unit = i;
- pm_signal_local[i].bit = 63;
- }
-
- ret = rtas_ibm_cbe_perftools(SUBFUNC_ACTIVATE,
- PASSTHRU_ENABLE, pm_signal_local,
- (ARRAY_SIZE(pm_signal_local)
- * sizeof(struct pm_signal)));
-
- if (unlikely(ret)) {
- printk(KERN_WARNING "%s: rtas returned: %d\n",
- __func__, ret);
- return -EIO;
- }
-
- return 0;
-}
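A quick hypothetical check of the bus_word expression above (the loop below is illustrative only): in C, '/' binds tighter than '<<', so '1 << i / 2' evaluates as '1 << (i / 2)' and pairs of SPUs share one debug-bus word, exactly as the "spu i on word (i/2)" comment states.

#include <stdio.h>

int main(void)
{
	int i;

	for (i = 0; i < 8; i++)          /* assuming 8 SPUs per node */
		printf("spu %d -> bus_word 0x%x\n", i, 1 << i / 2);
	return 0;
}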
-
-#ifdef CONFIG_CPU_FREQ
-static int
-oprof_cpufreq_notify(struct notifier_block *nb, unsigned long val, void *data)
-{
- int ret = 0;
- struct cpufreq_freqs *frq = data;
- if ((val == CPUFREQ_PRECHANGE && frq->old < frq->new) ||
- (val == CPUFREQ_POSTCHANGE && frq->old > frq->new))
- set_spu_profiling_frequency(frq->new, spu_cycle_reset);
- return ret;
-}
-
-static struct notifier_block cpu_freq_notifier_block = {
- .notifier_call = oprof_cpufreq_notify
-};
-#endif
-
-/*
- * Note the generic OProfile stop calls do not support returning
- * an error on stop. Hence, we will not return an error if the FW
- * calls fail on stop. Failure to reset the debug bus is not an issue.
- * Failure to disable the SPU profiling is not an issue. The FW calls
- * to enable the performance counters and debug bus will work even if
- * the hardware was not cleanly reset.
- */
-static void cell_global_stop_spu_cycles(void)
-{
- int subfunc, rtn_value;
- unsigned int lfsr_value;
- int cpu;
-
- oprofile_running = 0;
- smp_wmb();
-
-#ifdef CONFIG_CPU_FREQ
- cpufreq_unregister_notifier(&cpu_freq_notifier_block,
- CPUFREQ_TRANSITION_NOTIFIER);
-#endif
-
- for_each_online_cpu(cpu) {
- if (cbe_get_hw_thread_id(cpu))
- continue;
-
- subfunc = 3; /*
- * 2 - activate SPU tracing,
- * 3 - deactivate
- */
- lfsr_value = 0x8f100000;
-
- rtn_value = rtas_call(spu_rtas_token, 3, 1, NULL,
- subfunc, cbe_cpu_to_node(cpu),
- lfsr_value);
-
- if (unlikely(rtn_value != 0)) {
- printk(KERN_ERR
- "%s: rtas call ibm,cbe-spu-perftools " \
- "failed, return = %d\n",
- __func__, rtn_value);
- }
-
- /* Deactivate the signals */
- pm_rtas_reset_signals(cbe_cpu_to_node(cpu));
- }
-
- stop_spu_profiling_cycles();
-}
-
-static void cell_global_stop_spu_events(void)
-{
- int cpu;
- oprofile_running = 0;
-
- stop_spu_profiling_events();
- smp_wmb();
-
- for_each_online_cpu(cpu) {
- if (cbe_get_hw_thread_id(cpu))
- continue;
-
- cbe_sync_irq(cbe_cpu_to_node(cpu));
- /* Stop the counters */
- cbe_disable_pm(cpu);
- cbe_write_pm07_control(cpu, 0, 0);
-
- /* Deactivate the signals */
- pm_rtas_reset_signals(cbe_cpu_to_node(cpu));
-
- /* Deactivate interrupts */
- cbe_disable_pm_interrupts(cpu);
- }
- del_timer_sync(&timer_spu_event_swap);
-}
-
-static void cell_global_stop_ppu(void)
-{
- int cpu;
-
- /*
- * This routine will be called once for the system.
- * There is one performance monitor per node, so we
- * only need to perform this function once per node.
- */
- del_timer_sync(&timer_virt_cntr);
- oprofile_running = 0;
- smp_wmb();
-
- for_each_online_cpu(cpu) {
- if (cbe_get_hw_thread_id(cpu))
- continue;
-
- cbe_sync_irq(cbe_cpu_to_node(cpu));
- /* Stop the counters */
- cbe_disable_pm(cpu);
-
- /* Deactivate the signals */
- pm_rtas_reset_signals(cbe_cpu_to_node(cpu));
-
- /* Deactivate interrupts */
- cbe_disable_pm_interrupts(cpu);
- }
-}
-
-static void cell_global_stop(void)
-{
- if (profiling_mode == PPU_PROFILING)
- cell_global_stop_ppu();
- else if (profiling_mode == SPU_PROFILING_EVENTS)
- cell_global_stop_spu_events();
- else
- cell_global_stop_spu_cycles();
-}
-
-static int cell_global_start_spu_cycles(struct op_counter_config *ctr)
-{
- int subfunc;
- unsigned int lfsr_value;
- int cpu;
- int ret;
- int rtas_error;
- unsigned int cpu_khzfreq = 0;
-
- /* The SPU profiling uses time-based profiling based on
- * cpu frequency, so if configured with the CPU_FREQ
- * option, we should detect frequency changes and react
- * accordingly.
- */
-#ifdef CONFIG_CPU_FREQ
- ret = cpufreq_register_notifier(&cpu_freq_notifier_block,
- CPUFREQ_TRANSITION_NOTIFIER);
- if (ret < 0)
- /* this is not a fatal error */
- printk(KERN_ERR "CPU freq change registration failed: %d\n",
- ret);
-
- else
- cpu_khzfreq = cpufreq_quick_get(smp_processor_id());
-#endif
-
- set_spu_profiling_frequency(cpu_khzfreq, spu_cycle_reset);
-
- for_each_online_cpu(cpu) {
- if (cbe_get_hw_thread_id(cpu))
- continue;
-
- /*
- * Setup SPU cycle-based profiling.
- * Set perf_mon_control bit 0 to a zero before
- * enabling spu collection hardware.
- */
- cbe_write_pm(cpu, pm_control, 0);
-
- if (spu_cycle_reset > MAX_SPU_COUNT)
- /* use largest possible value */
- lfsr_value = calculate_lfsr(MAX_SPU_COUNT-1);
- else
- lfsr_value = calculate_lfsr(spu_cycle_reset);
-
- /* must use a non zero value. Zero disables data collection. */
- if (lfsr_value == 0)
- lfsr_value = calculate_lfsr(1);
-
- lfsr_value = lfsr_value << 8; /* shift lfsr to correct
- * register location
- */
-
- /* debug bus setup */
- ret = pm_rtas_activate_spu_profiling(cbe_cpu_to_node(cpu));
-
- if (unlikely(ret)) {
- rtas_error = ret;
- goto out;
- }
-
-
- subfunc = 2; /* 2 - activate SPU tracing, 3 - deactivate */
-
- /* start profiling */
- ret = rtas_call(spu_rtas_token, 3, 1, NULL, subfunc,
- cbe_cpu_to_node(cpu), lfsr_value);
-
- if (unlikely(ret != 0)) {
- printk(KERN_ERR
- "%s: rtas call ibm,cbe-spu-perftools failed, " \
- "return = %d\n", __func__, ret);
- rtas_error = -EIO;
- goto out;
- }
- }
-
- rtas_error = start_spu_profiling_cycles(spu_cycle_reset);
- if (rtas_error)
- goto out_stop;
-
- oprofile_running = 1;
- return 0;
-
-out_stop:
- cell_global_stop_spu_cycles(); /* clean up the PMU/debug bus */
-out:
- return rtas_error;
-}
-
-static int cell_global_start_spu_events(struct op_counter_config *ctr)
-{
- int cpu;
- u32 interrupt_mask = 0;
- int rtn = 0;
-
- hdw_thread = 0;
-
- /* SPU event profiling uses the performance counters to generate
- * an interrupt. The hardware is setup to store the SPU program
- * counter into the trace array. The occurrence mode is used to
- * enable storing data to the trace buffer. The bits are set
- * to send/store the SPU address in the trace buffer. The debug
- * bus must be setup to route the SPU program counter onto the
- * debug bus. The occurrence data in the trace buffer is not used.
- */
-
- /* This routine gets called once for the system.
- * There is one performance monitor per node, so we
- * only need to perform this function once per node.
- */
-
- for_each_online_cpu(cpu) {
- if (cbe_get_hw_thread_id(cpu))
- continue;
-
- /*
- * Setup SPU event-based profiling.
- * Set perf_mon_control bit 0 to a zero before
- * enabling spu collection hardware.
- *
- * Only support one SPU event on one SPU per node.
- */
- if (ctr_enabled & 1) {
- cbe_write_ctr(cpu, 0, reset_value[0]);
- enable_ctr(cpu, 0, pm_regs.pm07_cntrl);
- interrupt_mask |=
- CBE_PM_CTR_OVERFLOW_INTR(0);
- } else {
- /* Disable counter */
- cbe_write_pm07_control(cpu, 0, 0);
- }
-
- cbe_get_and_clear_pm_interrupts(cpu);
- cbe_enable_pm_interrupts(cpu, hdw_thread, interrupt_mask);
- cbe_enable_pm(cpu);
-
- /* clear the trace buffer */
- cbe_write_pm(cpu, trace_address, 0);
- }
-
- /* Start the timer to time slice collecting the event profile
- * on each of the SPUs. Note, can collect profile on one SPU
- * per node at a time.
- */
- start_spu_event_swap();
- start_spu_profiling_events();
- oprofile_running = 1;
- smp_wmb();
-
- return rtn;
-}
-
-static int cell_global_start_ppu(struct op_counter_config *ctr)
-{
- u32 cpu, i;
- u32 interrupt_mask = 0;
-
- /* This routine gets called once for the system.
- * There is one performance monitor per node, so we
- * only need to perform this function once per node.
- */
- for_each_online_cpu(cpu) {
- if (cbe_get_hw_thread_id(cpu))
- continue;
-
- interrupt_mask = 0;
-
- for (i = 0; i < num_counters; ++i) {
- if (ctr_enabled & (1 << i)) {
- cbe_write_ctr(cpu, i, reset_value[i]);
- enable_ctr(cpu, i, pm_regs.pm07_cntrl);
- interrupt_mask |= CBE_PM_CTR_OVERFLOW_INTR(i);
- } else {
- /* Disable counter */
- cbe_write_pm07_control(cpu, i, 0);
- }
- }
-
- cbe_get_and_clear_pm_interrupts(cpu);
- cbe_enable_pm_interrupts(cpu, hdw_thread, interrupt_mask);
- cbe_enable_pm(cpu);
- }
-
- virt_cntr_inter_mask = interrupt_mask;
- oprofile_running = 1;
- smp_wmb();
-
- /*
- * NOTE: start_virt_cntrs will result in cell_virtual_cntr() being
- * executed which manipulates the PMU. We start the "virtual counter"
- * here so that we do not need to synchronize access to the PMU in
- * the above for-loop.
- */
- start_virt_cntrs();
-
- return 0;
-}
-
-static int cell_global_start(struct op_counter_config *ctr)
-{
- if (profiling_mode == SPU_PROFILING_CYCLES)
- return cell_global_start_spu_cycles(ctr);
- else if (profiling_mode == SPU_PROFILING_EVENTS)
- return cell_global_start_spu_events(ctr);
- else
- return cell_global_start_ppu(ctr);
-}
-
-
-/* The SPU interrupt handler
- *
- * SPU event profiling works as follows:
- * The pm_signal[0] holds the one SPU event to be measured. It is routed on
- * the debug bus using word 0 or 1. The value of pm_signal[1] and
- * pm_signal[2] contain the necessary events to route the SPU program
- * counter for the selected SPU onto the debug bus using words 2 and 3.
- * The pm_interval register is setup to write the SPU PC value into the
- * trace buffer at the maximum rate possible. The trace buffer is configured
- * to store the PCs, wrapping when it is full. The performance counter is
- * initialized to the max hardware count minus the number of events, N, between
- * samples. Once the N events have occurred, a HW counter overflow occurs
- * causing the generation of a HW counter interrupt which also stops the
- * writing of the SPU PC values to the trace buffer. Hence the last PC
- * written to the trace buffer is the SPU PC that we want. Unfortunately,
- * we have to read from the beginning of the trace buffer to get to the
- * last value written. We just hope the PPU has nothing better to do than
- * service this interrupt. The PC for the specific SPU being profiled is
- * extracted from the trace buffer processed and stored. The trace buffer
- * is cleared, interrupts are cleared, the counter is reset to max - N.
- * A kernel timer is used to periodically call the routine spu_evnt_swap()
- * to switch to the next physical SPU in the node to profile in round robin
- * order. This way data is collected for all SPUs on the node. It does mean
- * that we need to use a relatively small value of N to ensure enough samples
- * on each SPU are collected, since each SPU is only being profiled 1/8 of
- * the time. It may also be necessary to use a longer sample collection period.
- */
-static void cell_handle_interrupt_spu(struct pt_regs *regs,
- struct op_counter_config *ctr)
-{
- u32 cpu, cpu_tmp;
- u64 trace_entry;
- u32 interrupt_mask;
- u64 trace_buffer[2];
- u64 last_trace_buffer;
- u32 sample;
- u32 trace_addr;
- unsigned long sample_array_lock_flags;
- int spu_num;
- unsigned long flags;
-
- /* Make sure spu event interrupt handler and spu event swap
- * don't access the counters simultaneously.
- */
- cpu = smp_processor_id();
- spin_lock_irqsave(&cntr_lock, flags);
-
- cpu_tmp = cpu;
- cbe_disable_pm(cpu);
-
- interrupt_mask = cbe_get_and_clear_pm_interrupts(cpu);
-
- sample = 0xABCDEF;
- trace_entry = 0xfedcba;
- last_trace_buffer = 0xdeadbeaf;
-
- if ((oprofile_running == 1) && (interrupt_mask != 0)) {
- /* disable writes to trace buff */
- cbe_write_pm(cpu, pm_interval, 0);
-
- /* only have one perf cntr being used, cntr 0 */
- if ((interrupt_mask & CBE_PM_CTR_OVERFLOW_INTR(0))
- && ctr[0].enabled)
- /* The SPU PC values will be read
- * from the trace buffer, reset counter
- */
-
- cbe_write_ctr(cpu, 0, reset_value[0]);
-
- trace_addr = cbe_read_pm(cpu, trace_address);
-
- while (!(trace_addr & CBE_PM_TRACE_BUF_EMPTY)) {
- /* There is data in the trace buffer to process
- * Read the buffer until you get to the last
- * entry. This is the value we want.
- */
-
- cbe_read_trace_buffer(cpu, trace_buffer);
- trace_addr = cbe_read_pm(cpu, trace_address);
- }
-
- /* SPU Address 16 bit count format for 128 bit
- * HW trace buffer is used for the SPU PC storage
- * HDR bits 0:15
- * SPU Addr 0 bits 16:31
- * SPU Addr 1 bits 32:47
- * unused bits 48:127
- *
- * HDR: bit4 = 1 SPU Address 0 valid
- * HDR: bit5 = 1 SPU Address 1 valid
- * - unfortunately, the valid bits don't seem to work
- *
- * Note trace_buffer[0] holds bits 0:63 of the HW
- * trace buffer, trace_buffer[1] holds bits 64:127
- */
-
- trace_entry = trace_buffer[0]
- & 0x00000000FFFF0000;
-
- /* only the top 16 bits of the 18 bit SPU PC address
- * are stored in the trace buffer, hence shift right
- * by 16 - 2 = 14 bits */
- sample = trace_entry >> 14;
- last_trace_buffer = trace_buffer[0];
-
- spu_num = spu_evnt_phys_spu_indx
- + (cbe_cpu_to_node(cpu) * NUM_SPUS_PER_NODE);
-
- /* make sure only one process at a time is calling
- * spu_sync_buffer()
- */
- spin_lock_irqsave(&oprof_spu_smpl_arry_lck,
- sample_array_lock_flags);
- spu_sync_buffer(spu_num, &sample, 1);
- spin_unlock_irqrestore(&oprof_spu_smpl_arry_lck,
- sample_array_lock_flags);
-
- smp_wmb(); /* ensure spu event buffer updates are written
- * don't want events intermingled... */
-
- /* The counters were frozen by the interrupt.
- * Reenable the interrupt and restart the counters.
- */
- cbe_write_pm(cpu, pm_interval, NUM_INTERVAL_CYC);
- cbe_enable_pm_interrupts(cpu, hdw_thread,
- virt_cntr_inter_mask);
-
- /* clear the trace buffer, re-enable writes to trace buff */
- cbe_write_pm(cpu, trace_address, 0);
- cbe_write_pm(cpu, pm_interval, NUM_INTERVAL_CYC);
-
- /* The writes to the various performance counters only writes
- * to a latch. The new values (interrupt setting bits, reset
- * counter value etc.) are not copied to the actual registers
- * until the performance monitor is enabled. In order to get
- * this to work as desired, the performance monitor needs to
- * be disabled while writing to the latches. This is a
- * HW design issue.
- */
- write_pm_cntrl(cpu);
- cbe_enable_pm(cpu);
- }
- spin_unlock_irqrestore(&cntr_lock, flags);
-}
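To make the trace-buffer arithmetic above easier to follow, here is a hypothetical standalone sketch of the same extraction (spu_pc_from_trace() is an illustrative name, not a kernel symbol): mask out the 16 stored address bits from the first doubleword and shift right by 16 - 2 = 14, so the recovered 18-bit PC keeps its two low-order alignment zeros.

#include <stdint.h>
#include <stdio.h>

static uint32_t spu_pc_from_trace(uint64_t trace_dword0)
{
	/* same mask and shift as cell_handle_interrupt_spu() above */
	return (uint32_t)((trace_dword0 & 0x00000000FFFF0000ULL) >> 14);
}

int main(void)
{
	/* stored bits 0x1234 recover PC 0x48d0 (0x1234 << 2) */
	printf("0x%x\n", (unsigned int)spu_pc_from_trace(0x12340000ULL));
	return 0;
}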
-
-static void cell_handle_interrupt_ppu(struct pt_regs *regs,
- struct op_counter_config *ctr)
-{
- u32 cpu;
- u64 pc;
- int is_kernel;
- unsigned long flags = 0;
- u32 interrupt_mask;
- int i;
-
- cpu = smp_processor_id();
-
- /*
- * Need to make sure the interrupt handler and the virt counter
- * routine are not running at the same time. See the
- * cell_virtual_cntr() routine for additional comments.
- */
- spin_lock_irqsave(&cntr_lock, flags);
-
- /*
- * Need to disable and reenable the performance counters
- * to get the desired behavior from the hardware. This
- * is hardware specific.
- */
-
- cbe_disable_pm(cpu);
-
- interrupt_mask = cbe_get_and_clear_pm_interrupts(cpu);
-
- /*
- * If the interrupt mask has been cleared, then the virt cntr
- * has cleared the interrupt. When the thread that generated
- * the interrupt is restored, the data count will be restored to
- * 0xffffff0 to cause the interrupt to be regenerated.
- */
-
- if ((oprofile_running == 1) && (interrupt_mask != 0)) {
- pc = regs->nip;
- is_kernel = is_kernel_addr(pc);
-
- for (i = 0; i < num_counters; ++i) {
- if ((interrupt_mask & CBE_PM_CTR_OVERFLOW_INTR(i))
- && ctr[i].enabled) {
- oprofile_add_ext_sample(pc, regs, i, is_kernel);
- cbe_write_ctr(cpu, i, reset_value[i]);
- }
- }
-
- /*
- * The counters were frozen by the interrupt.
- * Reenable the interrupt and restart the counters.
- * If there was a race between the interrupt handler and
- * the virtual counter routine. The virtual counter
- * routine may have cleared the interrupts. Hence must
- * use the virt_cntr_inter_mask to re-enable the interrupts.
- */
- cbe_enable_pm_interrupts(cpu, hdw_thread,
- virt_cntr_inter_mask);
-
- /*
- * The writes to the various performance counters only writes
- * to a latch. The new values (interrupt setting bits, reset
- * counter value etc.) are not copied to the actual registers
- * until the performance monitor is enabled. In order to get
- * this to work as desired, the performance monitor needs to
- * be disabled while writing to the latches. This is a
- * HW design issue.
- */
- cbe_enable_pm(cpu);
- }
- spin_unlock_irqrestore(&cntr_lock, flags);
-}
-
-static void cell_handle_interrupt(struct pt_regs *regs,
- struct op_counter_config *ctr)
-{
- if (profiling_mode == PPU_PROFILING)
- cell_handle_interrupt_ppu(regs, ctr);
- else
- cell_handle_interrupt_spu(regs, ctr);
-}
-
-/*
- * This function is called from the generic OProfile
- * driver. When profiling PPUs, we need to do the
- * generic sync start; otherwise, do spu_sync_start.
- */
-static int cell_sync_start(void)
-{
- if ((profiling_mode == SPU_PROFILING_CYCLES) ||
- (profiling_mode == SPU_PROFILING_EVENTS))
- return spu_sync_start();
- else
- return DO_GENERIC_SYNC;
-}
-
-static int cell_sync_stop(void)
-{
- if ((profiling_mode == SPU_PROFILING_CYCLES) ||
- (profiling_mode == SPU_PROFILING_EVENTS))
- return spu_sync_stop();
- else
- return 1;
-}
-
-struct op_powerpc_model op_model_cell = {
- .reg_setup = cell_reg_setup,
- .cpu_setup = cell_cpu_setup,
- .global_start = cell_global_start,
- .global_stop = cell_global_stop,
- .sync_start = cell_sync_start,
- .sync_stop = cell_sync_stop,
- .handle_interrupt = cell_handle_interrupt,
-};
diff --git a/arch/powerpc/oprofile/op_model_fsl_emb.c b/arch/powerpc/oprofile/op_model_fsl_emb.c
deleted file mode 100644
index 25dc6813ecee..000000000000
--- a/arch/powerpc/oprofile/op_model_fsl_emb.c
+++ /dev/null
@@ -1,380 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Freescale Embedded oprofile support, based on ppc64 oprofile support
- * Copyright (C) 2004 Anton Blanchard <anton@au.ibm.com>, IBM
- *
- * Copyright (c) 2004, 2010 Freescale Semiconductor, Inc
- *
- * Author: Andy Fleming
- * Maintainer: Kumar Gala <galak@kernel.crashing.org>
- */
-
-#include <linux/oprofile.h>
-#include <linux/smp.h>
-#include <asm/ptrace.h>
-#include <asm/processor.h>
-#include <asm/cputable.h>
-#include <asm/reg_fsl_emb.h>
-#include <asm/page.h>
-#include <asm/pmc.h>
-#include <asm/oprofile_impl.h>
-
-static unsigned long reset_value[OP_MAX_COUNTER];
-
-static int num_counters;
-static int oprofile_running;
-
-static inline u32 get_pmlca(int ctr)
-{
- u32 pmlca;
-
- switch (ctr) {
- case 0:
- pmlca = mfpmr(PMRN_PMLCA0);
- break;
- case 1:
- pmlca = mfpmr(PMRN_PMLCA1);
- break;
- case 2:
- pmlca = mfpmr(PMRN_PMLCA2);
- break;
- case 3:
- pmlca = mfpmr(PMRN_PMLCA3);
- break;
- case 4:
- pmlca = mfpmr(PMRN_PMLCA4);
- break;
- case 5:
- pmlca = mfpmr(PMRN_PMLCA5);
- break;
- default:
- panic("Bad ctr number\n");
- }
-
- return pmlca;
-}
-
-static inline void set_pmlca(int ctr, u32 pmlca)
-{
- switch (ctr) {
- case 0:
- mtpmr(PMRN_PMLCA0, pmlca);
- break;
- case 1:
- mtpmr(PMRN_PMLCA1, pmlca);
- break;
- case 2:
- mtpmr(PMRN_PMLCA2, pmlca);
- break;
- case 3:
- mtpmr(PMRN_PMLCA3, pmlca);
- break;
- case 4:
- mtpmr(PMRN_PMLCA4, pmlca);
- break;
- case 5:
- mtpmr(PMRN_PMLCA5, pmlca);
- break;
- default:
- panic("Bad ctr number\n");
- }
-}
-
-static inline unsigned int ctr_read(unsigned int i)
-{
- switch(i) {
- case 0:
- return mfpmr(PMRN_PMC0);
- case 1:
- return mfpmr(PMRN_PMC1);
- case 2:
- return mfpmr(PMRN_PMC2);
- case 3:
- return mfpmr(PMRN_PMC3);
- case 4:
- return mfpmr(PMRN_PMC4);
- case 5:
- return mfpmr(PMRN_PMC5);
- default:
- return 0;
- }
-}
-
-static inline void ctr_write(unsigned int i, unsigned int val)
-{
- switch(i) {
- case 0:
- mtpmr(PMRN_PMC0, val);
- break;
- case 1:
- mtpmr(PMRN_PMC1, val);
- break;
- case 2:
- mtpmr(PMRN_PMC2, val);
- break;
- case 3:
- mtpmr(PMRN_PMC3, val);
- break;
- case 4:
- mtpmr(PMRN_PMC4, val);
- break;
- case 5:
- mtpmr(PMRN_PMC5, val);
- break;
- default:
- break;
- }
-}
-
-
-static void init_pmc_stop(int ctr)
-{
- u32 pmlca = (PMLCA_FC | PMLCA_FCS | PMLCA_FCU |
- PMLCA_FCM1 | PMLCA_FCM0);
- u32 pmlcb = 0;
-
- switch (ctr) {
- case 0:
- mtpmr(PMRN_PMLCA0, pmlca);
- mtpmr(PMRN_PMLCB0, pmlcb);
- break;
- case 1:
- mtpmr(PMRN_PMLCA1, pmlca);
- mtpmr(PMRN_PMLCB1, pmlcb);
- break;
- case 2:
- mtpmr(PMRN_PMLCA2, pmlca);
- mtpmr(PMRN_PMLCB2, pmlcb);
- break;
- case 3:
- mtpmr(PMRN_PMLCA3, pmlca);
- mtpmr(PMRN_PMLCB3, pmlcb);
- break;
- case 4:
- mtpmr(PMRN_PMLCA4, pmlca);
- mtpmr(PMRN_PMLCB4, pmlcb);
- break;
- case 5:
- mtpmr(PMRN_PMLCA5, pmlca);
- mtpmr(PMRN_PMLCB5, pmlcb);
- break;
- default:
- panic("Bad ctr number!\n");
- }
-}
-
-static void set_pmc_event(int ctr, int event)
-{
- u32 pmlca;
-
- pmlca = get_pmlca(ctr);
-
- pmlca = (pmlca & ~PMLCA_EVENT_MASK) |
- ((event << PMLCA_EVENT_SHIFT) &
- PMLCA_EVENT_MASK);
-
- set_pmlca(ctr, pmlca);
-}
-
-static void set_pmc_user_kernel(int ctr, int user, int kernel)
-{
- u32 pmlca;
-
- pmlca = get_pmlca(ctr);
-
- if(user)
- pmlca &= ~PMLCA_FCU;
- else
- pmlca |= PMLCA_FCU;
-
- if(kernel)
- pmlca &= ~PMLCA_FCS;
- else
- pmlca |= PMLCA_FCS;
-
- set_pmlca(ctr, pmlca);
-}
-
-static void set_pmc_marked(int ctr, int mark0, int mark1)
-{
- u32 pmlca = get_pmlca(ctr);
-
- if(mark0)
- pmlca &= ~PMLCA_FCM0;
- else
- pmlca |= PMLCA_FCM0;
-
- if(mark1)
- pmlca &= ~PMLCA_FCM1;
- else
- pmlca |= PMLCA_FCM1;
-
- set_pmlca(ctr, pmlca);
-}
-
-static void pmc_start_ctr(int ctr, int enable)
-{
- u32 pmlca = get_pmlca(ctr);
-
- pmlca &= ~PMLCA_FC;
-
- if (enable)
- pmlca |= PMLCA_CE;
- else
- pmlca &= ~PMLCA_CE;
-
- set_pmlca(ctr, pmlca);
-}
-
-static void pmc_start_ctrs(int enable)
-{
- u32 pmgc0 = mfpmr(PMRN_PMGC0);
-
- pmgc0 &= ~PMGC0_FAC;
- pmgc0 |= PMGC0_FCECE;
-
- if (enable)
- pmgc0 |= PMGC0_PMIE;
- else
- pmgc0 &= ~PMGC0_PMIE;
-
- mtpmr(PMRN_PMGC0, pmgc0);
-}
-
-static void pmc_stop_ctrs(void)
-{
- u32 pmgc0 = mfpmr(PMRN_PMGC0);
-
- pmgc0 |= PMGC0_FAC;
-
- pmgc0 &= ~(PMGC0_PMIE | PMGC0_FCECE);
-
- mtpmr(PMRN_PMGC0, pmgc0);
-}
-
-static int fsl_emb_cpu_setup(struct op_counter_config *ctr)
-{
- int i;
-
- /* freeze all counters */
- pmc_stop_ctrs();
-
- for (i = 0;i < num_counters;i++) {
- init_pmc_stop(i);
-
- set_pmc_event(i, ctr[i].event);
-
- set_pmc_user_kernel(i, ctr[i].user, ctr[i].kernel);
- }
-
- return 0;
-}
-
-static int fsl_emb_reg_setup(struct op_counter_config *ctr,
- struct op_system_config *sys,
- int num_ctrs)
-{
- int i;
-
- num_counters = num_ctrs;
-
- /* Our counters count up, and "count" refers to
- * how much before the next interrupt, and we interrupt
- * on overflow. So we calculate the starting value
- * which will give us "count" until overflow.
- * Then we set the events on the enabled counters */
- for (i = 0; i < num_counters; ++i)
- reset_value[i] = 0x80000000UL - ctr[i].count;
-
- return 0;
-}
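A small hypothetical illustration of the reset value chosen above: priming a 32-bit counter to 0x80000000 - count sets its sign bit after exactly 'count' increments, which is how the interrupt handler further down (its 'val < 0' test) recognizes an overflowed counter.

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t count = 250000;
	uint32_t pmc = 0x80000000u - count;  /* value written at reg_setup time */

	pmc += count;                        /* 'count' events later ...         */
	assert((int32_t)pmc < 0);            /* ... the handler sees an overflow */
	return 0;
}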
-
-static int fsl_emb_start(struct op_counter_config *ctr)
-{
- int i;
-
- mtmsr(mfmsr() | MSR_PMM);
-
- for (i = 0; i < num_counters; ++i) {
- if (ctr[i].enabled) {
- ctr_write(i, reset_value[i]);
- /* Set each enabled counter to only
- * count when the Mark bit is *not* set */
- set_pmc_marked(i, 1, 0);
- pmc_start_ctr(i, 1);
- } else {
- ctr_write(i, 0);
-
- /* Set the ctr to be stopped */
- pmc_start_ctr(i, 0);
- }
- }
-
- /* Clear the freeze bit, and enable the interrupt.
- * The counters won't actually start until the rfi clears
- * the PMM bit */
- pmc_start_ctrs(1);
-
- oprofile_running = 1;
-
- pr_debug("start on cpu %d, pmgc0 %x\n", smp_processor_id(),
- mfpmr(PMRN_PMGC0));
-
- return 0;
-}
-
-static void fsl_emb_stop(void)
-{
- /* freeze counters */
- pmc_stop_ctrs();
-
- oprofile_running = 0;
-
- pr_debug("stop on cpu %d, pmgc0 %x\n", smp_processor_id(),
- mfpmr(PMRN_PMGC0));
-
- mb();
-}
-
-
-static void fsl_emb_handle_interrupt(struct pt_regs *regs,
- struct op_counter_config *ctr)
-{
- unsigned long pc;
- int is_kernel;
- int val;
- int i;
-
- pc = regs->nip;
- is_kernel = is_kernel_addr(pc);
-
- for (i = 0; i < num_counters; ++i) {
- val = ctr_read(i);
- if (val < 0) {
- if (oprofile_running && ctr[i].enabled) {
- oprofile_add_ext_sample(pc, regs, i, is_kernel);
- ctr_write(i, reset_value[i]);
- } else {
- ctr_write(i, 0);
- }
- }
- }
-
- /* The freeze bit was set by the interrupt. */
- /* Clear the freeze bit, and reenable the interrupt. The
- * counters won't actually start until the rfi clears the PMM
- * bit. The PMM bit should not be set until after the interrupt
- * is cleared to avoid it getting lost in some hypervisor
- * environments.
- */
- mtmsr(mfmsr() | MSR_PMM);
- pmc_start_ctrs(1);
-}
-
-struct op_powerpc_model op_model_fsl_emb = {
- .reg_setup = fsl_emb_reg_setup,
- .cpu_setup = fsl_emb_cpu_setup,
- .start = fsl_emb_start,
- .stop = fsl_emb_stop,
- .handle_interrupt = fsl_emb_handle_interrupt,
-};
diff --git a/arch/powerpc/oprofile/op_model_pa6t.c b/arch/powerpc/oprofile/op_model_pa6t.c
deleted file mode 100644
index d23061cf76bc..000000000000
--- a/arch/powerpc/oprofile/op_model_pa6t.c
+++ /dev/null
@@ -1,227 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2006-2007 PA Semi, Inc
- *
- * Author: Shashi Rao, PA Semi
- *
- * Maintained by: Olof Johansson <olof@lixom.net>
- *
- * Based on arch/powerpc/oprofile/op_model_power4.c
- */
-
-#include <linux/oprofile.h>
-#include <linux/smp.h>
-#include <linux/percpu.h>
-#include <asm/processor.h>
-#include <asm/cputable.h>
-#include <asm/oprofile_impl.h>
-#include <asm/reg.h>
-
-static unsigned char oprofile_running;
-
-/* mmcr values are set in pa6t_reg_setup, used in pa6t_cpu_setup */
-static u64 mmcr0_val;
-static u64 mmcr1_val;
-
-/* inited in pa6t_reg_setup */
-static u64 reset_value[OP_MAX_COUNTER];
-
-static inline u64 ctr_read(unsigned int i)
-{
- switch (i) {
- case 0:
- return mfspr(SPRN_PA6T_PMC0);
- case 1:
- return mfspr(SPRN_PA6T_PMC1);
- case 2:
- return mfspr(SPRN_PA6T_PMC2);
- case 3:
- return mfspr(SPRN_PA6T_PMC3);
- case 4:
- return mfspr(SPRN_PA6T_PMC4);
- case 5:
- return mfspr(SPRN_PA6T_PMC5);
- default:
- printk(KERN_ERR "ctr_read called with bad arg %u\n", i);
- return 0;
- }
-}
-
-static inline void ctr_write(unsigned int i, u64 val)
-{
- switch (i) {
- case 0:
- mtspr(SPRN_PA6T_PMC0, val);
- break;
- case 1:
- mtspr(SPRN_PA6T_PMC1, val);
- break;
- case 2:
- mtspr(SPRN_PA6T_PMC2, val);
- break;
- case 3:
- mtspr(SPRN_PA6T_PMC3, val);
- break;
- case 4:
- mtspr(SPRN_PA6T_PMC4, val);
- break;
- case 5:
- mtspr(SPRN_PA6T_PMC5, val);
- break;
- default:
- printk(KERN_ERR "ctr_write called with bad arg %u\n", i);
- break;
- }
-}
-
-
-/* precompute the values to stuff in the hardware registers */
-static int pa6t_reg_setup(struct op_counter_config *ctr,
- struct op_system_config *sys,
- int num_ctrs)
-{
- int pmc;
-
- /*
- * adjust the mmcr0.en[0-5] and mmcr0.inten[0-5] values obtained from the
- * event_mappings file by turning off the counters that the user doesn't
- * care about
- *
- * setup user and kernel profiling
- */
- for (pmc = 0; pmc < cur_cpu_spec->num_pmcs; pmc++)
- if (!ctr[pmc].enabled) {
- sys->mmcr0 &= ~(0x1UL << pmc);
- sys->mmcr0 &= ~(0x1UL << (pmc+12));
- pr_debug("turned off counter %u\n", pmc);
- }
-
- if (sys->enable_kernel)
- sys->mmcr0 |= PA6T_MMCR0_SUPEN | PA6T_MMCR0_HYPEN;
- else
- sys->mmcr0 &= ~(PA6T_MMCR0_SUPEN | PA6T_MMCR0_HYPEN);
-
- if (sys->enable_user)
- sys->mmcr0 |= PA6T_MMCR0_PREN;
- else
- sys->mmcr0 &= ~PA6T_MMCR0_PREN;
-
- /*
- * The performance counter event settings are given in the mmcr0 and
- * mmcr1 values passed from the user in the op_system_config
- * structure (sys variable).
- */
- mmcr0_val = sys->mmcr0;
- mmcr1_val = sys->mmcr1;
- pr_debug("mmcr0_val inited to %016lx\n", sys->mmcr0);
- pr_debug("mmcr1_val inited to %016lx\n", sys->mmcr1);
-
- for (pmc = 0; pmc < cur_cpu_spec->num_pmcs; pmc++) {
- /* counters are 40 bit. Move to cputable at some point? */
- reset_value[pmc] = (0x1UL << 39) - ctr[pmc].count;
- pr_debug("reset_value for pmc%u inited to 0x%llx\n",
- pmc, reset_value[pmc]);
- }
-
- return 0;
-}
-
-/* configure registers on this cpu */
-static int pa6t_cpu_setup(struct op_counter_config *ctr)
-{
- u64 mmcr0 = mmcr0_val;
- u64 mmcr1 = mmcr1_val;
-
- /* Default is all PMCs off */
- mmcr0 &= ~(0x3FUL);
- mtspr(SPRN_PA6T_MMCR0, mmcr0);
-
- /* program selected programmable events in */
- mtspr(SPRN_PA6T_MMCR1, mmcr1);
-
- pr_debug("setup on cpu %d, mmcr0 %016lx\n", smp_processor_id(),
- mfspr(SPRN_PA6T_MMCR0));
- pr_debug("setup on cpu %d, mmcr1 %016lx\n", smp_processor_id(),
- mfspr(SPRN_PA6T_MMCR1));
-
- return 0;
-}
-
-static int pa6t_start(struct op_counter_config *ctr)
-{
- int i;
-
- /* Hold off event counting until rfid */
- u64 mmcr0 = mmcr0_val | PA6T_MMCR0_HANDDIS;
-
- for (i = 0; i < cur_cpu_spec->num_pmcs; i++)
- if (ctr[i].enabled)
- ctr_write(i, reset_value[i]);
- else
- ctr_write(i, 0UL);
-
- mtspr(SPRN_PA6T_MMCR0, mmcr0);
-
- oprofile_running = 1;
-
- pr_debug("start on cpu %d, mmcr0 %llx\n", smp_processor_id(), mmcr0);
-
- return 0;
-}
-
-static void pa6t_stop(void)
-{
- u64 mmcr0;
-
- /* freeze counters */
- mmcr0 = mfspr(SPRN_PA6T_MMCR0);
- mmcr0 |= PA6T_MMCR0_FCM0;
- mtspr(SPRN_PA6T_MMCR0, mmcr0);
-
- oprofile_running = 0;
-
- pr_debug("stop on cpu %d, mmcr0 %llx\n", smp_processor_id(), mmcr0);
-}
-
-/* handle the perfmon overflow vector */
-static void pa6t_handle_interrupt(struct pt_regs *regs,
- struct op_counter_config *ctr)
-{
- unsigned long pc = mfspr(SPRN_PA6T_SIAR);
- int is_kernel = is_kernel_addr(pc);
- u64 val;
- int i;
- u64 mmcr0;
-
- /* disable perfmon counting until rfid */
- mmcr0 = mfspr(SPRN_PA6T_MMCR0);
- mtspr(SPRN_PA6T_MMCR0, mmcr0 | PA6T_MMCR0_HANDDIS);
-
- /* Record samples. We've got one global bit for whether a sample
- * was taken, so add it for any counter that triggered overflow.
- */
- for (i = 0; i < cur_cpu_spec->num_pmcs; i++) {
- val = ctr_read(i);
- if (val & (0x1UL << 39)) { /* Overflow bit set */
- if (oprofile_running && ctr[i].enabled) {
- if (mmcr0 & PA6T_MMCR0_SIARLOG)
- oprofile_add_ext_sample(pc, regs, i, is_kernel);
- ctr_write(i, reset_value[i]);
- } else {
- ctr_write(i, 0UL);
- }
- }
- }
-
- /* Restore mmcr0 to a good known value since the PMI changes it */
- mmcr0 = mmcr0_val | PA6T_MMCR0_HANDDIS;
- mtspr(SPRN_PA6T_MMCR0, mmcr0);
-}
-
-struct op_powerpc_model op_model_pa6t = {
- .reg_setup = pa6t_reg_setup,
- .cpu_setup = pa6t_cpu_setup,
- .start = pa6t_start,
- .stop = pa6t_stop,
- .handle_interrupt = pa6t_handle_interrupt,
-};
diff --git a/arch/powerpc/oprofile/op_model_power4.c b/arch/powerpc/oprofile/op_model_power4.c
deleted file mode 100644
index 2ae6b86ff97b..000000000000
--- a/arch/powerpc/oprofile/op_model_power4.c
+++ /dev/null
@@ -1,438 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (C) 2004 Anton Blanchard <anton@au.ibm.com>, IBM
- * Added mmcra[slot] support:
- * Copyright (C) 2006-2007 Will Schmidt <willschm@us.ibm.com>, IBM
- */
-
-#include <linux/oprofile.h>
-#include <linux/smp.h>
-#include <asm/firmware.h>
-#include <asm/ptrace.h>
-#include <asm/processor.h>
-#include <asm/cputable.h>
-#include <asm/rtas.h>
-#include <asm/oprofile_impl.h>
-#include <asm/reg.h>
-
-#define dbg(args...)
-#define OPROFILE_PM_PMCSEL_MSK 0xffULL
-#define OPROFILE_PM_UNIT_SHIFT 60
-#define OPROFILE_PM_UNIT_MSK 0xfULL
-#define OPROFILE_MAX_PMC_NUM 3
-#define OPROFILE_PMSEL_FIELD_WIDTH 8
-#define OPROFILE_UNIT_FIELD_WIDTH 4
-#define MMCRA_SIAR_VALID_MASK 0x10000000ULL
-
-static unsigned long reset_value[OP_MAX_COUNTER];
-
-static int oprofile_running;
-static int use_slot_nums;
-
-/* mmcr values are set in power4_reg_setup, used in power4_cpu_setup */
-static u32 mmcr0_val;
-static u64 mmcr1_val;
-static u64 mmcra_val;
-static u32 cntr_marked_events;
-
-static int power7_marked_instr_event(u64 mmcr1)
-{
- u64 psel, unit;
- int pmc, cntr_marked_events = 0;
-
- /* Given the MMCR1 value, look at the field for each counter to
- * determine if it is a marked event. Code based on the function
- * power7_marked_instr_event() in file arch/powerpc/perf/power7-pmu.c.
- */
- for (pmc = 0; pmc < 4; pmc++) {
- psel = mmcr1 & (OPROFILE_PM_PMCSEL_MSK
- << (OPROFILE_MAX_PMC_NUM - pmc)
- * OPROFILE_PMSEL_FIELD_WIDTH);
- psel = (psel >> ((OPROFILE_MAX_PMC_NUM - pmc)
- * OPROFILE_PMSEL_FIELD_WIDTH)) & ~1ULL;
- unit = mmcr1 & (OPROFILE_PM_UNIT_MSK
- << (OPROFILE_PM_UNIT_SHIFT
- - (pmc * OPROFILE_PMSEL_FIELD_WIDTH )));
- unit = unit >> (OPROFILE_PM_UNIT_SHIFT
- - (pmc * OPROFILE_PMSEL_FIELD_WIDTH));
-
- switch (psel >> 4) {
- case 2:
- cntr_marked_events |= (pmc == 1 || pmc == 3) << pmc;
- break;
- case 3:
- if (psel == 0x3c) {
- cntr_marked_events |= (pmc == 0) << pmc;
- break;
- }
-
- if (psel == 0x3e) {
- cntr_marked_events |= (pmc != 1) << pmc;
- break;
- }
-
- cntr_marked_events |= 1 << pmc;
- break;
- case 4:
- case 5:
- cntr_marked_events |= (unit == 0xd) << pmc;
- break;
- case 6:
- if (psel == 0x64)
- cntr_marked_events |= (pmc >= 2) << pmc;
- break;
- case 8:
- cntr_marked_events |= (unit == 0xd) << pmc;
- break;
- }
- }
- return cntr_marked_events;
-}
-
-static int power4_reg_setup(struct op_counter_config *ctr,
- struct op_system_config *sys,
- int num_ctrs)
-{
- int i;
-
- /*
- * The performance counter event settings are given in the mmcr0,
- * mmcr1 and mmcra values passed from the user in the
- * op_system_config structure (sys variable).
- */
- mmcr0_val = sys->mmcr0;
- mmcr1_val = sys->mmcr1;
- mmcra_val = sys->mmcra;
-
- /* Power 7+ and newer architectures:
- * Determine which counter events in the group (the group of events is
- * specified by the bit settings in the MMCR1 register) are marked
- * events for use in the interrupt handler. Do the calculation once
- * before OProfile starts. Information is used in the interrupt
- * handler. Starting with Power 7+ we only record the sample for
- * marked events if the SIAR valid bit is set. For non marked events
- * the sample is always recorded.
- */
- if (pvr_version_is(PVR_POWER7p))
- cntr_marked_events = power7_marked_instr_event(mmcr1_val);
- else
- cntr_marked_events = 0; /* For older processors, set the bit map
- * to zero so the sample will always
- * be recorded.
- */
-
- for (i = 0; i < cur_cpu_spec->num_pmcs; ++i)
- reset_value[i] = 0x80000000UL - ctr[i].count;
-
- /* setup user and kernel profiling */
- if (sys->enable_kernel)
- mmcr0_val &= ~MMCR0_KERNEL_DISABLE;
- else
- mmcr0_val |= MMCR0_KERNEL_DISABLE;
-
- if (sys->enable_user)
- mmcr0_val &= ~MMCR0_PROBLEM_DISABLE;
- else
- mmcr0_val |= MMCR0_PROBLEM_DISABLE;
-
- if (pvr_version_is(PVR_POWER4) || pvr_version_is(PVR_POWER4p) ||
- pvr_version_is(PVR_970) || pvr_version_is(PVR_970FX) ||
- pvr_version_is(PVR_970MP) || pvr_version_is(PVR_970GX) ||
- pvr_version_is(PVR_POWER5) || pvr_version_is(PVR_POWER5p))
- use_slot_nums = 1;
-
- return 0;
-}
-
-extern void ppc_enable_pmcs(void);
-
-/*
- * Older CPUs require the MMCRA sample bit to be always set, but newer
- * CPUs only want it set for some groups. Eventually we will remove all
- * knowledge of this bit in the kernel, oprofile userspace should be
- * setting it when required.
- *
- * In order to keep current installations working we force the bit for
- * those older CPUs. Once everyone has updated their oprofile userspace we
- * can remove this hack.
- */
-static inline int mmcra_must_set_sample(void)
-{
- if (pvr_version_is(PVR_POWER4) || pvr_version_is(PVR_POWER4p) ||
- pvr_version_is(PVR_970) || pvr_version_is(PVR_970FX) ||
- pvr_version_is(PVR_970MP) || pvr_version_is(PVR_970GX))
- return 1;
-
- return 0;
-}
-
-static int power4_cpu_setup(struct op_counter_config *ctr)
-{
- unsigned int mmcr0 = mmcr0_val;
- unsigned long mmcra = mmcra_val;
-
- ppc_enable_pmcs();
-
- /* set the freeze bit */
- mmcr0 |= MMCR0_FC;
- mtspr(SPRN_MMCR0, mmcr0);
-
- mmcr0 |= MMCR0_FCM1|MMCR0_PMXE|MMCR0_FCECE;
- mmcr0 |= MMCR0_PMC1CE|MMCR0_PMCjCE;
- mtspr(SPRN_MMCR0, mmcr0);
-
- mtspr(SPRN_MMCR1, mmcr1_val);
-
- if (mmcra_must_set_sample())
- mmcra |= MMCRA_SAMPLE_ENABLE;
- mtspr(SPRN_MMCRA, mmcra);
-
- dbg("setup on cpu %d, mmcr0 %lx\n", smp_processor_id(),
- mfspr(SPRN_MMCR0));
- dbg("setup on cpu %d, mmcr1 %lx\n", smp_processor_id(),
- mfspr(SPRN_MMCR1));
- dbg("setup on cpu %d, mmcra %lx\n", smp_processor_id(),
- mfspr(SPRN_MMCRA));
-
- return 0;
-}
-
-static int power4_start(struct op_counter_config *ctr)
-{
- int i;
- unsigned int mmcr0;
-
- /* set the PMM bit (see comment below) */
- mtmsr(mfmsr() | MSR_PMM);
-
- for (i = 0; i < cur_cpu_spec->num_pmcs; ++i) {
- if (ctr[i].enabled) {
- classic_ctr_write(i, reset_value[i]);
- } else {
- classic_ctr_write(i, 0);
- }
- }
-
- mmcr0 = mfspr(SPRN_MMCR0);
-
- /*
- * We must clear the PMAO bit on some (GQ) chips. Just do it
- * all the time
- */
- mmcr0 &= ~MMCR0_PMAO;
-
- /*
- * now clear the freeze bit, counting will not start until we
- * rfid from this exception, because only at that point will
- * the PMM bit be cleared
- */
- mmcr0 &= ~MMCR0_FC;
- mtspr(SPRN_MMCR0, mmcr0);
-
- oprofile_running = 1;
-
- dbg("start on cpu %d, mmcr0 %x\n", smp_processor_id(), mmcr0);
- return 0;
-}
-
-static void power4_stop(void)
-{
- unsigned int mmcr0;
-
- /* freeze counters */
- mmcr0 = mfspr(SPRN_MMCR0);
- mmcr0 |= MMCR0_FC;
- mtspr(SPRN_MMCR0, mmcr0);
-
- oprofile_running = 0;
-
- dbg("stop on cpu %d, mmcr0 %x\n", smp_processor_id(), mmcr0);
-
- mb();
-}
-
-/* Fake functions used by canonicalize_pc */
-static void __used hypervisor_bucket(void)
-{
-}
-
-static void __used rtas_bucket(void)
-{
-}
-
-static void __used kernel_unknown_bucket(void)
-{
-}
-
-/*
- * On GQ and newer the MMCRA stores the HV and PR bits at the time
- * the SIAR was sampled. We use that to work out if the SIAR was sampled in
- * the hypervisor, our exception vectors or RTAS.
- * If the MMCRA_SAMPLE_ENABLE bit is set, we can use the MMCRA[slot] bits
- * to more accurately identify the address of the sampled instruction. The
- * mmcra[slot] bits represent the slot number of a sampled instruction
- * within an instruction group. The slot will contain a value between 1
- * and 5 if MMCRA_SAMPLE_ENABLE is set, otherwise 0.
- */
-static unsigned long get_pc(struct pt_regs *regs)
-{
- unsigned long pc = mfspr(SPRN_SIAR);
- unsigned long mmcra;
- unsigned long slot;
-
- /* Can't do much about it */
- if (!cur_cpu_spec->oprofile_mmcra_sihv)
- return pc;
-
- mmcra = mfspr(SPRN_MMCRA);
-
- if (use_slot_nums && (mmcra & MMCRA_SAMPLE_ENABLE)) {
- slot = ((mmcra & MMCRA_SLOT) >> MMCRA_SLOT_SHIFT);
- if (slot > 1)
- pc += 4 * (slot - 1);
- }
-
- /* Were we in the hypervisor? */
- if (firmware_has_feature(FW_FEATURE_LPAR) &&
- (mmcra & cur_cpu_spec->oprofile_mmcra_sihv))
- /* function descriptor madness */
- return *((unsigned long *)hypervisor_bucket);
-
- /* We were in userspace, nothing to do */
- if (mmcra & cur_cpu_spec->oprofile_mmcra_sipr)
- return pc;
-
-#ifdef CONFIG_PPC_RTAS
- /* Were we in RTAS? */
- if (pc >= rtas.base && pc < (rtas.base + rtas.size))
- /* function descriptor madness */
- return *((unsigned long *)rtas_bucket);
-#endif
-
- /* Were we in our exception vectors or SLB real mode miss handler? */
- if (pc < 0x1000000UL)
- return (unsigned long)__va(pc);
-
- /* Not sure where we were */
- if (!is_kernel_addr(pc))
- /* function descriptor madness */
- return *((unsigned long *)kernel_unknown_bucket);
-
- return pc;
-}
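The MMCRA[slot] correction in get_pc() above can be pictured with a small hypothetical sketch (adjust_pc_for_slot() is an illustrative name, not a kernel symbol): a slot value of 1..5 advances the sampled address beyond the SIAR value by four bytes per instruction.

#include <stdio.h>

static unsigned long adjust_pc_for_slot(unsigned long siar, unsigned long slot)
{
	if (slot > 1)
		siar += 4 * (slot - 1);   /* 4 bytes per PowerPC instruction */
	return siar;
}

int main(void)
{
	/* group starts at 0x12340, sampled instruction sits in slot 3 */
	printf("0x%lx\n", adjust_pc_for_slot(0x12340UL, 3));  /* prints 0x12348 */
	return 0;
}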
-
-static int get_kernel(unsigned long pc, unsigned long mmcra)
-{
- int is_kernel;
-
- if (!cur_cpu_spec->oprofile_mmcra_sihv) {
- is_kernel = is_kernel_addr(pc);
- } else {
- is_kernel = ((mmcra & cur_cpu_spec->oprofile_mmcra_sipr) == 0);
- }
-
- return is_kernel;
-}
-
-static bool pmc_overflow(unsigned long val)
-{
- if ((int)val < 0)
- return true;
-
- /*
- * Events on POWER7 can roll back if a speculative event doesn't
- * eventually complete. Unfortunately in some rare cases they will
- * raise a performance monitor exception. We need to catch this to
- * ensure we reset the PMC. In all cases the PMC will be 256 or less
- * cycles from overflow.
- *
- * We only do this if the first pass fails to find any overflowing
- * PMCs because a user might set a period of less than 256 and we
- * don't want to mistakenly reset them.
- */
- if (pvr_version_is(PVR_POWER7) && ((0x80000000 - val) <= 256))
- return true;
-
- return false;
-}
-
-static void power4_handle_interrupt(struct pt_regs *regs,
- struct op_counter_config *ctr)
-{
- unsigned long pc;
- int is_kernel;
- int val;
- int i;
- unsigned int mmcr0;
- unsigned long mmcra;
- bool siar_valid = false;
-
- mmcra = mfspr(SPRN_MMCRA);
-
- pc = get_pc(regs);
- is_kernel = get_kernel(pc, mmcra);
-
- /* set the PMM bit (see comment below) */
- mtmsr(mfmsr() | MSR_PMM);
-
- /* Check that the SIAR valid bit in MMCRA is set to 1. */
- if ((mmcra & MMCRA_SIAR_VALID_MASK) == MMCRA_SIAR_VALID_MASK)
- siar_valid = true;
-
- for (i = 0; i < cur_cpu_spec->num_pmcs; ++i) {
- val = classic_ctr_read(i);
- if (pmc_overflow(val)) {
- if (oprofile_running && ctr[i].enabled) {
- /* Power 7+ and newer architectures:
- * If the event is a marked event, then only
- * save the sample if the SIAR valid bit is
- * set. If the event is not marked, then
- * always save the sample.
- * Note, the Sample enable bit in the MMCRA
- * register must be set to 1 if the group
- * contains a marked event.
- */
- if ((siar_valid &&
- (cntr_marked_events & (1 << i)))
- || !(cntr_marked_events & (1 << i)))
- oprofile_add_ext_sample(pc, regs, i,
- is_kernel);
-
- classic_ctr_write(i, reset_value[i]);
- } else {
- classic_ctr_write(i, 0);
- }
- }
- }
-
- mmcr0 = mfspr(SPRN_MMCR0);
-
- /* reset the perfmon trigger */
- mmcr0 |= MMCR0_PMXE;
-
- /*
- * We must clear the PMAO bit on some (GQ) chips. Just do it
- * all the time
- */
- mmcr0 &= ~MMCR0_PMAO;
-
- /* Clear the appropriate bits in the MMCRA */
- mmcra &= ~cur_cpu_spec->oprofile_mmcra_clear;
- mtspr(SPRN_MMCRA, mmcra);
-
- /*
- * now clear the freeze bit, counting will not start until we
- * rfid from this exception, because only at that point will
- * the PMM bit be cleared
- */
- mmcr0 &= ~MMCR0_FC;
- mtspr(SPRN_MMCR0, mmcr0);
-}
-
-struct op_powerpc_model op_model_power4 = {
- .reg_setup = power4_reg_setup,
- .cpu_setup = power4_cpu_setup,
- .start = power4_start,
- .stop = power4_stop,
- .handle_interrupt = power4_handle_interrupt,
-};
diff --git a/arch/powerpc/perf/8xx-pmu.c b/arch/powerpc/perf/8xx-pmu.c
index 1ad03c55c88c..308a2e40d7be 100644
--- a/arch/powerpc/perf/8xx-pmu.c
+++ b/arch/powerpc/perf/8xx-pmu.c
@@ -15,6 +15,7 @@
#include <asm/firmware.h>
#include <asm/ptrace.h>
#include <asm/code-patching.h>
+#include <asm/inst.h>
#define PERF_8xx_ID_CPU_CYCLES 1
#define PERF_8xx_ID_HW_INSTRUCTIONS 2
@@ -99,9 +100,6 @@ static int mpc8xx_pmu_add(struct perf_event *event, int flags)
unsigned long target = patch_site_addr(&patch__itlbmiss_perf);
patch_branch_site(&patch__itlbmiss_exit_1, target, 0);
-#ifndef CONFIG_PIN_TLB_TEXT
- patch_branch_site(&patch__itlbmiss_exit_2, target, 0);
-#endif
}
val = itlb_miss_counter;
break;
@@ -110,8 +108,6 @@ static int mpc8xx_pmu_add(struct perf_event *event, int flags)
unsigned long target = patch_site_addr(&patch__dtlbmiss_perf);
patch_branch_site(&patch__dtlbmiss_exit_1, target, 0);
- patch_branch_site(&patch__dtlbmiss_exit_2, target, 0);
- patch_branch_site(&patch__dtlbmiss_exit_3, target, 0);
}
val = dtlb_miss_counter;
break;
@@ -157,9 +153,11 @@ static void mpc8xx_pmu_read(struct perf_event *event)
static void mpc8xx_pmu_del(struct perf_event *event, int flags)
{
+ ppc_inst_t insn = ppc_inst(PPC_RAW_MFSPR(10, SPRN_SPRG_SCRATCH2));
+
mpc8xx_pmu_read(event);
- /* If it was the last user, stop counting to avoid useles overhead */
+ /* If it was the last user, stop counting to avoid useless overhead */
switch (event_type(event)) {
case PERF_8xx_ID_CPU_CYCLES:
break;
@@ -168,27 +166,12 @@ static void mpc8xx_pmu_del(struct perf_event *event, int flags)
mtspr(SPRN_ICTRL, 7);
break;
case PERF_8xx_ID_ITLB_LOAD_MISS:
- if (atomic_dec_return(&itlb_miss_ref) == 0) {
- /* mfspr r10, SPRN_SPRG_SCRATCH0 */
- unsigned int insn = PPC_INST_MFSPR | __PPC_RS(R10) |
- __PPC_SPR(SPRN_SPRG_SCRATCH0);
-
+ if (atomic_dec_return(&itlb_miss_ref) == 0)
patch_instruction_site(&patch__itlbmiss_exit_1, insn);
-#ifndef CONFIG_PIN_TLB_TEXT
- patch_instruction_site(&patch__itlbmiss_exit_2, insn);
-#endif
- }
break;
case PERF_8xx_ID_DTLB_LOAD_MISS:
- if (atomic_dec_return(&dtlb_miss_ref) == 0) {
- /* mfspr r10, SPRN_DAR */
- unsigned int insn = PPC_INST_MFSPR | __PPC_RS(R10) |
- __PPC_SPR(SPRN_DAR);
-
+ if (atomic_dec_return(&dtlb_miss_ref) == 0)
patch_instruction_site(&patch__dtlbmiss_exit_1, insn);
- patch_instruction_site(&patch__dtlbmiss_exit_2, insn);
- patch_instruction_site(&patch__dtlbmiss_exit_3, insn);
- }
break;
}
}
diff --git a/arch/powerpc/perf/Makefile b/arch/powerpc/perf/Makefile
index c155dcbb8691..4f53d0b97539 100644
--- a/arch/powerpc/perf/Makefile
+++ b/arch/powerpc/perf/Makefile
@@ -1,12 +1,13 @@
# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_PERF_EVENTS) += callchain.o perf_regs.o
+obj-y += callchain.o callchain_$(BITS).o perf_regs.o
+obj-$(CONFIG_COMPAT) += callchain_32.o
-obj-$(CONFIG_PPC_PERF_CTRS) += core-book3s.o bhrb.o
+obj-$(CONFIG_PPC_PERF_CTRS) += core-book3s.o
obj64-$(CONFIG_PPC_PERF_CTRS) += ppc970-pmu.o power5-pmu.o \
power5+-pmu.o power6-pmu.o power7-pmu.o \
isa207-common.o power8-pmu.o power9-pmu.o \
- generic-compat-pmu.o
+ generic-compat-pmu.o power10-pmu.o bhrb.o
obj32-$(CONFIG_PPC_PERF_CTRS) += mpc7450-pmu.o
obj-$(CONFIG_PPC_POWERNV) += imc-pmu.o
diff --git a/arch/powerpc/perf/bhrb.S b/arch/powerpc/perf/bhrb.S
index 1aa3259716b8..47ba05d5ae76 100644
--- a/arch/powerpc/perf/bhrb.S
+++ b/arch/powerpc/perf/bhrb.S
@@ -21,7 +21,7 @@
_GLOBAL(read_bhrb)
cmpldi r3,31
bgt 1f
- ld r4,bhrb_table@got(r2)
+ LOAD_REG_ADDR(r4, bhrb_table)
sldi r3,r3,3
add r3,r4,r3
mtctr r3
diff --git a/arch/powerpc/perf/callchain.c b/arch/powerpc/perf/callchain.c
index cbc251981209..082f6d0308a4 100644
--- a/arch/powerpc/perf/callchain.c
+++ b/arch/powerpc/perf/callchain.c
@@ -11,15 +11,12 @@
#include <linux/uaccess.h>
#include <linux/mm.h>
#include <asm/ptrace.h>
-#include <asm/pgtable.h>
#include <asm/sigcontext.h>
#include <asm/ucontext.h>
#include <asm/vdso.h>
-#ifdef CONFIG_PPC64
-#include "../kernel/ppc32.h"
-#endif
#include <asm/pte-walk.h>
+#include "callchain.h"
/*
* Is sp valid as the address of the next kernel stack frame after prev_sp?
@@ -43,7 +40,7 @@ static int valid_next_sp(unsigned long sp, unsigned long prev_sp)
return 0;
}
-void
+void __no_sanitize_address
perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
{
unsigned long sp, next_sp;
@@ -102,358 +99,6 @@ perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *re
}
}
-#ifdef CONFIG_PPC64
-/*
- * On 64-bit we don't want to invoke hash_page on user addresses from
- * interrupt context, so if the access faults, we read the page tables
- * to find which page (if any) is mapped and access it directly.
- */
-static int read_user_stack_slow(void __user *ptr, void *buf, int nb)
-{
- int ret = -EFAULT;
- pgd_t *pgdir;
- pte_t *ptep, pte;
- unsigned shift;
- unsigned long addr = (unsigned long) ptr;
- unsigned long offset;
- unsigned long pfn, flags;
- void *kaddr;
-
- pgdir = current->mm->pgd;
- if (!pgdir)
- return -EFAULT;
-
- local_irq_save(flags);
- ptep = find_current_mm_pte(pgdir, addr, NULL, &shift);
- if (!ptep)
- goto err_out;
- if (!shift)
- shift = PAGE_SHIFT;
-
- /* align address to page boundary */
- offset = addr & ((1UL << shift) - 1);
-
- pte = READ_ONCE(*ptep);
- if (!pte_present(pte) || !pte_user(pte))
- goto err_out;
- pfn = pte_pfn(pte);
- if (!page_is_ram(pfn))
- goto err_out;
-
- /* no highmem to worry about here */
- kaddr = pfn_to_kaddr(pfn);
- memcpy(buf, kaddr + offset, nb);
- ret = 0;
-err_out:
- local_irq_restore(flags);
- return ret;
-}
-
-static int read_user_stack_64(unsigned long __user *ptr, unsigned long *ret)
-{
- if ((unsigned long)ptr > TASK_SIZE - sizeof(unsigned long) ||
- ((unsigned long)ptr & 7))
- return -EFAULT;
-
- if (!probe_user_read(ret, ptr, sizeof(*ret)))
- return 0;
-
- return read_user_stack_slow(ptr, ret, 8);
-}
-
-static int read_user_stack_32(unsigned int __user *ptr, unsigned int *ret)
-{
- if ((unsigned long)ptr > TASK_SIZE - sizeof(unsigned int) ||
- ((unsigned long)ptr & 3))
- return -EFAULT;
-
- if (!probe_user_read(ret, ptr, sizeof(*ret)))
- return 0;
-
- return read_user_stack_slow(ptr, ret, 4);
-}
-
-static inline int valid_user_sp(unsigned long sp, int is_64)
-{
- if (!sp || (sp & 7) || sp > (is_64 ? TASK_SIZE : 0x100000000UL) - 32)
- return 0;
- return 1;
-}
-
-/*
- * 64-bit user processes use the same stack frame for RT and non-RT signals.
- */
-struct signal_frame_64 {
- char dummy[__SIGNAL_FRAMESIZE];
- struct ucontext uc;
- unsigned long unused[2];
- unsigned int tramp[6];
- struct siginfo *pinfo;
- void *puc;
- struct siginfo info;
- char abigap[288];
-};
-
-static int is_sigreturn_64_address(unsigned long nip, unsigned long fp)
-{
- if (nip == fp + offsetof(struct signal_frame_64, tramp))
- return 1;
- if (vdso64_rt_sigtramp && current->mm->context.vdso_base &&
- nip == current->mm->context.vdso_base + vdso64_rt_sigtramp)
- return 1;
- return 0;
-}
-
-/*
- * Do some sanity checking on the signal frame pointed to by sp.
- * We check the pinfo and puc pointers in the frame.
- */
-static int sane_signal_64_frame(unsigned long sp)
-{
- struct signal_frame_64 __user *sf;
- unsigned long pinfo, puc;
-
- sf = (struct signal_frame_64 __user *) sp;
- if (read_user_stack_64((unsigned long __user *) &sf->pinfo, &pinfo) ||
- read_user_stack_64((unsigned long __user *) &sf->puc, &puc))
- return 0;
- return pinfo == (unsigned long) &sf->info &&
- puc == (unsigned long) &sf->uc;
-}
-
-static void perf_callchain_user_64(struct perf_callchain_entry_ctx *entry,
- struct pt_regs *regs)
-{
- unsigned long sp, next_sp;
- unsigned long next_ip;
- unsigned long lr;
- long level = 0;
- struct signal_frame_64 __user *sigframe;
- unsigned long __user *fp, *uregs;
-
- next_ip = perf_instruction_pointer(regs);
- lr = regs->link;
- sp = regs->gpr[1];
- perf_callchain_store(entry, next_ip);
-
- while (entry->nr < entry->max_stack) {
- fp = (unsigned long __user *) sp;
- if (!valid_user_sp(sp, 1) || read_user_stack_64(fp, &next_sp))
- return;
- if (level > 0 && read_user_stack_64(&fp[2], &next_ip))
- return;
-
- /*
- * Note: the next_sp - sp >= signal frame size check
- * is true when next_sp < sp, which can happen when
- * transitioning from an alternate signal stack to the
- * normal stack.
- */
- if (next_sp - sp >= sizeof(struct signal_frame_64) &&
- (is_sigreturn_64_address(next_ip, sp) ||
- (level <= 1 && is_sigreturn_64_address(lr, sp))) &&
- sane_signal_64_frame(sp)) {
- /*
- * This looks like an signal frame
- */
- sigframe = (struct signal_frame_64 __user *) sp;
- uregs = sigframe->uc.uc_mcontext.gp_regs;
- if (read_user_stack_64(&uregs[PT_NIP], &next_ip) ||
- read_user_stack_64(&uregs[PT_LNK], &lr) ||
- read_user_stack_64(&uregs[PT_R1], &sp))
- return;
- level = 0;
- perf_callchain_store_context(entry, PERF_CONTEXT_USER);
- perf_callchain_store(entry, next_ip);
- continue;
- }
-
- if (level == 0)
- next_ip = lr;
- perf_callchain_store(entry, next_ip);
- ++level;
- sp = next_sp;
- }
-}
-
-#else /* CONFIG_PPC64 */
-/*
- * On 32-bit we just access the address and let hash_page create a
- * HPTE if necessary, so there is no need to fall back to reading
- * the page tables. Since this is called at interrupt level,
- * do_page_fault() won't treat a DSI as a page fault.
- */
-static int read_user_stack_32(unsigned int __user *ptr, unsigned int *ret)
-{
- if ((unsigned long)ptr > TASK_SIZE - sizeof(unsigned int) ||
- ((unsigned long)ptr & 3))
- return -EFAULT;
-
- return probe_user_read(ret, ptr, sizeof(*ret));
-}
-
-static inline void perf_callchain_user_64(struct perf_callchain_entry_ctx *entry,
- struct pt_regs *regs)
-{
-}
-
-static inline int valid_user_sp(unsigned long sp, int is_64)
-{
- if (!sp || (sp & 7) || sp > TASK_SIZE - 32)
- return 0;
- return 1;
-}
-
-#define __SIGNAL_FRAMESIZE32 __SIGNAL_FRAMESIZE
-#define sigcontext32 sigcontext
-#define mcontext32 mcontext
-#define ucontext32 ucontext
-#define compat_siginfo_t struct siginfo
-
-#endif /* CONFIG_PPC64 */
-
-/*
- * Layout for non-RT signal frames
- */
-struct signal_frame_32 {
- char dummy[__SIGNAL_FRAMESIZE32];
- struct sigcontext32 sctx;
- struct mcontext32 mctx;
- int abigap[56];
-};
-
-/*
- * Layout for RT signal frames
- */
-struct rt_signal_frame_32 {
- char dummy[__SIGNAL_FRAMESIZE32 + 16];
- compat_siginfo_t info;
- struct ucontext32 uc;
- int abigap[56];
-};
-
-static int is_sigreturn_32_address(unsigned int nip, unsigned int fp)
-{
- if (nip == fp + offsetof(struct signal_frame_32, mctx.mc_pad))
- return 1;
- if (vdso32_sigtramp && current->mm->context.vdso_base &&
- nip == current->mm->context.vdso_base + vdso32_sigtramp)
- return 1;
- return 0;
-}
-
-static int is_rt_sigreturn_32_address(unsigned int nip, unsigned int fp)
-{
- if (nip == fp + offsetof(struct rt_signal_frame_32,
- uc.uc_mcontext.mc_pad))
- return 1;
- if (vdso32_rt_sigtramp && current->mm->context.vdso_base &&
- nip == current->mm->context.vdso_base + vdso32_rt_sigtramp)
- return 1;
- return 0;
-}
-
-static int sane_signal_32_frame(unsigned int sp)
-{
- struct signal_frame_32 __user *sf;
- unsigned int regs;
-
- sf = (struct signal_frame_32 __user *) (unsigned long) sp;
- if (read_user_stack_32((unsigned int __user *) &sf->sctx.regs, &regs))
- return 0;
- return regs == (unsigned long) &sf->mctx;
-}
-
-static int sane_rt_signal_32_frame(unsigned int sp)
-{
- struct rt_signal_frame_32 __user *sf;
- unsigned int regs;
-
- sf = (struct rt_signal_frame_32 __user *) (unsigned long) sp;
- if (read_user_stack_32((unsigned int __user *) &sf->uc.uc_regs, &regs))
- return 0;
- return regs == (unsigned long) &sf->uc.uc_mcontext;
-}
-
-static unsigned int __user *signal_frame_32_regs(unsigned int sp,
- unsigned int next_sp, unsigned int next_ip)
-{
- struct mcontext32 __user *mctx = NULL;
- struct signal_frame_32 __user *sf;
- struct rt_signal_frame_32 __user *rt_sf;
-
- /*
- * Note: the next_sp - sp >= signal frame size check
- * is true when next_sp < sp, for example, when
- * transitioning from an alternate signal stack to the
- * normal stack.
- */
- if (next_sp - sp >= sizeof(struct signal_frame_32) &&
- is_sigreturn_32_address(next_ip, sp) &&
- sane_signal_32_frame(sp)) {
- sf = (struct signal_frame_32 __user *) (unsigned long) sp;
- mctx = &sf->mctx;
- }
-
- if (!mctx && next_sp - sp >= sizeof(struct rt_signal_frame_32) &&
- is_rt_sigreturn_32_address(next_ip, sp) &&
- sane_rt_signal_32_frame(sp)) {
- rt_sf = (struct rt_signal_frame_32 __user *) (unsigned long) sp;
- mctx = &rt_sf->uc.uc_mcontext;
- }
-
- if (!mctx)
- return NULL;
- return mctx->mc_gregs;
-}
-
-static void perf_callchain_user_32(struct perf_callchain_entry_ctx *entry,
- struct pt_regs *regs)
-{
- unsigned int sp, next_sp;
- unsigned int next_ip;
- unsigned int lr;
- long level = 0;
- unsigned int __user *fp, *uregs;
-
- next_ip = perf_instruction_pointer(regs);
- lr = regs->link;
- sp = regs->gpr[1];
- perf_callchain_store(entry, next_ip);
-
- while (entry->nr < entry->max_stack) {
- fp = (unsigned int __user *) (unsigned long) sp;
- if (!valid_user_sp(sp, 0) || read_user_stack_32(fp, &next_sp))
- return;
- if (level > 0 && read_user_stack_32(&fp[1], &next_ip))
- return;
-
- uregs = signal_frame_32_regs(sp, next_sp, next_ip);
- if (!uregs && level <= 1)
- uregs = signal_frame_32_regs(sp, next_sp, lr);
- if (uregs) {
- /*
- * This looks like an signal frame, so restart
- * the stack trace with the values in it.
- */
- if (read_user_stack_32(&uregs[PT_NIP], &next_ip) ||
- read_user_stack_32(&uregs[PT_LNK], &lr) ||
- read_user_stack_32(&uregs[PT_R1], &sp))
- return;
- level = 0;
- perf_callchain_store_context(entry, PERF_CONTEXT_USER);
- perf_callchain_store(entry, next_ip);
- continue;
- }
-
- if (level == 0)
- next_ip = lr;
- perf_callchain_store(entry, next_ip);
- ++level;
- sp = next_sp;
- }
-}
-
void
perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
{
diff --git a/arch/powerpc/perf/callchain.h b/arch/powerpc/perf/callchain.h
new file mode 100644
index 000000000000..19a8d051ddf1
--- /dev/null
+++ b/arch/powerpc/perf/callchain.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _POWERPC_PERF_CALLCHAIN_H
+#define _POWERPC_PERF_CALLCHAIN_H
+
+void perf_callchain_user_64(struct perf_callchain_entry_ctx *entry,
+ struct pt_regs *regs);
+void perf_callchain_user_32(struct perf_callchain_entry_ctx *entry,
+ struct pt_regs *regs);
+
+static inline bool invalid_user_sp(unsigned long sp)
+{
+ unsigned long mask = is_32bit_task() ? 3 : 7;
+ unsigned long top = STACK_TOP - (is_32bit_task() ? 16 : 32);
+
+ return (!sp || (sp & mask) || (sp > top));
+}
+
+/*
+ * On 32-bit we just access the address and let hash_page create a
+ * HPTE if necessary, so there is no need to fall back to reading
+ * the page tables. Since this is called at interrupt level,
+ * do_page_fault() won't treat a DSI as a page fault.
+ */
+static inline int __read_user_stack(const void __user *ptr, void *ret,
+ size_t size)
+{
+ unsigned long addr = (unsigned long)ptr;
+
+ if (addr > TASK_SIZE - size || (addr & (size - 1)))
+ return -EFAULT;
+
+ return copy_from_user_nofault(ret, ptr, size);
+}
+
+#endif /* _POWERPC_PERF_CALLCHAIN_H */
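
The header above funnels both word sizes through one guard: a user pointer is rejected if it runs past TASK_SIZE or is not naturally aligned for the access size, before copy_from_user_nofault() is even attempted. Below is a minimal user-space sketch of that guard; FAKE_TASK_SIZE and stack_read_guard() are illustrative stand-ins, not kernel symbols.

#include <assert.h>
#include <stddef.h>

/* Illustrative stand-in for the kernel's TASK_SIZE. */
#define FAKE_TASK_SIZE (1ULL << 46)

/* Mirror of the guard in __read_user_stack(): reject out-of-range or
 * misaligned pointers before attempting any access. */
static int stack_read_guard(unsigned long long addr, size_t size)
{
        if (addr > FAKE_TASK_SIZE - size || (addr & (size - 1)))
                return -1;      /* would be -EFAULT in the kernel */
        return 0;
}

int main(void)
{
        assert(stack_read_guard(0x1000, 8) == 0);                /* aligned, in range */
        assert(stack_read_guard(0x1004, 8) == -1);               /* 8-byte read, 4-byte aligned */
        assert(stack_read_guard(FAKE_TASK_SIZE - 4, 8) == -1);   /* crosses TASK_SIZE */
        return 0;
}
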
diff --git a/arch/powerpc/perf/callchain_32.c b/arch/powerpc/perf/callchain_32.c
new file mode 100644
index 000000000000..ea8cfe3806dc
--- /dev/null
+++ b/arch/powerpc/perf/callchain_32.c
@@ -0,0 +1,178 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Performance counter callchain support - powerpc architecture code
+ *
+ * Copyright © 2009 Paul Mackerras, IBM Corporation.
+ */
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/perf_event.h>
+#include <linux/percpu.h>
+#include <linux/uaccess.h>
+#include <linux/mm.h>
+#include <asm/ptrace.h>
+#include <asm/sigcontext.h>
+#include <asm/ucontext.h>
+#include <asm/vdso.h>
+#include <asm/pte-walk.h>
+
+#include "callchain.h"
+
+#ifdef CONFIG_PPC64
+#include <asm/syscalls_32.h>
+#else /* CONFIG_PPC64 */
+
+#define __SIGNAL_FRAMESIZE32 __SIGNAL_FRAMESIZE
+#define sigcontext32 sigcontext
+#define mcontext32 mcontext
+#define ucontext32 ucontext
+#define compat_siginfo_t struct siginfo
+
+#endif /* CONFIG_PPC64 */
+
+static int read_user_stack_32(const unsigned int __user *ptr, unsigned int *ret)
+{
+ return __read_user_stack(ptr, ret, sizeof(*ret));
+}
+
+/*
+ * Layout for non-RT signal frames
+ */
+struct signal_frame_32 {
+ char dummy[__SIGNAL_FRAMESIZE32];
+ struct sigcontext32 sctx;
+ struct mcontext32 mctx;
+ int abigap[56];
+};
+
+/*
+ * Layout for RT signal frames
+ */
+struct rt_signal_frame_32 {
+ char dummy[__SIGNAL_FRAMESIZE32 + 16];
+ compat_siginfo_t info;
+ struct ucontext32 uc;
+ int abigap[56];
+};
+
+static int is_sigreturn_32_address(unsigned int nip, unsigned int fp)
+{
+ if (nip == fp + offsetof(struct signal_frame_32, mctx.mc_pad))
+ return 1;
+ if (current->mm->context.vdso &&
+ nip == VDSO32_SYMBOL(current->mm->context.vdso, sigtramp32))
+ return 1;
+ return 0;
+}
+
+static int is_rt_sigreturn_32_address(unsigned int nip, unsigned int fp)
+{
+ if (nip == fp + offsetof(struct rt_signal_frame_32,
+ uc.uc_mcontext.mc_pad))
+ return 1;
+ if (current->mm->context.vdso &&
+ nip == VDSO32_SYMBOL(current->mm->context.vdso, sigtramp_rt32))
+ return 1;
+ return 0;
+}
+
+static int sane_signal_32_frame(unsigned int sp)
+{
+ struct signal_frame_32 __user *sf;
+ unsigned int regs;
+
+ sf = (struct signal_frame_32 __user *) (unsigned long) sp;
+ if (read_user_stack_32((unsigned int __user *) &sf->sctx.regs, &regs))
+ return 0;
+ return regs == (unsigned long) &sf->mctx;
+}
+
+static int sane_rt_signal_32_frame(unsigned int sp)
+{
+ struct rt_signal_frame_32 __user *sf;
+ unsigned int regs;
+
+ sf = (struct rt_signal_frame_32 __user *) (unsigned long) sp;
+ if (read_user_stack_32((unsigned int __user *) &sf->uc.uc_regs, &regs))
+ return 0;
+ return regs == (unsigned long) &sf->uc.uc_mcontext;
+}
+
+static unsigned int __user *signal_frame_32_regs(unsigned int sp,
+ unsigned int next_sp, unsigned int next_ip)
+{
+ struct mcontext32 __user *mctx = NULL;
+ struct signal_frame_32 __user *sf;
+ struct rt_signal_frame_32 __user *rt_sf;
+
+ /*
+ * Note: the next_sp - sp >= signal frame size check
+ * is true when next_sp < sp, for example, when
+ * transitioning from an alternate signal stack to the
+ * normal stack.
+ */
+ if (next_sp - sp >= sizeof(struct signal_frame_32) &&
+ is_sigreturn_32_address(next_ip, sp) &&
+ sane_signal_32_frame(sp)) {
+ sf = (struct signal_frame_32 __user *) (unsigned long) sp;
+ mctx = &sf->mctx;
+ }
+
+ if (!mctx && next_sp - sp >= sizeof(struct rt_signal_frame_32) &&
+ is_rt_sigreturn_32_address(next_ip, sp) &&
+ sane_rt_signal_32_frame(sp)) {
+ rt_sf = (struct rt_signal_frame_32 __user *) (unsigned long) sp;
+ mctx = &rt_sf->uc.uc_mcontext;
+ }
+
+ if (!mctx)
+ return NULL;
+ return mctx->mc_gregs;
+}
+
+void perf_callchain_user_32(struct perf_callchain_entry_ctx *entry,
+ struct pt_regs *regs)
+{
+ unsigned int sp, next_sp;
+ unsigned int next_ip;
+ unsigned int lr;
+ long level = 0;
+ unsigned int __user *fp, *uregs;
+
+ next_ip = perf_instruction_pointer(regs);
+ lr = regs->link;
+ sp = regs->gpr[1];
+ perf_callchain_store(entry, next_ip);
+
+ while (entry->nr < entry->max_stack) {
+ fp = (unsigned int __user *) (unsigned long) sp;
+ if (invalid_user_sp(sp) || read_user_stack_32(fp, &next_sp))
+ return;
+ if (level > 0 && read_user_stack_32(&fp[1], &next_ip))
+ return;
+
+ uregs = signal_frame_32_regs(sp, next_sp, next_ip);
+ if (!uregs && level <= 1)
+ uregs = signal_frame_32_regs(sp, next_sp, lr);
+ if (uregs) {
+ /*
+			 * This looks like a signal frame, so restart
+ * the stack trace with the values in it.
+ */
+ if (read_user_stack_32(&uregs[PT_NIP], &next_ip) ||
+ read_user_stack_32(&uregs[PT_LNK], &lr) ||
+ read_user_stack_32(&uregs[PT_R1], &sp))
+ return;
+ level = 0;
+ perf_callchain_store_context(entry, PERF_CONTEXT_USER);
+ perf_callchain_store(entry, next_ip);
+ continue;
+ }
+
+ if (level == 0)
+ next_ip = lr;
+ perf_callchain_store(entry, next_ip);
+ ++level;
+ sp = next_sp;
+ }
+}
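
The wraparound behaviour relied on by the "next_sp - sp >= sizeof(struct signal_frame_32)" test can be seen in isolation: with unsigned arithmetic the subtraction also yields a huge value when next_sp is below sp, which is exactly the alternate-signal-stack case the comment mentions. A small stand-alone demonstration (the pointer values and frame size below are made up):

#include <stdio.h>

int main(void)
{
        /* Hypothetical stack pointers: next_sp below sp, as happens when
         * returning from an alternate signal stack to the normal stack. */
        unsigned int sp      = 0xbfff2000;
        unsigned int next_sp = 0x7fff1000;
        unsigned int frame   = 0x300;   /* stand-in for sizeof(struct signal_frame_32) */

        /* Unsigned subtraction wraps, so next_sp - sp is huge and the
         * ">= frame size" test still passes. */
        printf("next_sp - sp = 0x%x, >= frame: %d\n",
               next_sp - sp, (next_sp - sp) >= frame);
        return 0;
}
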
diff --git a/arch/powerpc/perf/callchain_64.c b/arch/powerpc/perf/callchain_64.c
new file mode 100644
index 000000000000..488e8a21a11e
--- /dev/null
+++ b/arch/powerpc/perf/callchain_64.c
@@ -0,0 +1,120 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Performance counter callchain support - powerpc architecture code
+ *
+ * Copyright © 2009 Paul Mackerras, IBM Corporation.
+ */
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/perf_event.h>
+#include <linux/percpu.h>
+#include <linux/uaccess.h>
+#include <linux/mm.h>
+#include <asm/ptrace.h>
+#include <asm/sigcontext.h>
+#include <asm/ucontext.h>
+#include <asm/vdso.h>
+#include <asm/pte-walk.h>
+
+#include "callchain.h"
+
+static int read_user_stack_64(const unsigned long __user *ptr, unsigned long *ret)
+{
+ return __read_user_stack(ptr, ret, sizeof(*ret));
+}
+
+/*
+ * 64-bit user processes use the same stack frame for RT and non-RT signals.
+ */
+struct signal_frame_64 {
+ char dummy[__SIGNAL_FRAMESIZE];
+ struct ucontext uc;
+ unsigned long unused[2];
+ unsigned int tramp[6];
+ struct siginfo *pinfo;
+ void *puc;
+ struct siginfo info;
+ char abigap[288];
+};
+
+static int is_sigreturn_64_address(unsigned long nip, unsigned long fp)
+{
+ if (nip == fp + offsetof(struct signal_frame_64, tramp))
+ return 1;
+ if (current->mm->context.vdso &&
+ nip == VDSO64_SYMBOL(current->mm->context.vdso, sigtramp_rt64))
+ return 1;
+ return 0;
+}
+
+/*
+ * Do some sanity checking on the signal frame pointed to by sp.
+ * We check the pinfo and puc pointers in the frame.
+ */
+static int sane_signal_64_frame(unsigned long sp)
+{
+ struct signal_frame_64 __user *sf;
+ unsigned long pinfo, puc;
+
+ sf = (struct signal_frame_64 __user *) sp;
+ if (read_user_stack_64((unsigned long __user *) &sf->pinfo, &pinfo) ||
+ read_user_stack_64((unsigned long __user *) &sf->puc, &puc))
+ return 0;
+ return pinfo == (unsigned long) &sf->info &&
+ puc == (unsigned long) &sf->uc;
+}
+
+void perf_callchain_user_64(struct perf_callchain_entry_ctx *entry,
+ struct pt_regs *regs)
+{
+ unsigned long sp, next_sp;
+ unsigned long next_ip;
+ unsigned long lr;
+ long level = 0;
+ struct signal_frame_64 __user *sigframe;
+ unsigned long __user *fp, *uregs;
+
+ next_ip = perf_instruction_pointer(regs);
+ lr = regs->link;
+ sp = regs->gpr[1];
+ perf_callchain_store(entry, next_ip);
+
+ while (entry->nr < entry->max_stack) {
+ fp = (unsigned long __user *) sp;
+ if (invalid_user_sp(sp) || read_user_stack_64(fp, &next_sp))
+ return;
+ if (level > 0 && read_user_stack_64(&fp[2], &next_ip))
+ return;
+
+ /*
+ * Note: the next_sp - sp >= signal frame size check
+ * is true when next_sp < sp, which can happen when
+ * transitioning from an alternate signal stack to the
+ * normal stack.
+ */
+ if (next_sp - sp >= sizeof(struct signal_frame_64) &&
+ (is_sigreturn_64_address(next_ip, sp) ||
+ (level <= 1 && is_sigreturn_64_address(lr, sp))) &&
+ sane_signal_64_frame(sp)) {
+ /*
+			 * This looks like a signal frame
+ */
+ sigframe = (struct signal_frame_64 __user *) sp;
+ uregs = sigframe->uc.uc_mcontext.gp_regs;
+ if (read_user_stack_64(&uregs[PT_NIP], &next_ip) ||
+ read_user_stack_64(&uregs[PT_LNK], &lr) ||
+ read_user_stack_64(&uregs[PT_R1], &sp))
+ return;
+ level = 0;
+ perf_callchain_store_context(entry, PERF_CONTEXT_USER);
+ perf_callchain_store(entry, next_ip);
+ continue;
+ }
+
+ if (level == 0)
+ next_ip = lr;
+ perf_callchain_store(entry, next_ip);
+ ++level;
+ sp = next_sp;
+ }
+}
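
Both walkers read the next stack pointer from fp[0]; the saved LR comes from fp[2] in the 64-bit walker but fp[1] in the 32-bit one. That follows from the frame headers of the two ABIs: the 64-bit ELF ABI puts the back chain at offset 0, the CR save doubleword at 8 and the LR save doubleword at 16, while the 32-bit SysV ABI has the back chain at 0 and the LR save word at 4. A rough sketch of that layout assumption (field and struct names here are illustrative, not the kernel's):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

struct frame64 {
        uint64_t back_chain;    /* fp[0]: next_sp */
        uint64_t cr_save;
        uint64_t lr_save;       /* fp[2]: next_ip */
};

struct frame32 {
        uint32_t back_chain;    /* fp[0]: next_sp */
        uint32_t lr_save;       /* fp[1]: next_ip */
};

int main(void)
{
        assert(offsetof(struct frame64, lr_save) == 16);
        assert(offsetof(struct frame32, lr_save) == 4);
        return 0;
}
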
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index 3086055bf681..942aa830e110 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -17,6 +17,8 @@
#include <asm/firmware.h>
#include <asm/ptrace.h>
#include <asm/code-patching.h>
+#include <asm/hw_irq.h>
+#include <asm/interrupt.h>
#ifdef CONFIG_PPC64
#include "internal.h"
@@ -37,12 +39,7 @@ struct cpu_hw_events {
struct perf_event *event[MAX_HWEVENTS];
u64 events[MAX_HWEVENTS];
unsigned int flags[MAX_HWEVENTS];
- /*
- * The order of the MMCR array is:
- * - 64-bit, MMCR0, MMCR1, MMCRA, MMCR2
- * - 32-bit, MMCR0, MMCR1, MMCR2
- */
- unsigned long mmcr[4];
+ struct mmcr_regs mmcr;
struct perf_event *limited_counter[MAX_LIMITED_HWCOUNTERS];
u8 limited_hwidx[MAX_LIMITED_HWCOUNTERS];
u64 alternatives[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES];
@@ -59,6 +56,9 @@ struct cpu_hw_events {
struct perf_branch_stack bhrb_stack;
struct perf_branch_entry bhrb_entries[BHRB_MAX_ENTRIES];
u64 ic_init;
+
+ /* Store the PMC values */
+ unsigned long pmcs[MAX_HWEVENTS];
};
static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
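
The open-coded mmcr[4] array, whose slot meanings differed between 32-bit and 64-bit, gives way to a named structure. Judging from the accesses elsewhere in this patch (mmcr.mmcr0, .mmcr1, .mmcr2, .mmcra and .mmcr3), the container presumably looks roughly like the sketch below; the authoritative definition lives in arch/powerpc/include/asm/perf_event_server.h and may differ in ordering and ifdefs.

/* Approximate shape of the register container, inferred from the field
 * accesses in this patch; not the authoritative kernel definition. */
struct mmcr_regs {
        unsigned long mmcr0;
        unsigned long mmcr1;
        unsigned long mmcr2;
        unsigned long mmcra;
        unsigned long mmcr3;    /* ISA v3.1 (Power10) only */
};
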
@@ -77,6 +77,11 @@ static unsigned int freeze_events_kernel = MMCR0_FCS;
/*
* 32-bit doesn't have MMCRA but does have an MMCR2,
* and a few other names are different.
+ * Also 32-bit doesn't have MMCR3, SIER2 and SIER3.
+ * Define them as zero, knowing that any code path accessing
+ * these registers (via mtspr/mfspr) is guarded by a ppmu flag
+ * check for PPMU_ARCH_31, so those paths are never entered
+ * on 32-bit.
*/
#ifdef CONFIG_PPC32
@@ -90,7 +95,12 @@ static unsigned int freeze_events_kernel = MMCR0_FCS;
#define MMCR0_PMCC_U6 0
#define SPRN_MMCRA SPRN_MMCR2
+#define SPRN_MMCR3 0
+#define SPRN_SIER2 0
+#define SPRN_SIER3 0
#define MMCRA_SAMPLE_ENABLE 0
+#define MMCRA_BHRB_DISABLE 0
+#define MMCR0_PMCCEXT 0
static inline unsigned long perf_ip_adjust(struct pt_regs *regs)
{
@@ -105,10 +115,6 @@ static inline void perf_read_regs(struct pt_regs *regs)
{
regs->result = 0;
}
-static inline int perf_intr_is_nmi(struct pt_regs *regs)
-{
- return 0;
-}
static inline int siar_valid(struct pt_regs *regs)
{
@@ -121,7 +127,7 @@ static void ebb_event_add(struct perf_event *event) { }
static void ebb_switch_out(unsigned long mmcr0) { }
static unsigned long ebb_switch_in(bool ebb, struct cpu_hw_events *cpuhw)
{
- return cpuhw->mmcr[0];
+ return cpuhw->mmcr.mmcr0;
}
static inline void power_pmu_bhrb_enable(struct perf_event *event) {}
@@ -133,12 +139,26 @@ static void pmao_restore_workaround(bool ebb) { }
bool is_sier_available(void)
{
+ if (!ppmu)
+ return false;
+
if (ppmu->flags & PPMU_HAS_SIER)
return true;
return false;
}
+/*
+ * Return PMC value corresponding to the
+ * index passed.
+ */
+unsigned long get_pmcs_ext_regs(int idx)
+{
+ struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
+
+ return cpuhw->pmcs[idx];
+}
+
static bool regs_use_siar(struct pt_regs *regs)
{
/*
@@ -150,7 +170,7 @@ static bool regs_use_siar(struct pt_regs *regs)
* they have not been setup using perf_read_regs() and so regs->result
* is something random.
*/
- return ((TRAP(regs) == 0xf00) && regs->result);
+ return ((TRAP(regs) == INTERRUPT_PERFMON) && regs->result);
}
/*
@@ -204,7 +224,7 @@ static inline void perf_get_data_addr(struct perf_event *event, struct pt_regs *
if (!(mmcra & MMCRA_SAMPLE_ENABLE) || sdar_valid)
*addrp = mfspr(SPRN_SDAR);
- if (is_kernel_addr(mfspr(SPRN_SDAR)) && perf_allow_kernel(&event->attr) != 0)
+ if (is_kernel_addr(mfspr(SPRN_SDAR)) && event->attr.exclude_kernel)
*addrp = 0;
}
@@ -246,11 +266,32 @@ static inline u32 perf_flags_from_msr(struct pt_regs *regs)
static inline u32 perf_get_misc_flags(struct pt_regs *regs)
{
bool use_siar = regs_use_siar(regs);
+ unsigned long mmcra = regs->dsisr;
+ int marked = mmcra & MMCRA_SAMPLE_ENABLE;
if (!use_siar)
return perf_flags_from_msr(regs);
/*
+	 * Check the address in SIAR to identify the
+	 * privilege level, since the SIER[MSR_HV, MSR_PR]
+	 * bits are not set for marked events on power10
+	 * DD1.
+ */
+ if (marked && (ppmu->flags & PPMU_P10_DD1)) {
+ unsigned long siar = mfspr(SPRN_SIAR);
+ if (siar) {
+ if (is_kernel_addr(siar))
+ return PERF_RECORD_MISC_KERNEL;
+ return PERF_RECORD_MISC_USER;
+ } else {
+ if (is_kernel_addr(regs->nip))
+ return PERF_RECORD_MISC_KERNEL;
+ return PERF_RECORD_MISC_USER;
+ }
+ }
+
+ /*
* If we don't have flags in MMCRA, rather than using
* the MSR, we intuit the flags from the address in
* SIAR which should give slightly more reliable
@@ -300,6 +341,13 @@ static inline void perf_read_regs(struct pt_regs *regs)
* If the PMU doesn't update the SIAR for non marked events use
* pt_regs.
*
+ * If regs is a kernel interrupt, always use SIAR. Some PMUs have an
+ * issue with regs_sipr not being in synch with SIAR in interrupt entry
+ * and return sequences, which can result in regs_sipr being true for
+ * kernel interrupts and SIAR, which has the effect of causing samples
+ * to pile up at mtmsrd MSR[EE] 0->1 or pending irq replay around
+ * interrupt entry/exit.
+ *
* If the PMU has HV/PR flags then check to see if they
* place the exception in userspace. If so, use pt_regs. In
* continuous sampling mode the SIAR and the PMU exception are
@@ -308,7 +356,7 @@ static inline void perf_read_regs(struct pt_regs *regs)
* hypervisor samples as well as samples in the kernel with
* interrupts off hence the userspace check.
*/
- if (TRAP(regs) != 0xf00)
+ if (TRAP(regs) != INTERRUPT_PERFMON)
use_siar = 0;
else if ((ppmu->flags & PPMU_NO_SIAR))
use_siar = 0;
@@ -316,6 +364,8 @@ static inline void perf_read_regs(struct pt_regs *regs)
use_siar = 1;
else if ((ppmu->flags & PPMU_NO_CONT_SAMPLING))
use_siar = 0;
+ else if (!user_mode(regs))
+ use_siar = 1;
else if (!(ppmu->flags & PPMU_NO_SIPR) && regs_sipr(regs))
use_siar = 0;
else
@@ -325,15 +375,6 @@ static inline void perf_read_regs(struct pt_regs *regs)
}
/*
- * If interrupts were soft-disabled when a PMU interrupt occurs, treat
- * it as an NMI.
- */
-static inline int perf_intr_is_nmi(struct pt_regs *regs)
-{
- return (regs->softe & IRQS_DISABLED);
-}
-
-/*
* On processors like P7+ that have the SIAR-Valid bit, marked instructions
* must be sampled only if the SIAR-valid bit is set.
*
@@ -346,7 +387,14 @@ static inline int siar_valid(struct pt_regs *regs)
int marked = mmcra & MMCRA_SAMPLE_ENABLE;
if (marked) {
- if (ppmu->flags & PPMU_HAS_SIER)
+ /*
+ * SIER[SIAR_VALID] is not set for some
+ * marked events on power10 DD1, so drop
+ * the check for SIER[SIAR_VALID] and return true.
+ */
+ if (ppmu->flags & PPMU_P10_DD1)
+ return 0x1;
+ else if (ppmu->flags & PPMU_HAS_SIER)
return regs->dar & SIER_SIAR_VALID;
if (ppmu->flags & PPMU_SIAR_VALID)
@@ -418,14 +466,16 @@ static __u64 power_pmu_bhrb_to(u64 addr)
__u64 target;
if (is_kernel_addr(addr)) {
- if (probe_kernel_read(&instr, (void *)addr, sizeof(instr)))
+ if (copy_from_kernel_nofault(&instr, (void *)addr,
+ sizeof(instr)))
return 0;
return branch_target(&instr);
}
/* Userspace: need copy instruction here then translate it */
- if (probe_user_read(&instr, (unsigned int __user *)addr, sizeof(instr)))
+ if (copy_from_user_nofault(&instr, (unsigned int __user *)addr,
+ sizeof(instr)))
return 0;
target = branch_target(&instr);
@@ -464,8 +514,11 @@ static void power_pmu_bhrb_read(struct perf_event *event, struct cpu_hw_events *
* addresses at this point. Check the privileges before
* exporting it to userspace (avoid exposure of regions
* where we could have speculative execution)
+	 * In case of ISA v3.1, BHRB will capture only user-space addresses,
+	 * hence check the PMU flags before filtering out kernel addresses.
*/
- if (is_kernel_addr(addr) && perf_allow_kernel(&event->attr) != 0)
+ if (!(ppmu->flags & PPMU_ARCH_31) &&
+ is_kernel_addr(addr) && event->attr.exclude_kernel)
continue;
/* Branches are read most recent first (ie. mfbhrb 0 is
@@ -518,6 +571,7 @@ static void power_pmu_bhrb_read(struct perf_event *event, struct cpu_hw_events *
}
}
cpuhw->bhrb_stack.nr = u_index;
+ cpuhw->bhrb_stack.hw_idx = -1ULL;
return;
}
@@ -583,11 +637,16 @@ static void ebb_switch_out(unsigned long mmcr0)
current->thread.sdar = mfspr(SPRN_SDAR);
current->thread.mmcr0 = mmcr0 & MMCR0_USER_MASK;
current->thread.mmcr2 = mfspr(SPRN_MMCR2) & MMCR2_USER_MASK;
+ if (ppmu->flags & PPMU_ARCH_31) {
+ current->thread.mmcr3 = mfspr(SPRN_MMCR3);
+ current->thread.sier2 = mfspr(SPRN_SIER2);
+ current->thread.sier3 = mfspr(SPRN_SIER3);
+ }
}
static unsigned long ebb_switch_in(bool ebb, struct cpu_hw_events *cpuhw)
{
- unsigned long mmcr0 = cpuhw->mmcr[0];
+ unsigned long mmcr0 = cpuhw->mmcr.mmcr0;
if (!ebb)
goto out;
@@ -621,7 +680,13 @@ static unsigned long ebb_switch_in(bool ebb, struct cpu_hw_events *cpuhw)
* unfreeze counters, it should not set exclude_xxx in its events and
* instead manage the MMCR2 entirely by itself.
*/
- mtspr(SPRN_MMCR2, cpuhw->mmcr[3] | current->thread.mmcr2);
+ mtspr(SPRN_MMCR2, cpuhw->mmcr.mmcr2 | current->thread.mmcr2);
+
+ if (ppmu->flags & PPMU_ARCH_31) {
+ mtspr(SPRN_MMCR3, current->thread.mmcr3);
+ mtspr(SPRN_SIER2, current->thread.sier2);
+ mtspr(SPRN_SIER3, current->thread.sier3);
+ }
out:
return mmcr0;
}
@@ -711,6 +776,34 @@ static void pmao_restore_workaround(bool ebb)
mtspr(SPRN_PMC6, pmcs[5]);
}
+/*
+ * If the perf subsystem wants performance monitor interrupts as soon as
+ * possible (e.g., to sample the instruction address and stack chain),
+ * this should return true. The IRQ masking code can then enable MSR[EE]
+ * in some places (e.g., interrupt handlers) that allows PMI interrupts
+ * in some places (e.g., interrupt handlers), which allows PMI interrupts
+ * through and improves the accuracy of profiles, at the cost of some performance.
+ * The PMU counters can be enabled by other means (e.g., sysfs raw SPR
+ * access), but in that case there is no need for prompt PMI handling.
+ *
+ * This currently returns true if any perf counter is being used. It
+ * could possibly return false if only events are being counted rather than
+ * samples being taken, but for now this is good enough.
+ */
+bool power_pmu_wants_prompt_pmi(void)
+{
+ struct cpu_hw_events *cpuhw;
+
+ /*
+ * This could simply test local_paca->pmcregs_in_use if that were not
+ * under ifdef KVM.
+ */
+ if (!ppmu)
+ return false;
+
+ cpuhw = this_cpu_ptr(&cpu_hw_events);
+ return cpuhw->n_events;
+}
#endif /* CONFIG_PPC64 */
static void perf_event_interrupt(struct pt_regs *regs);
@@ -793,6 +886,19 @@ static void write_pmc(int idx, unsigned long val)
}
}
+static int any_pmc_overflown(struct cpu_hw_events *cpuhw)
+{
+ int i, idx;
+
+ for (i = 0; i < cpuhw->n_events; i++) {
+ idx = cpuhw->event[i]->hw.idx;
+ if ((idx) && ((int)read_pmc(idx) < 0))
+ return idx;
+ }
+
+ return 0;
+}
+
/* Called from sysrq_handle_showregs() */
void perf_event_print_debug(void)
{
@@ -842,6 +948,11 @@ void perf_event_print_debug(void)
pr_info("EBBRR: %016lx BESCR: %016lx\n",
mfspr(SPRN_EBBRR), mfspr(SPRN_BESCR));
}
+
+ if (ppmu->flags & PPMU_ARCH_31) {
+ pr_info("MMCR3: %016lx SIER2: %016lx SIER3: %016lx\n",
+ mfspr(SPRN_MMCR3), mfspr(SPRN_SIER2), mfspr(SPRN_SIER3));
+ }
#endif
pr_info("SIAR: %016lx SDAR: %016lx SIER: %016lx\n",
mfspr(SPRN_SIAR), sdar, sier);
@@ -857,7 +968,7 @@ void perf_event_print_debug(void)
*/
static int power_check_constraints(struct cpu_hw_events *cpuhw,
u64 event_id[], unsigned int cflags[],
- int n_ev)
+ int n_ev, struct perf_event **event)
{
unsigned long mask, value, nv;
unsigned long smasks[MAX_HWEVENTS], svalues[MAX_HWEVENTS];
@@ -880,7 +991,7 @@ static int power_check_constraints(struct cpu_hw_events *cpuhw,
event_id[i] = cpuhw->alternatives[i][0];
}
if (ppmu->get_constraint(event_id[i], &cpuhw->amasks[i][0],
- &cpuhw->avalues[i][0]))
+ &cpuhw->avalues[i][0], event[i]->attr.config1))
return -1;
}
value = mask = 0;
@@ -915,7 +1026,8 @@ static int power_check_constraints(struct cpu_hw_events *cpuhw,
for (j = 1; j < n_alt[i]; ++j)
ppmu->get_constraint(cpuhw->alternatives[i][j],
&cpuhw->amasks[i][j],
- &cpuhw->avalues[i][j]);
+ &cpuhw->avalues[i][j],
+ event[i]->attr.config1);
}
/* enumerate all possibilities and see if any will work */
@@ -1030,7 +1142,7 @@ static u64 check_and_compute_delta(u64 prev, u64 val)
/*
* POWER7 can roll back counter values, if the new value is smaller
* than the previous value it will cause the delta and the counter to
- * have bogus values unless we rolled a counter over. If a coutner is
+ * have bogus values unless we rolled a counter over. If a counter is
* rolled back, it will be smaller, but within 256, which is the maximum
* number of events to rollback at once. If we detect a rollback
* return 0. This can lead to a small lack of precision in the
@@ -1193,7 +1305,7 @@ static void write_mmcr0(struct cpu_hw_events *cpuhw, unsigned long mmcr0)
static void power_pmu_disable(struct pmu *pmu)
{
struct cpu_hw_events *cpuhw;
- unsigned long flags, mmcr0, val;
+ unsigned long flags, mmcr0, val, mmcra;
if (!ppmu)
return;
@@ -1211,11 +1323,16 @@ static void power_pmu_disable(struct pmu *pmu)
/*
* Set the 'freeze counters' bit, clear EBE/BHRBA/PMCC/PMAO/FC56
+ * Also clear PMXE to disable PMI's getting triggered in some
+ * corner cases during PMU disable.
*/
val = mmcr0 = mfspr(SPRN_MMCR0);
val |= MMCR0_FC;
val &= ~(MMCR0_EBE | MMCR0_BHRBA | MMCR0_PMCC | MMCR0_PMAO |
- MMCR0_FC56);
+ MMCR0_PMXE | MMCR0_FC56);
+ /* Set mmcr0 PMCCEXT for p10 */
+ if (ppmu->flags & PPMU_ARCH_31)
+ val |= MMCR0_PMCCEXT;
/*
* The barrier is to make sure the mtspr has been
@@ -1227,11 +1344,46 @@ static void power_pmu_disable(struct pmu *pmu)
isync();
/*
+ * Some corner cases could clear the PMU counter overflow
+ * while a masked PMI is pending. One such case is when
+ * a PMI happens during interrupt replay and perf counter
+ * values are cleared by PMU callbacks before replay.
+ *
+ * Disable the interrupt by clearing the paca bit for PMI
+ * since we are disabling the PMU now. Otherwise provide a
+ * warning if there is PMI pending, but no counter is found
+ * overflown.
+ *
+ * Since power_pmu_disable runs under local_irq_save, it
+ * could happen that code hits a PMC overflow without PMI
+ * pending in paca. Hence only clear PMI pending if it was
+ * set.
+ *
+ * If a PMI is pending, then MSR[EE] must be disabled (because
+	 * the masked PMI handler disables EE). So it is safe to
+ * call clear_pmi_irq_pending().
+ */
+ if (pmi_irq_pending())
+ clear_pmi_irq_pending();
+
+ val = mmcra = cpuhw->mmcr.mmcra;
+
+ /*
* Disable instruction sampling if it was enabled
*/
- if (cpuhw->mmcr[2] & MMCRA_SAMPLE_ENABLE) {
- mtspr(SPRN_MMCRA,
- cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
+ if (cpuhw->mmcr.mmcra & MMCRA_SAMPLE_ENABLE)
+ val &= ~MMCRA_SAMPLE_ENABLE;
+
+ /* Disable BHRB via mmcra (BHRBRD) for p10 */
+ if (ppmu->flags & PPMU_ARCH_31)
+ val |= MMCRA_BHRB_DISABLE;
+
+ /*
+ * Write SPRN_MMCRA if mmcra has either disabled
+ * instruction sampling or BHRB.
+ */
+ if (val != mmcra) {
+ mtspr(SPRN_MMCRA, mmcra);
mb();
isync();
}
@@ -1305,18 +1457,29 @@ static void power_pmu_enable(struct pmu *pmu)
* (possibly updated for removal of events).
*/
if (!cpuhw->n_added) {
- mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
- mtspr(SPRN_MMCR1, cpuhw->mmcr[1]);
+ /*
+ * If there is any active event with an overflown PMC
+ * value, set back PACA_IRQ_PMI which would have been
+ * cleared in power_pmu_disable().
+ */
+ hard_irq_disable();
+ if (any_pmc_overflown(cpuhw))
+ set_pmi_irq_pending();
+
+ mtspr(SPRN_MMCRA, cpuhw->mmcr.mmcra & ~MMCRA_SAMPLE_ENABLE);
+ mtspr(SPRN_MMCR1, cpuhw->mmcr.mmcr1);
+ if (ppmu->flags & PPMU_ARCH_31)
+ mtspr(SPRN_MMCR3, cpuhw->mmcr.mmcr3);
goto out_enable;
}
/*
* Clear all MMCR settings and recompute them for the new set of events.
*/
- memset(cpuhw->mmcr, 0, sizeof(cpuhw->mmcr));
+ memset(&cpuhw->mmcr, 0, sizeof(cpuhw->mmcr));
if (ppmu->compute_mmcr(cpuhw->events, cpuhw->n_events, hwc_index,
- cpuhw->mmcr, cpuhw->event)) {
+ &cpuhw->mmcr, cpuhw->event, ppmu->flags)) {
/* shouldn't ever get here */
printk(KERN_ERR "oops compute_mmcr failed\n");
goto out;
@@ -1330,11 +1493,11 @@ static void power_pmu_enable(struct pmu *pmu)
*/
event = cpuhw->event[0];
if (event->attr.exclude_user)
- cpuhw->mmcr[0] |= MMCR0_FCP;
+ cpuhw->mmcr.mmcr0 |= MMCR0_FCP;
if (event->attr.exclude_kernel)
- cpuhw->mmcr[0] |= freeze_events_kernel;
+ cpuhw->mmcr.mmcr0 |= freeze_events_kernel;
if (event->attr.exclude_hv)
- cpuhw->mmcr[0] |= MMCR0_FCHV;
+ cpuhw->mmcr.mmcr0 |= MMCR0_FCHV;
}
/*
@@ -1343,12 +1506,15 @@ static void power_pmu_enable(struct pmu *pmu)
* Then unfreeze the events.
*/
ppc_set_pmu_inuse(1);
- mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
- mtspr(SPRN_MMCR1, cpuhw->mmcr[1]);
- mtspr(SPRN_MMCR0, (cpuhw->mmcr[0] & ~(MMCR0_PMC1CE | MMCR0_PMCjCE))
+ mtspr(SPRN_MMCRA, cpuhw->mmcr.mmcra & ~MMCRA_SAMPLE_ENABLE);
+ mtspr(SPRN_MMCR1, cpuhw->mmcr.mmcr1);
+ mtspr(SPRN_MMCR0, (cpuhw->mmcr.mmcr0 & ~(MMCR0_PMC1CE | MMCR0_PMCjCE))
| MMCR0_FC);
if (ppmu->flags & PPMU_ARCH_207S)
- mtspr(SPRN_MMCR2, cpuhw->mmcr[3]);
+ mtspr(SPRN_MMCR2, cpuhw->mmcr.mmcr2);
+
+ if (ppmu->flags & PPMU_ARCH_31)
+ mtspr(SPRN_MMCR3, cpuhw->mmcr.mmcr3);
/*
* Read off any pre-existing events that need to move
@@ -1399,7 +1565,7 @@ static void power_pmu_enable(struct pmu *pmu)
perf_event_update_userpage(event);
}
cpuhw->n_limited = n_lim;
- cpuhw->mmcr[0] |= MMCR0_PMXE | MMCR0_FCECE;
+ cpuhw->mmcr.mmcr0 |= MMCR0_PMXE | MMCR0_FCECE;
out_enable:
pmao_restore_workaround(ebb);
@@ -1415,9 +1581,9 @@ static void power_pmu_enable(struct pmu *pmu)
/*
* Enable instruction sampling if necessary
*/
- if (cpuhw->mmcr[2] & MMCRA_SAMPLE_ENABLE) {
+ if (cpuhw->mmcr.mmcra & MMCRA_SAMPLE_ENABLE) {
mb();
- mtspr(SPRN_MMCRA, cpuhw->mmcr[2]);
+ mtspr(SPRN_MMCRA, cpuhw->mmcr.mmcra);
}
out:
@@ -1501,7 +1667,7 @@ static int power_pmu_add(struct perf_event *event, int ef_flags)
if (check_excludes(cpuhw->event, cpuhw->flags, n0, 1))
goto out;
- if (power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n0 + 1))
+ if (power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n0 + 1, cpuhw->event))
goto out;
event->hw.config = cpuhw->events[n0];
@@ -1514,9 +1680,16 @@ nocheck:
ret = 0;
out:
if (has_branch_stack(event)) {
- power_pmu_bhrb_enable(event);
- cpuhw->bhrb_filter = ppmu->bhrb_filter_map(
- event->attr.branch_sample_type);
+ u64 bhrb_filter = -1;
+
+ if (ppmu->bhrb_filter_map)
+ bhrb_filter = ppmu->bhrb_filter_map(
+ event->attr.branch_sample_type);
+
+ if (bhrb_filter != -1) {
+ cpuhw->bhrb_filter = bhrb_filter;
+ power_pmu_bhrb_enable(event);
+ }
}
perf_pmu_enable(event->pmu);
@@ -1547,7 +1720,7 @@ static void power_pmu_del(struct perf_event *event, int ef_flags)
cpuhw->flags[i-1] = cpuhw->flags[i];
}
--cpuhw->n_events;
- ppmu->disable_pmc(event->hw.idx - 1, cpuhw->mmcr);
+ ppmu->disable_pmc(event->hw.idx - 1, &cpuhw->mmcr);
if (event->hw.idx) {
write_pmc(event->hw.idx, 0);
event->hw.idx = 0;
@@ -1568,7 +1741,7 @@ static void power_pmu_del(struct perf_event *event, int ef_flags)
}
if (cpuhw->n_events == 0) {
/* disable exceptions if no events are running */
- cpuhw->mmcr[0] &= ~(MMCR0_PMXE | MMCR0_FCECE);
+ cpuhw->mmcr.mmcr0 &= ~(MMCR0_PMXE | MMCR0_FCECE);
}
if (has_branch_stack(event))
@@ -1704,7 +1877,7 @@ static int power_pmu_commit_txn(struct pmu *pmu)
n = cpuhw->n_events;
if (check_excludes(cpuhw->event, cpuhw->flags, 0, n))
return -EAGAIN;
- i = power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n);
+ i = power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n, cpuhw->event);
if (i < 0)
return -EAGAIN;
@@ -1792,7 +1965,7 @@ static void hw_perf_event_destroy(struct perf_event *event)
static int hw_perf_cache_event(u64 config, u64 *eventp)
{
unsigned long type, op, result;
- int ev;
+ u64 ev;
if (!ppmu->cache_events)
return -EINVAL;
@@ -1831,14 +2004,13 @@ static bool is_event_blacklisted(u64 ev)
static int power_pmu_event_init(struct perf_event *event)
{
u64 ev;
- unsigned long flags;
+ unsigned long flags, irq_flags;
struct perf_event *ctrs[MAX_HWEVENTS];
u64 events[MAX_HWEVENTS];
unsigned int cflags[MAX_HWEVENTS];
int n;
int err;
struct cpu_hw_events *cpuhw;
- u64 bhrb_filter;
if (!ppmu)
return -ENOENT;
@@ -1877,6 +2049,17 @@ static int power_pmu_event_init(struct perf_event *event)
return -ENOENT;
}
+ /*
+	 * PMU config registers have reserved fields, and some specific
+	 * values for certain bit fields are also reserved.
+	 * For example, MMCRA[61:62] is the Random Sampling Mode (SM)
+	 * field, and the value 0b11 for this field is reserved.
+ * Check for invalid values in attr.config.
+ */
+ if (ppmu->check_attr_config &&
+ ppmu->check_attr_config(event))
+ return -EINVAL;
+
event->hw.config_base = ev;
event->hw.idx = 0;
@@ -1940,21 +2123,43 @@ static int power_pmu_event_init(struct perf_event *event)
if (check_excludes(ctrs, cflags, n, 1))
return -EINVAL;
- cpuhw = &get_cpu_var(cpu_hw_events);
- err = power_check_constraints(cpuhw, events, cflags, n + 1);
+ local_irq_save(irq_flags);
+ cpuhw = this_cpu_ptr(&cpu_hw_events);
+
+ err = power_check_constraints(cpuhw, events, cflags, n + 1, ctrs);
if (has_branch_stack(event)) {
- bhrb_filter = ppmu->bhrb_filter_map(
+ u64 bhrb_filter = -1;
+
+ /*
+ * Currently no PMU supports having multiple branch filters
+ * at the same time. Branch filters are set via MMCRA IFM[32:33]
+ * bits for Power8 and above. Return EOPNOTSUPP when multiple
+ * branch filters are requested in the event attr.
+ *
+		 * When opening an event via perf_event_open(), branch_sample_type
+		 * gets adjusted in perf_copy_attr(): the kernel automatically
+		 * extends it, based on the event modifier settings, to include
+		 * the PERF_SAMPLE_BRANCH_PLM_ALL bits. Hence those bits are
+		 * excluded from the filter count.
+ */
+ if (hweight64(event->attr.branch_sample_type & ~PERF_SAMPLE_BRANCH_PLM_ALL) > 1) {
+ local_irq_restore(irq_flags);
+ return -EOPNOTSUPP;
+ }
+
+ if (ppmu->bhrb_filter_map)
+ bhrb_filter = ppmu->bhrb_filter_map(
event->attr.branch_sample_type);
if (bhrb_filter == -1) {
- put_cpu_var(cpu_hw_events);
+ local_irq_restore(irq_flags);
return -EOPNOTSUPP;
}
cpuhw->bhrb_filter = bhrb_filter;
}
- put_cpu_var(cpu_hw_events);
+ local_irq_restore(irq_flags);
if (err)
return -EINVAL;
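
The new hweight64() test refuses events that ask for more than one branch filter at a time. A rough user-space illustration is below; note that PERF_SAMPLE_BRANCH_PLM_ALL is a kernel-internal define, reconstructed here from the USER/KERNEL/HV bits of the uapi header, and the requested filter combination is hypothetical.

#include <stdio.h>
#include <linux/perf_event.h>

#ifndef PERF_SAMPLE_BRANCH_PLM_ALL
#define PERF_SAMPLE_BRANCH_PLM_ALL \
        (PERF_SAMPLE_BRANCH_USER | PERF_SAMPLE_BRANCH_KERNEL | PERF_SAMPLE_BRANCH_HV)
#endif

int main(void)
{
        /* Two real filters plus the privilege bits that perf_copy_attr()
         * folds in automatically. */
        unsigned long long type = PERF_SAMPLE_BRANCH_ANY_CALL |
                                  PERF_SAMPLE_BRANCH_ANY_RETURN |
                                  PERF_SAMPLE_BRANCH_PLM_ALL;

        /* Same counting as hweight64() after masking off the PLM bits. */
        int filters = __builtin_popcountll(type & ~PERF_SAMPLE_BRANCH_PLM_ALL);

        printf("requested filters: %d -> %s\n", filters,
               filters > 1 ? "rejected (-EOPNOTSUPP)" : "accepted");
        return 0;
}
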
@@ -2022,6 +2227,9 @@ static struct pmu power_pmu = {
.sched_task = power_pmu_sched_task,
};
+#define PERF_SAMPLE_ADDR_TYPE (PERF_SAMPLE_ADDR | \
+ PERF_SAMPLE_PHYS_ADDR | \
+ PERF_SAMPLE_DATA_PAGE_SIZE)
/*
* A counter has overflowed; update its count and record
* things if requested. Note that interrupts are hard-disabled
@@ -2057,7 +2265,17 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
left += period;
if (left <= 0)
left = period;
- record = siar_valid(regs);
+
+ /*
+		 * If the instruction pointer is not requested in the sample
+		 * (PERF_SAMPLE_IP is not set), just record the sample
+		 * irrespective of the SIAR-valid check.
+ */
+ if (event->attr.sample_type & PERF_SAMPLE_IP)
+ record = siar_valid(regs);
+ else
+ record = 1;
+
event->hw.last_period = event->hw.sample_period;
}
if (left < 0x80000000LL)
@@ -2070,6 +2288,17 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
perf_event_update_userpage(event);
/*
+	 * Due to a hardware limitation, SIAR can sometimes sample a kernel
+	 * address even when freeze on supervisor state (kernel) is set in
+	 * MMCR2. Check attr.exclude_kernel and the address to drop the sample
+	 * in these cases.
+ */
+ if (event->attr.exclude_kernel &&
+ (event->attr.sample_type & PERF_SAMPLE_IP) &&
+ is_kernel_addr(mfspr(SPRN_SIAR)))
+ record = 0;
+
+ /*
* Finally record data if requested.
*/
if (record) {
@@ -2077,8 +2306,7 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
perf_sample_data_init(&data, ~0ULL, event->hw.last_period);
- if (event->attr.sample_type &
- (PERF_SAMPLE_ADDR | PERF_SAMPLE_PHYS_ADDR))
+ if (event->attr.sample_type & PERF_SAMPLE_ADDR_TYPE)
perf_get_data_addr(event, regs, &data.addr);
if (event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK) {
@@ -2086,18 +2314,26 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
cpuhw = this_cpu_ptr(&cpu_hw_events);
power_pmu_bhrb_read(event, cpuhw);
data.br_stack = &cpuhw->bhrb_stack;
+ data.sample_flags |= PERF_SAMPLE_BRANCH_STACK;
}
if (event->attr.sample_type & PERF_SAMPLE_DATA_SRC &&
- ppmu->get_mem_data_src)
+ ppmu->get_mem_data_src) {
ppmu->get_mem_data_src(&data.data_src, ppmu->flags, regs);
+ data.sample_flags |= PERF_SAMPLE_DATA_SRC;
+ }
- if (event->attr.sample_type & PERF_SAMPLE_WEIGHT &&
- ppmu->get_mem_weight)
- ppmu->get_mem_weight(&data.weight);
-
+ if (event->attr.sample_type & PERF_SAMPLE_WEIGHT_TYPE &&
+ ppmu->get_mem_weight) {
+ ppmu->get_mem_weight(&data.weight.full, event->attr.sample_type);
+ data.sample_flags |= PERF_SAMPLE_WEIGHT_TYPE;
+ }
if (perf_event_overflow(event, &data, regs))
power_pmu_stop(event, 0);
+ } else if (period) {
+ /* Account for interrupt in case of invalid SIAR */
+ if (perf_event_account_interrupt(event))
+ power_pmu_stop(event, 0);
}
}
@@ -2121,12 +2357,10 @@ unsigned long perf_misc_flags(struct pt_regs *regs)
*/
unsigned long perf_instruction_pointer(struct pt_regs *regs)
{
- bool use_siar = regs_use_siar(regs);
+ unsigned long siar = mfspr(SPRN_SIAR);
- if (use_siar && siar_valid(regs))
- return mfspr(SPRN_SIAR) + perf_ip_adjust(regs);
- else if (use_siar)
- return 0; // no valid instruction pointer
+ if (regs_use_siar(regs) && siar_valid(regs) && siar)
+ return siar + perf_ip_adjust(regs);
else
return regs->nip;
}
@@ -2166,9 +2400,7 @@ static void __perf_event_interrupt(struct pt_regs *regs)
int i, j;
struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
struct perf_event *event;
- unsigned long val[8];
int found, active;
- int nmi;
if (cpuhw->n_limited)
freeze_limited_counters(cpuhw, mfspr(SPRN_PMC5),
@@ -2176,20 +2408,14 @@ static void __perf_event_interrupt(struct pt_regs *regs)
perf_read_regs(regs);
- nmi = perf_intr_is_nmi(regs);
- if (nmi)
- nmi_enter();
- else
- irq_enter();
-
/* Read all the PMCs since we'll need them a bunch of times */
for (i = 0; i < ppmu->n_counter; ++i)
- val[i] = read_pmc(i + 1);
+ cpuhw->pmcs[i] = read_pmc(i + 1);
/* Try to find what caused the IRQ */
found = 0;
for (i = 0; i < ppmu->n_counter; ++i) {
- if (!pmc_overflow(val[i]))
+ if (!pmc_overflow(cpuhw->pmcs[i]))
continue;
if (is_limited_pmc(i + 1))
continue; /* these won't generate IRQs */
@@ -2204,10 +2430,18 @@ static void __perf_event_interrupt(struct pt_regs *regs)
event = cpuhw->event[j];
if (event->hw.idx == (i + 1)) {
active = 1;
- record_and_restart(event, val[i], regs);
+ record_and_restart(event, cpuhw->pmcs[i], regs);
break;
}
}
+
+ /*
+ * Clear PACA_IRQ_PMI in case it was set by
+ * set_pmi_irq_pending() when PMU was enabled
+ * after accounting for interrupts.
+ */
+ clear_pmi_irq_pending();
+
if (!active)
/* reset non active counters that have overflowed */
write_pmc(i + 1, 0);
@@ -2218,17 +2452,24 @@ static void __perf_event_interrupt(struct pt_regs *regs)
event = cpuhw->event[i];
if (!event->hw.idx || is_limited_pmc(event->hw.idx))
continue;
- if (pmc_overflow_power7(val[event->hw.idx - 1])) {
+ if (pmc_overflow_power7(cpuhw->pmcs[event->hw.idx - 1])) {
/* event has overflowed in a buggy way*/
found = 1;
record_and_restart(event,
- val[event->hw.idx - 1],
+ cpuhw->pmcs[event->hw.idx - 1],
regs);
}
}
}
- if (!found && !nmi && printk_ratelimit())
- printk(KERN_WARNING "Can't find PMC that caused IRQ\n");
+
+ /*
+	 * During system-wide profiling, or while a specific CPU is monitored for an
+	 * event, some corner cases could cause a PMC to overflow in the idle path. This
+	 * will trigger a PMI after waking up from idle. Since counter values are _not_
+	 * saved/restored in the idle path, this can lead to the "Can't find PMC" message below.
+ */
+ if (unlikely(!found) && !arch_irq_disabled_regs(regs))
+ printk_ratelimited(KERN_WARNING "Can't find PMC that caused IRQ\n");
/*
* Reset MMCR0 to its normal value. This will set PMXE and
@@ -2237,12 +2478,11 @@ static void __perf_event_interrupt(struct pt_regs *regs)
* XXX might want to use MSR.PM to keep the events frozen until
* we get back out of this interrupt.
*/
- write_mmcr0(cpuhw, cpuhw->mmcr[0]);
+ write_mmcr0(cpuhw, cpuhw->mmcr.mmcr0);
+
+ /* Clear the cpuhw->pmcs */
+ memset(&cpuhw->pmcs, 0, sizeof(cpuhw->pmcs));
- if (nmi)
- nmi_exit();
- else
- irq_exit();
}
static void perf_event_interrupt(struct pt_regs *regs)
@@ -2259,12 +2499,39 @@ static int power_pmu_prepare_cpu(unsigned int cpu)
if (ppmu) {
memset(cpuhw, 0, sizeof(*cpuhw));
- cpuhw->mmcr[0] = MMCR0_FC;
+ cpuhw->mmcr.mmcr0 = MMCR0_FC;
}
return 0;
}
-int register_power_pmu(struct power_pmu *pmu)
+static ssize_t pmu_name_show(struct device *cdev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ if (ppmu)
+ return sysfs_emit(buf, "%s", ppmu->name);
+
+ return 0;
+}
+
+static DEVICE_ATTR_RO(pmu_name);
+
+static struct attribute *pmu_caps_attrs[] = {
+ &dev_attr_pmu_name.attr,
+ NULL
+};
+
+static const struct attribute_group pmu_caps_group = {
+ .name = "caps",
+ .attrs = pmu_caps_attrs,
+};
+
+static const struct attribute_group *pmu_caps_groups[] = {
+ &pmu_caps_group,
+ NULL,
+};
+
+int __init register_power_pmu(struct power_pmu *pmu)
{
if (ppmu)
return -EBUSY; /* something's already registered */
@@ -2275,6 +2542,11 @@ int register_power_pmu(struct power_pmu *pmu)
power_pmu.attr_groups = ppmu->attr_groups;
+ if (ppmu->flags & PPMU_ARCH_207S)
+ power_pmu.attr_update = pmu_caps_groups;
+
+ power_pmu.capabilities |= (ppmu->capabilities & PERF_PMU_CAP_EXTENDED_REGS);
+
#ifdef MSR_HV
/*
* Use FCHV to ignore kernel events if MSR.HV is set.
@@ -2290,8 +2562,24 @@ int register_power_pmu(struct power_pmu *pmu)
}
#ifdef CONFIG_PPC64
+static bool pmu_override = false;
+static unsigned long pmu_override_val;
+static void do_pmu_override(void *data)
+{
+ ppc_set_pmu_inuse(1);
+ if (pmu_override_val)
+ mtspr(SPRN_MMCR1, pmu_override_val);
+ mtspr(SPRN_MMCR0, mfspr(SPRN_MMCR0) & ~MMCR0_FC);
+}
+
static int __init init_ppc64_pmu(void)
{
+ if (cpu_has_feature(CPU_FTR_HVMODE) && pmu_override) {
+ pr_warn("disabling perf due to pmu_override= command line option.\n");
+ on_each_cpu(do_pmu_override, NULL, 1);
+ return 0;
+ }
+
/* run through all the pmu drivers one at a time */
if (!init_power5_pmu())
return 0;
@@ -2305,10 +2593,31 @@ static int __init init_ppc64_pmu(void)
return 0;
else if (!init_power9_pmu())
return 0;
+ else if (!init_power10_pmu())
+ return 0;
else if (!init_ppc970_pmu())
return 0;
else
return init_generic_compat_pmu();
}
early_initcall(init_ppc64_pmu);
+
+static int __init pmu_setup(char *str)
+{
+ unsigned long val;
+
+ if (!early_cpu_has_feature(CPU_FTR_HVMODE))
+ return 0;
+
+ pmu_override = true;
+
+ if (kstrtoul(str, 0, &val))
+ val = 0;
+
+ pmu_override_val = val;
+
+ return 1;
+}
+__setup("pmu_override=", pmu_setup);
+
#endif
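
The new pmu_override= early parameter disables the kernel's own PMU usage on HV-mode systems and, on every CPU, writes the supplied value into MMCR1 (if non-zero) and clears MMCR0[FC]. The value is parsed with kstrtoul(str, 0, ...), so decimal, octal and 0x-prefixed hex are all accepted, with parse failures treated as 0. A user-space mirror of just the parsing step, using strtoul as a stand-in and a hypothetical helper name:

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

/* User-space mirror of the pmu_setup() parsing: base 0 accepts "0x..."
 * hex, "0..." octal or plain decimal, like kstrtoul(str, 0, ...).
 * A value of 0 (or a parse error) means "do not touch MMCR1". */
static unsigned long parse_pmu_override(const char *str)
{
        char *end;
        unsigned long val;

        errno = 0;
        val = strtoul(str, &end, 0);
        if (errno || *end != '\0')
                return 0;
        return val;
}

int main(void)
{
        printf("pmu_override=0x1e  -> %#lx\n", parse_pmu_override("0x1e"));
        printf("pmu_override=bogus -> %#lx\n", parse_pmu_override("bogus"));
        return 0;
}
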
diff --git a/arch/powerpc/perf/core-fsl-emb.c b/arch/powerpc/perf/core-fsl-emb.c
index e0e7e276bfd2..ee721f420a7b 100644
--- a/arch/powerpc/perf/core-fsl-emb.c
+++ b/arch/powerpc/perf/core-fsl-emb.c
@@ -31,19 +31,6 @@ static atomic_t num_events;
/* Used to avoid races in calling reserve/release_pmc_hardware */
static DEFINE_MUTEX(pmc_reserve_mutex);
-/*
- * If interrupts were soft-disabled when a PMU interrupt occurs, treat
- * it as an NMI.
- */
-static inline int perf_intr_is_nmi(struct pt_regs *regs)
-{
-#ifdef __powerpc64__
- return (regs->softe & IRQS_DISABLED);
-#else
- return 0;
-#endif
-}
-
static void perf_event_interrupt(struct pt_regs *regs);
/*
@@ -659,13 +646,6 @@ static void perf_event_interrupt(struct pt_regs *regs)
struct perf_event *event;
unsigned long val;
int found = 0;
- int nmi;
-
- nmi = perf_intr_is_nmi(regs);
- if (nmi)
- nmi_enter();
- else
- irq_enter();
for (i = 0; i < ppmu->n_counter; ++i) {
event = cpuhw->event[i];
@@ -690,11 +670,6 @@ static void perf_event_interrupt(struct pt_regs *regs)
mtmsr(mfmsr() | MSR_PMM);
mtpmr(PMRN_PMGC0, PMGC0_PMIE | PMGC0_FCECE);
isync();
-
- if (nmi)
- nmi_exit();
- else
- irq_exit();
}
void hw_perf_event_setup(int cpu)
diff --git a/arch/powerpc/perf/e500-pmu.c b/arch/powerpc/perf/e500-pmu.c
index a59c33bed32a..e3e1a68eb1d5 100644
--- a/arch/powerpc/perf/e500-pmu.c
+++ b/arch/powerpc/perf/e500-pmu.c
@@ -118,12 +118,13 @@ static struct fsl_emb_pmu e500_pmu = {
static int init_e500_pmu(void)
{
- if (!cur_cpu_spec->oprofile_cpu_type)
- return -ENODEV;
+ unsigned int pvr = mfspr(SPRN_PVR);
- if (!strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc/e500mc"))
+	/* e500mc / e5500 */
+ if (PVR_VER(pvr) == PVR_VER_E500MC || PVR_VER(pvr) == PVR_VER_E5500)
num_events = 256;
- else if (strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc/e500"))
+ /* e500 */
+ else if (PVR_VER(pvr) != PVR_VER_E500V1 && PVR_VER(pvr) != PVR_VER_E500V2)
return -ENODEV;
return register_fsl_emb_pmu(&e500_pmu);
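
Instead of matching the oprofile_cpu_type string, the init functions now key off the Processor Version Register: PVR_VER() extracts the version field from the top 16 bits of the PVR and compares it against the PVR_VER_E500* constants. A short sketch of that extraction; the PVR value is illustrative, standing in for what mfspr(SPRN_PVR) returns in the kernel.

#include <stdio.h>
#include <stdint.h>

/* PVR_VER() in the kernel extracts the version field from the
 * Processor Version Register: the top 16 bits. */
#define PVR_VER(pvr)    (((pvr) >> 16) & 0xFFFF)

int main(void)
{
        /* Hypothetical PVR read (mfspr SPRN_PVR in the kernel). */
        uint32_t pvr = 0x80230021;

        printf("PVR version field: 0x%04x\n", (unsigned int)PVR_VER(pvr));
        return 0;
}
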
diff --git a/arch/powerpc/perf/e6500-pmu.c b/arch/powerpc/perf/e6500-pmu.c
index 44ad65da82ed..bd779a2338f8 100644
--- a/arch/powerpc/perf/e6500-pmu.c
+++ b/arch/powerpc/perf/e6500-pmu.c
@@ -107,8 +107,9 @@ static struct fsl_emb_pmu e6500_pmu = {
static int init_e6500_pmu(void)
{
- if (!cur_cpu_spec->oprofile_cpu_type ||
- strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc/e6500"))
+ unsigned int pvr = mfspr(SPRN_PVR);
+
+ if (PVR_VER(pvr) != PVR_VER_E6500)
return -ENODEV;
return register_fsl_emb_pmu(&e6500_pmu);
diff --git a/arch/powerpc/perf/generic-compat-pmu.c b/arch/powerpc/perf/generic-compat-pmu.c
index 5e5a54d5588e..b5c414876ed5 100644
--- a/arch/powerpc/perf/generic-compat-pmu.c
+++ b/arch/powerpc/perf/generic-compat-pmu.c
@@ -14,84 +14,165 @@
*
* 28 24 20 16 12 8 4 0
* | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
- * [ pmc ] [unit ] [ ] m [ pmcxsel ]
- * | |
- * | *- mark
- * |
- * |
- * *- combine
- *
- * Below uses IBM bit numbering.
- *
- * MMCR1[x:y] = unit (PMCxUNIT)
- * MMCR1[24] = pmc1combine[0]
- * MMCR1[25] = pmc1combine[1]
- * MMCR1[26] = pmc2combine[0]
- * MMCR1[27] = pmc2combine[1]
- * MMCR1[28] = pmc3combine[0]
- * MMCR1[29] = pmc3combine[1]
- * MMCR1[30] = pmc4combine[0]
- * MMCR1[31] = pmc4combine[1]
- *
+ * [ pmc ] [ pmcxsel ]
*/
/*
- * Some power9 event codes.
+ * Event codes defined in ISA v3.0B
*/
#define EVENT(_name, _code) _name = _code,
enum {
-EVENT(PM_CYC, 0x0001e)
-EVENT(PM_INST_CMPL, 0x00002)
+ /* Cycles, alternate code */
+ EVENT(PM_CYC_ALT, 0x100f0)
+ /* One or more instructions completed in a cycle */
+ EVENT(PM_CYC_INST_CMPL, 0x100f2)
+ /* Floating-point instruction completed */
+ EVENT(PM_FLOP_CMPL, 0x100f4)
+ /* Instruction ERAT/L1-TLB miss */
+ EVENT(PM_L1_ITLB_MISS, 0x100f6)
+ /* All instructions completed and none available */
+ EVENT(PM_NO_INST_AVAIL, 0x100f8)
+ /* A load-type instruction completed (ISA v3.0+) */
+ EVENT(PM_LD_CMPL, 0x100fc)
+ /* Instruction completed, alternate code (ISA v3.0+) */
+ EVENT(PM_INST_CMPL_ALT, 0x100fe)
+ /* A store-type instruction completed */
+ EVENT(PM_ST_CMPL, 0x200f0)
+ /* Instruction Dispatched */
+ EVENT(PM_INST_DISP, 0x200f2)
+ /* Run_cycles */
+ EVENT(PM_RUN_CYC, 0x200f4)
+ /* Data ERAT/L1-TLB miss/reload */
+ EVENT(PM_L1_DTLB_RELOAD, 0x200f6)
+ /* Taken branch completed */
+ EVENT(PM_BR_TAKEN_CMPL, 0x200fa)
+ /* Demand iCache Miss */
+ EVENT(PM_L1_ICACHE_MISS, 0x200fc)
+ /* L1 Dcache reload from memory */
+ EVENT(PM_L1_RELOAD_FROM_MEM, 0x200fe)
+ /* L1 Dcache store miss */
+ EVENT(PM_ST_MISS_L1, 0x300f0)
+ /* Alternate code for PM_INST_DISP */
+ EVENT(PM_INST_DISP_ALT, 0x300f2)
+ /* Branch direction or target mispredicted */
+ EVENT(PM_BR_MISPREDICT, 0x300f6)
+ /* Data TLB miss/reload */
+ EVENT(PM_DTLB_MISS, 0x300fc)
+ /* Demand LD - L3 Miss (not L2 hit and not L3 hit) */
+ EVENT(PM_DATA_FROM_L3MISS, 0x300fe)
+ /* L1 Dcache load miss */
+ EVENT(PM_LD_MISS_L1, 0x400f0)
+ /* Cycle when instruction(s) dispatched */
+ EVENT(PM_CYC_INST_DISP, 0x400f2)
+ /* Branch or branch target mispredicted */
+ EVENT(PM_BR_MPRED_CMPL, 0x400f6)
+ /* Instructions completed with run latch set */
+ EVENT(PM_RUN_INST_CMPL, 0x400fa)
+ /* Instruction TLB miss/reload */
+ EVENT(PM_ITLB_MISS, 0x400fc)
+ /* Load data not cached */
+ EVENT(PM_LD_NOT_CACHED, 0x400fe)
+ /* Instructions */
+ EVENT(PM_INST_CMPL, 0x500fa)
+ /* Cycles */
+ EVENT(PM_CYC, 0x600f4)
};
#undef EVENT
+/* Table of alternatives, sorted in increasing order of column 0 */
+/* Note that in each row, column 0 must be the smallest */
+static const unsigned int generic_event_alternatives[][MAX_ALT] = {
+ { PM_CYC_ALT, PM_CYC },
+ { PM_INST_CMPL_ALT, PM_INST_CMPL },
+ { PM_INST_DISP, PM_INST_DISP_ALT },
+};
+
+static int generic_get_alternatives(u64 event, unsigned int flags, u64 alt[])
+{
+ int num_alt = 0;
+
+ num_alt = isa207_get_alternatives(event, alt,
+ ARRAY_SIZE(generic_event_alternatives), flags,
+ generic_event_alternatives);
+
+ return num_alt;
+}
+
GENERIC_EVENT_ATTR(cpu-cycles, PM_CYC);
GENERIC_EVENT_ATTR(instructions, PM_INST_CMPL);
+GENERIC_EVENT_ATTR(stalled-cycles-frontend, PM_NO_INST_AVAIL);
+GENERIC_EVENT_ATTR(branch-misses, PM_BR_MPRED_CMPL);
+GENERIC_EVENT_ATTR(cache-misses, PM_LD_MISS_L1);
+
+CACHE_EVENT_ATTR(L1-dcache-load-misses, PM_LD_MISS_L1);
+CACHE_EVENT_ATTR(L1-dcache-store-misses, PM_ST_MISS_L1);
+CACHE_EVENT_ATTR(L1-icache-load-misses, PM_L1_ICACHE_MISS);
+CACHE_EVENT_ATTR(LLC-load-misses, PM_DATA_FROM_L3MISS);
+CACHE_EVENT_ATTR(branch-load-misses, PM_BR_MPRED_CMPL);
+CACHE_EVENT_ATTR(dTLB-load-misses, PM_DTLB_MISS);
+CACHE_EVENT_ATTR(iTLB-load-misses, PM_ITLB_MISS);
static struct attribute *generic_compat_events_attr[] = {
GENERIC_EVENT_PTR(PM_CYC),
GENERIC_EVENT_PTR(PM_INST_CMPL),
+ GENERIC_EVENT_PTR(PM_NO_INST_AVAIL),
+ GENERIC_EVENT_PTR(PM_BR_MPRED_CMPL),
+ GENERIC_EVENT_PTR(PM_LD_MISS_L1),
+ CACHE_EVENT_PTR(PM_LD_MISS_L1),
+ CACHE_EVENT_PTR(PM_ST_MISS_L1),
+ CACHE_EVENT_PTR(PM_L1_ICACHE_MISS),
+ CACHE_EVENT_PTR(PM_DATA_FROM_L3MISS),
+ CACHE_EVENT_PTR(PM_BR_MPRED_CMPL),
+ CACHE_EVENT_PTR(PM_DTLB_MISS),
+ CACHE_EVENT_PTR(PM_ITLB_MISS),
NULL
};
-static struct attribute_group generic_compat_pmu_events_group = {
+static const struct attribute_group generic_compat_pmu_events_group = {
.name = "events",
.attrs = generic_compat_events_attr,
};
PMU_FORMAT_ATTR(event, "config:0-19");
PMU_FORMAT_ATTR(pmcxsel, "config:0-7");
-PMU_FORMAT_ATTR(mark, "config:8");
-PMU_FORMAT_ATTR(combine, "config:10-11");
-PMU_FORMAT_ATTR(unit, "config:12-15");
PMU_FORMAT_ATTR(pmc, "config:16-19");
static struct attribute *generic_compat_pmu_format_attr[] = {
&format_attr_event.attr,
&format_attr_pmcxsel.attr,
- &format_attr_mark.attr,
- &format_attr_combine.attr,
- &format_attr_unit.attr,
&format_attr_pmc.attr,
NULL,
};
-static struct attribute_group generic_compat_pmu_format_group = {
+static const struct attribute_group generic_compat_pmu_format_group = {
.name = "format",
.attrs = generic_compat_pmu_format_attr,
};
+static struct attribute *generic_compat_pmu_caps_attrs[] = {
+ NULL
+};
+
+static struct attribute_group generic_compat_pmu_caps_group = {
+ .name = "caps",
+ .attrs = generic_compat_pmu_caps_attrs,
+};
+
static const struct attribute_group *generic_compat_pmu_attr_groups[] = {
&generic_compat_pmu_format_group,
&generic_compat_pmu_events_group,
+ &generic_compat_pmu_caps_group,
NULL,
};
static int compat_generic_events[] = {
[PERF_COUNT_HW_CPU_CYCLES] = PM_CYC,
[PERF_COUNT_HW_INSTRUCTIONS] = PM_INST_CMPL,
+ [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = PM_NO_INST_AVAIL,
+ [PERF_COUNT_HW_BRANCH_MISSES] = PM_BR_MPRED_CMPL,
+ [PERF_COUNT_HW_CACHE_MISSES] = PM_LD_MISS_L1,
};
#define C(x) PERF_COUNT_HW_CACHE_##x
@@ -101,15 +182,15 @@ static int compat_generic_events[] = {
* 0 means not supported, -1 means nonsensical, other values
* are event codes.
*/
-static int generic_compat_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
+static u64 generic_compat_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
[ C(L1D) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0,
- [ C(RESULT_MISS) ] = 0,
+ [ C(RESULT_MISS) ] = PM_LD_MISS_L1,
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = 0,
- [ C(RESULT_MISS) ] = 0,
+ [ C(RESULT_MISS) ] = PM_ST_MISS_L1,
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = 0,
@@ -119,7 +200,7 @@ static int generic_compat_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
[ C(L1I) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0,
- [ C(RESULT_MISS) ] = 0,
+ [ C(RESULT_MISS) ] = PM_L1_ICACHE_MISS,
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = 0,
@@ -133,7 +214,7 @@ static int generic_compat_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
[ C(LL) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0,
- [ C(RESULT_MISS) ] = 0,
+ [ C(RESULT_MISS) ] = PM_DATA_FROM_L3MISS,
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = 0,
@@ -147,7 +228,7 @@ static int generic_compat_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
[ C(DTLB) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0,
- [ C(RESULT_MISS) ] = 0,
+ [ C(RESULT_MISS) ] = PM_DTLB_MISS,
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = -1,
@@ -161,7 +242,7 @@ static int generic_compat_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
[ C(ITLB) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0,
- [ C(RESULT_MISS) ] = 0,
+ [ C(RESULT_MISS) ] = PM_ITLB_MISS,
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = -1,
@@ -175,7 +256,7 @@ static int generic_compat_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
[ C(BPU) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = 0,
- [ C(RESULT_MISS) ] = 0,
+ [ C(RESULT_MISS) ] = PM_BR_MPRED_CMPL,
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = -1,
@@ -204,13 +285,30 @@ static int generic_compat_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
#undef C
+/*
+ * We set MMCR0[CC5-6RUN] so we can use counters 5 and 6 for
+ * PM_INST_CMPL and PM_CYC.
+ */
+static int generic_compute_mmcr(u64 event[], int n_ev,
+ unsigned int hwc[], struct mmcr_regs *mmcr,
+ struct perf_event *pevents[], u32 flags)
+{
+ int ret;
+
+ ret = isa207_compute_mmcr(event, n_ev, hwc, mmcr, pevents, flags);
+ if (!ret)
+ mmcr->mmcr0 |= MMCR0_C56RUN;
+ return ret;
+}
+
static struct power_pmu generic_compat_pmu = {
- .name = "GENERIC_COMPAT",
+ .name = "ISAv3",
.n_counter = MAX_PMU_COUNTERS,
.add_fields = ISA207_ADD_FIELDS,
.test_adder = ISA207_TEST_ADDER,
- .compute_mmcr = isa207_compute_mmcr,
+ .compute_mmcr = generic_compute_mmcr,
.get_constraint = isa207_get_constraint,
+ .get_alternatives = generic_get_alternatives,
.disable_pmc = isa207_disable_pmc,
.flags = PPMU_HAS_SIER | PPMU_ARCH_207S,
.n_generic = ARRAY_SIZE(compat_generic_events),
@@ -219,10 +317,20 @@ static struct power_pmu generic_compat_pmu = {
.attr_groups = generic_compat_pmu_attr_groups,
};
-int init_generic_compat_pmu(void)
+int __init init_generic_compat_pmu(void)
{
int rc = 0;
+ /*
+ * From ISA v2.07 on, PMU features are architected;
+ * we require >= v3.0 because (a) that has PM_LD_CMPL and
+ * PM_INST_CMPL_ALT, which v2.07 doesn't have, and
+ * (b) we don't expect any non-IBM Power ISA
+ * implementations that conform to v2.07 but not v3.0.
+ */
+ if (!cpu_has_feature(CPU_FTR_ARCH_300))
+ return -ENODEV;
+
rc = register_power_pmu(&generic_compat_pmu);
if (rc)
return rc;
diff --git a/arch/powerpc/perf/hv-24x7.c b/arch/powerpc/perf/hv-24x7.c
index 573e0b309c0c..33c23225fd54 100644
--- a/arch/powerpc/perf/hv-24x7.c
+++ b/arch/powerpc/perf/hv-24x7.c
@@ -20,6 +20,7 @@
#include <asm/io.h>
#include <linux/byteorder/generic.h>
+#include <asm/rtas.h>
#include "hv-24x7.h"
#include "hv-24x7-catalog.h"
#include "hv-common.h"
@@ -30,7 +31,9 @@ static int interface_version;
/* Whether we have to aggregate result data for some domains. */
static bool aggregate_result_elements;
-static bool domain_is_valid(unsigned domain)
+static cpumask_t hv_24x7_cpumask;
+
+static bool domain_is_valid(unsigned int domain)
{
switch (domain) {
#define DOMAIN(n, v, x, c) \
@@ -44,7 +47,7 @@ static bool domain_is_valid(unsigned domain)
}
}
-static bool is_physical_domain(unsigned domain)
+static bool is_physical_domain(unsigned int domain)
{
switch (domain) {
#define DOMAIN(n, v, x, c) \
@@ -57,6 +60,65 @@ static bool is_physical_domain(unsigned domain)
}
}
+/*
+ * The Processor Module Information system parameter allows the platform
+ * to pass certain processor module details to the OS.
+ * Refer to the PAPR+ document for the parameter token value ('43').
+ */
+
+#define PROCESSOR_MODULE_INFO 43
+
+static u32 phys_sockets; /* Physical sockets */
+static u32 phys_chipspersocket; /* Physical chips per socket */
+static u32 phys_coresperchip; /* Physical cores per chip */
+
+/*
+ * read_24x7_sys_info()
+ * Retrieve the number of sockets and chips per socket and cores per
+ * chip details through the get-system-parameter rtas call.
+ */
+void read_24x7_sys_info(void)
+{
+ int call_status, len, ntypes;
+
+ spin_lock(&rtas_data_buf_lock);
+
+ /*
+ * Default the system parameter values (sockets, chips per socket,
+ * cores per chip) to 1.
+ */
+ phys_sockets = 1;
+ phys_chipspersocket = 1;
+ phys_coresperchip = 1;
+
+ call_status = rtas_call(rtas_token("ibm,get-system-parameter"), 3, 1,
+ NULL,
+ PROCESSOR_MODULE_INFO,
+ __pa(rtas_data_buf),
+ RTAS_DATA_BUF_SIZE);
+
+ if (call_status != 0) {
+ pr_err("Error calling get-system-parameter %d\n",
+ call_status);
+ } else {
+ len = be16_to_cpup((__be16 *)&rtas_data_buf[0]);
+ if (len < 8)
+ goto out;
+
+ ntypes = be16_to_cpup((__be16 *)&rtas_data_buf[2]);
+
+ if (!ntypes)
+ goto out;
+
+ phys_sockets = be16_to_cpup((__be16 *)&rtas_data_buf[4]);
+ phys_chipspersocket = be16_to_cpup((__be16 *)&rtas_data_buf[6]);
+ phys_coresperchip = be16_to_cpup((__be16 *)&rtas_data_buf[8]);
+ }
+
+out:
+ spin_unlock(&rtas_data_buf_lock);
+}
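/*
 * A minimal sketch of the expected rtas_data_buf layout for the
 * PROCESSOR_MODULE_INFO parameter, with hypothetical values. Each field is
 * a big-endian 16-bit quantity and the leading length must be at least 8:
 *
 *	offset 0:  00 08	length of the returned data
 *	offset 2:  00 01	ntypes
 *	offset 4:  00 02	sockets		-> phys_sockets = 2
 *	offset 6:  00 01	chips/socket	-> phys_chipspersocket = 1
 *	offset 8:  00 0a	cores/chip	-> phys_coresperchip = 10
 */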
+
/* Domains for which more than one result element are returned for each event. */
static bool domain_needs_aggregation(unsigned int domain)
{
@@ -66,7 +128,7 @@ static bool domain_needs_aggregation(unsigned int domain)
domain <= HV_PERF_DOMAIN_VCPU_REMOTE_NODE));
}
-static const char *domain_name(unsigned domain)
+static const char *domain_name(unsigned int domain)
{
if (!domain_is_valid(domain))
return NULL;
@@ -84,7 +146,7 @@ static const char *domain_name(unsigned domain)
return NULL;
}
-static bool catalog_entry_domain_is_valid(unsigned domain)
+static bool catalog_entry_domain_is_valid(unsigned int domain)
{
/* POWER8 doesn't support virtual domains. */
if (interface_version == 1)
@@ -142,7 +204,7 @@ static struct attribute *format_attrs[] = {
NULL,
};
-static struct attribute_group format_group = {
+static const struct attribute_group format_group = {
.name = "format",
.attrs = format_attrs,
};
@@ -164,14 +226,14 @@ static struct attribute_group event_long_desc_group = {
static struct kmem_cache *hv_page_cache;
-DEFINE_PER_CPU(int, hv_24x7_txn_flags);
-DEFINE_PER_CPU(int, hv_24x7_txn_err);
+static DEFINE_PER_CPU(int, hv_24x7_txn_flags);
+static DEFINE_PER_CPU(int, hv_24x7_txn_err);
struct hv_24x7_hw {
struct perf_event *events[255];
};
-DEFINE_PER_CPU(struct hv_24x7_hw, hv_24x7_hw);
+static DEFINE_PER_CPU(struct hv_24x7_hw, hv_24x7_hw);
/*
* request_buffer and result_buffer are not required to be 4k aligned,
@@ -179,8 +241,8 @@ DEFINE_PER_CPU(struct hv_24x7_hw, hv_24x7_hw);
* the simplest way to ensure that.
*/
#define H24x7_DATA_BUFFER_SIZE 4096
-DEFINE_PER_CPU(char, hv_24x7_reqb[H24x7_DATA_BUFFER_SIZE]) __aligned(4096);
-DEFINE_PER_CPU(char, hv_24x7_resb[H24x7_DATA_BUFFER_SIZE]) __aligned(4096);
+static DEFINE_PER_CPU(char, hv_24x7_reqb[H24x7_DATA_BUFFER_SIZE]) __aligned(4096);
+static DEFINE_PER_CPU(char, hv_24x7_resb[H24x7_DATA_BUFFER_SIZE]) __aligned(4096);
static unsigned int max_num_requests(int interface_version)
{
@@ -196,7 +258,7 @@ static char *event_name(struct hv_24x7_event_data *ev, int *len)
static char *event_desc(struct hv_24x7_event_data *ev, int *len)
{
- unsigned nl = be16_to_cpu(ev->event_name_len);
+ unsigned int nl = be16_to_cpu(ev->event_name_len);
__be16 *desc_len = (__be16 *)(ev->remainder + nl - 2);
*len = be16_to_cpu(*desc_len) - 2;
@@ -205,9 +267,9 @@ static char *event_desc(struct hv_24x7_event_data *ev, int *len)
static char *event_long_desc(struct hv_24x7_event_data *ev, int *len)
{
- unsigned nl = be16_to_cpu(ev->event_name_len);
+ unsigned int nl = be16_to_cpu(ev->event_name_len);
__be16 *desc_len_ = (__be16 *)(ev->remainder + nl - 2);
- unsigned desc_len = be16_to_cpu(*desc_len_);
+ unsigned int desc_len = be16_to_cpu(*desc_len_);
__be16 *long_desc_len = (__be16 *)(ev->remainder + nl + desc_len - 2);
*len = be16_to_cpu(*long_desc_len) - 2;
@@ -234,8 +296,8 @@ static void *event_end(struct hv_24x7_event_data *ev, void *end)
{
void *start = ev;
__be16 *dl_, *ldl_;
- unsigned dl, ldl;
- unsigned nl = be16_to_cpu(ev->event_name_len);
+ unsigned int dl, ldl;
+ unsigned int nl = be16_to_cpu(ev->event_name_len);
if (nl < 2) {
pr_debug("%s: name length too short: %d", __func__, nl);
@@ -336,7 +398,7 @@ static long h_get_24x7_catalog_page(char page[], u64 version, u32 index)
* - Specifying (i.e overriding) values for other parameters
* is undefined.
*/
-static char *event_fmt(struct hv_24x7_event_data *event, unsigned domain)
+static char *event_fmt(struct hv_24x7_event_data *event, unsigned int domain)
{
const char *sindex;
const char *lpar;
@@ -386,6 +448,30 @@ static ssize_t device_show_string(struct device *dev,
return sprintf(buf, "%s\n", (char *)d->var);
}
+static ssize_t cpumask_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return cpumap_print_to_pagebuf(true, buf, &hv_24x7_cpumask);
+}
+
+static ssize_t sockets_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d\n", phys_sockets);
+}
+
+static ssize_t chipspersocket_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d\n", phys_chipspersocket);
+}
+
+static ssize_t coresperchip_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d\n", phys_coresperchip);
+}
+
static struct attribute *device_str_attr_create_(char *name, char *str)
{
struct dev_ext_attribute *attr = kzalloc(sizeof(*attr), GFP_KERNEL);
@@ -443,9 +529,9 @@ out_s:
return NULL;
}
-static struct attribute *event_to_attr(unsigned ix,
+static struct attribute *event_to_attr(unsigned int ix,
struct hv_24x7_event_data *event,
- unsigned domain,
+ unsigned int domain,
int nonce)
{
int event_name_len;
@@ -513,8 +599,8 @@ event_to_long_desc_attr(struct hv_24x7_event_data *event, int nonce)
return device_str_attr_create(name, nl, nonce, desc, dl);
}
-static int event_data_to_attrs(unsigned ix, struct attribute **attrs,
- struct hv_24x7_event_data *event, int nonce)
+static int event_data_to_attrs(unsigned int ix, struct attribute **attrs,
+ struct hv_24x7_event_data *event, int nonce)
{
*attrs = event_to_attr(ix, event, event->domain, nonce);
if (!*attrs)
@@ -528,8 +614,8 @@ struct event_uniq {
struct rb_node node;
const char *name;
int nl;
- unsigned ct;
- unsigned domain;
+ unsigned int ct;
+ unsigned int domain;
};
static int memord(const void *d1, size_t s1, const void *d2, size_t s2)
@@ -542,8 +628,8 @@ static int memord(const void *d1, size_t s1, const void *d2, size_t s2)
return memcmp(d1, d2, s1);
}
-static int ev_uniq_ord(const void *v1, size_t s1, unsigned d1, const void *v2,
- size_t s2, unsigned d2)
+static int ev_uniq_ord(const void *v1, size_t s1, unsigned int d1,
+ const void *v2, size_t s2, unsigned int d2)
{
int r = memord(v1, s1, v2, s2);
@@ -557,7 +643,7 @@ static int ev_uniq_ord(const void *v1, size_t s1, unsigned d1, const void *v2,
}
static int event_uniq_add(struct rb_root *root, const char *name, int nl,
- unsigned domain)
+ unsigned int domain)
{
struct rb_node **new = &(root->rb_node), *parent = NULL;
struct event_uniq *data;
@@ -670,7 +756,7 @@ static ssize_t catalog_event_len_validate(struct hv_24x7_event_data *event,
}
if (calc_ev_end > ev_end) {
- pr_warn("event %zu exceeds it's own length: event=%pK, end=%pK, offset=%zu, calc_ev_end=%pK\n",
+ pr_warn("event %zu exceeds its own length: event=%pK, end=%pK, offset=%zu, calc_ev_end=%pK\n",
event_idx, event, ev_end, offset, calc_ev_end);
return -1;
}
@@ -678,6 +764,14 @@ static ssize_t catalog_event_len_validate(struct hv_24x7_event_data *event,
return ev_len;
}
+/*
+ * Return true for invalid or dummy events with names like RESERVED*
+ */
+static bool ignore_event(const char *name)
+{
+ return strncmp(name, "RESERVED", 8) == 0;
+}
+
#define MAX_4K (SIZE_MAX / 4096)
static int create_events_from_catalog(struct attribute ***events_,
@@ -808,6 +902,10 @@ static int create_events_from_catalog(struct attribute ***events_,
name = event_name(event, &nl);
+ if (ignore_event(name)) {
+ junk_events++;
+ continue;
+ }
if (event->event_group_record_len == 0) {
pr_devel("invalid event %zu (%.*s): group_record_len == 0, skipping\n",
event_idx, nl, name);
@@ -869,6 +967,9 @@ static int create_events_from_catalog(struct attribute ***events_,
continue;
name = event_name(event, &nl);
+ if (ignore_event(name))
+ continue;
+
nonce = event_uniq_add(&ev_uniq, name, nl, event->domain);
ct = event_data_to_attrs(event_idx, events + event_attr_ct,
event, nonce);
@@ -1032,20 +1133,36 @@ PAGE_0_ATTR(catalog_len, "%lld\n",
(unsigned long long)be32_to_cpu(page_0->length) * 4096);
static BIN_ATTR_RO(catalog, 0/* real length varies */);
static DEVICE_ATTR_RO(domains);
+static DEVICE_ATTR_RO(sockets);
+static DEVICE_ATTR_RO(chipspersocket);
+static DEVICE_ATTR_RO(coresperchip);
+static DEVICE_ATTR_RO(cpumask);
static struct bin_attribute *if_bin_attrs[] = {
&bin_attr_catalog,
NULL,
};
+static struct attribute *cpumask_attrs[] = {
+ &dev_attr_cpumask.attr,
+ NULL,
+};
+
+static const struct attribute_group cpumask_attr_group = {
+ .attrs = cpumask_attrs,
+};
+
static struct attribute *if_attrs[] = {
&dev_attr_catalog_len.attr,
&dev_attr_catalog_version.attr,
&dev_attr_domains.attr,
+ &dev_attr_sockets.attr,
+ &dev_attr_chipspersocket.attr,
+ &dev_attr_coresperchip.attr,
NULL,
};
-static struct attribute_group if_group = {
+static const struct attribute_group if_group = {
.name = "interface",
.bin_attrs = if_bin_attrs,
.attrs = if_attrs,
@@ -1057,6 +1174,7 @@ static const struct attribute_group *attr_groups[] = {
&event_desc_group,
&event_long_desc_group,
&if_group,
+ &cpumask_attr_group,
NULL,
};
@@ -1280,7 +1398,7 @@ out:
static int h_24x7_event_init(struct perf_event *event)
{
struct hv_perf_caps caps;
- unsigned domain;
+ unsigned int domain;
unsigned long hret;
u64 ct;
@@ -1400,16 +1518,6 @@ static void h_24x7_event_read(struct perf_event *event)
h24x7hw = &get_cpu_var(hv_24x7_hw);
h24x7hw->events[i] = event;
put_cpu_var(h24x7hw);
- /*
- * Clear the event count so we can compute the _change_
- * in the 24x7 raw counter value at the end of the txn.
- *
- * Note that we could alternatively read the 24x7 value
- * now and save its value in event->hw.prev_count. But
- * that would require issuing a hcall, which would then
- * defeat the purpose of using the txn interface.
- */
- local64_set(&event->count, 0);
}
put_cpu_var(hv_24x7_reqb);
@@ -1567,20 +1675,59 @@ static struct pmu h_24x7_pmu = {
.capabilities = PERF_PMU_CAP_NO_EXCLUDE,
};
+static int ppc_hv_24x7_cpu_online(unsigned int cpu)
+{
+ if (cpumask_empty(&hv_24x7_cpumask))
+ cpumask_set_cpu(cpu, &hv_24x7_cpumask);
+
+ return 0;
+}
+
+static int ppc_hv_24x7_cpu_offline(unsigned int cpu)
+{
+ int target;
+
+ /* Check if exiting cpu is used for collecting 24x7 events */
+ if (!cpumask_test_and_clear_cpu(cpu, &hv_24x7_cpumask))
+ return 0;
+
+ /* Find a new cpu to collect 24x7 events */
+ target = cpumask_last(cpu_active_mask);
+
+ if (target < 0 || target >= nr_cpu_ids) {
+ pr_err("hv_24x7: CPU hotplug init failed\n");
+ return -1;
+ }
+
+ /* Migrate 24x7 events to the new target */
+ cpumask_set_cpu(target, &hv_24x7_cpumask);
+ perf_pmu_migrate_context(&h_24x7_pmu, cpu, target);
+
+ return 0;
+}
+
+static int hv_24x7_cpu_hotplug_init(void)
+{
+ return cpuhp_setup_state(CPUHP_AP_PERF_POWERPC_HV_24x7_ONLINE,
+ "perf/powerpc/hv_24x7:online",
+ ppc_hv_24x7_cpu_online,
+ ppc_hv_24x7_cpu_offline);
+}
+
static int hv_24x7_init(void)
{
int r;
unsigned long hret;
+ unsigned int pvr = mfspr(SPRN_PVR);
struct hv_perf_caps caps;
if (!firmware_has_feature(FW_FEATURE_LPAR)) {
pr_debug("not a virtualized system, not enabling\n");
return -ENODEV;
- } else if (!cur_cpu_spec->oprofile_cpu_type)
- return -ENODEV;
+ }
/* POWER8 only supports v1, while POWER9 only supports v2. */
- if (!strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power8"))
+ if (PVR_VER(pvr) == PVR_POWER8)
interface_version = 1;
else {
interface_version = 2;
@@ -1611,10 +1758,17 @@ static int hv_24x7_init(void)
if (r)
return r;
+ /* init cpuhotplug */
+ r = hv_24x7_cpu_hotplug_init();
+ if (r)
+ return r;
+
r = perf_pmu_register(&h_24x7_pmu, h_24x7_pmu.name, -1);
if (r)
return r;
+ read_24x7_sys_info();
+
return 0;
}
diff --git a/arch/powerpc/perf/hv-gpci-requests.h b/arch/powerpc/perf/hv-gpci-requests.h
index e608f9db12dd..8965b4463d43 100644
--- a/arch/powerpc/perf/hv-gpci-requests.h
+++ b/arch/powerpc/perf/hv-gpci-requests.h
@@ -95,7 +95,7 @@ REQUEST(__field(0, 8, partition_id)
#define REQUEST_NAME system_performance_capabilities
#define REQUEST_NUM 0x40
-#define REQUEST_IDX_KIND "starting_index=0xffffffffffffffff"
+#define REQUEST_IDX_KIND "starting_index=0xffffffff"
#include I(REQUEST_BEGIN)
REQUEST(__field(0, 1, perf_collect_privileged)
__field(0x1, 1, capability_mask)
@@ -223,7 +223,7 @@ REQUEST(__field(0, 2, partition_id)
#define REQUEST_NAME system_hypervisor_times
#define REQUEST_NUM 0xF0
-#define REQUEST_IDX_KIND "starting_index=0xffffffffffffffff"
+#define REQUEST_IDX_KIND "starting_index=0xffffffff"
#include I(REQUEST_BEGIN)
REQUEST(__count(0, 8, time_spent_to_dispatch_virtual_processors)
__count(0x8, 8, time_spent_processing_virtual_processor_timers)
@@ -234,7 +234,7 @@ REQUEST(__count(0, 8, time_spent_to_dispatch_virtual_processors)
#define REQUEST_NAME system_tlbie_count_and_time
#define REQUEST_NUM 0xF4
-#define REQUEST_IDX_KIND "starting_index=0xffffffffffffffff"
+#define REQUEST_IDX_KIND "starting_index=0xffffffff"
#include I(REQUEST_BEGIN)
REQUEST(__count(0, 8, tlbie_instructions_issued)
/*
diff --git a/arch/powerpc/perf/hv-gpci.c b/arch/powerpc/perf/hv-gpci.c
index 6884d16ec19b..5eb60ed5b5e8 100644
--- a/arch/powerpc/perf/hv-gpci.c
+++ b/arch/powerpc/perf/hv-gpci.c
@@ -48,6 +48,8 @@ EVENT_DEFINE_RANGE_FORMAT(length, config1, 24, 31);
/* u32, byte offset */
EVENT_DEFINE_RANGE_FORMAT(offset, config1, 32, 63);
+static cpumask_t hv_gpci_cpumask;
+
static struct attribute *format_attrs[] = {
&format_attr_request.attr,
&format_attr_starting_index.attr,
@@ -63,12 +65,12 @@ static struct attribute *format_attrs[] = {
NULL,
};
-static struct attribute_group format_group = {
+static const struct attribute_group format_group = {
.name = "format",
.attrs = format_attrs,
};
-static struct attribute_group event_group = {
+static const struct attribute_group event_group = {
.name = "events",
.attrs = hv_gpci_event_attrs,
};
@@ -94,7 +96,15 @@ static ssize_t kernel_version_show(struct device *dev,
return sprintf(page, "0x%x\n", COUNTER_INFO_VERSION_CURRENT);
}
+static ssize_t cpumask_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return cpumap_print_to_pagebuf(true, buf, &hv_gpci_cpumask);
+}
+
static DEVICE_ATTR_RO(kernel_version);
+static DEVICE_ATTR_RO(cpumask);
+
HV_CAPS_ATTR(version, "0x%x\n");
HV_CAPS_ATTR(ga, "%d\n");
HV_CAPS_ATTR(expanded, "%d\n");
@@ -111,7 +121,16 @@ static struct attribute *interface_attrs[] = {
NULL,
};
-static struct attribute_group interface_group = {
+static struct attribute *cpumask_attrs[] = {
+ &dev_attr_cpumask.attr,
+ NULL,
+};
+
+static const struct attribute_group cpumask_attr_group = {
+ .attrs = cpumask_attrs,
+};
+
+static const struct attribute_group interface_group = {
.name = "interface",
.attrs = interface_attrs,
};
@@ -120,20 +139,12 @@ static const struct attribute_group *attr_groups[] = {
&format_group,
&event_group,
&interface_group,
+ &cpumask_attr_group,
NULL,
};
-#define HGPCI_REQ_BUFFER_SIZE 4096
-#define HGPCI_MAX_DATA_BYTES \
- (HGPCI_REQ_BUFFER_SIZE - sizeof(struct hv_get_perf_counter_info_params))
-
static DEFINE_PER_CPU(char, hv_gpci_reqb[HGPCI_REQ_BUFFER_SIZE]) __aligned(sizeof(uint64_t));
-struct hv_gpci_request_buffer {
- struct hv_get_perf_counter_info_params params;
- uint8_t bytes[HGPCI_MAX_DATA_BYTES];
-} __packed;
-
static unsigned long single_gpci_request(u32 req, u32 starting_index,
u16 secondary_index, u8 version_in, u32 offset, u8 length,
u64 *value)
@@ -164,7 +175,7 @@ static unsigned long single_gpci_request(u32 req, u32 starting_index,
*/
count = 0;
for (i = offset; i < offset + length; i++)
- count |= arg->bytes[i] << (i - offset);
+ count |= (u64)(arg->bytes[i]) << ((length - 1 - (i - offset)) * 8);
*value = count;
out:
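/*
 * Worked example of the corrected byte assembly above, with hypothetical
 * values offset = 0, length = 4 and arg->bytes[] = { 0x12, 0x34, 0x56, 0x78 }:
 * the first (most significant) byte is shifted left by (4 - 1) * 8 = 24 bits
 * and the last by 0, so the counter is reassembled big-endian:
 *
 *	count = 0x12 << 24 | 0x34 << 16 | 0x56 << 8 | 0x78 = 0x12345678
 *
 * The replaced line shifted each byte by its index in bits (0..3), which
 * collapsed multi-byte counter values.
 */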
@@ -275,6 +286,45 @@ static struct pmu h_gpci_pmu = {
.capabilities = PERF_PMU_CAP_NO_EXCLUDE,
};
+static int ppc_hv_gpci_cpu_online(unsigned int cpu)
+{
+ if (cpumask_empty(&hv_gpci_cpumask))
+ cpumask_set_cpu(cpu, &hv_gpci_cpumask);
+
+ return 0;
+}
+
+static int ppc_hv_gpci_cpu_offline(unsigned int cpu)
+{
+ int target;
+
+ /* Check if exiting cpu is used for collecting gpci events */
+ if (!cpumask_test_and_clear_cpu(cpu, &hv_gpci_cpumask))
+ return 0;
+
+ /* Find a new cpu to collect gpci events */
+ target = cpumask_last(cpu_active_mask);
+
+ if (target < 0 || target >= nr_cpu_ids) {
+ pr_err("hv_gpci: CPU hotplug init failed\n");
+ return -1;
+ }
+
+ /* Migrate gpci events to the new target */
+ cpumask_set_cpu(target, &hv_gpci_cpumask);
+ perf_pmu_migrate_context(&h_gpci_pmu, cpu, target);
+
+ return 0;
+}
+
+static int hv_gpci_cpu_hotplug_init(void)
+{
+ return cpuhp_setup_state(CPUHP_AP_PERF_POWERPC_HV_GPCI_ONLINE,
+ "perf/powerpc/hv_gcpi:online",
+ ppc_hv_gpci_cpu_online,
+ ppc_hv_gpci_cpu_offline);
+}
+
static int hv_gpci_init(void)
{
int r;
@@ -295,6 +345,11 @@ static int hv_gpci_init(void)
return -ENODEV;
}
+ /* init cpuhotplug */
+ r = hv_gpci_cpu_hotplug_init();
+ if (r)
+ return r;
+
/* sampling not supported */
h_gpci_pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
diff --git a/arch/powerpc/perf/hv-gpci.h b/arch/powerpc/perf/hv-gpci.h
index a3053eda5dcc..4d108262bed7 100644
--- a/arch/powerpc/perf/hv-gpci.h
+++ b/arch/powerpc/perf/hv-gpci.h
@@ -2,33 +2,6 @@
#ifndef LINUX_POWERPC_PERF_HV_GPCI_H_
#define LINUX_POWERPC_PERF_HV_GPCI_H_
-#include <linux/types.h>
-
-/* From the document "H_GetPerformanceCounterInfo Interface" v1.07 */
-
-/* H_GET_PERF_COUNTER_INFO argument */
-struct hv_get_perf_counter_info_params {
- __be32 counter_request; /* I */
- __be32 starting_index; /* IO */
- __be16 secondary_index; /* IO */
- __be16 returned_values; /* O */
- __be32 detail_rc; /* O, only needed when called via *_norets() */
-
- /*
- * O, size each of counter_value element in bytes, only set for version
- * >= 0x3
- */
- __be16 cv_element_size;
-
- /* I, 0 (zero) for versions < 0x3 */
- __u8 counter_info_version_in;
-
- /* O, 0 (zero) if version < 0x3. Must be set to 0 when making hcall */
- __u8 counter_info_version_out;
- __u8 reserved[0xC];
- __u8 counter_value[];
-} __packed;
-
/*
* counter info version => fw version/reference (spec version)
*
diff --git a/arch/powerpc/perf/imc-pmu.c b/arch/powerpc/perf/imc-pmu.c
index cb50a9e1fd2d..d517aba94d1b 100644
--- a/arch/powerpc/perf/imc-pmu.c
+++ b/arch/powerpc/perf/imc-pmu.c
@@ -6,6 +6,7 @@
* (C) 2017 Anju T Sudhakar, IBM Corporation.
* (C) 2017 Hemant K Shaw, IBM Corporation.
*/
+#include <linux/of.h>
#include <linux/perf_event.h>
#include <linux/slab.h>
#include <asm/opal.h>
@@ -44,6 +45,16 @@ static DEFINE_PER_CPU(u64 *, trace_imc_mem);
static struct imc_pmu_ref *trace_imc_refc;
static int trace_imc_mem_size;
+/*
+ * Global data structure used to avoid races between thread,
+ * core and trace-imc
+ */
+static struct imc_pmu_ref imc_global_refc = {
+ .lock = __MUTEX_INITIALIZER(imc_global_refc.lock),
+ .id = 0,
+ .refc = 0,
+};
+
static struct imc_pmu *imc_event_to_pmu(struct perf_event *event)
{
return container_of(event->pmu, struct imc_pmu, pmu);
@@ -61,7 +72,7 @@ static struct attribute *imc_format_attrs[] = {
NULL,
};
-static struct attribute_group imc_format_group = {
+static const struct attribute_group imc_format_group = {
.name = "format",
.attrs = imc_format_attrs,
};
@@ -80,7 +91,7 @@ static struct attribute *trace_imc_format_attrs[] = {
NULL,
};
-static struct attribute_group trace_imc_format_group = {
+static const struct attribute_group trace_imc_format_group = {
.name = "format",
.attrs = trace_imc_format_attrs,
};
@@ -115,7 +126,7 @@ static struct attribute *imc_pmu_cpumask_attrs[] = {
NULL,
};
-static struct attribute_group imc_pmu_cpumask_attr_group = {
+static const struct attribute_group imc_pmu_cpumask_attr_group = {
.attrs = imc_pmu_cpumask_attrs,
};
@@ -229,8 +240,10 @@ static int update_events_in_group(struct device_node *node, struct imc_pmu *pmu)
ct = of_get_child_count(pmu_events);
/* Get the event prefix */
- if (of_property_read_string(node, "events-prefix", &prefix))
+ if (of_property_read_string(node, "events-prefix", &prefix)) {
+ of_node_put(pmu_events);
return 0;
+ }
/* Get a global unit and scale data if available */
if (of_property_read_string(node, "scale", &g_scale))
@@ -244,8 +257,10 @@ static int update_events_in_group(struct device_node *node, struct imc_pmu *pmu)
/* Allocate memory for the events */
pmu->events = kcalloc(ct, sizeof(struct imc_events), GFP_KERNEL);
- if (!pmu->events)
+ if (!pmu->events) {
+ of_node_put(pmu_events);
return -ENOMEM;
+ }
ct = 0;
/* Parse the events and update the struct */
@@ -255,6 +270,8 @@ static int update_events_in_group(struct device_node *node, struct imc_pmu *pmu)
ct++;
}
+ of_node_put(pmu_events);
+
/* Allocate memory for attribute group */
attr_group = kzalloc(sizeof(*attr_group), GFP_KERNEL);
if (!attr_group) {
@@ -511,7 +528,7 @@ static int nest_imc_event_init(struct perf_event *event)
/*
* Nest HW counter memory resides in a per-chip reserve-memory (HOMER).
- * Get the base memory addresss for this cpu.
+ * Get the base memory address for this cpu.
*/
chip_id = cpu_to_chip_id(event->cpu);
@@ -664,7 +681,7 @@ static int ppc_core_imc_cpu_offline(unsigned int cpu)
/*
* Check whether core_imc is registered. We could end up here
* if the cpuhotplug callback registration fails. i.e, callback
- * invokes the offline path for all sucessfully registered cpus.
+ * invokes the offline path for all successfully registered cpus.
* At this stage, core_imc pmu will not be registered and we
* should return here.
*
@@ -698,6 +715,16 @@ static int ppc_core_imc_cpu_offline(unsigned int cpu)
return -EINVAL;
ref->refc = 0;
+ /*
+ * Reduce the global reference count if this is the last cpu
+ * in this core and a core-imc event is running on this cpu.
+ */
+ mutex_lock(&imc_global_refc.lock);
+ if (imc_global_refc.id == IMC_DOMAIN_CORE)
+ imc_global_refc.refc--;
+
+ mutex_unlock(&imc_global_refc.lock);
}
return 0;
}
@@ -710,6 +737,23 @@ static int core_imc_pmu_cpumask_init(void)
ppc_core_imc_cpu_offline);
}
+static void reset_global_refc(struct perf_event *event)
+{
+ mutex_lock(&imc_global_refc.lock);
+ imc_global_refc.refc--;
+
+ /*
+ * If no other thread is running any event for this
+ * domain (thread/core/trace), set the global id to zero.
+ */
+ if (imc_global_refc.refc <= 0) {
+ imc_global_refc.refc = 0;
+ imc_global_refc.id = 0;
+ }
+ mutex_unlock(&imc_global_refc.lock);
+}
+
static void core_imc_counters_release(struct perf_event *event)
{
int rc, core_id;
@@ -759,6 +803,8 @@ static void core_imc_counters_release(struct perf_event *event)
ref->refc = 0;
}
mutex_unlock(&ref->lock);
+
+ reset_global_refc(event);
}
static int core_imc_event_init(struct perf_event *event)
@@ -819,6 +865,29 @@ static int core_imc_event_init(struct perf_event *event)
++ref->refc;
mutex_unlock(&ref->lock);
+ /*
+ * Since the system can run IMC in only one mode at a time
+ * (accumulation or trace), core-imc events are allowed only if
+ * no other trace/thread imc events are enabled/monitored.
+ *
+ * Take the global lock, and check the refc.id
+ * to know whether any other trace/thread imc
+ * events are running.
+ */
+ mutex_lock(&imc_global_refc.lock);
+ if (imc_global_refc.id == 0 || imc_global_refc.id == IMC_DOMAIN_CORE) {
+ /*
+ * No other trace/thread imc events are running in
+ * the system, so set the refc.id to core-imc.
+ */
+ imc_global_refc.id = IMC_DOMAIN_CORE;
+ imc_global_refc.refc++;
+ } else {
+ mutex_unlock(&imc_global_refc.lock);
+ return -EBUSY;
+ }
+ mutex_unlock(&imc_global_refc.lock);
+
event->hw.event_base = (u64)pcmi->vbase + (config & IMC_EVENT_OFFSET_MASK);
event->destroy = core_imc_counters_release;
return 0;
@@ -877,7 +946,23 @@ static int ppc_thread_imc_cpu_online(unsigned int cpu)
static int ppc_thread_imc_cpu_offline(unsigned int cpu)
{
- mtspr(SPRN_LDBAR, 0);
+ /*
+ * Set bit 0 of LDBAR to zero.
+ *
+ * When bit 0 of LDBAR is unset, the hardware stops posting
+ * counter data to memory.
+ * For thread-imc, bit 0 of LDBAR is set to 1 in the event_add
+ * function, so clear it here to stop the updates to memory
+ * in the cpu_offline path.
+ */
+ mtspr(SPRN_LDBAR, (mfspr(SPRN_LDBAR) & (~(1UL << 63))));
+
+ /* Reduce the refc if a thread-imc event is running on this cpu */
+ mutex_lock(&imc_global_refc.lock);
+ if (imc_global_refc.id == IMC_DOMAIN_THREAD)
+ imc_global_refc.refc--;
+ mutex_unlock(&imc_global_refc.lock);
+
return 0;
}
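/*
 * Note on the masks used above: "bit 0 of LDBAR" follows IBM (big-endian)
 * bit numbering, where bit 0 is the most significant bit of the 64-bit
 * register, hence the (1UL << 63) mask. Assuming the generic powerpc
 * PPC_BIT() helper is usable here, an equivalent sketch would be:
 *
 *	mtspr(SPRN_LDBAR, mfspr(SPRN_LDBAR) & ~PPC_BIT(0));
 */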
@@ -898,7 +983,7 @@ static int thread_imc_event_init(struct perf_event *event)
if (event->attr.type != event->pmu->type)
return -ENOENT;
- if (!capable(CAP_SYS_ADMIN))
+ if (!perfmon_capable())
return -EACCES;
/* Sampling not supported */
@@ -916,7 +1001,22 @@ static int thread_imc_event_init(struct perf_event *event)
if (!target)
return -EINVAL;
+ mutex_lock(&imc_global_refc.lock);
+ /*
+ * Check if any other trace/core imc events are running in the
+ * system; if not, set the global id to thread-imc.
+ */
+ if (imc_global_refc.id == 0 || imc_global_refc.id == IMC_DOMAIN_THREAD) {
+ imc_global_refc.id = IMC_DOMAIN_THREAD;
+ imc_global_refc.refc++;
+ } else {
+ mutex_unlock(&imc_global_refc.lock);
+ return -EBUSY;
+ }
+ mutex_unlock(&imc_global_refc.lock);
+
event->pmu->task_ctx_nr = perf_sw_context;
+ event->destroy = reset_global_refc;
return 0;
}
@@ -1063,10 +1163,12 @@ static void thread_imc_event_del(struct perf_event *event, int flags)
int core_id;
struct imc_pmu_ref *ref;
- mtspr(SPRN_LDBAR, 0);
-
core_id = smp_processor_id() / threads_per_core;
ref = &core_imc_refc[core_id];
+ if (!ref) {
+ pr_debug("imc: Failed to get event reference count\n");
+ return;
+ }
mutex_lock(&ref->lock);
ref->refc--;
@@ -1082,6 +1184,10 @@ static void thread_imc_event_del(struct perf_event *event, int flags)
ref->refc = 0;
}
mutex_unlock(&ref->lock);
+
+ /* Set bit 0 of LDBAR to zero, to stop posting updates to memory */
+ mtspr(SPRN_LDBAR, (mfspr(SPRN_LDBAR) & (~(1UL << 63))));
+
/*
* Take a snapshot and calculate the delta and update
* the event counter values.
@@ -1133,7 +1239,18 @@ static int ppc_trace_imc_cpu_online(unsigned int cpu)
static int ppc_trace_imc_cpu_offline(unsigned int cpu)
{
- mtspr(SPRN_LDBAR, 0);
+ /*
+ * No need to set bit 0 of LDBAR to zero, as it is
+ * already zero in imc trace-mode.
+ *
+ * Reduce the refc if any trace-imc event is running
+ * on this cpu.
+ */
+ mutex_lock(&imc_global_refc.lock);
+ if (imc_global_refc.id == IMC_DOMAIN_TRACE)
+ imc_global_refc.refc--;
+ mutex_unlock(&imc_global_refc.lock);
+
return 0;
}
@@ -1178,11 +1295,30 @@ static int trace_imc_prepare_sample(struct trace_imc_data *mem,
header->size = sizeof(*header) + event->header_size;
header->misc = 0;
- if (is_kernel_addr(data->ip))
- header->misc |= PERF_RECORD_MISC_KERNEL;
- else
- header->misc |= PERF_RECORD_MISC_USER;
-
+ if (cpu_has_feature(CPU_FTR_ARCH_31)) {
+ switch (IMC_TRACE_RECORD_VAL_HVPR(be64_to_cpu(READ_ONCE(mem->val)))) {
+ case 0:/* when MSR HV and PR not set in the trace-record */
+ header->misc |= PERF_RECORD_MISC_GUEST_KERNEL;
+ break;
+ case 1: /* MSR HV is 0 and PR is 1 */
+ header->misc |= PERF_RECORD_MISC_GUEST_USER;
+ break;
+ case 2: /* MSR HV is 1 and PR is 0 */
+ header->misc |= PERF_RECORD_MISC_KERNEL;
+ break;
+ case 3: /* MSR HV is 1 and PR is 1 */
+ header->misc |= PERF_RECORD_MISC_USER;
+ break;
+ default:
+ pr_info("IMC: Unable to set the flag based on MSR bits\n");
+ break;
+ }
+ } else {
+ if (is_kernel_addr(data->ip))
+ header->misc |= PERF_RECORD_MISC_KERNEL;
+ else
+ header->misc |= PERF_RECORD_MISC_USER;
+ }
perf_event_header__init_id(header, data, event);
return 0;
@@ -1207,7 +1343,7 @@ static void dump_trace_imc_data(struct perf_event *event)
/* If this is a valid record, create the sample */
struct perf_output_handle handle;
- if (perf_output_begin(&handle, event, header.size))
+ if (perf_output_begin(&handle, &data, event, header.size))
return;
perf_output_sample(&handle, &header, &data, event);
@@ -1226,15 +1362,14 @@ static int trace_imc_event_add(struct perf_event *event, int flags)
local_mem = get_trace_imc_event_base_addr();
ldbar_value = ((u64)local_mem & THREAD_IMC_LDBAR_MASK) | TRACE_IMC_ENABLE;
- if (core_imc_refc)
- ref = &core_imc_refc[core_id];
+ /* trace-imc reference count */
+ if (trace_imc_refc)
+ ref = &trace_imc_refc[core_id];
if (!ref) {
- /* If core-imc is not enabled, use trace-imc reference count */
- if (trace_imc_refc)
- ref = &trace_imc_refc[core_id];
- if (!ref)
- return -EINVAL;
+ pr_debug("imc: Failed to get the event reference count\n");
+ return -EINVAL;
}
+
mtspr(SPRN_LDBAR, ldbar_value);
mutex_lock(&ref->lock);
if (ref->refc == 0) {
@@ -1242,13 +1377,11 @@ static int trace_imc_event_add(struct perf_event *event, int flags)
get_hard_smp_processor_id(smp_processor_id()))) {
mutex_unlock(&ref->lock);
pr_err("trace-imc: Unable to start the counters for core %d\n", core_id);
- mtspr(SPRN_LDBAR, 0);
return -EINVAL;
}
}
++ref->refc;
mutex_unlock(&ref->lock);
-
return 0;
}
@@ -1274,16 +1407,13 @@ static void trace_imc_event_del(struct perf_event *event, int flags)
int core_id = smp_processor_id() / threads_per_core;
struct imc_pmu_ref *ref = NULL;
- if (core_imc_refc)
- ref = &core_imc_refc[core_id];
+ if (trace_imc_refc)
+ ref = &trace_imc_refc[core_id];
if (!ref) {
- /* If core-imc is not enabled, use trace-imc reference count */
- if (trace_imc_refc)
- ref = &trace_imc_refc[core_id];
- if (!ref)
- return;
+ pr_debug("imc: Failed to get event reference count\n");
+ return;
}
- mtspr(SPRN_LDBAR, 0);
+
mutex_lock(&ref->lock);
ref->refc--;
if (ref->refc == 0) {
@@ -1297,27 +1427,49 @@ static void trace_imc_event_del(struct perf_event *event, int flags)
ref->refc = 0;
}
mutex_unlock(&ref->lock);
+
trace_imc_event_stop(event, flags);
}
static int trace_imc_event_init(struct perf_event *event)
{
- struct task_struct *target;
-
if (event->attr.type != event->pmu->type)
return -ENOENT;
- if (!capable(CAP_SYS_ADMIN))
+ if (!perfmon_capable())
return -EACCES;
 	/* Return if this is a counting event */
if (event->attr.sample_period == 0)
return -ENOENT;
+ /*
+ * Take the global lock and make sure no other thread
+ * is running any core/thread imc events.
+ */
+ mutex_lock(&imc_global_refc.lock);
+ if (imc_global_refc.id == 0 || imc_global_refc.id == IMC_DOMAIN_TRACE) {
+ /*
+ * No core/thread imc events are running in the
+ * system, so set the refc.id to trace-imc.
+ */
+ imc_global_refc.id = IMC_DOMAIN_TRACE;
+ imc_global_refc.refc++;
+ } else {
+ mutex_unlock(&imc_global_refc.lock);
+ return -EBUSY;
+ }
+ mutex_unlock(&imc_global_refc.lock);
+
event->hw.idx = -1;
- target = event->hw.target;
- event->pmu->task_ctx_nr = perf_hw_context;
+ /*
+ * Only a single PMU can own perf_hw_context events, and that is
+ * the core PMU. Hence use "perf_sw_context" for trace_imc.
+ */
+ event->pmu->task_ctx_nr = perf_sw_context;
+ event->destroy = reset_global_refc;
return 0;
}
@@ -1359,6 +1511,7 @@ static int update_pmu_ops(struct imc_pmu *pmu)
pmu->pmu.stop = trace_imc_event_stop;
pmu->pmu.read = trace_imc_event_read;
pmu->attr_groups[IMC_FORMAT_ATTR] = &trace_imc_format_group;
+ break;
default:
break;
}
@@ -1429,10 +1582,10 @@ static void cleanup_all_core_imc_memory(void)
static void thread_imc_ldbar_disable(void *dummy)
{
/*
- * By Zeroing LDBAR, we disable thread-imc
- * updates.
+ * By setting bit 0 of LDBAR to zero, we disable thread-imc
+ * updates to memory.
*/
- mtspr(SPRN_LDBAR, 0);
+ mtspr(SPRN_LDBAR, (mfspr(SPRN_LDBAR) & (~(1UL << 63))));
}
void thread_imc_disable(void)
diff --git a/arch/powerpc/perf/internal.h b/arch/powerpc/perf/internal.h
index f755c64da137..4c18b5504326 100644
--- a/arch/powerpc/perf/internal.h
+++ b/arch/powerpc/perf/internal.h
@@ -2,11 +2,12 @@
//
// Copyright 2019 Madhavan Srinivasan, IBM Corporation.
-extern int init_ppc970_pmu(void);
-extern int init_power5_pmu(void);
-extern int init_power5p_pmu(void);
-extern int init_power6_pmu(void);
-extern int init_power7_pmu(void);
-extern int init_power8_pmu(void);
-extern int init_power9_pmu(void);
-extern int init_generic_compat_pmu(void);
+int __init init_ppc970_pmu(void);
+int __init init_power5_pmu(void);
+int __init init_power5p_pmu(void);
+int __init init_power6_pmu(void);
+int __init init_power7_pmu(void);
+int __init init_power8_pmu(void);
+int __init init_power9_pmu(void);
+int __init init_power10_pmu(void);
+int __init init_generic_compat_pmu(void);
diff --git a/arch/powerpc/perf/isa207-common.c b/arch/powerpc/perf/isa207-common.c
index 4c86da5eb28a..56301b2bc8ae 100644
--- a/arch/powerpc/perf/isa207-common.c
+++ b/arch/powerpc/perf/isa207-common.c
@@ -21,7 +21,7 @@ PMU_FORMAT_ATTR(thresh_stop, "config:32-35");
PMU_FORMAT_ATTR(thresh_start, "config:36-39");
PMU_FORMAT_ATTR(thresh_cmp, "config:40-49");
-struct attribute *isa207_pmu_format_attr[] = {
+static struct attribute *isa207_pmu_format_attr[] = {
&format_attr_event.attr,
&format_attr_pmcxsel.attr,
&format_attr_mark.attr,
@@ -37,7 +37,7 @@ struct attribute *isa207_pmu_format_attr[] = {
NULL,
};
-struct attribute_group isa207_pmu_format_group = {
+const struct attribute_group isa207_pmu_format_group = {
.name = "format",
.attrs = isa207_pmu_format_attr,
};
@@ -55,7 +55,9 @@ static bool is_event_valid(u64 event)
{
u64 valid_mask = EVENT_VALID_MASK;
- if (cpu_has_feature(CPU_FTR_ARCH_300))
+ if (cpu_has_feature(CPU_FTR_ARCH_31))
+ valid_mask = p10_EVENT_VALID_MASK;
+ else if (cpu_has_feature(CPU_FTR_ARCH_300))
valid_mask = p9_EVENT_VALID_MASK;
return !(event & ~valid_mask);
@@ -69,17 +71,25 @@ static inline bool is_event_marked(u64 event)
return false;
}
+static unsigned long sdar_mod_val(u64 event)
+{
+ if (cpu_has_feature(CPU_FTR_ARCH_31))
+ return p10_SDAR_MODE(event);
+
+ return p9_SDAR_MODE(event);
+}
+
static void mmcra_sdar_mode(u64 event, unsigned long *mmcra)
{
/*
- * MMCRA[SDAR_MODE] specifices how the SDAR should be updated in
- * continous sampling mode.
+ * MMCRA[SDAR_MODE] specifies how the SDAR should be updated in
+ * continuous sampling mode.
*
 * In case of Power8:
- * MMCRA[SDAR_MODE] will be programmed as "0b01" for continous sampling
+ * MMCRA[SDAR_MODE] will be programmed as "0b01" for continuous sampling
* mode and will be un-changed when setting MMCRA[63] (Marked events).
*
- * Incase of Power9:
+ * In case of Power9/Power10:
* Marked event: MMCRA[SDAR_MODE] will be set to 0b00 ('No Updates'),
* or if group already have any marked events.
* For rest
@@ -90,20 +100,65 @@ static void mmcra_sdar_mode(u64 event, unsigned long *mmcra)
if (cpu_has_feature(CPU_FTR_ARCH_300)) {
if (is_event_marked(event) || (*mmcra & MMCRA_SAMPLE_ENABLE))
*mmcra &= MMCRA_SDAR_MODE_NO_UPDATES;
- else if (p9_SDAR_MODE(event))
- *mmcra |= p9_SDAR_MODE(event) << MMCRA_SDAR_MODE_SHIFT;
+ else if (sdar_mod_val(event))
+ *mmcra |= sdar_mod_val(event) << MMCRA_SDAR_MODE_SHIFT;
else
*mmcra |= MMCRA_SDAR_MODE_DCACHE;
} else
*mmcra |= MMCRA_SDAR_MODE_TLB;
}
+static int p10_thresh_cmp_val(u64 value)
+{
+ int exp = 0;
+ u64 result = value;
+
+ if (!value)
+ return value;
+
+ /*
+ * In case of P10, the thresh_cmp value is not part of the raw event
+ * code and is provided via the attr.config1 parameter. To program the
+ * threshold in MMCRA, take an 18 bit number N and repeatedly shift it
+ * right 2 places and increment the exponent E by 1 until the upper
+ * 10 bits of N are zero.
+ * Write E to the threshold exponent and write the lower 8 bits of N
+ * to the threshold mantissa.
+ * The max threshold that can be written is 261120.
+ */
+ if (cpu_has_feature(CPU_FTR_ARCH_31)) {
+ if (value > 261120)
+ value = 261120;
+ while ((64 - __builtin_clzl(value)) > 8) {
+ exp++;
+ value >>= 2;
+ }
+
+ /*
+ * Note that it is invalid to write a mantissa whose
+ * upper 2 bits are zero, unless the
+ * exponent is also zero.
+ */
+ if (!(value & 0xC0) && exp)
+ result = -1;
+ else
+ result = (exp << 8) | value;
+ }
+ return result;
+}
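/*
 * Worked example of the encoding above, assuming a hypothetical thresh_cmp
 * value of 1000 (ten bits wide):
 *
 *	pass 1: width 10 > 8, so exp = 1, value = 1000 >> 2 = 250 (0xFA)
 *	done:   width 8, mantissa 0xFA has its upper two bits set
 *
 *	result = (exp << 8) | mantissa = 0x1FA
 *
 * The hardware decodes this back as mantissa << (2 * exp) = 250 << 2 = 1000.
 * The largest representable threshold is 0xFF << (2 * 5) = 261120, which is
 * why larger requests are clamped.
 */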
+
static u64 thresh_cmp_val(u64 value)
{
+ if (cpu_has_feature(CPU_FTR_ARCH_31))
+ value = p10_thresh_cmp_val(value);
+
+ /*
+ * The location of the threshold compare bits in MMCRA
+ * differs on p8, so a different shift value is used there.
+ */
if (cpu_has_feature(CPU_FTR_ARCH_300))
return value << p9_MMCRA_THR_CMP_SHIFT;
-
- return value << MMCRA_THR_CMP_SHIFT;
+ else
+ return value << MMCRA_THR_CMP_SHIFT;
}
static unsigned long combine_from_event(u64 event)
@@ -131,10 +186,14 @@ static bool is_thresh_cmp_valid(u64 event)
{
unsigned int cmp, exp;
+ if (cpu_has_feature(CPU_FTR_ARCH_31))
+ return p10_thresh_cmp_val(event) >= 0;
+
/*
* Check the mantissa upper two bits are not zero, unless the
* exponent is also zero. See the THRESH_CMP_MANTISSA doc.
*/
+
cmp = (event >> EVENT_THR_CMP_SHIFT) & EVENT_THR_CMP_MASK;
exp = cmp >> 7;
@@ -161,36 +220,82 @@ static inline u64 isa207_find_source(u64 idx, u32 sub_idx)
/* Nothing to do */
break;
case 1:
- ret = PH(LVL, L1);
+ ret = PH(LVL, L1) | LEVEL(L1) | P(SNOOP, HIT);
break;
case 2:
- ret = PH(LVL, L2);
+ ret = PH(LVL, L2) | LEVEL(L2) | P(SNOOP, HIT);
break;
case 3:
- ret = PH(LVL, L3);
+ ret = PH(LVL, L3) | LEVEL(L3) | P(SNOOP, HIT);
break;
case 4:
- if (sub_idx <= 1)
- ret = PH(LVL, LOC_RAM);
- else if (sub_idx > 1 && sub_idx <= 2)
- ret = PH(LVL, REM_RAM1);
- else
- ret = PH(LVL, REM_RAM2);
- ret |= P(SNOOP, HIT);
+ if (cpu_has_feature(CPU_FTR_ARCH_31)) {
+ ret = P(SNOOP, HIT);
+
+ if (sub_idx == 1)
+ ret |= PH(LVL, LOC_RAM) | LEVEL(RAM);
+ else if (sub_idx == 2 || sub_idx == 3)
+ ret |= P(LVL, HIT) | LEVEL(PMEM);
+ else if (sub_idx == 4)
+ ret |= PH(LVL, REM_RAM1) | REM | LEVEL(RAM) | P(HOPS, 2);
+ else if (sub_idx == 5 || sub_idx == 7)
+ ret |= P(LVL, HIT) | LEVEL(PMEM) | REM;
+ else if (sub_idx == 6)
+ ret |= PH(LVL, REM_RAM2) | REM | LEVEL(RAM) | P(HOPS, 3);
+ } else {
+ if (sub_idx <= 1)
+ ret = PH(LVL, LOC_RAM);
+ else if (sub_idx > 1 && sub_idx <= 2)
+ ret = PH(LVL, REM_RAM1);
+ else
+ ret = PH(LVL, REM_RAM2);
+ ret |= P(SNOOP, HIT);
+ }
break;
case 5:
- ret = PH(LVL, REM_CCE1);
- if ((sub_idx == 0) || (sub_idx == 2) || (sub_idx == 4))
- ret |= P(SNOOP, HIT);
- else if ((sub_idx == 1) || (sub_idx == 3) || (sub_idx == 5))
- ret |= P(SNOOP, HITM);
+ if (cpu_has_feature(CPU_FTR_ARCH_31)) {
+ ret = REM | P(HOPS, 0);
+
+ if (sub_idx == 0 || sub_idx == 4)
+ ret |= PH(LVL, L2) | LEVEL(L2) | P(SNOOP, HIT);
+ else if (sub_idx == 1 || sub_idx == 5)
+ ret |= PH(LVL, L2) | LEVEL(L2) | P(SNOOP, HITM);
+ else if (sub_idx == 2 || sub_idx == 6)
+ ret |= PH(LVL, L3) | LEVEL(L3) | P(SNOOP, HIT);
+ else if (sub_idx == 3 || sub_idx == 7)
+ ret |= PH(LVL, L3) | LEVEL(L3) | P(SNOOP, HITM);
+ } else {
+ if (sub_idx == 0)
+ ret = PH(LVL, L2) | LEVEL(L2) | REM | P(SNOOP, HIT) | P(HOPS, 0);
+ else if (sub_idx == 1)
+ ret = PH(LVL, L2) | LEVEL(L2) | REM | P(SNOOP, HITM) | P(HOPS, 0);
+ else if (sub_idx == 2 || sub_idx == 4)
+ ret = PH(LVL, L3) | LEVEL(L3) | REM | P(SNOOP, HIT) | P(HOPS, 0);
+ else if (sub_idx == 3 || sub_idx == 5)
+ ret = PH(LVL, L3) | LEVEL(L3) | REM | P(SNOOP, HITM) | P(HOPS, 0);
+ }
break;
case 6:
- ret = PH(LVL, REM_CCE2);
- if ((sub_idx == 0) || (sub_idx == 2))
- ret |= P(SNOOP, HIT);
- else if ((sub_idx == 1) || (sub_idx == 3))
- ret |= P(SNOOP, HITM);
+ if (cpu_has_feature(CPU_FTR_ARCH_31)) {
+ if (sub_idx == 0)
+ ret = PH(LVL, REM_CCE1) | LEVEL(ANY_CACHE) | REM |
+ P(SNOOP, HIT) | P(HOPS, 2);
+ else if (sub_idx == 1)
+ ret = PH(LVL, REM_CCE1) | LEVEL(ANY_CACHE) | REM |
+ P(SNOOP, HITM) | P(HOPS, 2);
+ else if (sub_idx == 2)
+ ret = PH(LVL, REM_CCE2) | LEVEL(ANY_CACHE) | REM |
+ P(SNOOP, HIT) | P(HOPS, 3);
+ else if (sub_idx == 3)
+ ret = PH(LVL, REM_CCE2) | LEVEL(ANY_CACHE) | REM |
+ P(SNOOP, HITM) | P(HOPS, 3);
+ } else {
+ ret = PH(LVL, REM_CCE2);
+ if (sub_idx == 0 || sub_idx == 2)
+ ret |= P(SNOOP, HIT);
+ else if (sub_idx == 1 || sub_idx == 3)
+ ret |= P(SNOOP, HITM);
+ }
break;
case 7:
ret = PM(LVL, L1);
@@ -216,30 +321,84 @@ void isa207_get_mem_data_src(union perf_mem_data_src *dsrc, u32 flags,
sier = mfspr(SPRN_SIER);
val = (sier & ISA207_SIER_TYPE_MASK) >> ISA207_SIER_TYPE_SHIFT;
- if (val == 1 || val == 2) {
- idx = (sier & ISA207_SIER_LDST_MASK) >> ISA207_SIER_LDST_SHIFT;
- sub_idx = (sier & ISA207_SIER_DATA_SRC_MASK) >> ISA207_SIER_DATA_SRC_SHIFT;
+ if (val != 1 && val != 2 && !(val == 7 && cpu_has_feature(CPU_FTR_ARCH_31)))
+ return;
- dsrc->val = isa207_find_source(idx, sub_idx);
+ idx = (sier & ISA207_SIER_LDST_MASK) >> ISA207_SIER_LDST_SHIFT;
+ sub_idx = (sier & ISA207_SIER_DATA_SRC_MASK) >> ISA207_SIER_DATA_SRC_SHIFT;
+
+ dsrc->val = isa207_find_source(idx, sub_idx);
+ if (val == 7) {
+ u64 mmcra;
+ u32 op_type;
+
+ /*
+ * Type 0b111 denotes either a larx or an stcx instruction. Use
+ * the MMCRA sampling bits [57:59] along with the type value
+ * to determine the exact instruction type. If the sampling
+ * criterion is neither a load nor a store, default the type
+ * to NA.
+ */
+ mmcra = mfspr(SPRN_MMCRA);
+
+ op_type = (mmcra >> MMCRA_SAMP_ELIG_SHIFT) & MMCRA_SAMP_ELIG_MASK;
+ switch (op_type) {
+ case 5:
+ dsrc->val |= P(OP, LOAD);
+ break;
+ case 7:
+ dsrc->val |= P(OP, STORE);
+ break;
+ default:
+ dsrc->val |= P(OP, NA);
+ break;
+ }
+ } else {
dsrc->val |= (val == 1) ? P(OP, LOAD) : P(OP, STORE);
}
}
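/*
 * A worked example of the decode above, with hypothetical SIER contents:
 * type = 1 (load), idx = 2, sub_idx = 0 resolves to
 *
 *	dsrc->val = PH(LVL, L2) | LEVEL(L2) | P(SNOOP, HIT) | P(OP, LOAD)
 *
 * i.e. a load that hit in the local L2 cache.
 */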
-void isa207_get_mem_weight(u64 *weight)
+void isa207_get_mem_weight(u64 *weight, u64 type)
{
+ union perf_sample_weight *weight_fields;
+ u64 weight_lat;
u64 mmcra = mfspr(SPRN_MMCRA);
u64 exp = MMCRA_THR_CTR_EXP(mmcra);
u64 mantissa = MMCRA_THR_CTR_MANT(mmcra);
u64 sier = mfspr(SPRN_SIER);
u64 val = (sier & ISA207_SIER_TYPE_MASK) >> ISA207_SIER_TYPE_SHIFT;
- if (val == 0 || val == 7)
- *weight = 0;
+ if (cpu_has_feature(CPU_FTR_ARCH_31))
+ mantissa = P10_MMCRA_THR_CTR_MANT(mmcra);
+
+ if (val == 0 || (val == 7 && !cpu_has_feature(CPU_FTR_ARCH_31)))
+ weight_lat = 0;
else
- *weight = mantissa << (2 * exp);
+ weight_lat = mantissa << (2 * exp);
+
+ /*
+ * Use 64 bit weight field (full) if sample type is
+ * WEIGHT.
+ *
+ * If sample type is WEIGHT_STRUCT:
+ * - store memory latency in the lower 32 bits.
+ * - For ISA v3.1, use remaining two 16 bit fields of
+ * perf_sample_weight to store cycle counter values
+ * from sier2.
+ */
+ weight_fields = (union perf_sample_weight *)weight;
+ if (type & PERF_SAMPLE_WEIGHT)
+ weight_fields->full = weight_lat;
+ else {
+ weight_fields->var1_dw = (u32)weight_lat;
+ if (cpu_has_feature(CPU_FTR_ARCH_31)) {
+ weight_fields->var2_w = P10_SIER2_FINISH_CYC(mfspr(SPRN_SIER2));
+ weight_fields->var3_w = P10_SIER2_DISPATCH_CYC(mfspr(SPRN_SIER2));
+ }
+ }
}
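/*
 * A minimal sketch of the resulting layout, with hypothetical numbers, for
 * an event using PERF_SAMPLE_WEIGHT_STRUCT on an ISA v3.1 CPU. For a sample
 * whose threshold counter decoded to 300 cycles, with SIER2 reporting 40
 * finish cycles and 10 dispatch cycles, the function above fills:
 *
 *	weight_fields->var1_dw = 300;	memory access latency
 *	weight_fields->var2_w  = 40;	P10_SIER2_FINISH_CYC(sier2)
 *	weight_fields->var3_w  = 10;	P10_SIER2_DISPATCH_CYC(sier2)
 *
 * With plain PERF_SAMPLE_WEIGHT, the whole 64-bit field carries the latency.
 */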
-int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp)
+int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp, u64 event_config1)
{
unsigned int unit, pmc, cache, ebb;
unsigned long mask, value;
@@ -251,7 +410,12 @@ int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp)
pmc = (event >> EVENT_PMC_SHIFT) & EVENT_PMC_MASK;
unit = (event >> EVENT_UNIT_SHIFT) & EVENT_UNIT_MASK;
- cache = (event >> EVENT_CACHE_SEL_SHIFT) & EVENT_CACHE_SEL_MASK;
+ if (cpu_has_feature(CPU_FTR_ARCH_31))
+ cache = (event >> EVENT_CACHE_SEL_SHIFT) &
+ p10_EVENT_CACHE_SEL_MASK;
+ else
+ cache = (event >> EVENT_CACHE_SEL_SHIFT) &
+ EVENT_CACHE_SEL_MASK;
ebb = (event >> EVENT_EBB_SHIFT) & EVENT_EBB_MASK;
if (pmc) {
@@ -269,6 +433,15 @@ int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp)
mask |= CNST_PMC_MASK(pmc);
value |= CNST_PMC_VAL(pmc);
+
+ /*
+ * PMC5 and PMC6 are used to count cycles and instructions and
+ * they do not support most of the constraint bits. Add a check
+ * to exclude PMC5/6 from most of the constraints except for
+ * EBB/BHRB.
+ */
+ if (pmc >= 5)
+ goto ebb_bhrb;
}
if (pmc <= 4) {
@@ -283,7 +456,12 @@ int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp)
}
if (unit >= 6 && unit <= 9) {
- if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+ if (cpu_has_feature(CPU_FTR_ARCH_31)) {
+ if (unit == 6) {
+ mask |= CNST_L2L3_GROUP_MASK;
+ value |= CNST_L2L3_GROUP_VAL(event >> p10_L2L3_EVENT_SHIFT);
+ }
+ } else if (cpu_has_feature(CPU_FTR_ARCH_300)) {
mask |= CNST_CACHE_GROUP_MASK;
value |= CNST_CACHE_GROUP_VAL(event & 0xff);
@@ -308,16 +486,30 @@ int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp)
value |= CNST_L1_QUAL_VAL(cache);
}
+ if (cpu_has_feature(CPU_FTR_ARCH_31)) {
+ mask |= CNST_RADIX_SCOPE_GROUP_MASK;
+ value |= CNST_RADIX_SCOPE_GROUP_VAL(event >> p10_EVENT_RADIX_SCOPE_QUAL_SHIFT);
+ }
+
if (is_event_marked(event)) {
mask |= CNST_SAMPLE_MASK;
value |= CNST_SAMPLE_VAL(event >> EVENT_SAMPLE_SHIFT);
}
- if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+ if (cpu_has_feature(CPU_FTR_ARCH_31)) {
+ if (event_is_threshold(event) && is_thresh_cmp_valid(event_config1)) {
+ mask |= CNST_THRESH_CTL_SEL_MASK;
+ value |= CNST_THRESH_CTL_SEL_VAL(event >> EVENT_THRESH_SHIFT);
+ mask |= p10_CNST_THRESH_CMP_MASK;
+ value |= p10_CNST_THRESH_CMP_VAL(p10_thresh_cmp_val(event_config1));
+ } else if (event_is_threshold(event))
+ return -1;
+ } else if (cpu_has_feature(CPU_FTR_ARCH_300)) {
if (event_is_threshold(event) && is_thresh_cmp_valid(event)) {
mask |= CNST_THRESH_MASK;
value |= CNST_THRESH_VAL(event >> EVENT_THRESH_SHIFT);
- }
+ } else if (event_is_threshold(event))
+ return -1;
} else {
/*
* Special case for PM_MRK_FAB_RSP_MATCH and PM_MRK_FAB_RSP_MATCH_CYC,
@@ -335,6 +527,7 @@ int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp)
}
}
+ebb_bhrb:
if (!pmc && ebb)
/* EBB events must specify the PMC */
return -1;
@@ -353,8 +546,8 @@ int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp)
* EBB events are pinned & exclusive, so this should never actually
* hit, but we leave it as a fallback in case.
*/
- mask |= CNST_EBB_VAL(ebb);
- value |= CNST_EBB_MASK;
+ mask |= CNST_EBB_MASK;
+ value |= CNST_EBB_VAL(ebb);
*maskp = mask;
*valp = value;
@@ -363,10 +556,11 @@ int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp)
}
int isa207_compute_mmcr(u64 event[], int n_ev,
- unsigned int hwc[], unsigned long mmcr[],
- struct perf_event *pevents[])
+ unsigned int hwc[], struct mmcr_regs *mmcr,
+ struct perf_event *pevents[], u32 flags)
{
unsigned long mmcra, mmcr1, mmcr2, unit, combine, psel, cache, val;
+ unsigned long mmcr3;
unsigned int pmc, pmc_inuse;
int i;
@@ -379,7 +573,14 @@ int isa207_compute_mmcr(u64 event[], int n_ev,
pmc_inuse |= 1 << pmc;
}
- mmcra = mmcr1 = mmcr2 = 0;
+ mmcra = mmcr1 = mmcr2 = mmcr3 = 0;
+
+ /*
+ * Disable BHRB unless explicitly requested, by setting
+ * the MMCRA (BHRBRD) bit.
+ */
+ if (cpu_has_feature(CPU_FTR_ARCH_31))
+ mmcra |= MMCRA_BHRB_DISABLE;
/* Second pass: assign PMCs, set all MMCR1 fields */
for (i = 0; i < n_ev; ++i) {
@@ -416,6 +617,13 @@ int isa207_compute_mmcr(u64 event[], int n_ev,
}
}
+ /* Set RADIX_SCOPE_QUAL bit */
+ if (cpu_has_feature(CPU_FTR_ARCH_31)) {
+ val = (event[i] >> p10_EVENT_RADIX_SCOPE_QUAL_SHIFT) &
+ p10_EVENT_RADIX_SCOPE_QUAL_MASK;
+ mmcr1 |= val << p10_MMCR1_RADIX_SCOPE_QUAL_SHIFT;
+ }
+
if (is_event_marked(event[i])) {
mmcra |= MMCRA_SAMPLE_ENABLE;
@@ -438,8 +646,21 @@ int isa207_compute_mmcr(u64 event[], int n_ev,
mmcra |= val << MMCRA_THR_CTL_SHIFT;
val = (event[i] >> EVENT_THR_SEL_SHIFT) & EVENT_THR_SEL_MASK;
mmcra |= val << MMCRA_THR_SEL_SHIFT;
- val = (event[i] >> EVENT_THR_CMP_SHIFT) & EVENT_THR_CMP_MASK;
- mmcra |= thresh_cmp_val(val);
+ if (!cpu_has_feature(CPU_FTR_ARCH_31)) {
+ val = (event[i] >> EVENT_THR_CMP_SHIFT) &
+ EVENT_THR_CMP_MASK;
+ mmcra |= thresh_cmp_val(val);
+ } else if (flags & PPMU_HAS_ATTR_CONFIG1) {
+ val = (pevents[i]->attr.config1 >> p10_EVENT_THR_CMP_SHIFT) &
+ p10_EVENT_THR_CMP_MASK;
+ mmcra |= thresh_cmp_val(val);
+ }
+ }
+
+ if (cpu_has_feature(CPU_FTR_ARCH_31) && (unit == 6)) {
+ val = (event[i] >> p10_L2L3_EVENT_SHIFT) &
+ p10_EVENT_L2L3_SEL_MASK;
+ mmcr2 |= val << p10_L2L3_SEL_SHIFT;
}
if (event[i] & EVENT_WANTS_BHRB) {
@@ -447,6 +668,11 @@ int isa207_compute_mmcr(u64 event[], int n_ev,
mmcra |= val << MMCRA_IFM_SHIFT;
}
+ /* Set MMCRA (BHRBRD) to 0 if there is a user request for BHRB */
+ if (cpu_has_feature(CPU_FTR_ARCH_31) &&
+ (has_branch_stack(pevents[i]) || (event[i] & EVENT_WANTS_BHRB)))
+ mmcra &= ~MMCRA_BHRB_DISABLE;
+
if (pevents[i]->attr.exclude_user)
mmcr2 |= MMCR2_FCP(pmc);
@@ -460,34 +686,54 @@ int isa207_compute_mmcr(u64 event[], int n_ev,
mmcr2 |= MMCR2_FCS(pmc);
}
+ if (pevents[i]->attr.exclude_idle)
+ mmcr2 |= MMCR2_FCWAIT(pmc);
+
+ if (cpu_has_feature(CPU_FTR_ARCH_31)) {
+ if (pmc <= 4) {
+ val = (event[i] >> p10_EVENT_MMCR3_SHIFT) &
+ p10_EVENT_MMCR3_MASK;
+ mmcr3 |= val << MMCR3_SHIFT(pmc);
+ }
+ }
+
hwc[i] = pmc - 1;
}
/* Return MMCRx values */
- mmcr[0] = 0;
+ mmcr->mmcr0 = 0;
/* pmc_inuse is 1-based */
if (pmc_inuse & 2)
- mmcr[0] = MMCR0_PMC1CE;
+ mmcr->mmcr0 = MMCR0_PMC1CE;
if (pmc_inuse & 0x7c)
- mmcr[0] |= MMCR0_PMCjCE;
+ mmcr->mmcr0 |= MMCR0_PMCjCE;
/* If we're not using PMC 5 or 6, freeze them */
if (!(pmc_inuse & 0x60))
- mmcr[0] |= MMCR0_FC56;
+ mmcr->mmcr0 |= MMCR0_FC56;
+
+ /*
+ * Set MMCR0 (PMCCEXT) for p10, which
+ * restricts access to group B registers
+ * when MMCR0 PMCC=0b00.
+ */
+ if (cpu_has_feature(CPU_FTR_ARCH_31))
+ mmcr->mmcr0 |= MMCR0_PMCCEXT;
- mmcr[1] = mmcr1;
- mmcr[2] = mmcra;
- mmcr[3] = mmcr2;
+ mmcr->mmcr1 = mmcr1;
+ mmcr->mmcra = mmcra;
+ mmcr->mmcr2 = mmcr2;
+ mmcr->mmcr3 = mmcr3;
return 0;
}
-void isa207_disable_pmc(unsigned int pmc, unsigned long mmcr[])
+void isa207_disable_pmc(unsigned int pmc, struct mmcr_regs *mmcr)
{
if (pmc <= 3)
- mmcr[1] &= ~(0xffUL << MMCR1_PMCSEL_SHIFT(pmc + 1));
+ mmcr->mmcr1 &= ~(0xffUL << MMCR1_PMCSEL_SHIFT(pmc + 1));
}
static int find_alternative(u64 event, const unsigned int ev_alt[][MAX_ALT], int size)
@@ -550,3 +796,45 @@ int isa207_get_alternatives(u64 event, u64 alt[], int size, unsigned int flags,
return num_alt;
}
+
+int isa3XX_check_attr_config(struct perf_event *ev)
+{
+ u64 val, sample_mode;
+ u64 event = ev->attr.config;
+
+ val = (event >> EVENT_SAMPLE_SHIFT) & EVENT_SAMPLE_MASK;
+ sample_mode = val & 0x3;
+
+ /*
+ * MMCRA[61:62] is Random Sampling Mode (SM).
+ * A value of 0b11 is reserved.
+ */
+ if (sample_mode == 0x3)
+ return -EINVAL;
+
+ /*
+ * Check for all reserved values.
+ * Source: Performance Monitoring Unit User Guide
+ */
+ switch (val) {
+ case 0x5:
+ case 0x9:
+ case 0xD:
+ case 0x19:
+ case 0x1D:
+ case 0x1A:
+ case 0x1E:
+ return -EINVAL;
+ }
+
+ /*
+ * MMCRA[48:51]/[52:55] Threshold Start/Stop
+ * Events Selection.
+ * 0b11110000/0b00001111 is reserved.
+ */
+ val = (event >> EVENT_THR_CTL_SHIFT) & EVENT_THR_CTL_MASK;
+ if (((val & 0xF0) == 0xF0) || ((val & 0xF) == 0xF))
+ return -EINVAL;
+
+ return 0;
+}
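/*
 * A minimal sketch of the intended use, assuming a hypothetical caller in a
 * PMU's event-init path: events encoding a reserved random sampling mode or
 * a reserved threshold start/stop selection are rejected before scheduling.
 *
 *	if (isa3XX_check_attr_config(event))
 *		return -EINVAL;
 */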
diff --git a/arch/powerpc/perf/isa207-common.h b/arch/powerpc/perf/isa207-common.h
index 63fd4f3f6013..f594fa6580d1 100644
--- a/arch/powerpc/perf/isa207-common.h
+++ b/arch/powerpc/perf/isa207-common.h
@@ -13,6 +13,8 @@
#include <asm/firmware.h>
#include <asm/cputable.h>
+#include "internal.h"
+
#define EVENT_EBB_MASK 1ull
#define EVENT_EBB_SHIFT PERF_EVENT_CONFIG_EBB_SHIFT
#define EVENT_BHRB_MASK 1ull
@@ -87,20 +89,53 @@
EVENT_LINUX_MASK | \
EVENT_PSEL_MASK))
+/* Constants to support the power10 raw encoding format */
+#define p10_SDAR_MODE_SHIFT 22
+#define p10_SDAR_MODE_MASK 0x3ull
+#define p10_SDAR_MODE(v) (((v) >> p10_SDAR_MODE_SHIFT) & \
+ p10_SDAR_MODE_MASK)
+#define p10_EVENT_L2L3_SEL_MASK 0x1f
+#define p10_L2L3_SEL_SHIFT 3
+#define p10_L2L3_EVENT_SHIFT 40
+#define p10_EVENT_THRESH_MASK 0xffffull
+#define p10_EVENT_CACHE_SEL_MASK 0x3ull
+#define p10_EVENT_MMCR3_MASK 0x7fffull
+#define p10_EVENT_MMCR3_SHIFT 45
+#define p10_EVENT_RADIX_SCOPE_QUAL_SHIFT 9
+#define p10_EVENT_RADIX_SCOPE_QUAL_MASK 0x1
+#define p10_MMCR1_RADIX_SCOPE_QUAL_SHIFT 45
+
+/* Event Threshold Compare bit constant for power10 in config1 attribute */
+#define p10_EVENT_THR_CMP_SHIFT 0
+#define p10_EVENT_THR_CMP_MASK 0x3FFFFull
+
+#define p10_EVENT_VALID_MASK \
+ ((p10_SDAR_MODE_MASK << p10_SDAR_MODE_SHIFT | \
+ (p10_EVENT_THRESH_MASK << EVENT_THRESH_SHIFT) | \
+ (EVENT_SAMPLE_MASK << EVENT_SAMPLE_SHIFT) | \
+ (p10_EVENT_CACHE_SEL_MASK << EVENT_CACHE_SEL_SHIFT) | \
+ (EVENT_PMC_MASK << EVENT_PMC_SHIFT) | \
+ (EVENT_UNIT_MASK << EVENT_UNIT_SHIFT) | \
+ (p9_EVENT_COMBINE_MASK << p9_EVENT_COMBINE_SHIFT) | \
+ (p10_EVENT_MMCR3_MASK << p10_EVENT_MMCR3_SHIFT) | \
+ (EVENT_MARKED_MASK << EVENT_MARKED_SHIFT) | \
+ (p10_EVENT_RADIX_SCOPE_QUAL_MASK << p10_EVENT_RADIX_SCOPE_QUAL_SHIFT) | \
+ EVENT_LINUX_MASK | \
+ EVENT_PSEL_MASK))
/*
* Layout of constraint bits:
*
* 60 56 52 48 44 40 36 32
* | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
* [ fab_match ] [ thresh_cmp ] [ thresh_ctl ] [ ]
- * |
- * thresh_sel -*
+ * | |
+ * [ thresh_cmp bits for p10] thresh_sel -*
*
* 28 24 20 16 12 8 4 0
* | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
- * [ ] | [ ] [ sample ] [ ] [6] [5] [4] [3] [2] [1]
- * | | | |
- * BHRB IFM -* | | | Count of events for each PMC.
+ * [ ] | [ ] | [ sample ] [ ] [6] [5] [4] [3] [2] [1]
+ * | | | | |
+ * BHRB IFM -* | | |*radix_scope | Count of events for each PMC.
* EBB -* | | p1, p2, p3, p4, p5, p6.
* L1 I/D qualifier -* |
* nc - number of counters -*
@@ -118,6 +153,12 @@
#define CNST_THRESH_VAL(v) (((v) & EVENT_THRESH_MASK) << 32)
#define CNST_THRESH_MASK CNST_THRESH_VAL(EVENT_THRESH_MASK)
+#define CNST_THRESH_CTL_SEL_VAL(v) (((v) & 0x7ffull) << 32)
+#define CNST_THRESH_CTL_SEL_MASK CNST_THRESH_CTL_SEL_VAL(0x7ff)
+
+#define p10_CNST_THRESH_CMP_VAL(v) (((v) & 0x7ffull) << 43)
+#define p10_CNST_THRESH_CMP_MASK p10_CNST_THRESH_CMP_VAL(0x7ff)
+
#define CNST_EBB_VAL(v) (((v) & EVENT_EBB_MASK) << 24)
#define CNST_EBB_MASK CNST_EBB_VAL(EVENT_EBB_MASK)
@@ -135,6 +176,12 @@
#define CNST_CACHE_PMC4_VAL (1ull << 54)
#define CNST_CACHE_PMC4_MASK CNST_CACHE_PMC4_VAL
+#define CNST_L2L3_GROUP_VAL(v) (((v) & 0x1full) << 55)
+#define CNST_L2L3_GROUP_MASK CNST_L2L3_GROUP_VAL(0x1f)
+
+#define CNST_RADIX_SCOPE_GROUP_VAL(v) (((v) & 0x1ull) << 21)
+#define CNST_RADIX_SCOPE_GROUP_MASK CNST_RADIX_SCOPE_GROUP_VAL(1)
+
/*
* For NC we are counting up to 4 events. This requires three bits, and we need
* the fifth event to overflow and set the 4th bit. To achieve that we bias the
@@ -173,6 +220,7 @@
/* Bits in MMCRA for PowerISA v2.07 */
#define MMCRA_SAMP_MODE_SHIFT 1
#define MMCRA_SAMP_ELIG_SHIFT 4
+#define MMCRA_SAMP_ELIG_MASK 7
#define MMCRA_THR_CTL_SHIFT 8
#define MMCRA_THR_SEL_SHIFT 16
#define MMCRA_THR_CMP_SHIFT 32
@@ -191,17 +239,25 @@
#define MMCRA_THR_CTR_EXP(v) (((v) >> MMCRA_THR_CTR_EXP_SHIFT) &\
MMCRA_THR_CTR_EXP_MASK)
-/* MMCR1 Threshold Compare bit constant for power9 */
+#define P10_MMCRA_THR_CTR_MANT_MASK 0xFFul
+#define P10_MMCRA_THR_CTR_MANT(v) (((v) >> MMCRA_THR_CTR_MANT_SHIFT) &\
+ P10_MMCRA_THR_CTR_MANT_MASK)
+
+/* MMCRA Threshold Compare bit constant for power9 */
#define p9_MMCRA_THR_CMP_SHIFT 45
/* Bits in MMCR2 for PowerISA v2.07 */
#define MMCR2_FCS(pmc) (1ull << (63 - (((pmc) - 1) * 9)))
#define MMCR2_FCP(pmc) (1ull << (62 - (((pmc) - 1) * 9)))
+#define MMCR2_FCWAIT(pmc) (1ull << (58 - (((pmc) - 1) * 9)))
#define MMCR2_FCH(pmc) (1ull << (57 - (((pmc) - 1) * 9)))
#define MAX_ALT 2
#define MAX_PMU_COUNTERS 6
+/* Bits in MMCR3 for PowerISA v3.10 */
+#define MMCR3_SHIFT(pmc) (49 - (15 * ((pmc) - 1)))
+
#define ISA207_SIER_TYPE_SHIFT 15
#define ISA207_SIER_TYPE_MASK (0x7ull << ISA207_SIER_TYPE_SHIFT)
@@ -211,19 +267,27 @@
#define ISA207_SIER_DATA_SRC_SHIFT 53
#define ISA207_SIER_DATA_SRC_MASK (0x7ull << ISA207_SIER_DATA_SRC_SHIFT)
+/* Bits in SIER2/SIER3 for Power10 */
+#define P10_SIER2_FINISH_CYC(sier2) (((sier2) >> (63 - 37)) & 0x7fful)
+#define P10_SIER2_DISPATCH_CYC(sier2) (((sier2) >> (63 - 13)) & 0x7fful)
+
#define P(a, b) PERF_MEM_S(a, b)
#define PH(a, b) (P(LVL, HIT) | P(a, b))
#define PM(a, b) (P(LVL, MISS) | P(a, b))
+#define LEVEL(x) P(LVLNUM, x)
+#define REM P(REMOTE, REMOTE)
-int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp);
+int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp, u64 event_config1);
int isa207_compute_mmcr(u64 event[], int n_ev,
- unsigned int hwc[], unsigned long mmcr[],
- struct perf_event *pevents[]);
-void isa207_disable_pmc(unsigned int pmc, unsigned long mmcr[]);
+ unsigned int hwc[], struct mmcr_regs *mmcr,
+ struct perf_event *pevents[], u32 flags);
+void isa207_disable_pmc(unsigned int pmc, struct mmcr_regs *mmcr);
int isa207_get_alternatives(u64 event, u64 alt[], int size, unsigned int flags,
const unsigned int ev_alt[][MAX_ALT]);
void isa207_get_mem_data_src(union perf_mem_data_src *dsrc, u32 flags,
struct pt_regs *regs);
-void isa207_get_mem_weight(u64 *weight);
+void isa207_get_mem_weight(u64 *weight, u64 type);
+
+int isa3XX_check_attr_config(struct perf_event *ev);
#endif
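p10_EVENT_VALID_MASK above whitelists every raw-encoding field that Power10 understands. A hedged sketch of how such a mask is typically applied (the helper name is made up for illustration; in the kernel the equivalent check sits in the common constraint code):

#include <stdbool.h>
#include <stdint.h>

/*
 * A raw event encoding is acceptable only if it sets no bits outside the
 * architecture's valid mask, e.g. p10_EVENT_VALID_MASK for Power10 events.
 */
static bool event_encoding_is_valid(uint64_t event, uint64_t valid_mask)
{
	return (event & ~valid_mask) == 0;
}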
diff --git a/arch/powerpc/perf/mpc7450-pmu.c b/arch/powerpc/perf/mpc7450-pmu.c
index 4d5ef92511d1..552d51a925d3 100644
--- a/arch/powerpc/perf/mpc7450-pmu.c
+++ b/arch/powerpc/perf/mpc7450-pmu.c
@@ -148,7 +148,7 @@ static u32 classbits[N_CLASSES - 1][2] = {
};
static int mpc7450_get_constraint(u64 event, unsigned long *maskp,
- unsigned long *valp)
+ unsigned long *valp, u64 event_config1 __maybe_unused)
{
int pmc, class;
u32 mask, value;
@@ -257,8 +257,9 @@ static const u32 pmcsel_mask[N_COUNTER] = {
* Compute MMCR0/1/2 values for a set of events.
*/
static int mpc7450_compute_mmcr(u64 event[], int n_ev, unsigned int hwc[],
- unsigned long mmcr[],
- struct perf_event *pevents[])
+ struct mmcr_regs *mmcr,
+ struct perf_event *pevents[],
+ u32 flags __maybe_unused)
{
u8 event_index[N_CLASSES][N_COUNTER];
int n_classevent[N_CLASSES];
@@ -321,9 +322,16 @@ static int mpc7450_compute_mmcr(u64 event[], int n_ev, unsigned int hwc[],
mmcr0 |= MMCR0_PMCnCE;
/* Return MMCRx values */
- mmcr[0] = mmcr0;
- mmcr[1] = mmcr1;
- mmcr[2] = mmcr2;
+ mmcr->mmcr0 = mmcr0;
+ mmcr->mmcr1 = mmcr1;
+ mmcr->mmcr2 = mmcr2;
+ /*
+ * 32-bit doesn't have an MMCRA and uses SPRN_MMCR2 to define
+ * SPRN_MMCRA. So assign the mmcra field of cpu_hw_events the
+ * `mmcr2` value, to ensure that any write to SPRN_MMCRA
+ * actually uses the mmcr2 value.
+ */
+ mmcr->mmcra = mmcr2;
return 0;
}
@@ -331,12 +339,12 @@ static int mpc7450_compute_mmcr(u64 event[], int n_ev, unsigned int hwc[],
* Disable counting by a PMC.
* Note that the pmc argument is 0-based here, not 1-based.
*/
-static void mpc7450_disable_pmc(unsigned int pmc, unsigned long mmcr[])
+static void mpc7450_disable_pmc(unsigned int pmc, struct mmcr_regs *mmcr)
{
if (pmc <= 1)
- mmcr[0] &= ~(pmcsel_mask[pmc] << pmcsel_shift[pmc]);
+ mmcr->mmcr0 &= ~(pmcsel_mask[pmc] << pmcsel_shift[pmc]);
else
- mmcr[1] &= ~(pmcsel_mask[pmc] << pmcsel_shift[pmc]);
+ mmcr->mmcr1 &= ~(pmcsel_mask[pmc] << pmcsel_shift[pmc]);
}
static int mpc7450_generic_events[] = {
@@ -354,7 +362,7 @@ static int mpc7450_generic_events[] = {
* 0 means not supported, -1 means nonsensical, other values
* are event codes.
*/
-static int mpc7450_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
+static u64 mpc7450_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
[C(L1D)] = { /* RESULT_ACCESS RESULT_MISS */
[C(OP_READ)] = { 0, 0x225 },
[C(OP_WRITE)] = { 0, 0x227 },
@@ -409,8 +417,9 @@ struct power_pmu mpc7450_pmu = {
static int __init init_mpc7450_pmu(void)
{
- if (!cur_cpu_spec->oprofile_cpu_type ||
- strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc/7450"))
+ unsigned int pvr = mfspr(SPRN_PVR);
+
+ if (PVR_VER(pvr) != PVR_7450)
return -ENODEV;
return register_power_pmu(&mpc7450_pmu);
diff --git a/arch/powerpc/perf/perf_regs.c b/arch/powerpc/perf/perf_regs.c
index a213a0aa5d25..350dccb0143c 100644
--- a/arch/powerpc/perf/perf_regs.c
+++ b/arch/powerpc/perf/perf_regs.c
@@ -13,9 +13,11 @@
#include <asm/ptrace.h>
#include <asm/perf_regs.h>
+u64 PERF_REG_EXTENDED_MASK;
+
#define PT_REGS_OFFSET(id, r) [id] = offsetof(struct pt_regs, r)
-#define REG_RESERVED (~((1ULL << PERF_REG_POWERPC_MAX) - 1))
+#define REG_RESERVED (~(PERF_REG_EXTENDED_MASK | PERF_REG_PMU_MASK))
static unsigned int pt_regs_offset[PERF_REG_POWERPC_MAX] = {
PT_REGS_OFFSET(PERF_REG_POWERPC_R0, gpr[0]),
@@ -69,11 +71,36 @@ static unsigned int pt_regs_offset[PERF_REG_POWERPC_MAX] = {
PT_REGS_OFFSET(PERF_REG_POWERPC_MMCRA, dsisr),
};
-u64 perf_reg_value(struct pt_regs *regs, int idx)
+/* Function to return the extended register values */
+static u64 get_ext_regs_value(int idx)
{
- if (WARN_ON_ONCE(idx >= PERF_REG_POWERPC_MAX))
- return 0;
+ switch (idx) {
+ case PERF_REG_POWERPC_PMC1 ... PERF_REG_POWERPC_PMC6:
+ return get_pmcs_ext_regs(idx - PERF_REG_POWERPC_PMC1);
+ case PERF_REG_POWERPC_MMCR0:
+ return mfspr(SPRN_MMCR0);
+ case PERF_REG_POWERPC_MMCR1:
+ return mfspr(SPRN_MMCR1);
+ case PERF_REG_POWERPC_MMCR2:
+ return mfspr(SPRN_MMCR2);
+#ifdef CONFIG_PPC64
+ case PERF_REG_POWERPC_MMCR3:
+ return mfspr(SPRN_MMCR3);
+ case PERF_REG_POWERPC_SIER2:
+ return mfspr(SPRN_SIER2);
+ case PERF_REG_POWERPC_SIER3:
+ return mfspr(SPRN_SIER3);
+ case PERF_REG_POWERPC_SDAR:
+ return mfspr(SPRN_SDAR);
+#endif
+ case PERF_REG_POWERPC_SIAR:
+ return mfspr(SPRN_SIAR);
+ default: return 0;
+ }
+}
+u64 perf_reg_value(struct pt_regs *regs, int idx)
+{
if (idx == PERF_REG_POWERPC_SIER &&
(IS_ENABLED(CONFIG_FSL_EMB_PERF_EVENT) ||
IS_ENABLED(CONFIG_PPC32) ||
@@ -85,6 +112,16 @@ u64 perf_reg_value(struct pt_regs *regs, int idx)
IS_ENABLED(CONFIG_PPC32)))
return 0;
+ if (idx >= PERF_REG_POWERPC_MAX && idx < PERF_REG_EXTENDED_MAX)
+ return get_ext_regs_value(idx);
+
+ /*
+ * If the idx refers to a value beyond the
+ * supported registers, return 0 with a warning.
+ */
+ if (WARN_ON_ONCE(idx >= PERF_REG_EXTENDED_MAX))
+ return 0;
+
return regs_get_register(regs, pt_regs_offset[idx]);
}
@@ -97,17 +134,14 @@ int perf_reg_validate(u64 mask)
u64 perf_reg_abi(struct task_struct *task)
{
-#ifdef CONFIG_PPC64
- if (!test_tsk_thread_flag(task, TIF_32BIT))
- return PERF_SAMPLE_REGS_ABI_64;
+ if (is_tsk_32bit_task(task))
+ return PERF_SAMPLE_REGS_ABI_32;
else
-#endif
- return PERF_SAMPLE_REGS_ABI_32;
+ return PERF_SAMPLE_REGS_ABI_64;
}
void perf_get_regs_user(struct perf_regs *regs_user,
- struct pt_regs *regs,
- struct pt_regs *regs_user_copy)
+ struct pt_regs *regs)
{
regs_user->regs = task_pt_regs(current);
regs_user->abi = (regs_user->regs) ? perf_reg_abi(current) :
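perf_reg_value() above returns live SPR contents for indices past PERF_REG_POWERPC_MAX, so a profiler can request the PMU registers in its interrupt-time register dumps. A hypothetical userspace sketch of that request, assuming the PERF_REG_POWERPC_* names used in the hunk are exported through the uapi perf_regs.h header:

#include <linux/perf_event.h>
#include <asm/perf_regs.h>	/* assumed to export the PERF_REG_POWERPC_* enum */
#include <string.h>

/* Ask for MMCR0..MMCR2 and PMC1 in each sampled interrupt register dump. */
static void request_extended_regs(struct perf_event_attr *attr)
{
	memset(attr, 0, sizeof(*attr));
	attr->size = sizeof(*attr);
	attr->sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_REGS_INTR;
	attr->sample_regs_intr = (1ULL << PERF_REG_POWERPC_MMCR0) |
				 (1ULL << PERF_REG_POWERPC_MMCR1) |
				 (1ULL << PERF_REG_POWERPC_MMCR2) |
				 (1ULL << PERF_REG_POWERPC_PMC1);
}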
diff --git a/arch/powerpc/perf/power10-events-list.h b/arch/powerpc/perf/power10-events-list.h
new file mode 100644
index 000000000000..564f14097f07
--- /dev/null
+++ b/arch/powerpc/perf/power10-events-list.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Performance counter support for POWER10 processors.
+ *
+ * Copyright 2020 Madhavan Srinivasan, IBM Corporation.
+ * Copyright 2020 Athira Rajeev, IBM Corporation.
+ */
+
+/*
+ * Power10 event codes.
+ */
+EVENT(PM_CYC, 0x600f4);
+EVENT(PM_DISP_STALL_CYC, 0x100f8);
+EVENT(PM_EXEC_STALL, 0x30008);
+EVENT(PM_INST_CMPL, 0x500fa);
+EVENT(PM_BR_CMPL, 0x4d05e);
+EVENT(PM_BR_MPRED_CMPL, 0x400f6);
+EVENT(PM_BR_FIN, 0x2f04a);
+EVENT(PM_MPRED_BR_FIN, 0x3e098);
+EVENT(PM_LD_DEMAND_MISS_L1_FIN, 0x400f0);
+
+/* All L1 D cache load references counted at finish, gated by reject */
+EVENT(PM_LD_REF_L1, 0x100fc);
+/* Load Missed L1 */
+EVENT(PM_LD_MISS_L1, 0x3e054);
+/* Store Missed L1 */
+EVENT(PM_ST_MISS_L1, 0x300f0);
+/* L1 cache data prefetches */
+EVENT(PM_LD_PREFETCH_CACHE_LINE_MISS, 0x1002c);
+/* Demand iCache Miss */
+EVENT(PM_L1_ICACHE_MISS, 0x200fc);
+/* Instruction fetches from L1 */
+EVENT(PM_INST_FROM_L1, 0x04080);
+/* Instruction demand sectors written into IL1 */
+EVENT(PM_INST_FROM_L1MISS, 0x03f00000001c040);
+/* Instruction prefetch written into IL1 */
+EVENT(PM_IC_PREF_REQ, 0x040a0);
+/* The data cache was reloaded from local core's L3 due to a demand load */
+EVENT(PM_DATA_FROM_L3, 0x01340000001c040);
+/* Demand LD - L3 Miss (not L2 hit and not L3 hit) */
+EVENT(PM_DATA_FROM_L3MISS, 0x300fe);
+/* All successful D-side store dispatches for this thread */
+EVENT(PM_L2_ST, 0x010000046080);
+/* All successful D-side store dispatches for this thread that were L2 Miss */
+EVENT(PM_L2_ST_MISS, 0x26880);
+/* Total HW L3 prefetches(Load+store) */
+EVENT(PM_L3_PF_MISS_L3, 0x100000016080);
+/* Data PTEG reload */
+EVENT(PM_DTLB_MISS, 0x300fc);
+/* ITLB Reloaded */
+EVENT(PM_ITLB_MISS, 0x400fc);
+
+EVENT(PM_CYC_ALT, 0x0001e);
+EVENT(PM_INST_CMPL_ALT, 0x00002);
+
+/*
+ * Memory Access Events
+ *
+ * The primary PMU event used here is PM_MRK_INST_CMPL (0x401e0).
+ * To enable capturing of memory profiling, these MMCRA bits
+ * need to be programmed along with the corresponding raw event
+ * format encoding.
+ *
+ * MMCRA bits encoding needed are
+ * SM (Sampling Mode)
+ * EM (Eligibility for Random Sampling)
+ * TECE (Threshold Event Counter Event)
+ * TS (Threshold Start Event)
+ * TE (Threshold End Event)
+ *
+ * Corresponding Raw Encoding bits:
+ * sample [EM,SM]
+ * thresh_sel (TECE)
+ * thresh start (TS)
+ * thresh end (TE)
+ */
+
+EVENT(MEM_LOADS, 0x35340401e0);
+EVENT(MEM_STORES, 0x353c0401e0);
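MEM_LOADS and MEM_STORES above are full raw encodings (marked-instruction sampling plus the MMCRA threshold/sample bits described in the comment). A hypothetical userspace sketch of opening the mem-loads encoding through perf_event_open on a Power10 system; the raw config value is taken verbatim from the list above, everything else is generic perf API usage:

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <string.h>
#include <unistd.h>

static int open_p10_mem_loads(pid_t pid)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_RAW;
	attr.config = 0x35340401e0ULL;	/* MEM_LOADS from the event list above */
	attr.sample_period = 10000;
	attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_ADDR;
	attr.exclude_kernel = 1;

	/* perf_event_open(attr, pid, cpu, group_fd, flags) */
	return syscall(__NR_perf_event_open, &attr, pid, -1, -1, 0);
}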
diff --git a/arch/powerpc/perf/power10-pmu.c b/arch/powerpc/perf/power10-pmu.c
new file mode 100644
index 000000000000..9b5133e361a7
--- /dev/null
+++ b/arch/powerpc/perf/power10-pmu.c
@@ -0,0 +1,636 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Performance counter support for POWER10 processors.
+ *
+ * Copyright 2020 Madhavan Srinivasan, IBM Corporation.
+ * Copyright 2020 Athira Rajeev, IBM Corporation.
+ */
+
+#define pr_fmt(fmt) "power10-pmu: " fmt
+
+#include "isa207-common.h"
+
+/*
+ * Raw event encoding for Power10:
+ *
+ * 60 56 52 48 44 40 36 32
+ * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
+ * | | [ ] [ src_match ] [ src_mask ] | [ ] [ l2l3_sel ] [ thresh_ctl ]
+ * | | | | | |
+ * | | *- IFM (Linux) | | thresh start/stop -*
+ * | *- BHRB (Linux) | src_sel
+ * *- EBB (Linux) *invert_bit
+ *
+ * 28 24 20 16 12 8 4 0
+ * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
+ * [ ] [ sample ] [ ] [ ] [ pmc ] [unit ] [ ] | m [ pmcxsel ]
+ * | | | | | | |
+ * | | | | | | *- mark
+ * | | | *- L1/L2/L3 cache_sel | |*-radix_scope_qual
+ * | | sdar_mode |
+ * | *- sampling mode for marked events *- combine
+ * |
+ * *- thresh_sel
+ *
+ * Below uses IBM bit numbering.
+ *
+ * MMCR1[x:y] = unit (PMCxUNIT)
+ * MMCR1[24] = pmc1combine[0]
+ * MMCR1[25] = pmc1combine[1]
+ * MMCR1[26] = pmc2combine[0]
+ * MMCR1[27] = pmc2combine[1]
+ * MMCR1[28] = pmc3combine[0]
+ * MMCR1[29] = pmc3combine[1]
+ * MMCR1[30] = pmc4combine[0]
+ * MMCR1[31] = pmc4combine[1]
+ *
+ * if pmc == 3 and unit == 0 and pmcxsel[0:6] == 0b0101011
+ * MMCR1[20:27] = thresh_ctl
+ * else if pmc == 4 and unit == 0xf and pmcxsel[0:6] == 0b0101001
+ * MMCR1[20:27] = thresh_ctl
+ * else
+ * MMCRA[48:55] = thresh_ctl (THRESH START/END)
+ *
+ * if thresh_sel:
+ * MMCRA[45:47] = thresh_sel
+ *
+ * if l2l3_sel:
+ * MMCR2[56:60] = l2l3_sel[0:4]
+ *
+ * MMCR1[16] = cache_sel[0]
+ * MMCR1[17] = cache_sel[1]
+ * MMCR1[18] = radix_scope_qual
+ *
+ * if mark:
+ * MMCRA[63] = 1 (SAMPLE_ENABLE)
+ * MMCRA[57:59] = sample[0:2] (RAND_SAMP_ELIG)
+ * MMCRA[61:62] = sample[3:4] (RAND_SAMP_MODE)
+ *
+ * if EBB and BHRB:
+ * MMCRA[32:33] = IFM
+ *
+ * MMCRA[SDAR_MODE] = sdar_mode[0:1]
+ */
+
+/*
+ * Some power10 event codes.
+ */
+#define EVENT(_name, _code) enum{_name = _code}
+
+#include "power10-events-list.h"
+
+#undef EVENT
+
+/* MMCRA IFM bits - POWER10 */
+#define POWER10_MMCRA_IFM1 0x0000000040000000UL
+#define POWER10_MMCRA_IFM2 0x0000000080000000UL
+#define POWER10_MMCRA_IFM3 0x00000000C0000000UL
+#define POWER10_MMCRA_BHRB_MASK 0x00000000C0000000UL
+
+extern u64 PERF_REG_EXTENDED_MASK;
+
+/* Table of alternatives, sorted by column 0 */
+static const unsigned int power10_event_alternatives[][MAX_ALT] = {
+ { PM_INST_CMPL_ALT, PM_INST_CMPL },
+ { PM_CYC_ALT, PM_CYC },
+};
+
+static int power10_get_alternatives(u64 event, unsigned int flags, u64 alt[])
+{
+ int num_alt = 0;
+
+ num_alt = isa207_get_alternatives(event, alt,
+ ARRAY_SIZE(power10_event_alternatives), flags,
+ power10_event_alternatives);
+
+ return num_alt;
+}
+
+static int power10_check_attr_config(struct perf_event *ev)
+{
+ u64 val;
+ u64 event = ev->attr.config;
+
+ val = (event >> EVENT_SAMPLE_SHIFT) & EVENT_SAMPLE_MASK;
+ if (val == 0x10 || isa3XX_check_attr_config(ev))
+ return -EINVAL;
+
+ return 0;
+}
+
+GENERIC_EVENT_ATTR(cpu-cycles, PM_CYC);
+GENERIC_EVENT_ATTR(instructions, PM_INST_CMPL);
+GENERIC_EVENT_ATTR(branch-instructions, PM_BR_CMPL);
+GENERIC_EVENT_ATTR(branch-misses, PM_BR_MPRED_CMPL);
+GENERIC_EVENT_ATTR(cache-references, PM_LD_REF_L1);
+GENERIC_EVENT_ATTR(cache-misses, PM_LD_MISS_L1);
+GENERIC_EVENT_ATTR(mem-loads, MEM_LOADS);
+GENERIC_EVENT_ATTR(mem-stores, MEM_STORES);
+GENERIC_EVENT_ATTR(branch-instructions, PM_BR_FIN);
+GENERIC_EVENT_ATTR(branch-misses, PM_MPRED_BR_FIN);
+GENERIC_EVENT_ATTR(cache-misses, PM_LD_DEMAND_MISS_L1_FIN);
+
+CACHE_EVENT_ATTR(L1-dcache-load-misses, PM_LD_MISS_L1);
+CACHE_EVENT_ATTR(L1-dcache-loads, PM_LD_REF_L1);
+CACHE_EVENT_ATTR(L1-dcache-prefetches, PM_LD_PREFETCH_CACHE_LINE_MISS);
+CACHE_EVENT_ATTR(L1-dcache-store-misses, PM_ST_MISS_L1);
+CACHE_EVENT_ATTR(L1-icache-load-misses, PM_L1_ICACHE_MISS);
+CACHE_EVENT_ATTR(L1-icache-loads, PM_INST_FROM_L1);
+CACHE_EVENT_ATTR(L1-icache-prefetches, PM_IC_PREF_REQ);
+CACHE_EVENT_ATTR(LLC-load-misses, PM_DATA_FROM_L3MISS);
+CACHE_EVENT_ATTR(LLC-loads, PM_DATA_FROM_L3);
+CACHE_EVENT_ATTR(LLC-prefetches, PM_L3_PF_MISS_L3);
+CACHE_EVENT_ATTR(LLC-store-misses, PM_L2_ST_MISS);
+CACHE_EVENT_ATTR(LLC-stores, PM_L2_ST);
+CACHE_EVENT_ATTR(branch-load-misses, PM_BR_MPRED_CMPL);
+CACHE_EVENT_ATTR(branch-loads, PM_BR_CMPL);
+CACHE_EVENT_ATTR(dTLB-load-misses, PM_DTLB_MISS);
+CACHE_EVENT_ATTR(iTLB-load-misses, PM_ITLB_MISS);
+
+static struct attribute *power10_events_attr_dd1[] = {
+ GENERIC_EVENT_PTR(PM_CYC),
+ GENERIC_EVENT_PTR(PM_INST_CMPL),
+ GENERIC_EVENT_PTR(PM_BR_CMPL),
+ GENERIC_EVENT_PTR(PM_BR_MPRED_CMPL),
+ GENERIC_EVENT_PTR(PM_LD_REF_L1),
+ GENERIC_EVENT_PTR(PM_LD_MISS_L1),
+ GENERIC_EVENT_PTR(MEM_LOADS),
+ GENERIC_EVENT_PTR(MEM_STORES),
+ CACHE_EVENT_PTR(PM_LD_MISS_L1),
+ CACHE_EVENT_PTR(PM_LD_REF_L1),
+ CACHE_EVENT_PTR(PM_LD_PREFETCH_CACHE_LINE_MISS),
+ CACHE_EVENT_PTR(PM_ST_MISS_L1),
+ CACHE_EVENT_PTR(PM_L1_ICACHE_MISS),
+ CACHE_EVENT_PTR(PM_INST_FROM_L1),
+ CACHE_EVENT_PTR(PM_IC_PREF_REQ),
+ CACHE_EVENT_PTR(PM_DATA_FROM_L3MISS),
+ CACHE_EVENT_PTR(PM_DATA_FROM_L3),
+ CACHE_EVENT_PTR(PM_BR_MPRED_CMPL),
+ CACHE_EVENT_PTR(PM_BR_CMPL),
+ CACHE_EVENT_PTR(PM_DTLB_MISS),
+ CACHE_EVENT_PTR(PM_ITLB_MISS),
+ NULL
+};
+
+static struct attribute *power10_events_attr[] = {
+ GENERIC_EVENT_PTR(PM_CYC),
+ GENERIC_EVENT_PTR(PM_INST_CMPL),
+ GENERIC_EVENT_PTR(PM_BR_FIN),
+ GENERIC_EVENT_PTR(PM_MPRED_BR_FIN),
+ GENERIC_EVENT_PTR(PM_LD_REF_L1),
+ GENERIC_EVENT_PTR(PM_LD_DEMAND_MISS_L1_FIN),
+ GENERIC_EVENT_PTR(MEM_LOADS),
+ GENERIC_EVENT_PTR(MEM_STORES),
+ CACHE_EVENT_PTR(PM_LD_MISS_L1),
+ CACHE_EVENT_PTR(PM_LD_REF_L1),
+ CACHE_EVENT_PTR(PM_LD_PREFETCH_CACHE_LINE_MISS),
+ CACHE_EVENT_PTR(PM_ST_MISS_L1),
+ CACHE_EVENT_PTR(PM_L1_ICACHE_MISS),
+ CACHE_EVENT_PTR(PM_INST_FROM_L1),
+ CACHE_EVENT_PTR(PM_IC_PREF_REQ),
+ CACHE_EVENT_PTR(PM_DATA_FROM_L3MISS),
+ CACHE_EVENT_PTR(PM_DATA_FROM_L3),
+ CACHE_EVENT_PTR(PM_L3_PF_MISS_L3),
+ CACHE_EVENT_PTR(PM_L2_ST_MISS),
+ CACHE_EVENT_PTR(PM_L2_ST),
+ CACHE_EVENT_PTR(PM_BR_MPRED_CMPL),
+ CACHE_EVENT_PTR(PM_BR_CMPL),
+ CACHE_EVENT_PTR(PM_DTLB_MISS),
+ CACHE_EVENT_PTR(PM_ITLB_MISS),
+ NULL
+};
+
+static const struct attribute_group power10_pmu_events_group_dd1 = {
+ .name = "events",
+ .attrs = power10_events_attr_dd1,
+};
+
+static const struct attribute_group power10_pmu_events_group = {
+ .name = "events",
+ .attrs = power10_events_attr,
+};
+
+PMU_FORMAT_ATTR(event, "config:0-59");
+PMU_FORMAT_ATTR(pmcxsel, "config:0-7");
+PMU_FORMAT_ATTR(mark, "config:8");
+PMU_FORMAT_ATTR(combine, "config:10-11");
+PMU_FORMAT_ATTR(unit, "config:12-15");
+PMU_FORMAT_ATTR(pmc, "config:16-19");
+PMU_FORMAT_ATTR(cache_sel, "config:20-21");
+PMU_FORMAT_ATTR(sdar_mode, "config:22-23");
+PMU_FORMAT_ATTR(sample_mode, "config:24-28");
+PMU_FORMAT_ATTR(thresh_sel, "config:29-31");
+PMU_FORMAT_ATTR(thresh_stop, "config:32-35");
+PMU_FORMAT_ATTR(thresh_start, "config:36-39");
+PMU_FORMAT_ATTR(l2l3_sel, "config:40-44");
+PMU_FORMAT_ATTR(src_sel, "config:45-46");
+PMU_FORMAT_ATTR(invert_bit, "config:47");
+PMU_FORMAT_ATTR(src_mask, "config:48-53");
+PMU_FORMAT_ATTR(src_match, "config:54-59");
+PMU_FORMAT_ATTR(radix_scope, "config:9");
+PMU_FORMAT_ATTR(thresh_cmp, "config1:0-17");
+
+static struct attribute *power10_pmu_format_attr[] = {
+ &format_attr_event.attr,
+ &format_attr_pmcxsel.attr,
+ &format_attr_mark.attr,
+ &format_attr_combine.attr,
+ &format_attr_unit.attr,
+ &format_attr_pmc.attr,
+ &format_attr_cache_sel.attr,
+ &format_attr_sdar_mode.attr,
+ &format_attr_sample_mode.attr,
+ &format_attr_thresh_sel.attr,
+ &format_attr_thresh_stop.attr,
+ &format_attr_thresh_start.attr,
+ &format_attr_l2l3_sel.attr,
+ &format_attr_src_sel.attr,
+ &format_attr_invert_bit.attr,
+ &format_attr_src_mask.attr,
+ &format_attr_src_match.attr,
+ &format_attr_radix_scope.attr,
+ &format_attr_thresh_cmp.attr,
+ NULL,
+};
+
+static const struct attribute_group power10_pmu_format_group = {
+ .name = "format",
+ .attrs = power10_pmu_format_attr,
+};
+
+static struct attribute *power10_pmu_caps_attrs[] = {
+ NULL
+};
+
+static struct attribute_group power10_pmu_caps_group = {
+ .name = "caps",
+ .attrs = power10_pmu_caps_attrs,
+};
+
+static const struct attribute_group *power10_pmu_attr_groups_dd1[] = {
+ &power10_pmu_format_group,
+ &power10_pmu_events_group_dd1,
+ &power10_pmu_caps_group,
+ NULL,
+};
+
+static const struct attribute_group *power10_pmu_attr_groups[] = {
+ &power10_pmu_format_group,
+ &power10_pmu_events_group,
+ &power10_pmu_caps_group,
+ NULL,
+};
+
+static int power10_generic_events_dd1[] = {
+ [PERF_COUNT_HW_CPU_CYCLES] = PM_CYC,
+ [PERF_COUNT_HW_INSTRUCTIONS] = PM_INST_CMPL,
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = PM_BR_CMPL,
+ [PERF_COUNT_HW_BRANCH_MISSES] = PM_BR_MPRED_CMPL,
+ [PERF_COUNT_HW_CACHE_REFERENCES] = PM_LD_REF_L1,
+ [PERF_COUNT_HW_CACHE_MISSES] = PM_LD_MISS_L1,
+};
+
+static int power10_generic_events[] = {
+ [PERF_COUNT_HW_CPU_CYCLES] = PM_CYC,
+ [PERF_COUNT_HW_INSTRUCTIONS] = PM_INST_CMPL,
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = PM_BR_FIN,
+ [PERF_COUNT_HW_BRANCH_MISSES] = PM_MPRED_BR_FIN,
+ [PERF_COUNT_HW_CACHE_REFERENCES] = PM_LD_REF_L1,
+ [PERF_COUNT_HW_CACHE_MISSES] = PM_LD_DEMAND_MISS_L1_FIN,
+};
+
+static u64 power10_bhrb_filter_map(u64 branch_sample_type)
+{
+ u64 pmu_bhrb_filter = 0;
+
+ /* BHRB and regular PMU events share the same privilege state
+ * filter configuration. BHRB is always recorded along with a
+ * regular PMU event. As the privilege state filter is handled
+ * in the basic PMC configuration of the accompanying regular
+ * PMU event, we ignore any separate BHRB specific request.
+ */
+
+ /* No branch filter requested */
+ if (branch_sample_type & PERF_SAMPLE_BRANCH_ANY)
+ return pmu_bhrb_filter;
+
+ /* Invalid branch filter options - HW does not support */
+ if (branch_sample_type & PERF_SAMPLE_BRANCH_ANY_RETURN)
+ return -1;
+
+ if (branch_sample_type & PERF_SAMPLE_BRANCH_IND_CALL) {
+ pmu_bhrb_filter |= POWER10_MMCRA_IFM2;
+ return pmu_bhrb_filter;
+ }
+
+ if (branch_sample_type & PERF_SAMPLE_BRANCH_COND) {
+ pmu_bhrb_filter |= POWER10_MMCRA_IFM3;
+ return pmu_bhrb_filter;
+ }
+
+ if (branch_sample_type & PERF_SAMPLE_BRANCH_CALL)
+ return -1;
+
+ if (branch_sample_type & PERF_SAMPLE_BRANCH_ANY_CALL) {
+ pmu_bhrb_filter |= POWER10_MMCRA_IFM1;
+ return pmu_bhrb_filter;
+ }
+
+ /* Everything else is unsupported */
+ return -1;
+}
+
+static void power10_config_bhrb(u64 pmu_bhrb_filter)
+{
+ pmu_bhrb_filter &= POWER10_MMCRA_BHRB_MASK;
+
+ /* Enable BHRB filter in PMU */
+ mtspr(SPRN_MMCRA, (mfspr(SPRN_MMCRA) | pmu_bhrb_filter));
+}
+
+#define C(x) PERF_COUNT_HW_CACHE_##x
+
+/*
+ * Table of generalized cache-related events.
+ * 0 means not supported, -1 means nonsensical, other values
+ * are event codes.
+ */
+static u64 power10_cache_events_dd1[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
+ [C(L1D)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = PM_LD_REF_L1,
+ [C(RESULT_MISS)] = PM_LD_MISS_L1,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = 0,
+ [C(RESULT_MISS)] = PM_ST_MISS_L1,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = PM_LD_PREFETCH_CACHE_LINE_MISS,
+ [C(RESULT_MISS)] = 0,
+ },
+ },
+ [C(L1I)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = PM_INST_FROM_L1,
+ [C(RESULT_MISS)] = PM_L1_ICACHE_MISS,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = PM_INST_FROM_L1MISS,
+ [C(RESULT_MISS)] = -1,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = PM_IC_PREF_REQ,
+ [C(RESULT_MISS)] = 0,
+ },
+ },
+ [C(LL)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = PM_DATA_FROM_L3,
+ [C(RESULT_MISS)] = PM_DATA_FROM_L3MISS,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = -1,
+ [C(RESULT_MISS)] = -1,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = -1,
+ [C(RESULT_MISS)] = 0,
+ },
+ },
+ [C(DTLB)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = 0,
+ [C(RESULT_MISS)] = PM_DTLB_MISS,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = -1,
+ [C(RESULT_MISS)] = -1,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = -1,
+ [C(RESULT_MISS)] = -1,
+ },
+ },
+ [C(ITLB)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = 0,
+ [C(RESULT_MISS)] = PM_ITLB_MISS,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = -1,
+ [C(RESULT_MISS)] = -1,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = -1,
+ [C(RESULT_MISS)] = -1,
+ },
+ },
+ [C(BPU)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = PM_BR_CMPL,
+ [C(RESULT_MISS)] = PM_BR_MPRED_CMPL,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = -1,
+ [C(RESULT_MISS)] = -1,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = -1,
+ [C(RESULT_MISS)] = -1,
+ },
+ },
+ [C(NODE)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = -1,
+ [C(RESULT_MISS)] = -1,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = -1,
+ [C(RESULT_MISS)] = -1,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = -1,
+ [C(RESULT_MISS)] = -1,
+ },
+ },
+};
+
+static u64 power10_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
+ [C(L1D)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = PM_LD_REF_L1,
+ [C(RESULT_MISS)] = PM_LD_MISS_L1,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = 0,
+ [C(RESULT_MISS)] = PM_ST_MISS_L1,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = PM_LD_PREFETCH_CACHE_LINE_MISS,
+ [C(RESULT_MISS)] = 0,
+ },
+ },
+ [C(L1I)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = PM_INST_FROM_L1,
+ [C(RESULT_MISS)] = PM_L1_ICACHE_MISS,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = PM_INST_FROM_L1MISS,
+ [C(RESULT_MISS)] = -1,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = PM_IC_PREF_REQ,
+ [C(RESULT_MISS)] = 0,
+ },
+ },
+ [C(LL)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = PM_DATA_FROM_L3,
+ [C(RESULT_MISS)] = PM_DATA_FROM_L3MISS,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = PM_L2_ST,
+ [C(RESULT_MISS)] = PM_L2_ST_MISS,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = PM_L3_PF_MISS_L3,
+ [C(RESULT_MISS)] = 0,
+ },
+ },
+ [C(DTLB)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = 0,
+ [C(RESULT_MISS)] = PM_DTLB_MISS,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = -1,
+ [C(RESULT_MISS)] = -1,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = -1,
+ [C(RESULT_MISS)] = -1,
+ },
+ },
+ [C(ITLB)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = 0,
+ [C(RESULT_MISS)] = PM_ITLB_MISS,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = -1,
+ [C(RESULT_MISS)] = -1,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = -1,
+ [C(RESULT_MISS)] = -1,
+ },
+ },
+ [C(BPU)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = PM_BR_CMPL,
+ [C(RESULT_MISS)] = PM_BR_MPRED_CMPL,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = -1,
+ [C(RESULT_MISS)] = -1,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = -1,
+ [C(RESULT_MISS)] = -1,
+ },
+ },
+ [C(NODE)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = -1,
+ [C(RESULT_MISS)] = -1,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = -1,
+ [C(RESULT_MISS)] = -1,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = -1,
+ [C(RESULT_MISS)] = -1,
+ },
+ },
+};
+
+#undef C
+
+/*
+ * Set the MMCR0[CC56RUN] bit to enable counting for
+ * PMC5 and PMC6 regardless of the state of CTRL[RUN],
+ * so that we can use counters 5 and 6 as PM_INST_CMPL and
+ * PM_CYC.
+ */
+static int power10_compute_mmcr(u64 event[], int n_ev,
+ unsigned int hwc[], struct mmcr_regs *mmcr,
+ struct perf_event *pevents[], u32 flags)
+{
+ int ret;
+
+ ret = isa207_compute_mmcr(event, n_ev, hwc, mmcr, pevents, flags);
+ if (!ret)
+ mmcr->mmcr0 |= MMCR0_C56RUN;
+ return ret;
+}
+
+static struct power_pmu power10_pmu = {
+ .name = "POWER10",
+ .n_counter = MAX_PMU_COUNTERS,
+ .add_fields = ISA207_ADD_FIELDS,
+ .test_adder = ISA207_TEST_ADDER,
+ .group_constraint_mask = CNST_CACHE_PMC4_MASK,
+ .group_constraint_val = CNST_CACHE_PMC4_VAL,
+ .compute_mmcr = power10_compute_mmcr,
+ .config_bhrb = power10_config_bhrb,
+ .bhrb_filter_map = power10_bhrb_filter_map,
+ .get_constraint = isa207_get_constraint,
+ .get_alternatives = power10_get_alternatives,
+ .get_mem_data_src = isa207_get_mem_data_src,
+ .get_mem_weight = isa207_get_mem_weight,
+ .disable_pmc = isa207_disable_pmc,
+ .flags = PPMU_HAS_SIER | PPMU_ARCH_207S |
+ PPMU_ARCH_31 | PPMU_HAS_ATTR_CONFIG1,
+ .n_generic = ARRAY_SIZE(power10_generic_events),
+ .generic_events = power10_generic_events,
+ .cache_events = &power10_cache_events,
+ .attr_groups = power10_pmu_attr_groups,
+ .bhrb_nr = 32,
+ .capabilities = PERF_PMU_CAP_EXTENDED_REGS,
+ .check_attr_config = power10_check_attr_config,
+};
+
+int __init init_power10_pmu(void)
+{
+ unsigned int pvr;
+ int rc;
+
+ pvr = mfspr(SPRN_PVR);
+ if (PVR_VER(pvr) != PVR_POWER10)
+ return -ENODEV;
+
+ /* Add the ppmu flag for power10 DD1 */
+ if ((PVR_CFG(pvr) == 1))
+ power10_pmu.flags |= PPMU_P10_DD1;
+
+ /* Set the PERF_REG_EXTENDED_MASK here */
+ PERF_REG_EXTENDED_MASK = PERF_REG_PMU_MASK_31;
+
+ if ((PVR_CFG(pvr) == 1)) {
+ power10_pmu.generic_events = power10_generic_events_dd1;
+ power10_pmu.attr_groups = power10_pmu_attr_groups_dd1;
+ power10_pmu.cache_events = &power10_cache_events_dd1;
+ }
+
+ rc = register_power_pmu(&power10_pmu);
+ if (rc)
+ return rc;
+
+ /* Tell userspace that EBB is supported */
+ cur_cpu_spec->cpu_user_features2 |= PPC_FEATURE2_EBB;
+
+ return 0;
+}
diff --git a/arch/powerpc/perf/power5+-pmu.c b/arch/powerpc/perf/power5+-pmu.c
index f8574547fc6b..b4708ab73145 100644
--- a/arch/powerpc/perf/power5+-pmu.c
+++ b/arch/powerpc/perf/power5+-pmu.c
@@ -10,6 +10,8 @@
#include <asm/reg.h>
#include <asm/cputable.h>
+#include "internal.h"
+
/*
* Bits in event code for POWER5+ (POWER5 GS) and POWER5++ (POWER5 GS DD3)
*/
@@ -130,7 +132,7 @@ static unsigned long unit_cons[PM_LASTUNIT+1][2] = {
};
static int power5p_get_constraint(u64 event, unsigned long *maskp,
- unsigned long *valp)
+ unsigned long *valp, u64 event_config1 __maybe_unused)
{
int pmc, byte, unit, sh;
int bit, fmask;
@@ -448,7 +450,9 @@ static int power5p_marked_instr_event(u64 event)
}
static int power5p_compute_mmcr(u64 event[], int n_ev,
- unsigned int hwc[], unsigned long mmcr[], struct perf_event *pevents[])
+ unsigned int hwc[], struct mmcr_regs *mmcr,
+ struct perf_event *pevents[],
+ u32 flags __maybe_unused)
{
unsigned long mmcr1 = 0;
unsigned long mmcra = 0;
@@ -586,20 +590,20 @@ static int power5p_compute_mmcr(u64 event[], int n_ev,
}
/* Return MMCRx values */
- mmcr[0] = 0;
+ mmcr->mmcr0 = 0;
if (pmc_inuse & 1)
- mmcr[0] = MMCR0_PMC1CE;
+ mmcr->mmcr0 = MMCR0_PMC1CE;
if (pmc_inuse & 0x3e)
- mmcr[0] |= MMCR0_PMCjCE;
- mmcr[1] = mmcr1;
- mmcr[2] = mmcra;
+ mmcr->mmcr0 |= MMCR0_PMCjCE;
+ mmcr->mmcr1 = mmcr1;
+ mmcr->mmcra = mmcra;
return 0;
}
-static void power5p_disable_pmc(unsigned int pmc, unsigned long mmcr[])
+static void power5p_disable_pmc(unsigned int pmc, struct mmcr_regs *mmcr)
{
if (pmc <= 3)
- mmcr[1] &= ~(0x7fUL << MMCR1_PMCSEL_SH(pmc));
+ mmcr->mmcr1 &= ~(0x7fUL << MMCR1_PMCSEL_SH(pmc));
}
static int power5p_generic_events[] = {
@@ -618,7 +622,7 @@ static int power5p_generic_events[] = {
* 0 means not supported, -1 means nonsensical, other values
* are event codes.
*/
-static int power5p_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
+static u64 power5p_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
[C(L1D)] = { /* RESULT_ACCESS RESULT_MISS */
[C(OP_READ)] = { 0x1c10a8, 0x3c1088 },
[C(OP_WRITE)] = { 0x2c10a8, 0xc10c3 },
@@ -673,11 +677,11 @@ static struct power_pmu power5p_pmu = {
.cache_events = &power5p_cache_events,
};
-int init_power5p_pmu(void)
+int __init init_power5p_pmu(void)
{
- if (!cur_cpu_spec->oprofile_cpu_type ||
- (strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power5+")
- && strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power5++")))
+ unsigned int pvr = mfspr(SPRN_PVR);
+
+ if (PVR_VER(pvr) != PVR_POWER5p)
return -ENODEV;
return register_power_pmu(&power5p_pmu);
diff --git a/arch/powerpc/perf/power5-pmu.c b/arch/powerpc/perf/power5-pmu.c
index da52ecab7c9a..c6aefd0a1cc8 100644
--- a/arch/powerpc/perf/power5-pmu.c
+++ b/arch/powerpc/perf/power5-pmu.c
@@ -10,6 +10,8 @@
#include <asm/reg.h>
#include <asm/cputable.h>
+#include "internal.h"
+
/*
* Bits in event code for POWER5 (not POWER5++)
*/
@@ -134,7 +136,7 @@ static unsigned long unit_cons[PM_LASTUNIT+1][2] = {
};
static int power5_get_constraint(u64 event, unsigned long *maskp,
- unsigned long *valp)
+ unsigned long *valp, u64 event_config1 __maybe_unused)
{
int pmc, byte, unit, sh;
int bit, fmask;
@@ -379,7 +381,9 @@ static int power5_marked_instr_event(u64 event)
}
static int power5_compute_mmcr(u64 event[], int n_ev,
- unsigned int hwc[], unsigned long mmcr[], struct perf_event *pevents[])
+ unsigned int hwc[], struct mmcr_regs *mmcr,
+ struct perf_event *pevents[],
+ u32 flags __maybe_unused)
{
unsigned long mmcr1 = 0;
unsigned long mmcra = MMCRA_SDAR_DCACHE_MISS | MMCRA_SDAR_ERAT_MISS;
@@ -528,20 +532,20 @@ static int power5_compute_mmcr(u64 event[], int n_ev,
}
/* Return MMCRx values */
- mmcr[0] = 0;
+ mmcr->mmcr0 = 0;
if (pmc_inuse & 1)
- mmcr[0] = MMCR0_PMC1CE;
+ mmcr->mmcr0 = MMCR0_PMC1CE;
if (pmc_inuse & 0x3e)
- mmcr[0] |= MMCR0_PMCjCE;
- mmcr[1] = mmcr1;
- mmcr[2] = mmcra;
+ mmcr->mmcr0 |= MMCR0_PMCjCE;
+ mmcr->mmcr1 = mmcr1;
+ mmcr->mmcra = mmcra;
return 0;
}
-static void power5_disable_pmc(unsigned int pmc, unsigned long mmcr[])
+static void power5_disable_pmc(unsigned int pmc, struct mmcr_regs *mmcr)
{
if (pmc <= 3)
- mmcr[1] &= ~(0x7fUL << MMCR1_PMCSEL_SH(pmc));
+ mmcr->mmcr1 &= ~(0x7fUL << MMCR1_PMCSEL_SH(pmc));
}
static int power5_generic_events[] = {
@@ -560,7 +564,7 @@ static int power5_generic_events[] = {
* 0 means not supported, -1 means nonsensical, other values
* are event codes.
*/
-static int power5_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
+static u64 power5_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
[C(L1D)] = { /* RESULT_ACCESS RESULT_MISS */
[C(OP_READ)] = { 0x4c1090, 0x3c1088 },
[C(OP_WRITE)] = { 0x3c1090, 0xc10c3 },
@@ -614,10 +618,11 @@ static struct power_pmu power5_pmu = {
.flags = PPMU_HAS_SSLOT,
};
-int init_power5_pmu(void)
+int __init init_power5_pmu(void)
{
- if (!cur_cpu_spec->oprofile_cpu_type ||
- strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power5"))
+ unsigned int pvr = mfspr(SPRN_PVR);
+
+ if (PVR_VER(pvr) != PVR_POWER5)
return -ENODEV;
return register_power_pmu(&power5_pmu);
diff --git a/arch/powerpc/perf/power6-pmu.c b/arch/powerpc/perf/power6-pmu.c
index 3929cacf72ed..5729b6e059de 100644
--- a/arch/powerpc/perf/power6-pmu.c
+++ b/arch/powerpc/perf/power6-pmu.c
@@ -10,6 +10,8 @@
#include <asm/reg.h>
#include <asm/cputable.h>
+#include "internal.h"
+
/*
* Bits in event code for POWER6
*/
@@ -171,7 +173,8 @@ static int power6_marked_instr_event(u64 event)
* Assign PMC numbers and compute MMCR1 value for a set of events
*/
static int p6_compute_mmcr(u64 event[], int n_ev,
- unsigned int hwc[], unsigned long mmcr[], struct perf_event *pevents[])
+ unsigned int hwc[], struct mmcr_regs *mmcr, struct perf_event *pevents[],
+ u32 flags __maybe_unused)
{
unsigned long mmcr1 = 0;
unsigned long mmcra = MMCRA_SDAR_DCACHE_MISS | MMCRA_SDAR_ERAT_MISS;
@@ -243,13 +246,13 @@ static int p6_compute_mmcr(u64 event[], int n_ev,
if (pmc < 4)
mmcr1 |= (unsigned long)psel << MMCR1_PMCSEL_SH(pmc);
}
- mmcr[0] = 0;
+ mmcr->mmcr0 = 0;
if (pmc_inuse & 1)
- mmcr[0] = MMCR0_PMC1CE;
+ mmcr->mmcr0 = MMCR0_PMC1CE;
if (pmc_inuse & 0xe)
- mmcr[0] |= MMCR0_PMCjCE;
- mmcr[1] = mmcr1;
- mmcr[2] = mmcra;
+ mmcr->mmcr0 |= MMCR0_PMCjCE;
+ mmcr->mmcr1 = mmcr1;
+ mmcr->mmcra = mmcra;
return 0;
}
@@ -264,7 +267,7 @@ static int p6_compute_mmcr(u64 event[], int n_ev,
* 32-34 select field: nest (subunit) event selector
*/
static int p6_get_constraint(u64 event, unsigned long *maskp,
- unsigned long *valp)
+ unsigned long *valp, u64 event_config1 __maybe_unused)
{
int pmc, byte, sh, subunit;
unsigned long mask = 0, value = 0;
@@ -457,11 +460,11 @@ static int p6_get_alternatives(u64 event, unsigned int flags, u64 alt[])
return nalt;
}
-static void p6_disable_pmc(unsigned int pmc, unsigned long mmcr[])
+static void p6_disable_pmc(unsigned int pmc, struct mmcr_regs *mmcr)
{
/* Set PMCxSEL to 0 to disable PMCx */
if (pmc <= 3)
- mmcr[1] &= ~(0xffUL << MMCR1_PMCSEL_SH(pmc));
+ mmcr->mmcr1 &= ~(0xffUL << MMCR1_PMCSEL_SH(pmc));
}
static int power6_generic_events[] = {
@@ -481,7 +484,7 @@ static int power6_generic_events[] = {
* are event codes.
* The "DTLB" and "ITLB" events relate to the DERAT and IERAT.
*/
-static int power6_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
+static u64 power6_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
[C(L1D)] = { /* RESULT_ACCESS RESULT_MISS */
[C(OP_READ)] = { 0x280030, 0x80080 },
[C(OP_WRITE)] = { 0x180032, 0x80088 },
@@ -536,10 +539,11 @@ static struct power_pmu power6_pmu = {
.cache_events = &power6_cache_events,
};
-int init_power6_pmu(void)
+int __init init_power6_pmu(void)
{
- if (!cur_cpu_spec->oprofile_cpu_type ||
- strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power6"))
+ unsigned int pvr = mfspr(SPRN_PVR);
+
+ if (PVR_VER(pvr) != PVR_POWER6)
return -ENODEV;
return register_power_pmu(&power6_pmu);
diff --git a/arch/powerpc/perf/power7-pmu.c b/arch/powerpc/perf/power7-pmu.c
index a137813a3076..c95ccf2e28da 100644
--- a/arch/powerpc/perf/power7-pmu.c
+++ b/arch/powerpc/perf/power7-pmu.c
@@ -10,6 +10,8 @@
#include <asm/reg.h>
#include <asm/cputable.h>
+#include "internal.h"
+
/*
* Bits in event code for POWER7
*/
@@ -79,7 +81,7 @@ enum {
*/
static int power7_get_constraint(u64 event, unsigned long *maskp,
- unsigned long *valp)
+ unsigned long *valp, u64 event_config1 __maybe_unused)
{
int pmc, sh, unit;
unsigned long mask = 0, value = 0;
@@ -242,7 +244,9 @@ static int power7_marked_instr_event(u64 event)
}
static int power7_compute_mmcr(u64 event[], int n_ev,
- unsigned int hwc[], unsigned long mmcr[], struct perf_event *pevents[])
+ unsigned int hwc[], struct mmcr_regs *mmcr,
+ struct perf_event *pevents[],
+ u32 flags __maybe_unused)
{
unsigned long mmcr1 = 0;
unsigned long mmcra = MMCRA_SDAR_DCACHE_MISS | MMCRA_SDAR_ERAT_MISS;
@@ -298,20 +302,20 @@ static int power7_compute_mmcr(u64 event[], int n_ev,
}
/* Return MMCRx values */
- mmcr[0] = 0;
+ mmcr->mmcr0 = 0;
if (pmc_inuse & 1)
- mmcr[0] = MMCR0_PMC1CE;
+ mmcr->mmcr0 = MMCR0_PMC1CE;
if (pmc_inuse & 0x3e)
- mmcr[0] |= MMCR0_PMCjCE;
- mmcr[1] = mmcr1;
- mmcr[2] = mmcra;
+ mmcr->mmcr0 |= MMCR0_PMCjCE;
+ mmcr->mmcr1 = mmcr1;
+ mmcr->mmcra = mmcra;
return 0;
}
-static void power7_disable_pmc(unsigned int pmc, unsigned long mmcr[])
+static void power7_disable_pmc(unsigned int pmc, struct mmcr_regs *mmcr)
{
if (pmc <= 3)
- mmcr[1] &= ~(0xffUL << MMCR1_PMCSEL_SH(pmc));
+ mmcr->mmcr1 &= ~(0xffUL << MMCR1_PMCSEL_SH(pmc));
}
static int power7_generic_events[] = {
@@ -332,7 +336,7 @@ static int power7_generic_events[] = {
* 0 means not supported, -1 means nonsensical, other values
* are event codes.
*/
-static int power7_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
+static u64 power7_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
[C(L1D)] = { /* RESULT_ACCESS RESULT_MISS */
[C(OP_READ)] = { 0xc880, 0x400f0 },
[C(OP_WRITE)] = { 0, 0x300f0 },
@@ -401,7 +405,7 @@ static struct attribute *power7_events_attr[] = {
NULL
};
-static struct attribute_group power7_pmu_events_group = {
+static const struct attribute_group power7_pmu_events_group = {
.name = "events",
.attrs = power7_events_attr,
};
@@ -413,7 +417,7 @@ static struct attribute *power7_pmu_format_attr[] = {
NULL,
};
-static struct attribute_group power7_pmu_format_group = {
+static const struct attribute_group power7_pmu_format_group = {
.name = "format",
.attrs = power7_pmu_format_attr,
};
@@ -441,13 +445,14 @@ static struct power_pmu power7_pmu = {
.cache_events = &power7_cache_events,
};
-int init_power7_pmu(void)
+int __init init_power7_pmu(void)
{
- if (!cur_cpu_spec->oprofile_cpu_type ||
- strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power7"))
+ unsigned int pvr = mfspr(SPRN_PVR);
+
+ if (PVR_VER(pvr) != PVR_POWER7 && PVR_VER(pvr) != PVR_POWER7p)
return -ENODEV;
- if (pvr_version_is(PVR_POWER7p))
+ if (PVR_VER(pvr) == PVR_POWER7p)
power7_pmu.flags |= PPMU_SIAR_VALID;
return register_power_pmu(&power7_pmu);
diff --git a/arch/powerpc/perf/power8-pmu.c b/arch/powerpc/perf/power8-pmu.c
index 3a5fcc20ff31..ef9685065aaf 100644
--- a/arch/powerpc/perf/power8-pmu.c
+++ b/arch/powerpc/perf/power8-pmu.c
@@ -92,7 +92,7 @@ enum {
*/
/* PowerISA v2.07 format attribute structure*/
-extern struct attribute_group isa207_pmu_format_group;
+extern const struct attribute_group isa207_pmu_format_group;
/* Table of alternatives, sorted by column 0 */
static const unsigned int event_alternatives[][MAX_ALT] = {
@@ -182,14 +182,24 @@ static struct attribute *power8_events_attr[] = {
NULL
};
-static struct attribute_group power8_pmu_events_group = {
+static const struct attribute_group power8_pmu_events_group = {
.name = "events",
.attrs = power8_events_attr,
};
+static struct attribute *power8_pmu_caps_attrs[] = {
+ NULL
+};
+
+static struct attribute_group power8_pmu_caps_group = {
+ .name = "caps",
+ .attrs = power8_pmu_caps_attrs,
+};
+
static const struct attribute_group *power8_pmu_attr_groups[] = {
&isa207_pmu_format_group,
&power8_pmu_events_group,
+ &power8_pmu_caps_group,
NULL,
};
@@ -253,7 +263,7 @@ static void power8_config_bhrb(u64 pmu_bhrb_filter)
* 0 means not supported, -1 means nonsensical, other values
* are event codes.
*/
-static int power8_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
+static u64 power8_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
[ C(L1D) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = PM_LD_REF_L1,
@@ -378,12 +388,13 @@ static struct power_pmu power8_pmu = {
.bhrb_nr = 32,
};
-int init_power8_pmu(void)
+int __init init_power8_pmu(void)
{
int rc;
+ unsigned int pvr = mfspr(SPRN_PVR);
- if (!cur_cpu_spec->oprofile_cpu_type ||
- strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power8"))
+ if (PVR_VER(pvr) != PVR_POWER8E && PVR_VER(pvr) != PVR_POWER8NVL &&
+ PVR_VER(pvr) != PVR_POWER8)
return -ENODEV;
rc = register_power_pmu(&power8_pmu);
diff --git a/arch/powerpc/perf/power9-pmu.c b/arch/powerpc/perf/power9-pmu.c
index 08c3ef796198..cb6a7dc02dd7 100644
--- a/arch/powerpc/perf/power9-pmu.c
+++ b/arch/powerpc/perf/power9-pmu.c
@@ -90,13 +90,15 @@ enum {
#define POWER9_MMCRA_IFM3 0x00000000C0000000UL
#define POWER9_MMCRA_BHRB_MASK 0x00000000C0000000UL
+extern u64 PERF_REG_EXTENDED_MASK;
+
/* Nasty Power9 specific hack */
#define PVR_POWER9_CUMULUS 0x00002000
/* PowerISA v2.07 format attribute structure*/
-extern struct attribute_group isa207_pmu_format_group;
+extern const struct attribute_group isa207_pmu_format_group;
-int p9_dd21_bl_ev[] = {
+static int p9_dd21_bl_ev[] = {
PM_MRK_ST_DONE_L2,
PM_RADIX_PWC_L1_HIT,
PM_FLOP_CMPL,
@@ -110,7 +112,7 @@ int p9_dd21_bl_ev[] = {
PM_DISP_HELD_SYNC_HOLD,
};
-int p9_dd22_bl_ev[] = {
+static int p9_dd22_bl_ev[] = {
PM_DTLB_MISS_16G,
PM_DERAT_MISS_2M,
PM_DTLB_MISS_2M,
@@ -131,11 +133,11 @@ int p9_dd22_bl_ev[] = {
/* Table of alternatives, sorted by column 0 */
static const unsigned int power9_event_alternatives[][MAX_ALT] = {
- { PM_INST_DISP, PM_INST_DISP_ALT },
- { PM_RUN_CYC_ALT, PM_RUN_CYC },
- { PM_RUN_INST_CMPL_ALT, PM_RUN_INST_CMPL },
- { PM_LD_MISS_L1, PM_LD_MISS_L1_ALT },
{ PM_BR_2PATH, PM_BR_2PATH_ALT },
+ { PM_INST_DISP, PM_INST_DISP_ALT },
+ { PM_RUN_CYC_ALT, PM_RUN_CYC },
+ { PM_LD_MISS_L1, PM_LD_MISS_L1_ALT },
+ { PM_RUN_INST_CMPL_ALT, PM_RUN_INST_CMPL },
};
static int power9_get_alternatives(u64 event, unsigned int flags, u64 alt[])
@@ -149,6 +151,18 @@ static int power9_get_alternatives(u64 event, unsigned int flags, u64 alt[])
return num_alt;
}
+static int power9_check_attr_config(struct perf_event *ev)
+{
+ u64 val;
+ u64 event = ev->attr.config;
+
+ val = (event >> EVENT_SAMPLE_SHIFT) & EVENT_SAMPLE_MASK;
+ if (val == 0xC || isa3XX_check_attr_config(ev))
+ return -EINVAL;
+
+ return 0;
+}
+
GENERIC_EVENT_ATTR(cpu-cycles, PM_CYC);
GENERIC_EVENT_ATTR(stalled-cycles-frontend, PM_ICT_NOSLOT_CYC);
GENERIC_EVENT_ATTR(stalled-cycles-backend, PM_CMPLU_STALL);
@@ -203,7 +217,7 @@ static struct attribute *power9_events_attr[] = {
NULL
};
-static struct attribute_group power9_pmu_events_group = {
+static const struct attribute_group power9_pmu_events_group = {
.name = "events",
.attrs = power9_events_attr,
};
@@ -239,14 +253,24 @@ static struct attribute *power9_pmu_format_attr[] = {
NULL,
};
-static struct attribute_group power9_pmu_format_group = {
+static const struct attribute_group power9_pmu_format_group = {
.name = "format",
.attrs = power9_pmu_format_attr,
};
+static struct attribute *power9_pmu_caps_attrs[] = {
+ NULL
+};
+
+static struct attribute_group power9_pmu_caps_group = {
+ .name = "caps",
+ .attrs = power9_pmu_caps_attrs,
+};
+
static const struct attribute_group *power9_pmu_attr_groups[] = {
&power9_pmu_format_group,
&power9_pmu_events_group,
+ &power9_pmu_caps_group,
NULL,
};
@@ -310,7 +334,7 @@ static void power9_config_bhrb(u64 pmu_bhrb_filter)
* 0 means not supported, -1 means nonsensical, other values
* are event codes.
*/
-static int power9_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
+static u64 power9_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
[ C(L1D) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = PM_LD_REF_L1,
@@ -434,16 +458,16 @@ static struct power_pmu power9_pmu = {
.cache_events = &power9_cache_events,
.attr_groups = power9_pmu_attr_groups,
.bhrb_nr = 32,
+ .capabilities = PERF_PMU_CAP_EXTENDED_REGS,
+ .check_attr_config = power9_check_attr_config,
};
-int init_power9_pmu(void)
+int __init init_power9_pmu(void)
{
int rc = 0;
unsigned int pvr = mfspr(SPRN_PVR);
- /* Comes from cpu_specs[] */
- if (!cur_cpu_spec->oprofile_cpu_type ||
- strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power9"))
+ if (PVR_VER(pvr) != PVR_POWER9)
return -ENODEV;
/* Blacklist events */
@@ -457,6 +481,9 @@ int init_power9_pmu(void)
}
}
+ /* Set the PERF_REG_EXTENDED_MASK here */
+ PERF_REG_EXTENDED_MASK = PERF_REG_PMU_MASK_300;
+
rc = register_power_pmu(&power9_pmu);
if (rc)
return rc;
diff --git a/arch/powerpc/perf/ppc970-pmu.c b/arch/powerpc/perf/ppc970-pmu.c
index 4035d93d87ab..762676fb839e 100644
--- a/arch/powerpc/perf/ppc970-pmu.c
+++ b/arch/powerpc/perf/ppc970-pmu.c
@@ -9,6 +9,8 @@
#include <asm/reg.h>
#include <asm/cputable.h>
+#include "internal.h"
+
/*
* Bits in event code for PPC970
*/
@@ -188,7 +190,7 @@ static unsigned long unit_cons[PM_LASTUNIT+1][2] = {
};
static int p970_get_constraint(u64 event, unsigned long *maskp,
- unsigned long *valp)
+ unsigned long *valp, u64 event_config1 __maybe_unused)
{
int pmc, byte, unit, sh, spcsel;
unsigned long mask = 0, value = 0;
@@ -253,7 +255,9 @@ static int p970_get_alternatives(u64 event, unsigned int flags, u64 alt[])
}
static int p970_compute_mmcr(u64 event[], int n_ev,
- unsigned int hwc[], unsigned long mmcr[], struct perf_event *pevents[])
+ unsigned int hwc[], struct mmcr_regs *mmcr,
+ struct perf_event *pevents[],
+ u32 flags __maybe_unused)
{
unsigned long mmcr0 = 0, mmcr1 = 0, mmcra = 0;
unsigned int pmc, unit, byte, psel;
@@ -393,27 +397,26 @@ static int p970_compute_mmcr(u64 event[], int n_ev,
mmcra |= 0x2000; /* mark only one IOP per PPC instruction */
/* Return MMCRx values */
- mmcr[0] = mmcr0;
- mmcr[1] = mmcr1;
- mmcr[2] = mmcra;
+ mmcr->mmcr0 = mmcr0;
+ mmcr->mmcr1 = mmcr1;
+ mmcr->mmcra = mmcra;
return 0;
}
-static void p970_disable_pmc(unsigned int pmc, unsigned long mmcr[])
+static void p970_disable_pmc(unsigned int pmc, struct mmcr_regs *mmcr)
{
- int shift, i;
+ int shift;
+ /*
+ * Setting the PMCxSEL field to 0x08 disables PMC x.
+ */
if (pmc <= 1) {
shift = MMCR0_PMC1SEL_SH - 7 * pmc;
- i = 0;
+ mmcr->mmcr0 = (mmcr->mmcr0 & ~(0x1fUL << shift)) | (0x08UL << shift);
} else {
shift = MMCR1_PMC3SEL_SH - 5 * (pmc - 2);
- i = 1;
+ mmcr->mmcr1 = (mmcr->mmcr1 & ~(0x1fUL << shift)) | (0x08UL << shift);
}
- /*
- * Setting the PMCxSEL field to 0x08 disables PMC x.
- */
- mmcr[i] = (mmcr[i] & ~(0x1fUL << shift)) | (0x08UL << shift);
}
static int ppc970_generic_events[] = {
@@ -432,7 +435,7 @@ static int ppc970_generic_events[] = {
* 0 means not supported, -1 means nonsensical, other values
* are event codes.
*/
-static int ppc970_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
+static u64 ppc970_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
[C(L1D)] = { /* RESULT_ACCESS RESULT_MISS */
[C(OP_READ)] = { 0x8810, 0x3810 },
[C(OP_WRITE)] = { 0x7810, 0x813 },
@@ -486,11 +489,12 @@ static struct power_pmu ppc970_pmu = {
.flags = PPMU_NO_SIPR | PPMU_NO_CONT_SAMPLING,
};
-int init_ppc970_pmu(void)
+int __init init_ppc970_pmu(void)
{
- if (!cur_cpu_spec->oprofile_cpu_type ||
- (strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/970")
- && strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/970MP")))
+ unsigned int pvr = mfspr(SPRN_PVR);
+
+ if (PVR_VER(pvr) != PVR_970 && PVR_VER(pvr) != PVR_970MP &&
+ PVR_VER(pvr) != PVR_970FX && PVR_VER(pvr) != PVR_970GX)
return -ENODEV;
return register_power_pmu(&ppc970_pmu);
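All of the init_*_pmu() hunks above drop the oprofile_cpu_type string compare in favour of checking the Processor Version Register. A small standalone sketch of that pattern, with PVR_VER written out per its usual powerpc definition (upper 16 bits of the PVR), for illustration only:

#include <stdbool.h>
#include <stdint.h>

#define PVR_VER(pvr)	(((pvr) >> 16) & 0xFFFF)	/* processor version field */

/* Match the running CPU against a short list of supported PVR versions. */
static bool pvr_matches(uint32_t pvr, const uint16_t *versions, int n)
{
	for (int i = 0; i < n; i++)
		if (PVR_VER(pvr) == versions[i])
			return true;
	return false;
}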
diff --git a/arch/powerpc/platforms/40x/Kconfig b/arch/powerpc/platforms/40x/Kconfig
index 6da813b65b42..614ea6dc994c 100644
--- a/arch/powerpc/platforms/40x/Kconfig
+++ b/arch/powerpc/platforms/40x/Kconfig
@@ -7,14 +7,6 @@ config ACADIA
help
This option enables support for the AMCC 405EZ Acadia evaluation board.
-config EP405
- bool "EP405/EP405PC"
- depends on 40x
- select 405GP
- select FORCE_PCI
- help
- This option enables support for the EP405/EP405PC boards.
-
config HOTFOOT
bool "Hotfoot"
depends on 40x
@@ -31,7 +23,6 @@ config KILAUEA
select PPC4xx_PCI_EXPRESS
select FORCE_PCI
select PCI_MSI
- select PPC4xx_MSI
help
This option enables support for the AMCC PPC405EX evaluation board.
@@ -45,33 +36,6 @@ config MAKALU
help
This option enables support for the AMCC PPC405EX board.
-config WALNUT
- bool "Walnut"
- depends on 40x
- default y
- select 405GP
- select FORCE_PCI
- select OF_RTC
- help
- This option enables support for the IBM PPC405GP evaluation board.
-
-config XILINX_VIRTEX_GENERIC_BOARD
- bool "Generic Xilinx Virtex board"
- depends on 40x
- select XILINX_VIRTEX_II_PRO
- select XILINX_VIRTEX_4_FX
- select XILINX_INTC
- help
- This option enables generic support for Xilinx Virtex based boards.
-
- The generic virtex board support matches any device tree which
- specifies 'xilinx,virtex' in its compatible field. This includes
- the Xilinx ML3xx and ML4xx reference designs using the powerpc
- core.
-
- Most Virtex designs should use this unless it needs to do some
- special configuration at board probe time.
-
config OBS600
bool "OpenBlockS 600"
depends on 40x
@@ -86,18 +50,6 @@ config PPC40x_SIMPLE
help
This option enables the simple PowerPC 40x platform support.
-# OAK doesn't exist but wanted to keep this around for any future 403GCX boards
-config 403GCX
- bool
- #depends on OAK
- select IBM405_ERR51
-
-config 405GP
- bool
- select IBM405_ERR77
- select IBM405_ERR51
- select IBM_EMAC_ZMII if IBM_EMAC
-
config 405EX
bool
select IBM_EMAC_EMAC4 if IBM_EMAC
@@ -109,25 +61,6 @@ config 405EZ
select IBM_EMAC_MAL_CLR_ICINTSTAT if IBM_EMAC
select IBM_EMAC_MAL_COMMON_ERR if IBM_EMAC
-config XILINX_VIRTEX
- bool
- select DEFAULT_UIMAGE
-
-config XILINX_VIRTEX_II_PRO
- bool
- select XILINX_VIRTEX
- select IBM405_ERR77
- select IBM405_ERR51
-
-config XILINX_VIRTEX_4_FX
- bool
- select XILINX_VIRTEX
-
-config STB03xxx
- bool
- select IBM405_ERR77
- select IBM405_ERR51
-
config PPC4xx_GPIO
bool "PPC4xx GPIO support"
depends on 40x
@@ -135,16 +68,6 @@ config PPC4xx_GPIO
help
Enable gpiolib support for ppc40x based boards
-# 40x errata/workaround config symbols, selected by the CPU models above
-
-# All 405-based cores up until the 405GPR and 405EP have this errata.
-config IBM405_ERR77
- bool
-
-# All 40x-based cores, up until the 405GPR and 405EP have this errata.
-config IBM405_ERR51
- bool
-
config APM8018X
bool "APM8018X"
depends on 40x
diff --git a/arch/powerpc/platforms/40x/Makefile b/arch/powerpc/platforms/40x/Makefile
index 828d78340dd9..122de98527c4 100644
--- a/arch/powerpc/platforms/40x/Makefile
+++ b/arch/powerpc/platforms/40x/Makefile
@@ -1,5 +1,2 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_WALNUT) += walnut.o
-obj-$(CONFIG_XILINX_VIRTEX_GENERIC_BOARD) += virtex.o
-obj-$(CONFIG_EP405) += ep405.o
obj-$(CONFIG_PPC40x_SIMPLE) += ppc40x_simple.o
diff --git a/arch/powerpc/platforms/40x/ep405.c b/arch/powerpc/platforms/40x/ep405.c
deleted file mode 100644
index 1c8aec6e9bb7..000000000000
--- a/arch/powerpc/platforms/40x/ep405.c
+++ /dev/null
@@ -1,123 +0,0 @@
-/*
- * Architecture- / platform-specific boot-time initialization code for
- * IBM PowerPC 4xx based boards. Adapted from original
- * code by Gary Thomas, Cort Dougan <cort@fsmlabs.com>, and Dan Malek
- * <dan@net4x.com>.
- *
- * Copyright(c) 1999-2000 Grant Erickson <grant@lcse.umn.edu>
- *
- * Rewritten and ported to the merged powerpc tree:
- * Copyright 2007 IBM Corporation
- * Josh Boyer <jwboyer@linux.vnet.ibm.com>
- *
- * Adapted to EP405 by Ben. Herrenschmidt <benh@kernel.crashing.org>
- *
- * TODO: Wire up the PCI IRQ mux and the southbridge interrupts
- *
- * 2002 (c) MontaVista, Software, Inc. This file is licensed under
- * the terms of the GNU General Public License version 2. This program
- * is licensed "as is" without any warranty of any kind, whether express
- * or implied.
- */
-
-#include <linux/init.h>
-#include <linux/of_platform.h>
-
-#include <asm/machdep.h>
-#include <asm/prom.h>
-#include <asm/udbg.h>
-#include <asm/time.h>
-#include <asm/uic.h>
-#include <asm/pci-bridge.h>
-#include <asm/ppc4xx.h>
-
-static struct device_node *bcsr_node;
-static void __iomem *bcsr_regs;
-
-/* BCSR registers */
-#define BCSR_ID 0
-#define BCSR_PCI_CTRL 1
-#define BCSR_FLASH_NV_POR_CTRL 2
-#define BCSR_FENET_UART_CTRL 3
-#define BCSR_PCI_IRQ 4
-#define BCSR_XIRQ_SELECT 5
-#define BCSR_XIRQ_ROUTING 6
-#define BCSR_XIRQ_STATUS 7
-#define BCSR_XIRQ_STATUS2 8
-#define BCSR_SW_STAT_LED_CTRL 9
-#define BCSR_GPIO_IRQ_PAR_CTRL 10
-/* there's more, can't be bothered typing them tho */
-
-
-static const struct of_device_id ep405_of_bus[] __initconst = {
- { .compatible = "ibm,plb3", },
- { .compatible = "ibm,opb", },
- { .compatible = "ibm,ebc", },
- {},
-};
-
-static int __init ep405_device_probe(void)
-{
- of_platform_bus_probe(NULL, ep405_of_bus, NULL);
-
- return 0;
-}
-machine_device_initcall(ep405, ep405_device_probe);
-
-static void __init ep405_init_bcsr(void)
-{
- const u8 *irq_routing;
- int i;
-
- /* Find the bloody thing & map it */
- bcsr_node = of_find_compatible_node(NULL, NULL, "ep405-bcsr");
- if (bcsr_node == NULL) {
- printk(KERN_ERR "EP405 BCSR not found !\n");
- return;
- }
- bcsr_regs = of_iomap(bcsr_node, 0);
- if (bcsr_regs == NULL) {
- printk(KERN_ERR "EP405 BCSR failed to map !\n");
- return;
- }
-
- /* Get the irq-routing property and apply the routing to the CPLD */
- irq_routing = of_get_property(bcsr_node, "irq-routing", NULL);
- if (irq_routing == NULL)
- return;
- for (i = 0; i < 16; i++) {
- u8 irq = irq_routing[i];
- out_8(bcsr_regs + BCSR_XIRQ_SELECT, i);
- out_8(bcsr_regs + BCSR_XIRQ_ROUTING, irq);
- }
- in_8(bcsr_regs + BCSR_XIRQ_SELECT);
- mb();
- out_8(bcsr_regs + BCSR_GPIO_IRQ_PAR_CTRL, 0xfe);
-}
-
-static void __init ep405_setup_arch(void)
-{
- /* Find & init the BCSR CPLD */
- ep405_init_bcsr();
-
- pci_set_flags(PCI_REASSIGN_ALL_RSRC);
-}
-
-static int __init ep405_probe(void)
-{
- if (!of_machine_is_compatible("ep405"))
- return 0;
-
- return 1;
-}
-
-define_machine(ep405) {
- .name = "EP405",
- .probe = ep405_probe,
- .setup_arch = ep405_setup_arch,
- .progress = udbg_progress,
- .init_IRQ = uic_init_tree,
- .get_irq = uic_get_irq,
- .restart = ppc4xx_reset_system,
- .calibrate_decr = generic_calibrate_decr,
-};
diff --git a/arch/powerpc/platforms/40x/ppc40x_simple.c b/arch/powerpc/platforms/40x/ppc40x_simple.c
index e70b42729322..dce696c32679 100644
--- a/arch/powerpc/platforms/40x/ppc40x_simple.c
+++ b/arch/powerpc/platforms/40x/ppc40x_simple.c
@@ -13,7 +13,6 @@
#include <asm/machdep.h>
#include <asm/pci-bridge.h>
#include <asm/ppc4xx.h>
-#include <asm/prom.h>
#include <asm/time.h>
#include <asm/udbg.h>
#include <asm/uic.h>
diff --git a/arch/powerpc/platforms/40x/virtex.c b/arch/powerpc/platforms/40x/virtex.c
deleted file mode 100644
index e3d5e095846b..000000000000
--- a/arch/powerpc/platforms/40x/virtex.c
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Xilinx Virtex (IIpro & 4FX) based board support
- *
- * Copyright 2007 Secret Lab Technologies Ltd.
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
- */
-
-#include <linux/init.h>
-#include <linux/of_platform.h>
-#include <asm/machdep.h>
-#include <asm/prom.h>
-#include <asm/time.h>
-#include <asm/xilinx_intc.h>
-#include <asm/xilinx_pci.h>
-#include <asm/ppc4xx.h>
-
-static const struct of_device_id xilinx_of_bus_ids[] __initconst = {
- { .compatible = "xlnx,plb-v46-1.00.a", },
- { .compatible = "xlnx,plb-v34-1.01.a", },
- { .compatible = "xlnx,plb-v34-1.02.a", },
- { .compatible = "xlnx,opb-v20-1.10.c", },
- { .compatible = "xlnx,dcr-v29-1.00.a", },
- { .compatible = "xlnx,compound", },
- {}
-};
-
-static int __init virtex_device_probe(void)
-{
- of_platform_bus_probe(NULL, xilinx_of_bus_ids, NULL);
-
- return 0;
-}
-machine_device_initcall(virtex, virtex_device_probe);
-
-static int __init virtex_probe(void)
-{
- if (!of_machine_is_compatible("xlnx,virtex"))
- return 0;
-
- return 1;
-}
-
-define_machine(virtex) {
- .name = "Xilinx Virtex",
- .probe = virtex_probe,
- .setup_arch = xilinx_pci_init,
- .init_IRQ = xilinx_intc_init_tree,
- .get_irq = xintc_get_irq,
- .restart = ppc4xx_reset_system,
- .calibrate_decr = generic_calibrate_decr,
-};
diff --git a/arch/powerpc/platforms/40x/walnut.c b/arch/powerpc/platforms/40x/walnut.c
deleted file mode 100644
index e5797815e2f1..000000000000
--- a/arch/powerpc/platforms/40x/walnut.c
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- * Architecture- / platform-specific boot-time initialization code for
- * IBM PowerPC 4xx based boards. Adapted from original
- * code by Gary Thomas, Cort Dougan <cort@fsmlabs.com>, and Dan Malek
- * <dan@net4x.com>.
- *
- * Copyright(c) 1999-2000 Grant Erickson <grant@lcse.umn.edu>
- *
- * Rewritten and ported to the merged powerpc tree:
- * Copyright 2007 IBM Corporation
- * Josh Boyer <jwboyer@linux.vnet.ibm.com>
- *
- * 2002 (c) MontaVista, Software, Inc. This file is licensed under
- * the terms of the GNU General Public License version 2. This program
- * is licensed "as is" without any warranty of any kind, whether express
- * or implied.
- */
-
-#include <linux/init.h>
-#include <linux/of_platform.h>
-#include <linux/rtc.h>
-
-#include <asm/machdep.h>
-#include <asm/prom.h>
-#include <asm/udbg.h>
-#include <asm/time.h>
-#include <asm/uic.h>
-#include <asm/pci-bridge.h>
-#include <asm/ppc4xx.h>
-
-static const struct of_device_id walnut_of_bus[] __initconst = {
- { .compatible = "ibm,plb3", },
- { .compatible = "ibm,opb", },
- { .compatible = "ibm,ebc", },
- {},
-};
-
-static int __init walnut_device_probe(void)
-{
- of_platform_bus_probe(NULL, walnut_of_bus, NULL);
- of_instantiate_rtc();
-
- return 0;
-}
-machine_device_initcall(walnut, walnut_device_probe);
-
-static int __init walnut_probe(void)
-{
- if (!of_machine_is_compatible("ibm,walnut"))
- return 0;
-
- pci_set_flags(PCI_REASSIGN_ALL_RSRC);
-
- return 1;
-}
-
-define_machine(walnut) {
- .name = "Walnut",
- .probe = walnut_probe,
- .progress = udbg_progress,
- .init_IRQ = uic_init_tree,
- .get_irq = uic_get_irq,
- .restart = ppc4xx_reset_system,
- .calibrate_decr = generic_calibrate_decr,
-};
diff --git a/arch/powerpc/platforms/44x/Kconfig b/arch/powerpc/platforms/44x/Kconfig
index 25ebe634a661..25b80cd558f8 100644
--- a/arch/powerpc/platforms/44x/Kconfig
+++ b/arch/powerpc/platforms/44x/Kconfig
@@ -5,7 +5,7 @@ config PPC_47x
select MPIC
help
This option enables support for the 47x family of processors and is
- not currently compatible with other 44x or 46x varients
+ not currently compatible with other 44x or 46x variants
config BAMBOO
bool "Bamboo"
@@ -23,7 +23,6 @@ config BLUESTONE
select APM821xx
select FORCE_PCI
select PCI_MSI
- select PPC4xx_MSI
select PPC4xx_PCI_EXPRESS
select IBM_EMAC_RGMII if IBM_EMAC
help
@@ -73,7 +72,6 @@ config KATMAI
select FORCE_PCI
select PPC4xx_PCI_EXPRESS
select PCI_MSI
- select PPC4xx_MSI
help
This option enables support for the AMCC PPC440SPe evaluation board.
@@ -115,7 +113,6 @@ config CANYONLANDS
select FORCE_PCI
select PPC4xx_PCI_EXPRESS
select PCI_MSI
- select PPC4xx_MSI
select IBM_EMAC_RGMII if IBM_EMAC
select IBM_EMAC_ZMII if IBM_EMAC
help
@@ -141,7 +138,6 @@ config REDWOOD
select FORCE_PCI
select PPC4xx_PCI_EXPRESS
select PCI_MSI
- select PPC4xx_MSI
help
This option enables support for the AMCC PPC460SX Redwood board.
@@ -167,8 +163,7 @@ config YOSEMITE
config ISS4xx
bool "ISS 4xx Simulator"
- depends on (44x || 40x)
- select 405GP if 40x
+ depends on 44x
select 440GP if 44x && !PPC_47x
select PPC_FPU
select OF_RTC
@@ -207,17 +202,10 @@ config AKEBONO
select PPC4xx_HSTA_MSI
select I2C
select I2C_IBM_IIC
- select NETDEVICES
- select ETHERNET
- select NET_VENDOR_IBM
select IBM_EMAC_EMAC4 if IBM_EMAC
select USB if USB_SUPPORT
select USB_OHCI_HCD_PLATFORM if USB_OHCI_HCD
select USB_EHCI_HCD_PLATFORM if USB_EHCI_HCD
- select MMC_SDHCI
- select MMC_SDHCI_PLTFM
- select ATA
- select SATA_AHCI_PLATFORM
help
This option enables support for the IBM Akebono (476gtr) evaluation board
@@ -232,33 +220,6 @@ config ICON
help
This option enables support for the AMCC PPC440SPe evaluation board.
-config XILINX_VIRTEX440_GENERIC_BOARD
- bool "Generic Xilinx Virtex 5 FXT board support"
- depends on 44x
- select XILINX_VIRTEX_5_FXT
- select XILINX_INTC
- help
- This option enables generic support for Xilinx Virtex based boards
- that use a 440 based processor in the Virtex 5 FXT FPGA architecture.
-
- The generic virtex board support matches any device tree which
- specifies 'xlnx,virtex440' in its compatible field. This includes
- the Xilinx ML5xx reference designs using the powerpc core.
-
- Most Virtex 5 designs should use this unless it needs to do some
- special configuration at board probe time.
-
-config XILINX_ML510
- bool "Xilinx ML510 extra support"
- depends on XILINX_VIRTEX440_GENERIC_BOARD
- select HAVE_PCI
- select XILINX_PCI if PCI
- select PPC_INDIRECT_PCI if PCI
- select PPC_I8259 if PCI
- help
- This option enables extra support for features on the Xilinx ML510
- board. The ML510 has a PCI bus with ALI south bridge.
-
config PPC44x_SIMPLE
bool "Simple PowerPC 44x board support"
depends on 44x
@@ -354,13 +315,3 @@ config 476FPE_ERR46
config IBM440EP_ERR42
bool
-# Xilinx specific config options.
-config XILINX_VIRTEX
- bool
- select DEFAULT_UIMAGE
-
-# Xilinx Virtex 5 FXT FPGA architecture, selected by a Xilinx board above
-config XILINX_VIRTEX_5_FXT
- bool
- select XILINX_VIRTEX
-
diff --git a/arch/powerpc/platforms/44x/Makefile b/arch/powerpc/platforms/44x/Makefile
index 1b78c6af821a..5ba031f57652 100644
--- a/arch/powerpc/platforms/44x/Makefile
+++ b/arch/powerpc/platforms/44x/Makefile
@@ -7,8 +7,6 @@ obj-$(CONFIG_PPC44x_SIMPLE) += ppc44x_simple.o
obj-$(CONFIG_EBONY) += ebony.o
obj-$(CONFIG_SAM440EP) += sam440ep.o
obj-$(CONFIG_WARP) += warp.o
-obj-$(CONFIG_XILINX_VIRTEX_5_FXT) += virtex.o
-obj-$(CONFIG_XILINX_ML510) += virtex_ml510.o
obj-$(CONFIG_ISS4xx) += iss4xx.o
obj-$(CONFIG_CANYONLANDS)+= canyonlands.o
obj-$(CONFIG_CURRITUCK) += ppc476.o
diff --git a/arch/powerpc/platforms/44x/canyonlands.c b/arch/powerpc/platforms/44x/canyonlands.c
index 807968a755ef..5b23aef8bdef 100644
--- a/arch/powerpc/platforms/44x/canyonlands.c
+++ b/arch/powerpc/platforms/44x/canyonlands.c
@@ -12,6 +12,7 @@
#include <asm/ppc4xx.h>
#include <asm/udbg.h>
#include <asm/uic.h>
+#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/delay.h>
#include "44x.h"
diff --git a/arch/powerpc/platforms/44x/fsp2.c b/arch/powerpc/platforms/44x/fsp2.c
index b299e43f5ef9..e2e4f6d8150d 100644
--- a/arch/powerpc/platforms/44x/fsp2.c
+++ b/arch/powerpc/platforms/44x/fsp2.c
@@ -14,11 +14,11 @@
*/
#include <linux/init.h>
+#include <linux/of_fdt.h>
#include <linux/of_platform.h>
#include <linux/rtc.h>
#include <asm/machdep.h>
-#include <asm/prom.h>
#include <asm/udbg.h>
#include <asm/time.h>
#include <asm/uic.h>
@@ -197,7 +197,7 @@ static irqreturn_t rst_wrn_handler(int irq, void *data) {
}
}
-static void node_irq_request(const char *compat, irq_handler_t errirq_handler)
+static void __init node_irq_request(const char *compat, irq_handler_t errirq_handler)
{
struct device_node *np;
unsigned int irq;
@@ -208,6 +208,7 @@ static void node_irq_request(const char *compat, irq_handler_t errirq_handler)
if (irq == NO_IRQ) {
pr_err("device tree node %pOFn is missing a interrupt",
np);
+ of_node_put(np);
return;
}
@@ -215,12 +216,13 @@ static void node_irq_request(const char *compat, irq_handler_t errirq_handler)
if (rc) {
pr_err("fsp_of_probe: request_irq failed: np=%pOF rc=%d",
np, rc);
+ of_node_put(np);
return;
}
}
}
-static void critical_irq_setup(void)
+static void __init critical_irq_setup(void)
{
node_irq_request(FSP2_CMU_ERR, cmu_err_handler);
node_irq_request(FSP2_BUS_ERR, bus_err_handler);
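The two of_node_put() calls added to node_irq_request() close a device-node reference leak on the early-return paths. A minimal sketch of the rule, assuming the node came from an iterator or lookup that takes a reference (the surrounding loop is inferred, not shown in this hunk):

	struct device_node *np;
	unsigned int irq;

	for_each_compatible_node(np, NULL, compat) {
		irq = irq_of_parse_and_map(np, 0);
		if (irq == NO_IRQ) {
			of_node_put(np);	/* drop the reference held on 'np' before bailing out */
			return;
		}
		/* request_irq(); any failure path must likewise call of_node_put(np) */
	}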
diff --git a/arch/powerpc/platforms/44x/machine_check.c b/arch/powerpc/platforms/44x/machine_check.c
index 90ad6ac529d2..5d19daacd78a 100644
--- a/arch/powerpc/platforms/44x/machine_check.c
+++ b/arch/powerpc/platforms/44x/machine_check.c
@@ -7,10 +7,11 @@
#include <linux/ptrace.h>
#include <asm/reg.h>
+#include <asm/cacheflush.h>
int machine_check_440A(struct pt_regs *regs)
{
- unsigned long reason = regs->dsisr;
+ unsigned long reason = regs->esr;
printk("Machine check in kernel mode.\n");
if (reason & ESR_IMCP){
@@ -47,7 +48,7 @@ int machine_check_440A(struct pt_regs *regs)
#ifdef CONFIG_PPC_47x
int machine_check_47x(struct pt_regs *regs)
{
- unsigned long reason = regs->dsisr;
+ unsigned long reason = regs->esr;
u32 mcsr;
printk(KERN_ERR "Machine check in kernel mode.\n");
diff --git a/arch/powerpc/platforms/44x/ppc44x_simple.c b/arch/powerpc/platforms/44x/ppc44x_simple.c
index 3dbd8ddd734a..2a0dcdf04b21 100644
--- a/arch/powerpc/platforms/44x/ppc44x_simple.c
+++ b/arch/powerpc/platforms/44x/ppc44x_simple.c
@@ -13,7 +13,6 @@
#include <asm/machdep.h>
#include <asm/pci-bridge.h>
#include <asm/ppc4xx.h>
-#include <asm/prom.h>
#include <asm/time.h>
#include <asm/udbg.h>
#include <asm/uic.h>
diff --git a/arch/powerpc/platforms/44x/ppc476.c b/arch/powerpc/platforms/44x/ppc476.c
index cba83eee685c..7c91ac5a5241 100644
--- a/arch/powerpc/platforms/44x/ppc476.c
+++ b/arch/powerpc/platforms/44x/ppc476.c
@@ -19,11 +19,11 @@
#include <linux/init.h>
#include <linux/of.h>
+#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/rtc.h>
#include <asm/machdep.h>
-#include <asm/prom.h>
#include <asm/udbg.h>
#include <asm/time.h>
#include <asm/uic.h>
@@ -86,8 +86,7 @@ static void __noreturn avr_reset_system(char *cmd)
avr_halt_system(AVR_PWRCTL_RESET);
}
-static int avr_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int avr_probe(struct i2c_client *client)
{
avr_i2c_client = client;
ppc_md.restart = avr_reset_system;
@@ -104,7 +103,7 @@ static struct i2c_driver avr_driver = {
.driver = {
.name = "akebono-avr",
},
- .probe = avr_probe,
+ .probe_new = avr_probe,
.id_table = avr_id,
};
@@ -141,6 +140,8 @@ static void __init ppc47x_init_irq(void)
ppc_md.get_irq = mpic_get_irq;
} else
panic("Unrecognized top level interrupt controller");
+
+ of_node_put(np);
}
#ifdef CONFIG_SMP
@@ -220,7 +221,7 @@ static int board_rev = -1;
static int __init ppc47x_get_board_rev(void)
{
int reg;
- u8 *fpga;
+ u8 __iomem *fpga;
struct device_node *np = NULL;
if (of_machine_is_compatible("ibm,currituck")) {
@@ -234,7 +235,7 @@ static int __init ppc47x_get_board_rev(void)
if (!np)
goto fail;
- fpga = (u8 *) of_iomap(np, 0);
+ fpga = of_iomap(np, 0);
of_node_put(np);
if (!fpga)
goto fail;
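The Akebono AVR reset driver is moved to the single-argument i2c probe callback. A minimal sketch of the converted registration, using only names visible in this hunk:

	static int avr_probe(struct i2c_client *client)		/* i2c_device_id argument dropped */
	{
		avr_i2c_client = client;
		ppc_md.restart = avr_reset_system;
		return 0;
	}

	static struct i2c_driver avr_driver = {
		.driver    = { .name = "akebono-avr" },
		.probe_new = avr_probe,				/* single-argument variant */
		.id_table  = avr_id,
	};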
diff --git a/arch/powerpc/platforms/44x/sam440ep.c b/arch/powerpc/platforms/44x/sam440ep.c
index 68ba4b009da0..ed854b53877e 100644
--- a/arch/powerpc/platforms/44x/sam440ep.c
+++ b/arch/powerpc/platforms/44x/sam440ep.c
@@ -17,7 +17,6 @@
#include <linux/of_platform.h>
#include <asm/machdep.h>
-#include <asm/prom.h>
#include <asm/udbg.h>
#include <asm/time.h>
#include <asm/uic.h>
diff --git a/arch/powerpc/platforms/44x/virtex.c b/arch/powerpc/platforms/44x/virtex.c
deleted file mode 100644
index 3eb13ed926ee..000000000000
--- a/arch/powerpc/platforms/44x/virtex.c
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * Xilinx Virtex 5FXT based board support, derived from
- * the Xilinx Virtex (IIpro & 4FX) based board support
- *
- * Copyright 2007 Secret Lab Technologies Ltd.
- * Copyright 2008 Xilinx, Inc.
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
- */
-
-#include <linux/init.h>
-#include <linux/of_platform.h>
-#include <asm/machdep.h>
-#include <asm/prom.h>
-#include <asm/time.h>
-#include <asm/xilinx_intc.h>
-#include <asm/xilinx_pci.h>
-#include <asm/reg.h>
-#include <asm/ppc4xx.h>
-#include "44x.h"
-
-static const struct of_device_id xilinx_of_bus_ids[] __initconst = {
- { .compatible = "simple-bus", },
- { .compatible = "xlnx,plb-v46-1.00.a", },
- { .compatible = "xlnx,plb-v46-1.02.a", },
- { .compatible = "xlnx,plb-v34-1.01.a", },
- { .compatible = "xlnx,plb-v34-1.02.a", },
- { .compatible = "xlnx,opb-v20-1.10.c", },
- { .compatible = "xlnx,dcr-v29-1.00.a", },
- { .compatible = "xlnx,compound", },
- {}
-};
-
-static int __init virtex_device_probe(void)
-{
- of_platform_bus_probe(NULL, xilinx_of_bus_ids, NULL);
-
- return 0;
-}
-machine_device_initcall(virtex, virtex_device_probe);
-
-static int __init virtex_probe(void)
-{
- if (!of_machine_is_compatible("xlnx,virtex440"))
- return 0;
-
- return 1;
-}
-
-define_machine(virtex) {
- .name = "Xilinx Virtex440",
- .probe = virtex_probe,
- .setup_arch = xilinx_pci_init,
- .init_IRQ = xilinx_intc_init_tree,
- .get_irq = xintc_get_irq,
- .calibrate_decr = generic_calibrate_decr,
- .restart = ppc4xx_reset_system,
-};
diff --git a/arch/powerpc/platforms/44x/virtex_ml510.c b/arch/powerpc/platforms/44x/virtex_ml510.c
deleted file mode 100644
index 349f218b335c..000000000000
--- a/arch/powerpc/platforms/44x/virtex_ml510.c
+++ /dev/null
@@ -1,30 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <asm/i8259.h>
-#include <linux/pci.h>
-#include "44x.h"
-
-/**
- * ml510_ail_quirk
- */
-static void ml510_ali_quirk(struct pci_dev *dev)
-{
- /* Enable the IDE controller */
- pci_write_config_byte(dev, 0x58, 0x4c);
- /* Assign irq 14 to the primary ide channel */
- pci_write_config_byte(dev, 0x44, 0x0d);
- /* Assign irq 15 to the secondary ide channel */
- pci_write_config_byte(dev, 0x75, 0x0f);
- /* Set the ide controller in native mode */
- pci_write_config_byte(dev, 0x09, 0xff);
-
- /* INTB = disabled, INTA = disabled */
- pci_write_config_byte(dev, 0x48, 0x00);
- /* INTD = disabled, INTC = disabled */
- pci_write_config_byte(dev, 0x4a, 0x00);
- /* Audio = INT7, Modem = disabled. */
- pci_write_config_byte(dev, 0x4b, 0x60);
- /* USB = INT7 */
- pci_write_config_byte(dev, 0x74, 0x06);
-}
-DECLARE_PCI_FIXUP_EARLY(0x10b9, 0x1533, ml510_ali_quirk);
-
diff --git a/arch/powerpc/platforms/44x/warp.c b/arch/powerpc/platforms/44x/warp.c
index 6620b64e4963..f03432ef010b 100644
--- a/arch/powerpc/platforms/44x/warp.c
+++ b/arch/powerpc/platforms/44x/warp.c
@@ -11,12 +11,13 @@
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/of_gpio.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <asm/machdep.h>
-#include <asm/prom.h>
#include <asm/udbg.h>
#include <asm/time.h>
#include <asm/uic.h>
@@ -43,9 +44,6 @@ static int __init warp_probe(void)
if (!of_machine_is_compatible("pika,warp"))
return 0;
- /* For arch_dma_alloc */
- ISA_DMA_THRESHOLD = ~0L;
-
return 1;
}
diff --git a/arch/powerpc/platforms/4xx/Makefile b/arch/powerpc/platforms/4xx/Makefile
index d009d2e0b9e8..2071a0abe09b 100644
--- a/arch/powerpc/platforms/4xx/Makefile
+++ b/arch/powerpc/platforms/4xx/Makefile
@@ -3,6 +3,5 @@ obj-y += uic.o machine_check.o
obj-$(CONFIG_4xx_SOC) += soc.o
obj-$(CONFIG_PCI) += pci.o
obj-$(CONFIG_PPC4xx_HSTA_MSI) += hsta_msi.o
-obj-$(CONFIG_PPC4xx_MSI) += msi.o
obj-$(CONFIG_PPC4xx_CPM) += cpm.o
obj-$(CONFIG_PPC4xx_GPIO) += gpio.o
diff --git a/arch/powerpc/platforms/4xx/cpm.c b/arch/powerpc/platforms/4xx/cpm.c
index ae8b812c9202..182e12855c27 100644
--- a/arch/powerpc/platforms/4xx/cpm.c
+++ b/arch/powerpc/platforms/4xx/cpm.c
@@ -63,7 +63,7 @@ static unsigned int cpm_set(unsigned int cpm_reg, unsigned int mask)
* known as class 1, 2 and 3. For class 1 units, they are
* unconditionally put to sleep when the corresponding CPM bit is
* set. For class 2 and 3 units this is not case; if they can be
- * put to to sleep, they will. Here we do not verify, we just
+ * put to sleep, they will. Here we do not verify, we just
* set them and expect them to eventually go off when they can.
*/
value = dcr_read(cpm.dcr_host, cpm.dcr_offset[cpm_reg]);
@@ -163,7 +163,7 @@ static ssize_t cpm_idle_store(struct kobject *kobj,
static struct kobj_attribute cpm_idle_attr =
__ATTR(idle, 0644, cpm_idle_show, cpm_idle_store);
-static void cpm_idle_config_sysfs(void)
+static void __init cpm_idle_config_sysfs(void)
{
struct device *dev;
unsigned long ret;
@@ -231,7 +231,7 @@ static const struct platform_suspend_ops cpm_suspend_ops = {
.enter = cpm_suspend_enter,
};
-static int cpm_get_uint_property(struct device_node *np,
+static int __init cpm_get_uint_property(struct device_node *np,
const char *name)
{
int len;
@@ -327,6 +327,6 @@ late_initcall(cpm_init);
static int __init cpm_powersave_off(char *arg)
{
cpm.powersave_off = 1;
- return 0;
+ return 1;
}
__setup("powersave=off", cpm_powersave_off);
diff --git a/arch/powerpc/platforms/4xx/hsta_msi.c b/arch/powerpc/platforms/4xx/hsta_msi.c
index c950fed43b32..d4f7fff1fc87 100644
--- a/arch/powerpc/platforms/4xx/hsta_msi.c
+++ b/arch/powerpc/platforms/4xx/hsta_msi.c
@@ -10,6 +10,7 @@
#include <linux/interrupt.h>
#include <linux/msi.h>
#include <linux/of.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/pci.h>
#include <linux/semaphore.h>
@@ -47,7 +48,7 @@ static int hsta_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
return -EINVAL;
}
- for_each_pci_msi_entry(entry, dev) {
+ msi_for_each_desc(entry, &dev->dev, MSI_DESC_NOTASSOCIATED) {
irq = msi_bitmap_alloc_hwirqs(&ppc4xx_hsta_msi.bmp, 1);
if (irq < 0) {
pr_debug("%s: Failed to allocate msi interrupt\n",
@@ -105,10 +106,7 @@ static void hsta_teardown_msi_irqs(struct pci_dev *dev)
struct msi_desc *entry;
int irq;
- for_each_pci_msi_entry(entry, dev) {
- if (!entry->irq)
- continue;
-
+ msi_for_each_desc(entry, &dev->dev, MSI_DESC_ASSOCIATED) {
irq = hsta_find_hwirq_offset(entry->irq);
/* entry->irq should always be in irq_map */
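hsta_msi is converted from the old for_each_pci_msi_entry() walker to msi_for_each_desc(), whose filter argument replaces the open-coded entry->irq checks. A hedged sketch of the two loops, built only from identifiers in this diff:

	struct msi_desc *entry;
	int irq;

	/* setup: visit only descriptors that do not yet have an interrupt */
	msi_for_each_desc(entry, &dev->dev, MSI_DESC_NOTASSOCIATED) {
		irq = msi_bitmap_alloc_hwirqs(&ppc4xx_hsta_msi.bmp, 1);
		if (irq < 0)
			break;
		/* ... map the hwirq and write the MSI message as before ... */
	}

	/* teardown: visit only descriptors that were actually associated */
	msi_for_each_desc(entry, &dev->dev, MSI_DESC_ASSOCIATED) {
		irq = hsta_find_hwirq_offset(entry->irq);
		/* ... dispose of the mapping and free the bitmap slot ... */
	}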
diff --git a/arch/powerpc/platforms/4xx/machine_check.c b/arch/powerpc/platforms/4xx/machine_check.c
index a71c29892a91..a905da1d6f41 100644
--- a/arch/powerpc/platforms/4xx/machine_check.c
+++ b/arch/powerpc/platforms/4xx/machine_check.c
@@ -10,7 +10,7 @@
int machine_check_4xx(struct pt_regs *regs)
{
- unsigned long reason = regs->dsisr;
+ unsigned long reason = regs->esr;
if (reason & ESR_IMCP) {
printk("Instruction");
diff --git a/arch/powerpc/platforms/4xx/msi.c b/arch/powerpc/platforms/4xx/msi.c
deleted file mode 100644
index 1051564b94f2..000000000000
--- a/arch/powerpc/platforms/4xx/msi.c
+++ /dev/null
@@ -1,281 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Adding PCI-E MSI support for PPC4XX SoCs.
- *
- * Copyright (c) 2010, Applied Micro Circuits Corporation
- * Authors: Tirumala R Marri <tmarri@apm.com>
- * Feng Kan <fkan@apm.com>
- */
-
-#include <linux/irq.h>
-#include <linux/pci.h>
-#include <linux/msi.h>
-#include <linux/of_platform.h>
-#include <linux/interrupt.h>
-#include <linux/export.h>
-#include <linux/kernel.h>
-#include <asm/prom.h>
-#include <asm/hw_irq.h>
-#include <asm/ppc-pci.h>
-#include <asm/dcr.h>
-#include <asm/dcr-regs.h>
-#include <asm/msi_bitmap.h>
-
-#define PEIH_TERMADH 0x00
-#define PEIH_TERMADL 0x08
-#define PEIH_MSIED 0x10
-#define PEIH_MSIMK 0x18
-#define PEIH_MSIASS 0x20
-#define PEIH_FLUSH0 0x30
-#define PEIH_FLUSH1 0x38
-#define PEIH_CNTRST 0x48
-
-static int msi_irqs;
-
-struct ppc4xx_msi {
- u32 msi_addr_lo;
- u32 msi_addr_hi;
- void __iomem *msi_regs;
- int *msi_virqs;
- struct msi_bitmap bitmap;
- struct device_node *msi_dev;
-};
-
-static struct ppc4xx_msi ppc4xx_msi;
-
-static int ppc4xx_msi_init_allocator(struct platform_device *dev,
- struct ppc4xx_msi *msi_data)
-{
- int err;
-
- err = msi_bitmap_alloc(&msi_data->bitmap, msi_irqs,
- dev->dev.of_node);
- if (err)
- return err;
-
- err = msi_bitmap_reserve_dt_hwirqs(&msi_data->bitmap);
- if (err < 0) {
- msi_bitmap_free(&msi_data->bitmap);
- return err;
- }
-
- return 0;
-}
-
-static int ppc4xx_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
-{
- int int_no = -ENOMEM;
- unsigned int virq;
- struct msi_msg msg;
- struct msi_desc *entry;
- struct ppc4xx_msi *msi_data = &ppc4xx_msi;
-
- dev_dbg(&dev->dev, "PCIE-MSI:%s called. vec %x type %d\n",
- __func__, nvec, type);
- if (type == PCI_CAP_ID_MSIX)
- pr_debug("ppc4xx msi: MSI-X untested, trying anyway.\n");
-
- msi_data->msi_virqs = kmalloc_array(msi_irqs, sizeof(int), GFP_KERNEL);
- if (!msi_data->msi_virqs)
- return -ENOMEM;
-
- for_each_pci_msi_entry(entry, dev) {
- int_no = msi_bitmap_alloc_hwirqs(&msi_data->bitmap, 1);
- if (int_no >= 0)
- break;
- if (int_no < 0) {
- pr_debug("%s: fail allocating msi interrupt\n",
- __func__);
- }
- virq = irq_of_parse_and_map(msi_data->msi_dev, int_no);
- if (!virq) {
- dev_err(&dev->dev, "%s: fail mapping irq\n", __func__);
- msi_bitmap_free_hwirqs(&msi_data->bitmap, int_no, 1);
- return -ENOSPC;
- }
- dev_dbg(&dev->dev, "%s: virq = %d\n", __func__, virq);
-
- /* Setup msi address space */
- msg.address_hi = msi_data->msi_addr_hi;
- msg.address_lo = msi_data->msi_addr_lo;
-
- irq_set_msi_desc(virq, entry);
- msg.data = int_no;
- pci_write_msi_msg(virq, &msg);
- }
- return 0;
-}
-
-void ppc4xx_teardown_msi_irqs(struct pci_dev *dev)
-{
- struct msi_desc *entry;
- struct ppc4xx_msi *msi_data = &ppc4xx_msi;
- irq_hw_number_t hwirq;
-
- dev_dbg(&dev->dev, "PCIE-MSI: tearing down msi irqs\n");
-
- for_each_pci_msi_entry(entry, dev) {
- if (!entry->irq)
- continue;
- hwirq = virq_to_hw(entry->irq);
- irq_set_msi_desc(entry->irq, NULL);
- irq_dispose_mapping(entry->irq);
- msi_bitmap_free_hwirqs(&msi_data->bitmap, hwirq, 1);
- }
-}
-
-static int ppc4xx_setup_pcieh_hw(struct platform_device *dev,
- struct resource res, struct ppc4xx_msi *msi)
-{
- const u32 *msi_data;
- const u32 *msi_mask;
- const u32 *sdr_addr;
- dma_addr_t msi_phys;
- void *msi_virt;
- int err;
-
- sdr_addr = of_get_property(dev->dev.of_node, "sdr-base", NULL);
- if (!sdr_addr)
- return -EINVAL;
-
- msi_data = of_get_property(dev->dev.of_node, "msi-data", NULL);
- if (!msi_data)
- return -EINVAL;
-
- msi_mask = of_get_property(dev->dev.of_node, "msi-mask", NULL);
- if (!msi_mask)
- return -EINVAL;
-
- msi->msi_dev = of_find_node_by_name(NULL, "ppc4xx-msi");
- if (!msi->msi_dev)
- return -ENODEV;
-
- msi->msi_regs = of_iomap(msi->msi_dev, 0);
- if (!msi->msi_regs) {
- dev_err(&dev->dev, "of_iomap failed\n");
- err = -ENOMEM;
- goto node_put;
- }
- dev_dbg(&dev->dev, "PCIE-MSI: msi register mapped 0x%x 0x%x\n",
- (u32) (msi->msi_regs + PEIH_TERMADH), (u32) (msi->msi_regs));
-
- msi_virt = dma_alloc_coherent(&dev->dev, 64, &msi_phys, GFP_KERNEL);
- if (!msi_virt) {
- err = -ENOMEM;
- goto iounmap;
- }
- msi->msi_addr_hi = upper_32_bits(msi_phys);
- msi->msi_addr_lo = lower_32_bits(msi_phys & 0xffffffff);
- dev_dbg(&dev->dev, "PCIE-MSI: msi address high 0x%x, low 0x%x\n",
- msi->msi_addr_hi, msi->msi_addr_lo);
-
- mtdcri(SDR0, *sdr_addr, upper_32_bits(res.start)); /*HIGH addr */
- mtdcri(SDR0, *sdr_addr + 1, lower_32_bits(res.start)); /* Low addr */
-
- /* Progam the Interrupt handler Termination addr registers */
- out_be32(msi->msi_regs + PEIH_TERMADH, msi->msi_addr_hi);
- out_be32(msi->msi_regs + PEIH_TERMADL, msi->msi_addr_lo);
-
- /* Program MSI Expected data and Mask bits */
- out_be32(msi->msi_regs + PEIH_MSIED, *msi_data);
- out_be32(msi->msi_regs + PEIH_MSIMK, *msi_mask);
-
- dma_free_coherent(&dev->dev, 64, msi_virt, msi_phys);
-
- return 0;
-
-iounmap:
- iounmap(msi->msi_regs);
-node_put:
- of_node_put(msi->msi_dev);
- return err;
-}
-
-static int ppc4xx_of_msi_remove(struct platform_device *dev)
-{
- struct ppc4xx_msi *msi = dev->dev.platform_data;
- int i;
- int virq;
-
- for (i = 0; i < msi_irqs; i++) {
- virq = msi->msi_virqs[i];
- if (virq)
- irq_dispose_mapping(virq);
- }
-
- if (msi->bitmap.bitmap)
- msi_bitmap_free(&msi->bitmap);
- iounmap(msi->msi_regs);
- of_node_put(msi->msi_dev);
-
- return 0;
-}
-
-static int ppc4xx_msi_probe(struct platform_device *dev)
-{
- struct ppc4xx_msi *msi;
- struct resource res;
- int err = 0;
- struct pci_controller *phb;
-
- dev_dbg(&dev->dev, "PCIE-MSI: Setting up MSI support...\n");
-
- msi = devm_kzalloc(&dev->dev, sizeof(*msi), GFP_KERNEL);
- if (!msi)
- return -ENOMEM;
- dev->dev.platform_data = msi;
-
- /* Get MSI ranges */
- err = of_address_to_resource(dev->dev.of_node, 0, &res);
- if (err) {
- dev_err(&dev->dev, "%pOF resource error!\n", dev->dev.of_node);
- return err;
- }
-
- msi_irqs = of_irq_count(dev->dev.of_node);
- if (!msi_irqs)
- return -ENODEV;
-
- err = ppc4xx_setup_pcieh_hw(dev, res, msi);
- if (err)
- return err;
-
- err = ppc4xx_msi_init_allocator(dev, msi);
- if (err) {
- dev_err(&dev->dev, "Error allocating MSI bitmap\n");
- goto error_out;
- }
- ppc4xx_msi = *msi;
-
- list_for_each_entry(phb, &hose_list, list_node) {
- phb->controller_ops.setup_msi_irqs = ppc4xx_setup_msi_irqs;
- phb->controller_ops.teardown_msi_irqs = ppc4xx_teardown_msi_irqs;
- }
- return 0;
-
-error_out:
- ppc4xx_of_msi_remove(dev);
- return err;
-}
-static const struct of_device_id ppc4xx_msi_ids[] = {
- {
- .compatible = "amcc,ppc4xx-msi",
- },
- {}
-};
-static struct platform_driver ppc4xx_msi_driver = {
- .probe = ppc4xx_msi_probe,
- .remove = ppc4xx_of_msi_remove,
- .driver = {
- .name = "ppc4xx-msi",
- .of_match_table = ppc4xx_msi_ids,
- },
-
-};
-
-static __init int ppc4xx_msi_init(void)
-{
- return platform_driver_register(&ppc4xx_msi_driver);
-}
-
-subsys_initcall(ppc4xx_msi_init);
diff --git a/arch/powerpc/platforms/4xx/pci.c b/arch/powerpc/platforms/4xx/pci.c
index e6e2adcc7b64..ca5dd7a5842a 100644
--- a/arch/powerpc/platforms/4xx/pci.c
+++ b/arch/powerpc/platforms/4xx/pci.c
@@ -22,6 +22,7 @@
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/of.h>
+#include <linux/of_address.h>
#include <linux/delay.h>
#include <linux/slab.h>
@@ -1242,7 +1243,7 @@ static void __init ppc460sx_pciex_check_link(struct ppc4xx_pciex_port *port)
if (mbase == NULL) {
printk(KERN_ERR "%pOF: Can't map internal config space !",
port->node);
- goto done;
+ return;
}
while (attempt && (0 == (in_le32(mbase + PECFG_460SX_DLLSTA)
@@ -1252,9 +1253,7 @@ static void __init ppc460sx_pciex_check_link(struct ppc4xx_pciex_port *port)
}
if (attempt)
port->link = 1;
-done:
iounmap(mbase);
-
}
static struct ppc4xx_pciex_hwops ppc460sx_pcie_hwops __initdata = {
@@ -1275,7 +1274,7 @@ static int __init ppc405ex_pciex_core_init(struct device_node *np)
return 2;
}
-static void ppc405ex_pcie_phy_reset(struct ppc4xx_pciex_port *port)
+static void __init ppc405ex_pcie_phy_reset(struct ppc4xx_pciex_port *port)
{
/* Assert the PE0_PHY reset */
mtdcri(SDR0, port->sdr_base + PESDRn_RCSSET, 0x01010000);
diff --git a/arch/powerpc/platforms/4xx/uic.c b/arch/powerpc/platforms/4xx/uic.c
index 36fb66ce54cf..d667ad039bd3 100644
--- a/arch/powerpc/platforms/4xx/uic.c
+++ b/arch/powerpc/platforms/4xx/uic.c
@@ -19,9 +19,10 @@
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
#include <asm/irq.h>
#include <asm/io.h>
-#include <asm/prom.h>
#include <asm/dcr.h>
#define NR_UIC_INTS 32
@@ -198,7 +199,6 @@ static void uic_irq_cascade(struct irq_desc *desc)
struct uic *uic = irq_desc_get_handler_data(desc);
u32 msr;
int src;
- int subvirq;
raw_spin_lock(&desc->lock);
if (irqd_is_level_type(idata))
@@ -213,8 +213,7 @@ static void uic_irq_cascade(struct irq_desc *desc)
src = 32 - ffs(msr);
- subvirq = irq_linear_revmap(uic->irqhost, src);
- generic_handle_irq(subvirq);
+ generic_handle_domain_irq(uic->irqhost, src);
uic_irq_ret:
raw_spin_lock(&desc->lock);
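This is the first of several cascade handlers in the series (uic here, plus the MPC5121 ADS CPLD PIC, media5200 FPGA and mpc52xx GPT below) converted to generic_handle_domain_irq(), which folds the domain revmap and the dispatch into one call. Before/after, using the uic names from this hunk:

	/* before: look up the Linux virq, then dispatch it */
	subvirq = irq_linear_revmap(uic->irqhost, src);
	generic_handle_irq(subvirq);

	/* after: hand the domain and the hardware source number straight in */
	generic_handle_domain_irq(uic->irqhost, src);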
diff --git a/arch/powerpc/platforms/512x/clock-commonclk.c b/arch/powerpc/platforms/512x/clock-commonclk.c
index 30342b60aa63..42abeba4f698 100644
--- a/arch/powerpc/platforms/512x/clock-commonclk.c
+++ b/arch/powerpc/platforms/512x/clock-commonclk.c
@@ -97,7 +97,7 @@ static enum soc_type {
MPC512x_SOC_MPC5125,
} soc;
-static void mpc512x_clk_determine_soc(void)
+static void __init mpc512x_clk_determine_soc(void)
{
if (of_machine_is_compatible("fsl,mpc5121")) {
soc = MPC512x_SOC_MPC5121;
@@ -113,98 +113,98 @@ static void mpc512x_clk_determine_soc(void)
}
}
-static bool soc_has_mbx(void)
+static bool __init soc_has_mbx(void)
{
if (soc == MPC512x_SOC_MPC5121)
return true;
return false;
}
-static bool soc_has_axe(void)
+static bool __init soc_has_axe(void)
{
if (soc == MPC512x_SOC_MPC5125)
return false;
return true;
}
-static bool soc_has_viu(void)
+static bool __init soc_has_viu(void)
{
if (soc == MPC512x_SOC_MPC5125)
return false;
return true;
}
-static bool soc_has_spdif(void)
+static bool __init soc_has_spdif(void)
{
if (soc == MPC512x_SOC_MPC5125)
return false;
return true;
}
-static bool soc_has_pata(void)
+static bool __init soc_has_pata(void)
{
if (soc == MPC512x_SOC_MPC5125)
return false;
return true;
}
-static bool soc_has_sata(void)
+static bool __init soc_has_sata(void)
{
if (soc == MPC512x_SOC_MPC5125)
return false;
return true;
}
-static bool soc_has_pci(void)
+static bool __init soc_has_pci(void)
{
if (soc == MPC512x_SOC_MPC5125)
return false;
return true;
}
-static bool soc_has_fec2(void)
+static bool __init soc_has_fec2(void)
{
if (soc == MPC512x_SOC_MPC5125)
return true;
return false;
}
-static int soc_max_pscnum(void)
+static int __init soc_max_pscnum(void)
{
if (soc == MPC512x_SOC_MPC5125)
return 10;
return 12;
}
-static bool soc_has_sdhc2(void)
+static bool __init soc_has_sdhc2(void)
{
if (soc == MPC512x_SOC_MPC5125)
return true;
return false;
}
-static bool soc_has_nfc_5125(void)
+static bool __init soc_has_nfc_5125(void)
{
if (soc == MPC512x_SOC_MPC5125)
return true;
return false;
}
-static bool soc_has_outclk(void)
+static bool __init soc_has_outclk(void)
{
if (soc == MPC512x_SOC_MPC5125)
return true;
return false;
}
-static bool soc_has_cpmf_0_bypass(void)
+static bool __init soc_has_cpmf_0_bypass(void)
{
if (soc == MPC512x_SOC_MPC5125)
return true;
return false;
}
-static bool soc_has_mclk_mux0_canin(void)
+static bool __init soc_has_mclk_mux0_canin(void)
{
if (soc == MPC512x_SOC_MPC5125)
return true;
@@ -294,7 +294,7 @@ static inline int get_bit_field(uint32_t __iomem *reg, uint8_t pos, uint8_t len)
}
/* get the SPMF and translate it into the "sys pll" multiplier */
-static int get_spmf_mult(void)
+static int __init get_spmf_mult(void)
{
static int spmf_to_mult[] = {
68, 1, 12, 16, 20, 24, 28, 32,
@@ -312,7 +312,7 @@ static int get_spmf_mult(void)
* values returned from here are a multiple of the real factor since the
* divide ratio is fractional
*/
-static int get_sys_div_x2(void)
+static int __init get_sys_div_x2(void)
{
static int sysdiv_code_to_x2[] = {
4, 5, 6, 7, 8, 9, 10, 14,
@@ -333,7 +333,7 @@ static int get_sys_div_x2(void)
* values returned from here are a multiple of the real factor since the
* multiplier ratio is fractional
*/
-static int get_cpmf_mult_x2(void)
+static int __init get_cpmf_mult_x2(void)
{
static int cpmf_to_mult_x36[] = {
/* 0b000 is "times 36" */
@@ -379,7 +379,7 @@ static const struct clk_div_table divtab_1234[] = {
{ .div = 0, },
};
-static int get_freq_from_dt(char *propname)
+static int __init get_freq_from_dt(char *propname)
{
struct device_node *np;
const unsigned int *prop;
@@ -396,7 +396,7 @@ static int get_freq_from_dt(char *propname)
return val;
}
-static void mpc512x_clk_preset_data(void)
+static void __init mpc512x_clk_preset_data(void)
{
size_t i;
@@ -418,7 +418,7 @@ static void mpc512x_clk_preset_data(void)
* SYS -> CSB -> IPS) from the REF clock rate and the returned mul/div
* values
*/
-static void mpc512x_clk_setup_ref_clock(struct device_node *np, int bus_freq,
+static void __init mpc512x_clk_setup_ref_clock(struct device_node *np, int bus_freq,
int *sys_mul, int *sys_div,
int *ips_div)
{
@@ -592,7 +592,7 @@ static struct mclk_setup_data mclk_outclk_data[] = {
};
/* setup the MCLK clock subtree of an individual PSC/MSCAN/SPDIF */
-static void mpc512x_clk_setup_mclk(struct mclk_setup_data *entry, size_t idx)
+static void __init mpc512x_clk_setup_mclk(struct mclk_setup_data *entry, size_t idx)
{
size_t clks_idx_pub, clks_idx_int;
u32 __iomem *mccr_reg; /* MCLK control register (mux, en, div) */
@@ -663,7 +663,7 @@ static void mpc512x_clk_setup_mclk(struct mclk_setup_data *entry, size_t idx)
* the PSC/MSCAN/SPDIF (serial drivers et al) need the MCLK
* for their bitrate
* - in the absence of "aliases" for clocks we need to create
- * individial 'struct clk' items for whatever might get
+ * individual 'struct clk' items for whatever might get
* referenced or looked up, even if several of those items are
* identical from the logical POV (their rate value)
* - for easier future maintenance and for better reflection of
@@ -701,7 +701,7 @@ static void mpc512x_clk_setup_mclk(struct mclk_setup_data *entry, size_t idx)
/* }}} MCLK helpers */
-static void mpc512x_clk_setup_clock_tree(struct device_node *np, int busfreq)
+static void __init mpc512x_clk_setup_clock_tree(struct device_node *np, int busfreq)
{
int sys_mul, sys_div, ips_div;
int mul, div;
@@ -937,7 +937,7 @@ static void mpc512x_clk_setup_clock_tree(struct device_node *np, int busfreq)
* registers the set of public clocks (those listed in the dt-bindings/
* header file) for OF lookups, keeps the intermediates private to us
*/
-static void mpc5121_clk_register_of_provider(struct device_node *np)
+static void __init mpc5121_clk_register_of_provider(struct device_node *np)
{
clk_data.clks = clks;
clk_data.clk_num = MPC512x_CLK_LAST_PUBLIC + 1; /* _not_ ARRAY_SIZE() */
@@ -948,9 +948,9 @@ static void mpc5121_clk_register_of_provider(struct device_node *np)
* temporary support for the period of time between introduction of CCF
* support and the adjustment of peripheral drivers to OF based lookups
*/
-static void mpc5121_clk_provide_migration_support(void)
+static void __init mpc5121_clk_provide_migration_support(void)
{
-
+ struct device_node *np;
/*
* pre-enable those clock items which are not yet appropriately
* acquired by their peripheral driver
@@ -970,7 +970,9 @@ static void mpc5121_clk_provide_migration_support(void)
* unused and so it gets disabled
*/
clk_prepare_enable(clks[MPC512x_CLK_PSC3_MCLK]);/* serial console */
- if (of_find_compatible_node(NULL, "pci", "fsl,mpc5121-pci"))
+ np = of_find_compatible_node(NULL, "pci", "fsl,mpc5121-pci");
+ of_node_put(np);
+ if (np)
clk_prepare_enable(clks[MPC512x_CLK_PCI]);
}
@@ -1009,7 +1011,7 @@ static void mpc5121_clk_provide_migration_support(void)
* case of not yet adjusted device tree data, where clock related specs
* are missing)
*/
-static void mpc5121_clk_provide_backwards_compat(void)
+static void __init mpc5121_clk_provide_backwards_compat(void)
{
enum did_reg_flags {
DID_REG_PSC = BIT(0),
@@ -1208,6 +1210,8 @@ int __init mpc5121_clk_init(void)
/* register as an OF clock provider */
mpc5121_clk_register_of_provider(clk_np);
+ of_node_put(clk_np);
+
/*
* unbreak not yet adjusted peripheral drivers during migration
* towards fully operational common clock support, and allow
diff --git a/arch/powerpc/platforms/512x/mpc5121_ads.c b/arch/powerpc/platforms/512x/mpc5121_ads.c
index 6303fbfc4e4f..fc3fb999cd74 100644
--- a/arch/powerpc/platforms/512x/mpc5121_ads.c
+++ b/arch/powerpc/platforms/512x/mpc5121_ads.c
@@ -14,7 +14,6 @@
#include <asm/machdep.h>
#include <asm/ipic.h>
-#include <asm/prom.h>
#include <asm/time.h>
#include <sysdev/fsl_pci.h>
@@ -24,21 +23,23 @@
static void __init mpc5121_ads_setup_arch(void)
{
-#ifdef CONFIG_PCI
- struct device_node *np;
-#endif
printk(KERN_INFO "MPC5121 ADS board from Freescale Semiconductor\n");
/*
* cpld regs are needed early
*/
mpc5121_ads_cpld_map();
+ mpc512x_setup_arch();
+}
+
+static void __init mpc5121_ads_setup_pci(void)
+{
#ifdef CONFIG_PCI
+ struct device_node *np;
+
for_each_compatible_node(np, "pci", "fsl,mpc5121-pci")
mpc83xx_add_bridge(np);
#endif
-
- mpc512x_setup_arch();
}
static void __init mpc5121_ads_init_IRQ(void)
@@ -64,6 +65,7 @@ define_machine(mpc5121_ads) {
.name = "MPC5121 ADS",
.probe = mpc5121_ads_probe,
.setup_arch = mpc5121_ads_setup_arch,
+ .discover_phbs = mpc5121_ads_setup_pci,
.init = mpc512x_init,
.init_IRQ = mpc5121_ads_init_IRQ,
.get_irq = ipic_get_irq,
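PCI host-bridge discovery moves out of setup_arch() and into the machdep discover_phbs() hook, which the core invokes later in boot; the same conversion is applied to the other 512x/52xx boards further down. A sketch of the resulting machine description, with hypothetical example_* names standing in for the per-board functions:

	define_machine(example_board) {
		.name          = "example",
		.probe         = example_probe,
		.setup_arch    = example_setup_arch,	/* no longer walks the PCI nodes */
		.discover_phbs = example_setup_pci,	/* mpc83xx_add_bridge()/mpc52xx_setup_pci() live here */
		.init_IRQ      = example_init_irq,
		.get_irq       = ipic_get_irq,
	};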
diff --git a/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c b/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c
index b2981634f1f8..6f08d07aee3b 100644
--- a/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c
+++ b/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c
@@ -14,7 +14,8 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/io.h>
-#include <asm/prom.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
static struct device_node *cpld_pic_node;
static struct irq_domain *cpld_pic_host;
@@ -81,11 +82,10 @@ static struct irq_chip cpld_pic = {
.irq_unmask = cpld_unmask_irq,
};
-static int
+static unsigned int
cpld_pic_get_irq(int offset, u8 ignore, u8 __iomem *statusp,
u8 __iomem *maskp)
{
- int cpld_irq;
u8 status = in_8(statusp);
u8 mask = in_8(maskp);
@@ -93,28 +93,26 @@ cpld_pic_get_irq(int offset, u8 ignore, u8 __iomem *statusp,
status |= (ignore | mask);
if (status == 0xff)
- return 0;
-
- cpld_irq = ffz(status) + offset;
+ return ~0;
- return irq_linear_revmap(cpld_pic_host, cpld_irq);
+ return ffz(status) + offset;
}
static void cpld_pic_cascade(struct irq_desc *desc)
{
- unsigned int irq;
+ unsigned int hwirq;
- irq = cpld_pic_get_irq(0, PCI_IGNORE, &cpld_regs->pci_status,
+ hwirq = cpld_pic_get_irq(0, PCI_IGNORE, &cpld_regs->pci_status,
&cpld_regs->pci_mask);
- if (irq) {
- generic_handle_irq(irq);
+ if (hwirq != ~0) {
+ generic_handle_domain_irq(cpld_pic_host, hwirq);
return;
}
- irq = cpld_pic_get_irq(8, MISC_IGNORE, &cpld_regs->misc_status,
+ hwirq = cpld_pic_get_irq(8, MISC_IGNORE, &cpld_regs->misc_status,
&cpld_regs->misc_mask);
- if (irq) {
- generic_handle_irq(irq);
+ if (hwirq != ~0) {
+ generic_handle_domain_irq(cpld_pic_host, hwirq);
return;
}
}
diff --git a/arch/powerpc/platforms/512x/mpc512x.h b/arch/powerpc/platforms/512x/mpc512x.h
index fff225901e2f..2f3c60e373e1 100644
--- a/arch/powerpc/platforms/512x/mpc512x.h
+++ b/arch/powerpc/platforms/512x/mpc512x.h
@@ -12,8 +12,8 @@ extern void __init mpc512x_init_early(void);
extern void __init mpc512x_init(void);
extern void __init mpc512x_setup_arch(void);
extern int __init mpc5121_clk_init(void);
-extern const char *mpc512x_select_psc_compat(void);
-extern const char *mpc512x_select_reset_compat(void);
+const char *__init mpc512x_select_psc_compat(void);
+const char *__init mpc512x_select_reset_compat(void);
extern void __noreturn mpc512x_restart(char *cmd);
#endif /* __MPC512X_H__ */
diff --git a/arch/powerpc/platforms/512x/mpc512x_generic.c b/arch/powerpc/platforms/512x/mpc512x_generic.c
index 303bc308b2e6..364564c995bd 100644
--- a/arch/powerpc/platforms/512x/mpc512x_generic.c
+++ b/arch/powerpc/platforms/512x/mpc512x_generic.c
@@ -13,7 +13,6 @@
#include <asm/machdep.h>
#include <asm/ipic.h>
-#include <asm/prom.h>
#include <asm/time.h>
#include "mpc512x.h"
diff --git a/arch/powerpc/platforms/512x/mpc512x_shared.c b/arch/powerpc/platforms/512x/mpc512x_shared.c
index 7a9ae9591d60..5ac0ead2540f 100644
--- a/arch/powerpc/platforms/512x/mpc512x_shared.c
+++ b/arch/powerpc/platforms/512x/mpc512x_shared.c
@@ -12,6 +12,7 @@
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/irq.h>
+#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/fsl-diu-fb.h>
#include <linux/memblock.h>
@@ -20,7 +21,6 @@
#include <asm/cacheflush.h>
#include <asm/machdep.h>
#include <asm/ipic.h>
-#include <asm/prom.h>
#include <asm/time.h>
#include <asm/mpc5121.h>
#include <asm/mpc52xx_psc.h>
@@ -289,7 +289,7 @@ static void __init mpc512x_setup_diu(void)
/*
* We do not allocate and configure new area for bitmap buffer
- * because it would requere copying bitmap data (splash image)
+ * because it would require copying bitmap data (splash image)
* and so negatively affect boot time. Instead we reserve the
* already configured frame buffer area so that it won't be
* destroyed. The starting address of the area to reserve and
@@ -352,7 +352,7 @@ static void __init mpc512x_declare_of_platform_devices(void)
#define DEFAULT_FIFO_SIZE 16
-const char *mpc512x_select_psc_compat(void)
+const char *__init mpc512x_select_psc_compat(void)
{
if (of_machine_is_compatible("fsl,mpc5121"))
return "fsl,mpc5121-psc";
@@ -363,7 +363,7 @@ const char *mpc512x_select_psc_compat(void)
return NULL;
}
-const char *mpc512x_select_reset_compat(void)
+const char *__init mpc512x_select_reset_compat(void)
{
if (of_machine_is_compatible("fsl,mpc5121"))
return "fsl,mpc5121-reset";
diff --git a/arch/powerpc/platforms/52xx/Kconfig b/arch/powerpc/platforms/52xx/Kconfig
index 99d60acc20c8..b72ed2950ca8 100644
--- a/arch/powerpc/platforms/52xx/Kconfig
+++ b/arch/powerpc/platforms/52xx/Kconfig
@@ -34,7 +34,7 @@ config PPC_EFIKA
bool "bPlan Efika 5k2. MPC5200B based computer"
depends on PPC_MPC52xx
select PPC_RTAS
- select PPC_NATIVE
+ select PPC_HASH_MMU_NATIVE
config PPC_LITE5200
bool "Freescale Lite5200 Eval Board"
diff --git a/arch/powerpc/platforms/52xx/efika.c b/arch/powerpc/platforms/52xx/efika.c
index 61538869e88a..e0647720ed5e 100644
--- a/arch/powerpc/platforms/52xx/efika.c
+++ b/arch/powerpc/platforms/52xx/efika.c
@@ -14,7 +14,6 @@
#include <linux/pci.h>
#include <linux/of.h>
#include <asm/dma.h>
-#include <asm/prom.h>
#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/rtas.h>
@@ -185,8 +184,6 @@ static void __init efika_setup_arch(void)
/* Map important registers from the internal memory map */
mpc52xx_map_common_devices();
- efika_pcisetup();
-
#ifdef CONFIG_PM
mpc52xx_suspend.board_suspend_prepare = efika_suspend_prepare;
mpc52xx_pm_init();
@@ -205,7 +202,6 @@ static int __init efika_probe(void)
if (strcmp(model, "EFIKA5K2"))
return 0;
- ISA_DMA_THRESHOLD = ~0L;
DMA_MODE_READ = 0x44;
DMA_MODE_WRITE = 0x48;
@@ -219,6 +215,7 @@ define_machine(efika)
.name = EFIKA_PLATFORM_NAME,
.probe = efika_probe,
.setup_arch = efika_setup_arch,
+ .discover_phbs = efika_pcisetup,
.init = mpc52xx_declare_of_platform_devices,
.show_cpuinfo = efika_show_cpuinfo,
.init_IRQ = mpc52xx_init_irq,
diff --git a/arch/powerpc/platforms/52xx/lite5200.c b/arch/powerpc/platforms/52xx/lite5200.c
index 3181aac08225..7ea9b6ce0591 100644
--- a/arch/powerpc/platforms/52xx/lite5200.c
+++ b/arch/powerpc/platforms/52xx/lite5200.c
@@ -21,7 +21,6 @@
#include <asm/time.h>
#include <asm/io.h>
#include <asm/machdep.h>
-#include <asm/prom.h>
#include <asm/mpc52xx.h>
/* ************************************************************************
@@ -165,8 +164,6 @@ static void __init lite5200_setup_arch(void)
mpc52xx_suspend.board_resume_finish = lite5200_resume_finish;
lite5200_pm_init();
#endif
-
- mpc52xx_setup_pci();
}
static const char * const board[] __initconst = {
@@ -187,6 +184,7 @@ define_machine(lite5200) {
.name = "lite5200",
.probe = lite5200_probe,
.setup_arch = lite5200_setup_arch,
+ .discover_phbs = mpc52xx_setup_pci,
.init = mpc52xx_declare_of_platform_devices,
.init_IRQ = mpc52xx_init_irq,
.get_irq = mpc52xx_get_irq,
diff --git a/arch/powerpc/platforms/52xx/lite5200_pm.c b/arch/powerpc/platforms/52xx/lite5200_pm.c
index e7da22d1df87..129313b1d021 100644
--- a/arch/powerpc/platforms/52xx/lite5200_pm.c
+++ b/arch/powerpc/platforms/52xx/lite5200_pm.c
@@ -1,6 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/init.h>
#include <linux/suspend.h>
+#include <linux/of_address.h>
+
#include <asm/io.h>
#include <asm/time.h>
#include <asm/mpc52xx.h>
diff --git a/arch/powerpc/platforms/52xx/lite5200_sleep.S b/arch/powerpc/platforms/52xx/lite5200_sleep.S
index 3a9969c429b3..afee8b1515a8 100644
--- a/arch/powerpc/platforms/52xx/lite5200_sleep.S
+++ b/arch/powerpc/platforms/52xx/lite5200_sleep.S
@@ -56,7 +56,7 @@ lite5200_low_power:
/*
* save stuff BDI overwrites
* 0xf0 (0xe0->0x100 gets overwritten when BDI connected;
- * even when CONFIG_BDI* is disabled and MMU XLAT commented; heisenbug?))
+ * even when CONFIG_BDI_SWITCH is disabled and MMU XLAT commented; heisenbug?))
* WARNING: self-refresh doesn't seem to work when BDI2000 is connected,
* possibly because BDI sets SDRAM registers before wakeup code does
*/
@@ -181,7 +181,7 @@ sram_code:
udelay: /* r11 - tb_ticks_per_usec, r12 - usecs, overwrites r13 */
mullw r12, r12, r11
mftb r13 /* start */
- addi r12, r13, r12 /* end */
+ add r12, r13, r12 /* end */
1:
mftb r13 /* current */
cmp cr0, r13, r12
@@ -248,6 +248,7 @@ mmu_on:
blr
+_ASM_NOKPROBE_SYMBOL(lite5200_wakeup)
/* ---------------------------------------------------------------------- */
@@ -391,6 +392,7 @@ restore_regs:
LOAD_SPRN(TBWU, 0x5b);
blr
+_ASM_NOKPROBE_SYMBOL(restore_regs)
diff --git a/arch/powerpc/platforms/52xx/media5200.c b/arch/powerpc/platforms/52xx/media5200.c
index 07c5bc4ed0b5..33a35fff11b5 100644
--- a/arch/powerpc/platforms/52xx/media5200.c
+++ b/arch/powerpc/platforms/52xx/media5200.c
@@ -20,8 +20,9 @@
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <asm/time.h>
-#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/mpc52xx.h>
@@ -78,7 +79,7 @@ static struct irq_chip media5200_irq_chip = {
static void media5200_irq_cascade(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
- int sub_virq, val;
+ int val;
u32 status, enable;
/* Mask off the cascaded IRQ */
@@ -92,11 +93,10 @@ static void media5200_irq_cascade(struct irq_desc *desc)
enable = in_be32(media5200_irq.regs + MEDIA5200_IRQ_STATUS);
val = ffs((status & enable) >> MEDIA5200_IRQ_SHIFT);
if (val) {
- sub_virq = irq_linear_revmap(media5200_irq.irqhost, val - 1);
- /* pr_debug("%s: virq=%i s=%.8x e=%.8x hwirq=%i subvirq=%i\n",
- * __func__, virq, status, enable, val - 1, sub_virq);
+ generic_handle_domain_irq(media5200_irq.irqhost, val - 1);
+ /* pr_debug("%s: virq=%i s=%.8x e=%.8x hwirq=%i\n",
+ * __func__, virq, status, enable, val - 1);
*/
- generic_handle_irq(sub_virq);
}
/* Processing done; can reenable the cascade now */
@@ -174,6 +174,8 @@ static void __init media5200_init_irq(void)
goto out;
pr_debug("%s: allocated irqhost\n", __func__);
+ of_node_put(fpga_np);
+
irq_set_handler_data(cascade_virq, &media5200_irq);
irq_set_chained_handler(cascade_virq, media5200_irq_cascade);
@@ -181,6 +183,7 @@ static void __init media5200_init_irq(void)
out:
pr_err("Could not find Media5200 FPGA; PCI interrupts will not work\n");
+ of_node_put(fpga_np);
}
/*
@@ -202,8 +205,6 @@ static void __init media5200_setup_arch(void)
/* Some mpc5200 & mpc5200b related configuration */
mpc5200_setup_xlb_arbiter();
- mpc52xx_setup_pci();
-
np = of_find_matching_node(NULL, mpc5200_gpio_ids);
gpio = of_iomap(np, 0);
of_node_put(np);
@@ -244,6 +245,7 @@ define_machine(media5200_platform) {
.name = "media5200-platform",
.probe = media5200_probe,
.setup_arch = media5200_setup_arch,
+ .discover_phbs = mpc52xx_setup_pci,
.init = mpc52xx_declare_of_platform_devices,
.init_IRQ = media5200_init_irq,
.get_irq = mpc52xx_get_irq,
diff --git a/arch/powerpc/platforms/52xx/mpc5200_simple.c b/arch/powerpc/platforms/52xx/mpc5200_simple.c
index 2d01e9b2e779..cc349d579061 100644
--- a/arch/powerpc/platforms/52xx/mpc5200_simple.c
+++ b/arch/powerpc/platforms/52xx/mpc5200_simple.c
@@ -22,8 +22,8 @@
*/
#undef DEBUG
+#include <linux/of.h>
#include <asm/time.h>
-#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/mpc52xx.h>
@@ -40,8 +40,6 @@ static void __init mpc5200_simple_setup_arch(void)
/* Some mpc5200 & mpc5200b related configuration */
mpc5200_setup_xlb_arbiter();
-
- mpc52xx_setup_pci();
}
/* list of the supported boards */
@@ -73,6 +71,7 @@ define_machine(mpc5200_simple_platform) {
.name = "mpc5200-simple-platform",
.probe = mpc5200_simple_probe,
.setup_arch = mpc5200_simple_setup_arch,
+ .discover_phbs = mpc52xx_setup_pci,
.init = mpc52xx_declare_of_platform_devices,
.init_IRQ = mpc52xx_init_irq,
.get_irq = mpc52xx_get_irq,
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_common.c b/arch/powerpc/platforms/52xx/mpc52xx_common.c
index 565e3a83dc9e..409c0ec06265 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_common.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_common.c
@@ -15,11 +15,11 @@
#include <linux/gpio.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
+#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/of_gpio.h>
#include <linux/export.h>
#include <asm/io.h>
-#include <asm/prom.h>
#include <asm/mpc52xx.h>
/* MPC5200 device tree match tables */
@@ -204,43 +204,6 @@ int mpc52xx_set_psc_clkdiv(int psc_id, int clkdiv)
EXPORT_SYMBOL(mpc52xx_set_psc_clkdiv);
/**
- * mpc52xx_get_xtal_freq - Get SYS_XTAL_IN frequency for a device
- *
- * @node: device node
- *
- * Returns the frequency of the external oscillator clock connected
- * to the SYS_XTAL_IN pin, or 0 if it cannot be determined.
- */
-unsigned int mpc52xx_get_xtal_freq(struct device_node *node)
-{
- u32 val;
- unsigned int freq;
-
- if (!mpc52xx_cdm)
- return 0;
-
- freq = mpc5xxx_get_bus_frequency(node);
- if (!freq)
- return 0;
-
- if (in_8(&mpc52xx_cdm->ipb_clk_sel) & 0x1)
- freq *= 2;
-
- val = in_be32(&mpc52xx_cdm->rstcfg);
- if (val & (1 << 5))
- freq *= 8;
- else
- freq *= 4;
- if (val & (1 << 6))
- freq /= 12;
- else
- freq /= 16;
-
- return freq;
-}
-EXPORT_SYMBOL(mpc52xx_get_xtal_freq);
-
-/**
* mpc52xx_restart: ppc_md->restart hook for mpc5200 using the watchdog timer
*/
void __noreturn mpc52xx_restart(char *cmd)
@@ -308,7 +271,7 @@ int mpc5200_psc_ac97_gpio_reset(int psc_number)
spin_lock_irqsave(&gpio_lock, flags);
- /* Reconfiure pin-muxing to gpio */
+ /* Reconfigure pin-muxing to gpio */
mux = in_be32(&simple_gpio->port_config);
out_be32(&simple_gpio->port_config, mux & (~gpio));
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_gpt.c b/arch/powerpc/platforms/52xx/mpc52xx_gpt.c
index 8c0d324f657e..e43e08d991ea 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_gpt.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_gpt.c
@@ -5,7 +5,7 @@
* Copyright (c) 2009 Secret Lab Technologies Ltd.
* Copyright (c) 2008 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix
*
- * This file is a driver for the the General Purpose Timer (gpt) devices
+ * This file is a driver for the General Purpose Timer (gpt) devices
* found on the MPC5200 SoC. Each timer has an IO pin which can be used
* for GPIO or can be used to raise interrupts. The timer function can
* be used independently from the IO pin, or it can be used to control
@@ -55,9 +55,12 @@
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/of_gpio.h>
#include <linux/kernel.h>
+#include <linux/property.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/watchdog.h>
@@ -190,14 +193,11 @@ static struct irq_chip mpc52xx_gpt_irq_chip = {
static void mpc52xx_gpt_irq_cascade(struct irq_desc *desc)
{
struct mpc52xx_gpt_priv *gpt = irq_desc_get_handler_data(desc);
- int sub_virq;
u32 status;
status = in_be32(&gpt->regs->status) & MPC52xx_GPT_STATUS_IRQMASK;
- if (status) {
- sub_virq = irq_linear_revmap(gpt->irqhost, 0);
- generic_handle_irq(sub_virq);
- }
+ if (status)
+ generic_handle_domain_irq(gpt->irqhost, 0);
}
static int mpc52xx_gpt_irq_map(struct irq_domain *h, unsigned int virq,
@@ -317,17 +317,15 @@ mpc52xx_gpt_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
return 0;
}
-static void
-mpc52xx_gpt_gpio_setup(struct mpc52xx_gpt_priv *gpt, struct device_node *node)
+static void mpc52xx_gpt_gpio_setup(struct mpc52xx_gpt_priv *gpt)
{
int rc;
- /* Only setup GPIO if the device tree claims the GPT is
- * a GPIO controller */
- if (!of_find_property(node, "gpio-controller", NULL))
+ /* Only setup GPIO if the device claims the GPT is a GPIO controller */
+ if (!device_property_present(gpt->dev, "gpio-controller"))
return;
- gpt->gc.label = kasprintf(GFP_KERNEL, "%pOF", node);
+ gpt->gc.label = kasprintf(GFP_KERNEL, "%pfw", dev_fwnode(gpt->dev));
if (!gpt->gc.label) {
dev_err(gpt->dev, "out of memory\n");
return;
@@ -339,7 +337,7 @@ mpc52xx_gpt_gpio_setup(struct mpc52xx_gpt_priv *gpt, struct device_node *node)
gpt->gc.get = mpc52xx_gpt_gpio_get;
gpt->gc.set = mpc52xx_gpt_gpio_set;
gpt->gc.base = -1;
- gpt->gc.of_node = node;
+ gpt->gc.parent = gpt->dev;
/* Setup external pin in GPIO mode */
clrsetbits_be32(&gpt->regs->mode, MPC52xx_GPT_MODE_MS_MASK,
@@ -352,8 +350,7 @@ mpc52xx_gpt_gpio_setup(struct mpc52xx_gpt_priv *gpt, struct device_node *node)
dev_dbg(gpt->dev, "%s() complete.\n", __func__);
}
#else /* defined(CONFIG_GPIOLIB) */
-static void
-mpc52xx_gpt_gpio_setup(struct mpc52xx_gpt_priv *p, struct device_node *np) { }
+static void mpc52xx_gpt_gpio_setup(struct mpc52xx_gpt_priv *gpt) { }
#endif /* defined(CONFIG_GPIOLIB) */
/***********************************************************************
@@ -401,7 +398,7 @@ static int mpc52xx_gpt_do_start(struct mpc52xx_gpt_priv *gpt, u64 period,
set |= MPC52xx_GPT_MODE_CONTINUOUS;
/* Determine the number of clocks in the requested period. 64 bit
- * arithmatic is done here to preserve the precision until the value
+ * arithmetic is done here to preserve the precision until the value
* is scaled back down into the u32 range. Period is in 'ns', bus
* frequency is in Hz. */
clocks = period * (u64)gpt->ipb_freq;
@@ -505,7 +502,7 @@ u64 mpc52xx_gpt_timer_period(struct mpc52xx_gpt_priv *gpt)
if (prescale == 0)
prescale = 0x10000;
period = period * prescale * 1000000000ULL;
- do_div(period, (u64)gpt->ipb_freq);
+ do_div(period, gpt->ipb_freq);
return period;
}
EXPORT_SYMBOL(mpc52xx_gpt_timer_period);
@@ -582,6 +579,7 @@ static long mpc52xx_wdt_ioctl(struct file *file, unsigned int cmd,
if (ret)
break;
/* fall through and return the timeout */
+ fallthrough;
case WDIOC_GETTIMEOUT:
/* we need to round here as to avoid e.g. the following
@@ -722,14 +720,14 @@ static int mpc52xx_gpt_probe(struct platform_device *ofdev)
raw_spin_lock_init(&gpt->lock);
gpt->dev = &ofdev->dev;
- gpt->ipb_freq = mpc5xxx_get_bus_frequency(ofdev->dev.of_node);
+ gpt->ipb_freq = mpc5xxx_get_bus_frequency(&ofdev->dev);
gpt->regs = of_iomap(ofdev->dev.of_node, 0);
if (!gpt->regs)
return -ENOMEM;
dev_set_drvdata(&ofdev->dev, gpt);
- mpc52xx_gpt_gpio_setup(gpt, ofdev->dev.of_node);
+ mpc52xx_gpt_gpio_setup(gpt);
mpc52xx_gpt_irq_setup(gpt, ofdev->dev.of_node);
mutex_lock(&mpc52xx_gpt_list_mutex);
@@ -755,11 +753,6 @@ static int mpc52xx_gpt_probe(struct platform_device *ofdev)
return 0;
}
-static int mpc52xx_gpt_remove(struct platform_device *ofdev)
-{
- return -EBUSY;
-}
-
static const struct of_device_id mpc52xx_gpt_match[] = {
{ .compatible = "fsl,mpc5200-gpt", },
@@ -772,10 +765,10 @@ static const struct of_device_id mpc52xx_gpt_match[] = {
static struct platform_driver mpc52xx_gpt_driver = {
.driver = {
.name = "mpc52xx-gpt",
+ .suppress_bind_attrs = true,
.of_match_table = mpc52xx_gpt_match,
},
.probe = mpc52xx_gpt_probe,
- .remove = mpc52xx_gpt_remove,
};
static int __init mpc52xx_gpt_init(void)
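The hunks above move the GPT GPIO setup from OF-specific calls (of_find_property(), %pOF, gc.of_node) to the generic firmware-node layer, so the function no longer needs a struct device_node argument. A minimal sketch of that pattern in isolation, using hypothetical foo_gpt_* names rather than the driver's real ones:

#include <linux/device.h>
#include <linux/gpio/driver.h>
#include <linux/kernel.h>
#include <linux/property.h>
#include <linux/slab.h>

struct foo_gpt {
	struct device *dev;
	struct gpio_chip gc;
};

static void foo_gpt_gpio_setup(struct foo_gpt *gpt)
{
	/* fwnode-level lookup works for OF and ACPI providers alike */
	if (!device_property_present(gpt->dev, "gpio-controller"))
		return;

	/* %pfw prints the firmware node path, replacing %pOF */
	gpt->gc.label = kasprintf(GFP_KERNEL, "%pfw", dev_fwnode(gpt->dev));
	if (!gpt->gc.label)
		return;

	gpt->gc.ngpio = 1;
	gpt->gc.base = -1;
	gpt->gc.parent = gpt->dev;	/* replaces the removed gc.of_node */

	if (gpiochip_add_data(&gpt->gc, gpt))
		dev_warn(gpt->dev, "gpiochip registration failed\n");
}

Setting gc.parent lets gpiolib derive the firmware node itself, which is why the explicit of_node assignment can go away.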
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c b/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c
index 05e19470d523..48038aaedbd3 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c
@@ -11,11 +11,12 @@
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <asm/io.h>
-#include <asm/prom.h>
#include <asm/mpc52xx.h>
#include <asm/time.h>
@@ -104,7 +105,7 @@ static void mpc52xx_lpbfifo_kick(struct mpc52xx_lpbfifo_request *req)
*
* Configure the watermarks so DMA will always complete correctly.
* It may be worth experimenting with the ALARM value to see if
- * there is a performance impacit. However, if it is wrong there
+ * there is a performance impact. However, if it is wrong there
* is a risk of DMA not transferring the last chunk of data
*/
if (write) {
@@ -229,7 +230,7 @@ static irqreturn_t mpc52xx_lpbfifo_irq(int irq, void *dev_id)
int dma, write, poll_dma;
spin_lock_irqsave(&lpbfifo.lock, flags);
- ts = get_tbl();
+ ts = mftb();
req = lpbfifo.req;
if (!req) {
@@ -307,7 +308,7 @@ static irqreturn_t mpc52xx_lpbfifo_irq(int irq, void *dev_id)
if (irq != 0) /* don't increment on polled case */
req->irq_count++;
- req->irq_ticks += get_tbl() - ts;
+ req->irq_ticks += mftb() - ts;
spin_unlock_irqrestore(&lpbfifo.lock, flags);
/* Spinlock is released; it is now safe to call the callback */
@@ -330,7 +331,7 @@ static irqreturn_t mpc52xx_lpbfifo_bcom_irq(int irq, void *dev_id)
u32 ts;
spin_lock_irqsave(&lpbfifo.lock, flags);
- ts = get_tbl();
+ ts = mftb();
req = lpbfifo.req;
if (!req || (req->flags & MPC52XX_LPBFIFO_FLAG_NO_DMA)) {
@@ -361,7 +362,7 @@ static irqreturn_t mpc52xx_lpbfifo_bcom_irq(int irq, void *dev_id)
lpbfifo.req = NULL;
/* Release the lock before calling out to the callback. */
- req->irq_ticks += get_tbl() - ts;
+ req->irq_ticks += mftb() - ts;
spin_unlock_irqrestore(&lpbfifo.lock, flags);
if (req->callback)
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pci.c b/arch/powerpc/platforms/52xx/mpc52xx_pci.c
index af0f79995214..859e2818c43d 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_pci.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_pci.c
@@ -13,6 +13,7 @@
#undef DEBUG
#include <linux/pci.h>
+#include <linux/of_address.h>
#include <asm/mpc52xx.h>
#include <asm/delay.h>
#include <asm/machdep.h>
@@ -242,7 +243,7 @@ mpc52xx_pci_setup(struct pci_controller *hose,
u32 tmp;
int iwcr0 = 0, iwcr1 = 0, iwcr2 = 0;
- pr_debug("mpc52xx_pci_setup(hose=%p, pci_regs=%p)\n", hose, pci_regs);
+ pr_debug("%s(hose=%p, pci_regs=%p)\n", __func__, hose, pci_regs);
/* pci_process_bridge_OF_ranges() found all our addresses for us;
* now store them in the right places */
@@ -257,11 +258,7 @@ mpc52xx_pci_setup(struct pci_controller *hose,
/* Memory windows */
res = &hose->mem_resources[0];
if (res->flags) {
- pr_debug("mem_resource[0] = "
- "{.start=%llx, .end=%llx, .flags=%llx}\n",
- (unsigned long long)res->start,
- (unsigned long long)res->end,
- (unsigned long long)res->flags);
+ pr_debug("mem_resource[0] = %pr\n", res);
out_be32(&pci_regs->iw0btar,
MPC52xx_PCI_IWBTAR_TRANSLATION(res->start, res->start,
resource_size(res)));
@@ -274,8 +271,7 @@ mpc52xx_pci_setup(struct pci_controller *hose,
res = &hose->mem_resources[1];
if (res->flags) {
- pr_debug("mem_resource[1] = {.start=%x, .end=%x, .flags=%lx}\n",
- res->start, res->end, res->flags);
+ pr_debug("mem_resource[1] = %pr\n", res);
out_be32(&pci_regs->iw1btar,
MPC52xx_PCI_IWBTAR_TRANSLATION(res->start, res->start,
resource_size(res)));
@@ -292,11 +288,8 @@ mpc52xx_pci_setup(struct pci_controller *hose,
printk(KERN_ERR "%s: Didn't find IO resources\n", __FILE__);
return;
}
- pr_debug(".io_resource={.start=%llx,.end=%llx,.flags=%llx} "
- ".io_base_phys=0x%p\n",
- (unsigned long long)res->start,
- (unsigned long long)res->end,
- (unsigned long long)res->flags, (void*)hose->io_base_phys);
+ pr_debug(".io_resource = %pr .io_base_phys=0x%pa\n",
+ res, &hose->io_base_phys);
out_be32(&pci_regs->iw2btar,
MPC52xx_PCI_IWBTAR_TRANSLATION(hose->io_base_phys,
res->start,
@@ -336,8 +329,7 @@ mpc52xx_pci_fixup_resources(struct pci_dev *dev)
{
int i;
- pr_debug("mpc52xx_pci_fixup_resources() %.4x:%.4x\n",
- dev->vendor, dev->device);
+ pr_debug("%s() %.4x:%.4x\n", __func__, dev->vendor, dev->device);
/* We don't rely on boot loader for PCI and resets all
devices */
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pic.c b/arch/powerpc/platforms/52xx/mpc52xx_pic.c
index fc98912f42cf..1e0a5e9644dc 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_pic.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_pic.c
@@ -101,8 +101,9 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <asm/io.h>
-#include <asm/prom.h>
#include <asm/mpc52xx.h>
/* HW IRQ mapping */
@@ -340,7 +341,7 @@ static int mpc52xx_irqhost_map(struct irq_domain *h, unsigned int virq,
{
int l1irq;
int l2irq;
- struct irq_chip *uninitialized_var(irqchip);
+ struct irq_chip *irqchip;
void *hndlr;
int type;
u32 reg;
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pm.c b/arch/powerpc/platforms/52xx/mpc52xx_pm.c
index b1d208ded981..549b3629e39a 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_pm.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_pm.c
@@ -2,6 +2,8 @@
#include <linux/init.h>
#include <linux/suspend.h>
#include <linux/io.h>
+#include <linux/of_address.h>
+
#include <asm/time.h>
#include <asm/cacheflush.h>
#include <asm/mpc52xx.h>
diff --git a/arch/powerpc/platforms/82xx/ep8248e.c b/arch/powerpc/platforms/82xx/ep8248e.c
index 369ebb1b7af1..28e627f8a320 100644
--- a/arch/powerpc/platforms/82xx/ep8248e.c
+++ b/arch/powerpc/platforms/82xx/ep8248e.c
@@ -20,7 +20,6 @@
#include <asm/machdep.h>
#include <asm/time.h>
#include <asm/mpc8260.h>
-#include <asm/prom.h>
#include <sysdev/fsl_soc.h>
#include <sysdev/cpm2_pic.h>
diff --git a/arch/powerpc/platforms/82xx/km82xx.c b/arch/powerpc/platforms/82xx/km82xx.c
index 745ed61df5d8..1c8bbf4251d9 100644
--- a/arch/powerpc/platforms/82xx/km82xx.c
+++ b/arch/powerpc/platforms/82xx/km82xx.c
@@ -20,7 +20,6 @@
#include <asm/machdep.h>
#include <linux/time.h>
#include <asm/mpc8260.h>
-#include <asm/prom.h>
#include <sysdev/fsl_soc.h>
#include <sysdev/cpm2_pic.h>
diff --git a/arch/powerpc/platforms/82xx/mpc8272_ads.c b/arch/powerpc/platforms/82xx/mpc8272_ads.c
index 3fe1a6593280..0b5b9dec16d5 100644
--- a/arch/powerpc/platforms/82xx/mpc8272_ads.c
+++ b/arch/powerpc/platforms/82xx/mpc8272_ads.c
@@ -171,7 +171,6 @@ static void __init mpc8272_ads_setup_arch(void)
iounmap(bcsr);
init_ioports();
- pq2_init_pci();
if (ppc_md.progress)
ppc_md.progress("mpc8272_ads_setup_arch(), finish", 0);
@@ -205,6 +204,7 @@ define_machine(mpc8272_ads)
.name = "Freescale MPC8272 ADS",
.probe = mpc8272_ads_probe,
.setup_arch = mpc8272_ads_setup_arch,
+ .discover_phbs = pq2_init_pci,
.init_IRQ = mpc8272_ads_pic_init,
.get_irq = cpm2_get_irq,
.calibrate_decr = generic_calibrate_decr,
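This board, and several that follow, stops calling its PCI setup from setup_arch() and instead points ppc_md.discover_phbs at it, so host bridges are discovered from the dedicated hook later in boot. A bare-bones sketch of that shape, with placeholder example_* names and a made-up compatible string:

#include <linux/of.h>
#include <asm/machdep.h>

static void __init example_setup_arch(void)
{
	/* board setup only; PCI discovery no longer happens here */
}

static void __init example_setup_pci(void)
{
	/* walk the device tree and register PCI host bridges */
}

static int __init example_probe(void)
{
	return of_machine_is_compatible("vendor,example-board");
}

define_machine(example_board) {
	.name          = "Example board",
	.probe         = example_probe,
	.setup_arch    = example_setup_arch,
	.discover_phbs = example_setup_pci,
};

The same .discover_phbs = pq2_init_pci / mpc83xx_setup_pci conversion repeats across the 82xx and 83xx board files below.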
diff --git a/arch/powerpc/platforms/82xx/pq2.c b/arch/powerpc/platforms/82xx/pq2.c
index 1cdd5ed9d896..3b5cb39a564c 100644
--- a/arch/powerpc/platforms/82xx/pq2.c
+++ b/arch/powerpc/platforms/82xx/pq2.c
@@ -10,6 +10,8 @@
* Copyright (c) 2006 MontaVista Software, Inc.
*/
+#include <linux/kprobes.h>
+
#include <asm/cpm2.h>
#include <asm/io.h>
#include <asm/pci-bridge.h>
@@ -29,6 +31,7 @@ void __noreturn pq2_restart(char *cmd)
panic("Restart failed\n");
}
+NOKPROBE_SYMBOL(pq2_restart)
#ifdef CONFIG_PCI
static int pq2_pci_exclude_device(struct pci_controller *hose,
diff --git a/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c b/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c
index 096cc0d59fd8..cf3210042a2e 100644
--- a/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c
+++ b/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c
@@ -14,9 +14,9 @@
#include <linux/irq.h>
#include <linux/types.h>
#include <linux/slab.h>
+#include <linux/of_irq.h>
#include <asm/io.h>
-#include <asm/prom.h>
#include <asm/cpm2.h>
#include "pq2.h"
@@ -91,10 +91,8 @@ static void pq2ads_pci_irq_demux(struct irq_desc *desc)
break;
for (bit = 0; pend != 0; ++bit, pend <<= 1) {
- if (pend & 0x80000000) {
- int virq = irq_linear_revmap(priv->host, bit);
- generic_handle_irq(virq);
- }
+ if (pend & 0x80000000)
+ generic_handle_domain_irq(priv->host, bit);
}
}
}
@@ -123,20 +121,17 @@ int __init pq2ads_pci_init_irq(void)
np = of_find_compatible_node(NULL, NULL, "fsl,pq2ads-pci-pic");
if (!np) {
printk(KERN_ERR "No pci pic node in device tree.\n");
- of_node_put(np);
goto out;
}
irq = irq_of_parse_and_map(np, 0);
if (!irq) {
printk(KERN_ERR "No interrupt in pci pic node.\n");
- of_node_put(np);
- goto out;
+ goto out_put_node;
}
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv) {
- of_node_put(np);
ret = -ENOMEM;
goto out_unmap_irq;
}
@@ -161,17 +156,17 @@ int __init pq2ads_pci_init_irq(void)
priv->host = host;
irq_set_handler_data(irq, priv);
irq_set_chained_handler(irq, pq2ads_pci_irq_demux);
-
- of_node_put(np);
- return 0;
+ ret = 0;
+ goto out_put_node;
out_unmap_regs:
iounmap(priv->regs);
out_free_kmalloc:
kfree(priv);
- of_node_put(np);
out_unmap_irq:
irq_dispose_mapping(irq);
+out_put_node:
+ of_node_put(np);
out:
return ret;
}
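The init-path rework above replaces the per-branch of_node_put() calls with a single exit label, and the demux hunk folds irq_linear_revmap() plus generic_handle_irq() into one generic_handle_domain_irq() call. A small sketch of the single-put error-handling shape, with hypothetical bar_* names and compatible string:

#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_irq.h>

static int __init bar_pic_init(void)
{
	struct device_node *np;
	int irq, ret = -ENODEV;

	np = of_find_compatible_node(NULL, NULL, "vendor,example-pic");
	if (!np)
		goto out;		/* nothing to release yet */

	irq = irq_of_parse_and_map(np, 0);
	if (!irq)
		goto out_put_node;	/* every later failure drops the ref once */

	ret = 0;
out_put_node:
	of_node_put(np);		/* the success path drops it here too */
out:
	return ret;
}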
diff --git a/arch/powerpc/platforms/82xx/pq2fads.c b/arch/powerpc/platforms/82xx/pq2fads.c
index a74082140718..ac9113d524af 100644
--- a/arch/powerpc/platforms/82xx/pq2fads.c
+++ b/arch/powerpc/platforms/82xx/pq2fads.c
@@ -150,8 +150,6 @@ static void __init pq2fads_setup_arch(void)
/* Enable external IRQs */
clrbits32(&cpm2_immr->im_siu_conf.siu_82xx.sc_siumcr, 0x0c000000);
- pq2_init_pci();
-
if (ppc_md.progress)
ppc_md.progress("pq2fads_setup_arch(), finish", 0);
}
@@ -184,6 +182,7 @@ define_machine(pq2fads)
.name = "Freescale PQ2FADS",
.probe = pq2fads_probe,
.setup_arch = pq2fads_setup_arch,
+ .discover_phbs = pq2_init_pci,
.init_IRQ = pq2fads_pic_init,
.get_irq = cpm2_get_irq,
.calibrate_decr = generic_calibrate_decr,
diff --git a/arch/powerpc/platforms/83xx/asp834x.c b/arch/powerpc/platforms/83xx/asp834x.c
index 28474876f41b..68061c2a57c1 100644
--- a/arch/powerpc/platforms/83xx/asp834x.c
+++ b/arch/powerpc/platforms/83xx/asp834x.c
@@ -44,6 +44,7 @@ define_machine(asp834x) {
.name = "ASP8347E",
.probe = asp834x_probe,
.setup_arch = asp834x_setup_arch,
+ .discover_phbs = mpc83xx_setup_pci,
.init_IRQ = mpc83xx_ipic_init_IRQ,
.get_irq = ipic_get_irq,
.restart = mpc83xx_restart,
diff --git a/arch/powerpc/platforms/83xx/km83xx.c b/arch/powerpc/platforms/83xx/km83xx.c
index ada42f03915a..907acdecc94a 100644
--- a/arch/powerpc/platforms/83xx/km83xx.c
+++ b/arch/powerpc/platforms/83xx/km83xx.c
@@ -29,7 +29,6 @@
#include <asm/machdep.h>
#include <asm/ipic.h>
#include <asm/irq.h>
-#include <asm/prom.h>
#include <asm/udbg.h>
#include <sysdev/fsl_soc.h>
#include <sysdev/fsl_pci.h>
@@ -39,7 +38,7 @@
#define SVR_REV(svr) (((svr) >> 0) & 0xFFFF) /* Revision field */
-static void quirk_mpc8360e_qe_enet10(void)
+static void __init quirk_mpc8360e_qe_enet10(void)
{
/*
* handle mpc8360E Erratum QE_ENET10:
@@ -53,17 +52,19 @@ static void quirk_mpc8360e_qe_enet10(void)
np_par = of_find_node_by_name(NULL, "par_io");
if (np_par == NULL) {
- pr_warn("%s couldn;t find par_io node\n", __func__);
+ pr_warn("%s couldn't find par_io node\n", __func__);
return;
}
/* Map Parallel I/O ports registers */
ret = of_address_to_resource(np_par, 0, &res);
if (ret) {
- pr_warn("%s couldn;t map par_io registers\n", __func__);
- return;
+ pr_warn("%s couldn't map par_io registers\n", __func__);
+ goto out;
}
base = ioremap(res.start, resource_size(&res));
+ if (!base)
+ goto out;
/*
* set output delay adjustments to default values according
@@ -111,6 +112,7 @@ static void quirk_mpc8360e_qe_enet10(void)
setbits32((base + 0xac), 0x0000c000);
}
iounmap(base);
+out:
of_node_put(np_par);
}
@@ -177,6 +179,7 @@ define_machine(mpc83xx_km) {
.name = "mpc83xx-km-platform",
.probe = mpc83xx_km_probe,
.setup_arch = mpc83xx_km_setup_arch,
+ .discover_phbs = mpc83xx_setup_pci,
.init_IRQ = mpc83xx_ipic_init_IRQ,
.get_irq = ipic_get_irq,
.restart = mpc83xx_restart,
diff --git a/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c b/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c
index 0967bdfb1691..77ed61306a73 100644
--- a/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c
+++ b/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c
@@ -8,17 +8,16 @@
*/
#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/i2c.h>
#include <linux/gpio/driver.h>
-#include <linux/of.h>
-#include <linux/of_gpio.h>
#include <linux/slab.h>
#include <linux/kthread.h>
+#include <linux/property.h>
#include <linux/reboot.h>
-#include <asm/prom.h>
#include <asm/machdep.h>
/*
@@ -116,33 +115,28 @@ static int mcu_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
static int mcu_gpiochip_add(struct mcu *mcu)
{
- struct device_node *np;
+ struct device *dev = &mcu->client->dev;
struct gpio_chip *gc = &mcu->gc;
- np = of_find_compatible_node(NULL, NULL, "fsl,mcu-mpc8349emitx");
- if (!np)
- return -ENODEV;
-
gc->owner = THIS_MODULE;
- gc->label = kasprintf(GFP_KERNEL, "%pOF", np);
+ gc->label = kasprintf(GFP_KERNEL, "%pfw", dev_fwnode(dev));
gc->can_sleep = 1;
gc->ngpio = MCU_NUM_GPIO;
gc->base = -1;
gc->set = mcu_gpio_set;
gc->direction_output = mcu_gpio_dir_out;
- gc->of_node = np;
+ gc->parent = dev;
return gpiochip_add_data(gc, mcu);
}
-static int mcu_gpiochip_remove(struct mcu *mcu)
+static void mcu_gpiochip_remove(struct mcu *mcu)
{
kfree(mcu->gc.label);
gpiochip_remove(&mcu->gc);
- return 0;
}
-static int mcu_probe(struct i2c_client *client, const struct i2c_device_id *id)
+static int mcu_probe(struct i2c_client *client)
{
struct mcu *mcu;
int ret;
@@ -184,10 +178,9 @@ err:
return ret;
}
-static int mcu_remove(struct i2c_client *client)
+static void mcu_remove(struct i2c_client *client)
{
struct mcu *mcu = i2c_get_clientdata(client);
- int ret;
kthread_stop(shutdown_thread);
@@ -198,11 +191,8 @@ static int mcu_remove(struct i2c_client *client)
glob_mcu = NULL;
}
- ret = mcu_gpiochip_remove(mcu);
- if (ret)
- return ret;
+ mcu_gpiochip_remove(mcu);
kfree(mcu);
- return 0;
}
static const struct i2c_device_id mcu_ids[] = {
@@ -221,7 +211,7 @@ static struct i2c_driver mcu_driver = {
.name = "mcu-mpc8349emitx",
.of_match_table = mcu_of_match_table,
},
- .probe = mcu_probe,
+ .probe_new = mcu_probe,
.remove = mcu_remove,
.id_table = mcu_ids,
};
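Besides the fwnode conversion, the MCU driver above is switched to the i2c_client-only probe callback (registered through .probe_new) and to a void remove(), since the I2C core ignores remove() return values. A stripped-down sketch of that driver shape, with hypothetical baz_* names:

#include <linux/i2c.h>
#include <linux/module.h>

static int baz_probe(struct i2c_client *client)
{
	/* no struct i2c_device_id argument in the new-style probe */
	return 0;
}

static void baz_remove(struct i2c_client *client)
{
	/* remove() cannot fail, so it returns void */
}

static const struct i2c_device_id baz_ids[] = {
	{ "baz" },
	{ }
};
MODULE_DEVICE_TABLE(i2c, baz_ids);

static struct i2c_driver baz_driver = {
	.driver = {
		.name = "baz",
	},
	.probe_new = baz_probe,
	.remove    = baz_remove,
	.id_table  = baz_ids,
};
module_i2c_driver(baz_driver);
MODULE_LICENSE("GPL");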
diff --git a/arch/powerpc/platforms/83xx/misc.c b/arch/powerpc/platforms/83xx/misc.c
index a952e91db3ee..2fb2a85d131f 100644
--- a/arch/powerpc/platforms/83xx/misc.c
+++ b/arch/powerpc/platforms/83xx/misc.c
@@ -121,19 +121,15 @@ void __init mpc83xx_setup_pci(void)
void __init mpc83xx_setup_arch(void)
{
+ phys_addr_t immrbase = get_immrbase();
+ int immrsize = IS_ALIGNED(immrbase, SZ_2M) ? SZ_2M : SZ_1M;
+ unsigned long va = fix_to_virt(FIX_IMMR_BASE);
+
if (ppc_md.progress)
ppc_md.progress("mpc83xx_setup_arch()", 0);
- if (!__map_without_bats) {
- phys_addr_t immrbase = get_immrbase();
- int immrsize = IS_ALIGNED(immrbase, SZ_2M) ? SZ_2M : SZ_1M;
- unsigned long va = fix_to_virt(FIX_IMMR_BASE);
-
- setbat(-1, va, immrbase, immrsize, PAGE_KERNEL_NCG);
- update_bats();
- }
-
- mpc83xx_setup_pci();
+ setbat(-1, va, immrbase, immrsize, PAGE_KERNEL_NCG);
+ update_bats();
}
int machine_check_83xx(struct pt_regs *regs)
diff --git a/arch/powerpc/platforms/83xx/mpc830x_rdb.c b/arch/powerpc/platforms/83xx/mpc830x_rdb.c
index 51426e88ec67..956d4389effa 100644
--- a/arch/powerpc/platforms/83xx/mpc830x_rdb.c
+++ b/arch/powerpc/platforms/83xx/mpc830x_rdb.c
@@ -48,6 +48,7 @@ define_machine(mpc830x_rdb) {
.name = "MPC830x RDB",
.probe = mpc830x_rdb_probe,
.setup_arch = mpc830x_rdb_setup_arch,
+ .discover_phbs = mpc83xx_setup_pci,
.init_IRQ = mpc83xx_ipic_init_IRQ,
.get_irq = ipic_get_irq,
.restart = mpc83xx_restart,
diff --git a/arch/powerpc/platforms/83xx/mpc831x_rdb.c b/arch/powerpc/platforms/83xx/mpc831x_rdb.c
index 5ccd57a48492..3b578f080e3b 100644
--- a/arch/powerpc/platforms/83xx/mpc831x_rdb.c
+++ b/arch/powerpc/platforms/83xx/mpc831x_rdb.c
@@ -48,6 +48,7 @@ define_machine(mpc831x_rdb) {
.name = "MPC831x RDB",
.probe = mpc831x_rdb_probe,
.setup_arch = mpc831x_rdb_setup_arch,
+ .discover_phbs = mpc83xx_setup_pci,
.init_IRQ = mpc83xx_ipic_init_IRQ,
.get_irq = ipic_get_irq,
.restart = mpc83xx_restart,
diff --git a/arch/powerpc/platforms/83xx/mpc832x_mds.c b/arch/powerpc/platforms/83xx/mpc832x_mds.c
index 6fa5402ebf20..435344405d2c 100644
--- a/arch/powerpc/platforms/83xx/mpc832x_mds.c
+++ b/arch/powerpc/platforms/83xx/mpc832x_mds.c
@@ -28,7 +28,6 @@
#include <asm/machdep.h>
#include <asm/ipic.h>
#include <asm/irq.h>
-#include <asm/prom.h>
#include <asm/udbg.h>
#include <sysdev/fsl_soc.h>
#include <sysdev/fsl_pci.h>
@@ -101,6 +100,7 @@ define_machine(mpc832x_mds) {
.name = "MPC832x MDS",
.probe = mpc832x_sys_probe,
.setup_arch = mpc832x_sys_setup_arch,
+ .discover_phbs = mpc83xx_setup_pci,
.init_IRQ = mpc83xx_ipic_init_IRQ,
.get_irq = ipic_get_irq,
.restart = mpc83xx_restart,
diff --git a/arch/powerpc/platforms/83xx/mpc832x_rdb.c b/arch/powerpc/platforms/83xx/mpc832x_rdb.c
index 622c625d5ce4..e12cb44e717f 100644
--- a/arch/powerpc/platforms/83xx/mpc832x_rdb.c
+++ b/arch/powerpc/platforms/83xx/mpc832x_rdb.c
@@ -15,6 +15,7 @@
#include <linux/spi/spi.h>
#include <linux/spi/mmc_spi.h>
#include <linux/mmc/host.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/fsl_devices.h>
@@ -161,6 +162,8 @@ static struct spi_board_info mpc832x_spi_boardinfo = {
static int __init mpc832x_spi_init(void)
{
+ struct device_node *np;
+
par_io_config_pin(3, 0, 3, 0, 1, 0); /* SPI1 MOSI, I/O */
par_io_config_pin(3, 1, 3, 0, 1, 0); /* SPI1 MISO, I/O */
par_io_config_pin(3, 2, 3, 0, 1, 0); /* SPI1 CLK, I/O */
@@ -174,7 +177,9 @@ static int __init mpc832x_spi_init(void)
* Don't bother with legacy stuff when device tree contains
* mmc-spi-slot node.
*/
- if (of_find_compatible_node(NULL, NULL, "mmc-spi-slot"))
+ np = of_find_compatible_node(NULL, NULL, "mmc-spi-slot");
+ of_node_put(np);
+ if (np)
return 0;
return fsl_spi_init(&mpc832x_spi_boardinfo, 1, mpc83xx_spi_cs_control);
}
@@ -219,6 +224,7 @@ define_machine(mpc832x_rdb) {
.name = "MPC832x RDB",
.probe = mpc832x_rdb_probe,
.setup_arch = mpc832x_rdb_setup_arch,
+ .discover_phbs = mpc83xx_setup_pci,
.init_IRQ = mpc83xx_ipic_init_IRQ,
.get_irq = ipic_get_irq,
.restart = mpc83xx_restart,
diff --git a/arch/powerpc/platforms/83xx/mpc834x_itx.c b/arch/powerpc/platforms/83xx/mpc834x_itx.c
index ebfd139bca20..6a110f275304 100644
--- a/arch/powerpc/platforms/83xx/mpc834x_itx.c
+++ b/arch/powerpc/platforms/83xx/mpc834x_itx.c
@@ -27,7 +27,6 @@
#include <asm/machdep.h>
#include <asm/ipic.h>
#include <asm/irq.h>
-#include <asm/prom.h>
#include <asm/udbg.h>
#include <sysdev/fsl_soc.h>
#include <sysdev/fsl_pci.h>
@@ -70,6 +69,7 @@ define_machine(mpc834x_itx) {
.name = "MPC834x ITX",
.probe = mpc834x_itx_probe,
.setup_arch = mpc834x_itx_setup_arch,
+ .discover_phbs = mpc83xx_setup_pci,
.init_IRQ = mpc83xx_ipic_init_IRQ,
.get_irq = ipic_get_irq,
.restart = mpc83xx_restart,
diff --git a/arch/powerpc/platforms/83xx/mpc834x_mds.c b/arch/powerpc/platforms/83xx/mpc834x_mds.c
index 356228e35279..7dde5a75332b 100644
--- a/arch/powerpc/platforms/83xx/mpc834x_mds.c
+++ b/arch/powerpc/platforms/83xx/mpc834x_mds.c
@@ -19,6 +19,7 @@
#include <linux/delay.h>
#include <linux/seq_file.h>
#include <linux/root_dev.h>
+#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/atomic.h>
@@ -27,7 +28,6 @@
#include <asm/machdep.h>
#include <asm/ipic.h>
#include <asm/irq.h>
-#include <asm/prom.h>
#include <asm/udbg.h>
#include <sysdev/fsl_soc.h>
#include <sysdev/fsl_pci.h>
@@ -35,7 +35,7 @@
#include "mpc83xx.h"
#define BCSR5_INT_USB 0x02
-static int mpc834xemds_usb_cfg(void)
+static int __init mpc834xemds_usb_cfg(void)
{
struct device_node *np;
void __iomem *bcsr_regs = NULL;
@@ -91,6 +91,7 @@ define_machine(mpc834x_mds) {
.name = "MPC834x MDS",
.probe = mpc834x_mds_probe,
.setup_arch = mpc834x_mds_setup_arch,
+ .discover_phbs = mpc83xx_setup_pci,
.init_IRQ = mpc83xx_ipic_init_IRQ,
.get_irq = ipic_get_irq,
.restart = mpc83xx_restart,
diff --git a/arch/powerpc/platforms/83xx/mpc836x_mds.c b/arch/powerpc/platforms/83xx/mpc836x_mds.c
index 90d9cbfae659..b1e6665be5d3 100644
--- a/arch/powerpc/platforms/83xx/mpc836x_mds.c
+++ b/arch/powerpc/platforms/83xx/mpc836x_mds.c
@@ -35,7 +35,6 @@
#include <asm/machdep.h>
#include <asm/ipic.h>
#include <asm/irq.h>
-#include <asm/prom.h>
#include <asm/udbg.h>
#include <sysdev/fsl_soc.h>
#include <sysdev/fsl_pci.h>
@@ -201,6 +200,7 @@ define_machine(mpc836x_mds) {
.name = "MPC836x MDS",
.probe = mpc836x_mds_probe,
.setup_arch = mpc836x_mds_setup_arch,
+ .discover_phbs = mpc83xx_setup_pci,
.init_IRQ = mpc83xx_ipic_init_IRQ,
.get_irq = ipic_get_irq,
.restart = mpc83xx_restart,
diff --git a/arch/powerpc/platforms/83xx/mpc836x_rdk.c b/arch/powerpc/platforms/83xx/mpc836x_rdk.c
index b4aac2cde849..731bc5ce726d 100644
--- a/arch/powerpc/platforms/83xx/mpc836x_rdk.c
+++ b/arch/powerpc/platforms/83xx/mpc836x_rdk.c
@@ -12,7 +12,6 @@
#include <linux/pci.h>
#include <linux/of_platform.h>
#include <linux/io.h>
-#include <asm/prom.h>
#include <asm/time.h>
#include <asm/ipic.h>
#include <asm/udbg.h>
@@ -41,6 +40,7 @@ define_machine(mpc836x_rdk) {
.name = "MPC836x RDK",
.probe = mpc836x_rdk_probe,
.setup_arch = mpc836x_rdk_setup_arch,
+ .discover_phbs = mpc83xx_setup_pci,
.init_IRQ = mpc83xx_ipic_init_IRQ,
.get_irq = ipic_get_irq,
.restart = mpc83xx_restart,
diff --git a/arch/powerpc/platforms/83xx/mpc837x_mds.c b/arch/powerpc/platforms/83xx/mpc837x_mds.c
index 9d3721c965be..fa3538803af7 100644
--- a/arch/powerpc/platforms/83xx/mpc837x_mds.c
+++ b/arch/powerpc/platforms/83xx/mpc837x_mds.c
@@ -9,12 +9,12 @@
#include <linux/pci.h>
#include <linux/of.h>
+#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <asm/time.h>
#include <asm/ipic.h>
#include <asm/udbg.h>
-#include <asm/prom.h>
#include <sysdev/fsl_pci.h>
#include "mpc83xx.h"
@@ -23,7 +23,7 @@
#define BCSR12_USB_SER_PIN 0x80
#define BCSR12_USB_SER_DEVICE 0x02
-static int mpc837xmds_usb_cfg(void)
+static int __init mpc837xmds_usb_cfg(void)
{
struct device_node *np;
const void *phy_type, *mode;
@@ -93,6 +93,7 @@ define_machine(mpc837x_mds) {
.name = "MPC837x MDS",
.probe = mpc837x_mds_probe,
.setup_arch = mpc837x_mds_setup_arch,
+ .discover_phbs = mpc83xx_setup_pci,
.init_IRQ = mpc83xx_ipic_init_IRQ,
.get_irq = ipic_get_irq,
.restart = mpc83xx_restart,
diff --git a/arch/powerpc/platforms/83xx/mpc837x_rdb.c b/arch/powerpc/platforms/83xx/mpc837x_rdb.c
index 7c45f7ac2607..5d48c6842098 100644
--- a/arch/powerpc/platforms/83xx/mpc837x_rdb.c
+++ b/arch/powerpc/platforms/83xx/mpc837x_rdb.c
@@ -18,7 +18,7 @@
#include "mpc83xx.h"
-static void mpc837x_rdb_sd_cfg(void)
+static void __init mpc837x_rdb_sd_cfg(void)
{
void __iomem *im;
@@ -73,6 +73,7 @@ define_machine(mpc837x_rdb) {
.name = "MPC837x RDB/WLAN",
.probe = mpc837x_rdb_probe,
.setup_arch = mpc837x_rdb_setup_arch,
+ .discover_phbs = mpc83xx_setup_pci,
.init_IRQ = mpc83xx_ipic_init_IRQ,
.get_irq = ipic_get_irq,
.restart = mpc83xx_restart,
diff --git a/arch/powerpc/platforms/83xx/mpc83xx.h b/arch/powerpc/platforms/83xx/mpc83xx.h
index f37d04332fc7..aea803ba3a15 100644
--- a/arch/powerpc/platforms/83xx/mpc83xx.h
+++ b/arch/powerpc/platforms/83xx/mpc83xx.h
@@ -68,15 +68,15 @@
extern void __noreturn mpc83xx_restart(char *cmd);
extern long mpc83xx_time_init(void);
-extern int mpc837x_usb_cfg(void);
-extern int mpc834x_usb_cfg(void);
-extern int mpc831x_usb_cfg(void);
+int __init mpc837x_usb_cfg(void);
+int __init mpc834x_usb_cfg(void);
+int __init mpc831x_usb_cfg(void);
extern void mpc83xx_ipic_init_IRQ(void);
#ifdef CONFIG_PCI
extern void mpc83xx_setup_pci(void);
#else
-#define mpc83xx_setup_pci() do {} while (0)
+#define mpc83xx_setup_pci NULL
#endif
extern int mpc83xx_declare_of_platform_devices(void);
diff --git a/arch/powerpc/platforms/83xx/suspend-asm.S b/arch/powerpc/platforms/83xx/suspend-asm.S
index 3acd7470dc5e..bc6bd4d0ae96 100644
--- a/arch/powerpc/platforms/83xx/suspend-asm.S
+++ b/arch/powerpc/platforms/83xx/suspend-asm.S
@@ -548,3 +548,4 @@ mpc83xx_deep_resume:
mtdec r0
rfi
+_ASM_NOKPROBE_SYMBOL(mpc83xx_deep_resume)
diff --git a/arch/powerpc/platforms/83xx/suspend.c b/arch/powerpc/platforms/83xx/suspend.c
index bb147d34d4a6..3fa8979ac8a6 100644
--- a/arch/powerpc/platforms/83xx/suspend.c
+++ b/arch/powerpc/platforms/83xx/suspend.c
@@ -100,7 +100,6 @@ struct pmc_type {
int has_deep_sleep;
};
-static struct platform_device *pmc_dev;
static int has_deep_sleep, deep_sleeping;
static int pmc_irq;
static struct mpc83xx_pmc __iomem *pmc_regs;
@@ -319,27 +318,43 @@ static const struct platform_suspend_ops mpc83xx_suspend_ops = {
.end = mpc83xx_suspend_end,
};
-static const struct of_device_id pmc_match[];
+static struct pmc_type pmc_types[] = {
+ {
+ .has_deep_sleep = 1,
+ },
+ {
+ .has_deep_sleep = 0,
+ }
+};
+
+static const struct of_device_id pmc_match[] = {
+ {
+ .compatible = "fsl,mpc8313-pmc",
+ .data = &pmc_types[0],
+ },
+ {
+ .compatible = "fsl,mpc8349-pmc",
+ .data = &pmc_types[1],
+ },
+ {}
+};
+
static int pmc_probe(struct platform_device *ofdev)
{
- const struct of_device_id *match;
struct device_node *np = ofdev->dev.of_node;
struct resource res;
const struct pmc_type *type;
int ret = 0;
- match = of_match_device(pmc_match, &ofdev->dev);
- if (!match)
+ type = of_device_get_match_data(&ofdev->dev);
+ if (!type)
return -EINVAL;
- type = match->data;
-
if (!of_device_is_available(np))
return -ENODEV;
has_deep_sleep = type->has_deep_sleep;
immrbase = get_immrbase();
- pmc_dev = ofdev;
is_pci_agent = mpc83xx_is_pci_agent();
if (is_pci_agent < 0)
@@ -404,39 +419,13 @@ out:
return ret;
}
-static int pmc_remove(struct platform_device *ofdev)
-{
- return -EPERM;
-};
-
-static struct pmc_type pmc_types[] = {
- {
- .has_deep_sleep = 1,
- },
- {
- .has_deep_sleep = 0,
- }
-};
-
-static const struct of_device_id pmc_match[] = {
- {
- .compatible = "fsl,mpc8313-pmc",
- .data = &pmc_types[0],
- },
- {
- .compatible = "fsl,mpc8349-pmc",
- .data = &pmc_types[1],
- },
- {}
-};
-
static struct platform_driver pmc_driver = {
.driver = {
.name = "mpc83xx-pmc",
.of_match_table = pmc_match,
+ .suppress_bind_attrs = true,
},
.probe = pmc_probe,
- .remove = pmc_remove
};
builtin_platform_driver(pmc_driver);
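The pmc driver rework above drops the forward-declared match table in favour of defining it before the probe and looking the per-SoC data up with of_device_get_match_data(); it also removes the -EPERM remove() stub and relies on suppress_bind_attrs instead. A minimal sketch of the match-data lookup, with placeholder qux_* names:

#include <linux/of_device.h>
#include <linux/platform_device.h>

struct qux_type {
	int has_deep_sleep;
};

static int qux_probe(struct platform_device *pdev)
{
	const struct qux_type *type;

	/* returns the .data member of the matching of_device_id, or NULL */
	type = of_device_get_match_data(&pdev->dev);
	if (!type)
		return -EINVAL;

	/* per-SoC capabilities, e.g. type->has_deep_sleep, drive the rest */
	return 0;
}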
diff --git a/arch/powerpc/platforms/83xx/usb.c b/arch/powerpc/platforms/83xx/usb.c
index 3d247d726ed5..e2a13a052f96 100644
--- a/arch/powerpc/platforms/83xx/usb.c
+++ b/arch/powerpc/platforms/83xx/usb.c
@@ -11,16 +11,16 @@
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/of.h>
+#include <linux/of_address.h>
#include <asm/io.h>
-#include <asm/prom.h>
#include <sysdev/fsl_soc.h>
#include "mpc83xx.h"
#ifdef CONFIG_PPC_MPC834x
-int mpc834x_usb_cfg(void)
+int __init mpc834x_usb_cfg(void)
{
unsigned long sccr, sicrl, sicrh;
void __iomem *immap;
@@ -96,7 +96,7 @@ int mpc834x_usb_cfg(void)
#endif /* CONFIG_PPC_MPC834x */
#ifdef CONFIG_PPC_MPC831x
-int mpc831x_usb_cfg(void)
+int __init mpc831x_usb_cfg(void)
{
u32 temp;
void __iomem *immap, *usb_regs;
@@ -209,7 +209,7 @@ out:
#endif /* CONFIG_PPC_MPC831x */
#ifdef CONFIG_PPC_MPC837x
-int mpc837x_usb_cfg(void)
+int __init mpc837x_usb_cfg(void)
{
void __iomem *immap;
struct device_node *np = NULL;
diff --git a/arch/powerpc/platforms/85xx/Kconfig b/arch/powerpc/platforms/85xx/Kconfig
index fa3d29dcb57e..b92cb2b4d54d 100644
--- a/arch/powerpc/platforms/85xx/Kconfig
+++ b/arch/powerpc/platforms/85xx/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
menuconfig FSL_SOC_BOOKE
bool "Freescale Book-E Machine Type"
- depends on PPC_85xx || PPC_BOOK3E
+ depends on PPC_E500
select FSL_SOC
select PPC_UDBG_16550
select MPIC
@@ -16,15 +16,6 @@ if FSL_SOC_BOOKE
if PPC32
-config FSL_85XX_CACHE_SRAM
- bool
- select PPC_LIB_RHEAP
- help
- When selected, this option enables cache-sram support
- for memory allocation on P1/P2 QorIQ platforms.
- cache-sram-size and cache-sram-offset kernel boot
- parameters should be passed when this option is enabled.
-
config BSC9131_RDB
bool "Freescale BSC9131RDB"
select DEFAULT_UIMAGE
@@ -71,13 +62,13 @@ config MPC85xx_CDS
This option enables support for the MPC85xx CDS board
config MPC85xx_MDS
- bool "Freescale MPC85xx MDS"
+ bool "Freescale MPC8568 MDS / MPC8569 MDS / P1021 MDS"
select DEFAULT_UIMAGE
select PHYLIB if NETDEVICES
select HAVE_RAPIDIO
select SWIOTLB
help
- This option enables support for the MPC85xx MDS board
+ This option enables support for the MPC8568 MDS, MPC8569 MDS and P1021 MDS boards
config MPC8536_DS
bool "Freescale MPC8536 DS"
@@ -87,28 +78,30 @@ config MPC8536_DS
This option enables support for the MPC8536 DS board
config MPC85xx_DS
- bool "Freescale MPC85xx DS"
+ bool "Freescale MPC8544 DS / MPC8572 DS / P2020 DS"
select PPC_I8259
select DEFAULT_UIMAGE
select FSL_ULI1575 if PCI
select SWIOTLB
help
- This option enables support for the MPC85xx DS (MPC8544 DS) board
+ This option enables support for the MPC8544 DS, MPC8572 DS and P2020 DS boards
config MPC85xx_RDB
- bool "Freescale MPC85xx RDB"
+ bool "Freescale P102x MBG/UTM/RDB and P2020 RDB"
select PPC_I8259
select DEFAULT_UIMAGE
select FSL_ULI1575 if PCI
select SWIOTLB
help
- This option enables support for the MPC85xx RDB (P2020 RDB) board
+ This option enables support for the P1020 MBG PC, P1020 UTM PC,
+ P1020 RDB PC, P1020 RDB PD, P1020 RDB, P1021 RDB PC, P1024 RDB,
+ P1025 RDB, P2020 RDB and P2020 RDB PC boards
config P1010_RDB
- bool "Freescale P1010RDB"
+ bool "Freescale P1010 RDB"
select DEFAULT_UIMAGE
help
- This option enables support for the MPC85xx RDB (P1010 RDB) board
+ This option enables support for the P1010 RDB board
P1010RDB contains P1010Si, which provides CPU performance up to 800
MHz and 1600 DMIPS, additional functionality and faster interfaces
@@ -160,7 +153,7 @@ config XES_MPC85xx
computers from Extreme Engineering Solutions (X-ES) based on
Freescale MPC85xx processors.
Manufacturer: Extreme Engineering Solutions, Inc.
- URL: <http://www.xes-inc.com/>
+ URL: <https://www.xes-inc.com/>
config STX_GP3
bool "Silicon Turnkey Express GP3"
@@ -208,12 +201,6 @@ config TQM8560
select TQM85xx
select CPM2
-config SBC8548
- bool "Wind River SBC8548"
- select DEFAULT_UIMAGE
- help
- This option enables support for the Wind River SBC8548 board
-
config PPA8548
bool "Prodrive PPA8548"
help
@@ -254,8 +241,6 @@ endif # PPC32
config PPC_QEMU_E500
bool "QEMU generic e500 platform"
select DEFAULT_UIMAGE
- select E500
- select PPC_E500MC if PPC64
help
This option enables support for running as a QEMU guest using
QEMU's generic e500 machine. This is not required if you're
@@ -271,7 +256,6 @@ config PPC_QEMU_E500
config CORENET_GENERIC
bool "Freescale CoreNet Generic"
select DEFAULT_UIMAGE
- select E500
select PPC_E500MC
select PHYS_64BIT
select SWIOTLB
diff --git a/arch/powerpc/platforms/85xx/Makefile b/arch/powerpc/platforms/85xx/Makefile
index d1dd0dca5ebf..260fbad7967b 100644
--- a/arch/powerpc/platforms/85xx/Makefile
+++ b/arch/powerpc/platforms/85xx/Makefile
@@ -3,7 +3,9 @@
# Makefile for the PowerPC 85xx linux kernel.
#
obj-$(CONFIG_SMP) += smp.o
-obj-$(CONFIG_FSL_PMC) += mpc85xx_pm_ops.o
+ifneq ($(CONFIG_FSL_CORENET_RCPM),y)
+obj-$(CONFIG_SMP) += mpc85xx_pm_ops.o
+endif
obj-y += common.o
@@ -26,7 +28,6 @@ obj-$(CONFIG_CORENET_GENERIC) += corenet_generic.o
obj-$(CONFIG_FB_FSL_DIU) += t1042rdb_diu.o
obj-$(CONFIG_STX_GP3) += stx_gp3.o
obj-$(CONFIG_TQM85xx) += tqm85xx.o
-obj-$(CONFIG_SBC8548) += sbc8548.o
obj-$(CONFIG_PPA8548) += ppa8548.o
obj-$(CONFIG_SOCRATES) += socrates.o socrates_fpga_pic.o
obj-$(CONFIG_KSI8560) += ksi8560.o
diff --git a/arch/powerpc/platforms/85xx/c293pcie.c b/arch/powerpc/platforms/85xx/c293pcie.c
index 8d9a2503dd0f..58a398c89e97 100644
--- a/arch/powerpc/platforms/85xx/c293pcie.c
+++ b/arch/powerpc/platforms/85xx/c293pcie.c
@@ -19,7 +19,7 @@
#include "mpc85xx.h"
-void __init c293_pcie_pic_init(void)
+static void __init c293_pcie_pic_init(void)
{
struct mpic *mpic = mpic_alloc(NULL, 0, MPIC_BIG_ENDIAN |
MPIC_SINGLE_DEST_CPU, 0, 256, " OpenPIC ");
diff --git a/arch/powerpc/platforms/85xx/corenet_generic.c b/arch/powerpc/platforms/85xx/corenet_generic.c
index 27ac38f7e1a9..2c539de2d629 100644
--- a/arch/powerpc/platforms/85xx/corenet_generic.c
+++ b/arch/powerpc/platforms/85xx/corenet_generic.c
@@ -12,14 +12,13 @@
#include <linux/kdev_t.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
+#include <linux/pgtable.h>
#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/pci-bridge.h>
-#include <asm/pgtable.h>
#include <asm/ppc-pci.h>
#include <mm/mmu_decl.h>
-#include <asm/prom.h>
#include <asm/udbg.h>
#include <asm/mpic.h>
#include <asm/ehv_pic.h>
@@ -37,7 +36,7 @@ void __init corenet_gen_pic_init(void)
unsigned int flags = MPIC_BIG_ENDIAN | MPIC_SINGLE_DEST_CPU |
MPIC_NO_RESET;
- if (ppc_md.get_irq == mpic_get_coreint_irq)
+ if (!IS_ENABLED(CONFIG_HOTPLUG_CPU) && !IS_ENABLED(CONFIG_KEXEC_CORE))
flags |= MPIC_ENABLE_COREINT;
mpic = mpic_alloc(NULL, 0, flags, 0, 512, " OpenPIC ");
@@ -106,6 +105,7 @@ int __init corenet_gen_publish_devices(void)
{
return of_platform_bus_probe(NULL, of_device_ids, NULL);
}
+machine_arch_initcall(corenet_generic, corenet_gen_publish_devices);
static const char * const boards[] __initconst = {
"fsl,P2041RDB",
@@ -200,11 +200,5 @@ define_machine(corenet_generic) {
#endif
.calibrate_decr = generic_calibrate_decr,
.progress = udbg_progress,
-#ifdef CONFIG_PPC64
- .power_save = book3e_idle,
-#else
.power_save = e500_idle,
-#endif
};
-
-machine_arch_initcall(corenet_generic, corenet_gen_publish_devices);
diff --git a/arch/powerpc/platforms/85xx/ge_imp3a.c b/arch/powerpc/platforms/85xx/ge_imp3a.c
index 83a0f7a1f0de..e3e8f18825a1 100644
--- a/arch/powerpc/platforms/85xx/ge_imp3a.c
+++ b/arch/powerpc/platforms/85xx/ge_imp3a.c
@@ -17,13 +17,13 @@
#include <linux/delay.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
+#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/pci-bridge.h>
#include <mm/mmu_decl.h>
-#include <asm/prom.h>
#include <asm/udbg.h>
#include <asm/mpic.h>
#include <asm/swiotlb.h>
@@ -78,7 +78,7 @@ void __init ge_imp3a_pic_init(void)
of_node_put(cascade_node);
}
-static void ge_imp3a_pci_assign_primary(void)
+static void __init ge_imp3a_pci_assign_primary(void)
{
#ifdef CONFIG_PCI
struct device_node *np;
@@ -89,8 +89,10 @@ static void ge_imp3a_pci_assign_primary(void)
of_device_is_compatible(np, "fsl,mpc8548-pcie") ||
of_device_is_compatible(np, "fsl,p2020-pcie")) {
of_address_to_resource(np, 0, &rsrc);
- if ((rsrc.start & 0xfffff) == 0x9000)
- fsl_pci_primary = np;
+ if ((rsrc.start & 0xfffff) == 0x9000) {
+ of_node_put(fsl_pci_primary);
+ fsl_pci_primary = of_node_get(np);
+ }
}
}
#endif
diff --git a/arch/powerpc/platforms/85xx/ksi8560.c b/arch/powerpc/platforms/85xx/ksi8560.c
index 6ef8580fdc0e..a22f02b0fc77 100644
--- a/arch/powerpc/platforms/85xx/ksi8560.c
+++ b/arch/powerpc/platforms/85xx/ksi8560.c
@@ -26,7 +26,6 @@
#include <asm/mpic.h>
#include <mm/mmu_decl.h>
#include <asm/udbg.h>
-#include <asm/prom.h>
#include <sysdev/fsl_soc.h>
#include <sysdev/fsl_pci.h>
@@ -134,6 +133,8 @@ static void __init ksi8560_setup_arch(void)
else
printk(KERN_ERR "Can't find CPLD in device tree\n");
+ of_node_put(cpld);
+
if (ppc_md.progress)
ppc_md.progress("ksi8560_setup_arch()", 0);
diff --git a/arch/powerpc/platforms/85xx/mpc8536_ds.c b/arch/powerpc/platforms/85xx/mpc8536_ds.c
index 53bccb8bbcbe..e5d7386ad612 100644
--- a/arch/powerpc/platforms/85xx/mpc8536_ds.c
+++ b/arch/powerpc/platforms/85xx/mpc8536_ds.c
@@ -18,7 +18,6 @@
#include <asm/machdep.h>
#include <asm/pci-bridge.h>
#include <mm/mmu_decl.h>
-#include <asm/prom.h>
#include <asm/udbg.h>
#include <asm/mpic.h>
#include <asm/swiotlb.h>
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_cds.c b/arch/powerpc/platforms/85xx/mpc85xx_cds.c
index 6b1436abe9b1..0b8f2101c5fb 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_cds.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_cds.c
@@ -21,9 +21,11 @@
#include <linux/initrd.h>
#include <linux/interrupt.h>
#include <linux/fsl_devices.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
+#include <linux/pgtable.h>
-#include <asm/pgtable.h>
#include <asm/page.h>
#include <linux/atomic.h>
#include <asm/time.h>
@@ -33,7 +35,6 @@
#include <asm/pci-bridge.h>
#include <asm/irq.h>
#include <mm/mmu_decl.h>
-#include <asm/prom.h>
#include <asm/udbg.h>
#include <asm/mpic.h>
#include <asm/i8259.h>
@@ -151,13 +152,14 @@ static void __init mpc85xx_cds_pci_irq_fixup(struct pci_dev *dev)
*/
case PCI_DEVICE_ID_VIA_82C586_2:
/* There are two USB controllers.
- * Identify them by functon number
+ * Identify them by function number
*/
if (PCI_FUNC(dev->devfn) == 3)
dev->irq = 11;
else
dev->irq = 10;
pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq);
+ break;
default:
break;
}
@@ -218,12 +220,6 @@ static irqreturn_t mpc85xx_8259_cascade_action(int irq, void *dev_id)
{
return IRQ_HANDLED;
}
-
-static struct irqaction mpc85xxcds_8259_irqaction = {
- .handler = mpc85xx_8259_cascade_action,
- .flags = IRQF_SHARED | IRQF_NO_THREAD,
- .name = "8259 cascade",
-};
#endif /* PPC_I8259 */
#endif /* CONFIG_PCI */
@@ -271,7 +267,10 @@ static int mpc85xx_cds_8259_attach(void)
* disabled when the last user of the shared IRQ line frees their
* interrupt.
*/
- if ((ret = setup_irq(cascade_irq, &mpc85xxcds_8259_irqaction))) {
+ ret = request_irq(cascade_irq, mpc85xx_8259_cascade_action,
+ IRQF_SHARED | IRQF_NO_THREAD, "8259 cascade",
+ cascade_node);
+ if (ret) {
printk(KERN_ERR "Failed to setup cascade interrupt\n");
return ret;
}
@@ -285,7 +284,7 @@ machine_device_initcall(mpc85xx_cds, mpc85xx_cds_8259_attach);
#endif /* CONFIG_PPC_I8259 */
-static void mpc85xx_cds_pci_assign_primary(void)
+static void __init mpc85xx_cds_pci_assign_primary(void)
{
#ifdef CONFIG_PCI
struct device_node *np;
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_ds.c b/arch/powerpc/platforms/85xx/mpc85xx_ds.c
index 2157a8017aa4..f8d2c97f39bd 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_ds.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_ds.c
@@ -15,13 +15,13 @@
#include <linux/delay.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/pci-bridge.h>
#include <mm/mmu_decl.h>
-#include <asm/prom.h>
#include <asm/udbg.h>
#include <asm/mpic.h>
#include <asm/i8259.h>
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_mds.c b/arch/powerpc/platforms/85xx/mpc85xx_mds.c
index 7759eca7d535..3a2ac410af18 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_mds.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_mds.c
@@ -39,7 +39,6 @@
#include <asm/pci-bridge.h>
#include <asm/irq.h>
#include <mm/mmu_decl.h>
-#include <asm/prom.h>
#include <asm/udbg.h>
#include <sysdev/fsl_soc.h>
#include <sysdev/fsl_pci.h>
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_pm_ops.c b/arch/powerpc/platforms/85xx/mpc85xx_pm_ops.c
index 7c0133f558d0..f7ac92a8ae97 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_pm_ops.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_pm_ops.c
@@ -15,8 +15,11 @@
#include <asm/io.h>
#include <asm/fsl_pm.h>
+#include "smp.h"
+
static struct ccsr_guts __iomem *guts;
+#ifdef CONFIG_FSL_PMC
static void mpc85xx_irq_mask(int cpu)
{
@@ -49,6 +52,7 @@ static void mpc85xx_cpu_up_prepare(int cpu)
{
}
+#endif
static void mpc85xx_freeze_time_base(bool freeze)
{
@@ -76,10 +80,12 @@ static const struct of_device_id mpc85xx_smp_guts_ids[] = {
static const struct fsl_pm_ops mpc85xx_pm_ops = {
.freeze_time_base = mpc85xx_freeze_time_base,
+#ifdef CONFIG_FSL_PMC
.irq_mask = mpc85xx_irq_mask,
.irq_unmask = mpc85xx_irq_unmask,
.cpu_die = mpc85xx_cpu_die,
.cpu_up_prepare = mpc85xx_cpu_up_prepare,
+#endif
};
int __init mpc85xx_setup_pmc(void)
@@ -94,9 +100,8 @@ int __init mpc85xx_setup_pmc(void)
pr_err("Could not map guts node address\n");
return -ENOMEM;
}
+ qoriq_pm_ops = &mpc85xx_pm_ops;
}
- qoriq_pm_ops = &mpc85xx_pm_ops;
-
return 0;
}
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_rdb.c b/arch/powerpc/platforms/85xx/mpc85xx_rdb.c
index 80a80174768c..d99aba158235 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_rdb.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_rdb.c
@@ -19,7 +19,6 @@
#include <asm/machdep.h>
#include <asm/pci-bridge.h>
#include <mm/mmu_decl.h>
-#include <asm/prom.h>
#include <asm/udbg.h>
#include <asm/mpic.h>
#include <soc/fsl/qe/qe.h>
diff --git a/arch/powerpc/platforms/85xx/p1010rdb.c b/arch/powerpc/platforms/85xx/p1010rdb.c
index 24855284b14a..8ba9306a96b6 100644
--- a/arch/powerpc/platforms/85xx/p1010rdb.c
+++ b/arch/powerpc/platforms/85xx/p1010rdb.c
@@ -16,7 +16,6 @@
#include <asm/machdep.h>
#include <asm/pci-bridge.h>
#include <mm/mmu_decl.h>
-#include <asm/prom.h>
#include <asm/udbg.h>
#include <asm/mpic.h>
diff --git a/arch/powerpc/platforms/85xx/p1022_ds.c b/arch/powerpc/platforms/85xx/p1022_ds.c
index 1f1af0557470..537599906146 100644
--- a/arch/powerpc/platforms/85xx/p1022_ds.c
+++ b/arch/powerpc/platforms/85xx/p1022_ds.c
@@ -18,6 +18,7 @@
#include <linux/fsl/guts.h>
#include <linux/pci.h>
+#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <asm/div64.h>
#include <asm/mpic.h>
diff --git a/arch/powerpc/platforms/85xx/p1022_rdk.c b/arch/powerpc/platforms/85xx/p1022_rdk.c
index fd9e3e7ef234..bc58a99164c9 100644
--- a/arch/powerpc/platforms/85xx/p1022_rdk.c
+++ b/arch/powerpc/platforms/85xx/p1022_rdk.c
@@ -14,6 +14,7 @@
#include <linux/fsl/guts.h>
#include <linux/pci.h>
+#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <asm/div64.h>
#include <asm/mpic.h>
diff --git a/arch/powerpc/platforms/85xx/p1023_rdb.c b/arch/powerpc/platforms/85xx/p1023_rdb.c
index 3b9cc4979641..c04868eb2eb1 100644
--- a/arch/powerpc/platforms/85xx/p1023_rdb.c
+++ b/arch/powerpc/platforms/85xx/p1023_rdb.c
@@ -15,6 +15,7 @@
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/fsl_devices.h>
+#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/of_device.h>
@@ -22,7 +23,6 @@
#include <asm/machdep.h>
#include <asm/pci-bridge.h>
#include <mm/mmu_decl.h>
-#include <asm/prom.h>
#include <asm/udbg.h>
#include <asm/mpic.h>
#include "smp.h"
diff --git a/arch/powerpc/platforms/85xx/qemu_e500.c b/arch/powerpc/platforms/85xx/qemu_e500.c
index a43b8c30157c..1639e222cc33 100644
--- a/arch/powerpc/platforms/85xx/qemu_e500.c
+++ b/arch/powerpc/platforms/85xx/qemu_e500.c
@@ -12,9 +12,10 @@
*/
#include <linux/kernel.h>
+#include <linux/of.h>
#include <linux/of_fdt.h>
+#include <linux/pgtable.h>
#include <asm/machdep.h>
-#include <asm/pgtable.h>
#include <asm/time.h>
#include <asm/udbg.h>
#include <asm/mpic.h>
@@ -67,4 +68,5 @@ define_machine(qemu_e500) {
.get_irq = mpic_get_coreint_irq,
.calibrate_decr = generic_calibrate_decr,
.progress = udbg_progress,
+ .power_save = e500_idle,
};
diff --git a/arch/powerpc/platforms/85xx/sbc8548.c b/arch/powerpc/platforms/85xx/sbc8548.c
deleted file mode 100644
index dd97ef277276..000000000000
--- a/arch/powerpc/platforms/85xx/sbc8548.c
+++ /dev/null
@@ -1,134 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Wind River SBC8548 setup and early boot code.
- *
- * Copyright 2007 Wind River Systems Inc.
- *
- * By Paul Gortmaker (see MAINTAINERS for contact information)
- *
- * Based largely on the MPC8548CDS support - Copyright 2005 Freescale Inc.
- */
-
-#include <linux/stddef.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/errno.h>
-#include <linux/reboot.h>
-#include <linux/pci.h>
-#include <linux/kdev_t.h>
-#include <linux/major.h>
-#include <linux/console.h>
-#include <linux/delay.h>
-#include <linux/seq_file.h>
-#include <linux/initrd.h>
-#include <linux/interrupt.h>
-#include <linux/fsl_devices.h>
-#include <linux/of_platform.h>
-
-#include <asm/pgtable.h>
-#include <asm/page.h>
-#include <linux/atomic.h>
-#include <asm/time.h>
-#include <asm/io.h>
-#include <asm/machdep.h>
-#include <asm/ipic.h>
-#include <asm/pci-bridge.h>
-#include <asm/irq.h>
-#include <mm/mmu_decl.h>
-#include <asm/prom.h>
-#include <asm/udbg.h>
-#include <asm/mpic.h>
-
-#include <sysdev/fsl_soc.h>
-#include <sysdev/fsl_pci.h>
-
-#include "mpc85xx.h"
-
-static int sbc_rev;
-
-static void __init sbc8548_pic_init(void)
-{
- struct mpic *mpic = mpic_alloc(NULL, 0, MPIC_BIG_ENDIAN,
- 0, 256, " OpenPIC ");
- BUG_ON(mpic == NULL);
- mpic_init(mpic);
-}
-
-/* Extract the HW Rev from the EPLD on the board */
-static int __init sbc8548_hw_rev(void)
-{
- struct device_node *np;
- struct resource res;
- unsigned int *rev;
- int board_rev = 0;
-
- np = of_find_compatible_node(NULL, NULL, "hw-rev");
- if (np == NULL) {
- printk("No HW-REV found in DTB.\n");
- return -ENODEV;
- }
-
- of_address_to_resource(np, 0, &res);
- of_node_put(np);
-
- rev = ioremap(res.start,sizeof(unsigned int));
- board_rev = (*rev) >> 28;
- iounmap(rev);
-
- return board_rev;
-}
-
-/*
- * Setup the architecture
- */
-static void __init sbc8548_setup_arch(void)
-{
- if (ppc_md.progress)
- ppc_md.progress("sbc8548_setup_arch()", 0);
-
- fsl_pci_assign_primary();
-
- sbc_rev = sbc8548_hw_rev();
-}
-
-static void sbc8548_show_cpuinfo(struct seq_file *m)
-{
- uint pvid, svid, phid1;
-
- pvid = mfspr(SPRN_PVR);
- svid = mfspr(SPRN_SVR);
-
- seq_printf(m, "Vendor\t\t: Wind River\n");
- seq_printf(m, "Machine\t\t: SBC8548 v%d\n", sbc_rev);
- seq_printf(m, "PVR\t\t: 0x%x\n", pvid);
- seq_printf(m, "SVR\t\t: 0x%x\n", svid);
-
- /* Display cpu Pll setting */
- phid1 = mfspr(SPRN_HID1);
- seq_printf(m, "PLL setting\t: 0x%x\n", ((phid1 >> 24) & 0x3f));
-}
-
-machine_arch_initcall(sbc8548, mpc85xx_common_publish_devices);
-
-/*
- * Called very early, device-tree isn't unflattened
- */
-static int __init sbc8548_probe(void)
-{
- return of_machine_is_compatible("SBC8548");
-}
-
-define_machine(sbc8548) {
- .name = "SBC8548",
- .probe = sbc8548_probe,
- .setup_arch = sbc8548_setup_arch,
- .init_IRQ = sbc8548_pic_init,
- .show_cpuinfo = sbc8548_show_cpuinfo,
- .get_irq = mpic_get_irq,
-#ifdef CONFIG_PCI
- .pcibios_fixup_bus = fsl_pcibios_fixup_bus,
- .pcibios_fixup_phb = fsl_pcibios_fixup_phb,
-#endif
- .calibrate_decr = generic_calibrate_decr,
- .progress = udbg_progress,
-};
diff --git a/arch/powerpc/platforms/85xx/sgy_cts1000.c b/arch/powerpc/platforms/85xx/sgy_cts1000.c
index 98ae64075193..e14d1b74d4e4 100644
--- a/arch/powerpc/platforms/85xx/sgy_cts1000.c
+++ b/arch/powerpc/platforms/85xx/sgy_cts1000.c
@@ -71,6 +71,7 @@ static int gpio_halt_probe(struct platform_device *pdev)
{
enum of_gpio_flags flags;
struct device_node *node = pdev->dev.of_node;
+ struct device_node *child_node;
int gpio, err, irq;
int trigger;
@@ -78,26 +79,29 @@ static int gpio_halt_probe(struct platform_device *pdev)
return -ENODEV;
/* If there's no matching child, this isn't really an error */
- halt_node = of_find_matching_node(node, child_match);
- if (!halt_node)
+ child_node = of_find_matching_node(node, child_match);
+ if (!child_node)
return 0;
/* Technically we could just read the first one, but punish
* DT writers for invalid form. */
- if (of_gpio_count(halt_node) != 1)
- return -EINVAL;
+ if (of_gpio_count(child_node) != 1) {
+ err = -EINVAL;
+ goto err_put;
+ }
/* Get the gpio number relative to the dynamic base. */
- gpio = of_get_gpio_flags(halt_node, 0, &flags);
- if (!gpio_is_valid(gpio))
- return -EINVAL;
+ gpio = of_get_gpio_flags(child_node, 0, &flags);
+ if (!gpio_is_valid(gpio)) {
+ err = -EINVAL;
+ goto err_put;
+ }
err = gpio_request(gpio, "gpio-halt");
if (err) {
printk(KERN_ERR "gpio-halt: error requesting GPIO %d.\n",
gpio);
- halt_node = NULL;
- return err;
+ goto err_put;
}
trigger = (flags == OF_GPIO_ACTIVE_LOW);
@@ -105,15 +109,14 @@ static int gpio_halt_probe(struct platform_device *pdev)
gpio_direction_output(gpio, !trigger);
/* Now get the IRQ which tells us when the power button is hit */
- irq = irq_of_parse_and_map(halt_node, 0);
+ irq = irq_of_parse_and_map(child_node, 0);
err = request_irq(irq, gpio_halt_irq, IRQF_TRIGGER_RISING |
- IRQF_TRIGGER_FALLING, "gpio-halt", halt_node);
+ IRQF_TRIGGER_FALLING, "gpio-halt", child_node);
if (err) {
printk(KERN_ERR "gpio-halt: error requesting IRQ %d for "
"GPIO %d.\n", irq, gpio);
gpio_free(gpio);
- halt_node = NULL;
- return err;
+ goto err_put;
}
/* Register our halt function */
@@ -123,7 +126,12 @@ static int gpio_halt_probe(struct platform_device *pdev)
printk(KERN_INFO "gpio-halt: registered GPIO %d (%d trigger, %d"
" irq).\n", gpio, trigger, irq);
+ halt_node = child_node;
return 0;
+
+err_put:
+ of_node_put(child_node);
+ return err;
}
static int gpio_halt_remove(struct platform_device *pdev)
@@ -139,6 +147,7 @@ static int gpio_halt_remove(struct platform_device *pdev)
gpio_free(gpio);
+ of_node_put(halt_node);
halt_node = NULL;
}
diff --git a/arch/powerpc/platforms/85xx/smp.c b/arch/powerpc/platforms/85xx/smp.c
index 48f7d96ae37d..9c43cf32f4c9 100644
--- a/arch/powerpc/platforms/85xx/smp.c
+++ b/arch/powerpc/platforms/85xx/smp.c
@@ -16,9 +16,9 @@
#include <linux/highmem.h>
#include <linux/cpu.h>
#include <linux/fsl/guts.h>
+#include <linux/pgtable.h>
#include <asm/machdep.h>
-#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/mpic.h>
#include <asm/cacheflush.h>
@@ -40,7 +40,6 @@ struct epapr_spin_table {
u32 pir;
};
-#ifdef CONFIG_HOTPLUG_CPU
static u64 timebase;
static int tb_req;
static int tb_valid;
@@ -112,7 +111,8 @@ static void mpc85xx_take_timebase(void)
local_irq_restore(flags);
}
-static void smp_85xx_mach_cpu_die(void)
+#ifdef CONFIG_HOTPLUG_CPU
+static void smp_85xx_cpu_offline_self(void)
{
unsigned int cpu = smp_processor_id();
@@ -208,7 +208,7 @@ static int smp_85xx_start_cpu(int cpu)
* The bootpage and highmem can be accessed via ioremap(), but
* we need to directly access the spinloop if its in lowmem.
*/
- ioremappable = *cpu_rel_addr > virt_to_phys(high_memory);
+ ioremappable = *cpu_rel_addr > virt_to_phys(high_memory - 1);
/* Map the spin table */
if (ioremappable)
@@ -220,7 +220,7 @@ static int smp_85xx_start_cpu(int cpu)
local_irq_save(flags);
hard_irq_disable();
- if (qoriq_pm_ops)
+ if (qoriq_pm_ops && qoriq_pm_ops->cpu_up_prepare)
qoriq_pm_ops->cpu_up_prepare(cpu);
/* if cpu is not spinning, reset it */
@@ -292,7 +292,7 @@ static int smp_85xx_kick_cpu(int nr)
booting_thread_hwid = cpu_thread_in_core(nr);
primary = cpu_first_thread_sibling(nr);
- if (qoriq_pm_ops)
+ if (qoriq_pm_ops && qoriq_pm_ops->cpu_up_prepare)
qoriq_pm_ops->cpu_up_prepare(nr);
/*
@@ -366,7 +366,7 @@ struct smp_ops_t smp_85xx_ops = {
#ifdef CONFIG_PPC32
atomic_t kexec_down_cpus = ATOMIC_INIT(0);
-void mpc85xx_smp_kexec_cpu_down(int crash_shutdown, int secondary)
+static void mpc85xx_smp_kexec_cpu_down(int crash_shutdown, int secondary)
{
local_irq_disable();
@@ -384,7 +384,7 @@ static void mpc85xx_smp_kexec_down(void *arg)
ppc_md.kexec_cpu_down(0,1);
}
#else
-void mpc85xx_smp_kexec_cpu_down(int crash_shutdown, int secondary)
+static void mpc85xx_smp_kexec_cpu_down(int crash_shutdown, int secondary)
{
int cpu = smp_processor_id();
int sibling = cpu_last_thread_sibling(cpu);
@@ -495,21 +495,21 @@ void __init mpc85xx_smp_init(void)
smp_85xx_ops.probe = NULL;
}
-#ifdef CONFIG_HOTPLUG_CPU
#ifdef CONFIG_FSL_CORENET_RCPM
+ /* Assign a value to qoriq_pm_ops on PPC_E500MC */
fsl_rcpm_init();
-#endif
-
-#ifdef CONFIG_FSL_PMC
+#else
+ /* Assign a value to qoriq_pm_ops on !PPC_E500MC */
mpc85xx_setup_pmc();
#endif
if (qoriq_pm_ops) {
smp_85xx_ops.give_timebase = mpc85xx_give_timebase;
smp_85xx_ops.take_timebase = mpc85xx_take_timebase;
- ppc_md.cpu_die = smp_85xx_mach_cpu_die;
+#ifdef CONFIG_HOTPLUG_CPU
+ smp_85xx_ops.cpu_offline_self = smp_85xx_cpu_offline_self;
smp_85xx_ops.cpu_die = qoriq_cpu_kill;
- }
#endif
+ }
smp_ops = &smp_85xx_ops;
#ifdef CONFIG_KEXEC_CORE
diff --git a/arch/powerpc/platforms/85xx/socrates.c b/arch/powerpc/platforms/85xx/socrates.c
index 166b3515ba73..09f64470c765 100644
--- a/arch/powerpc/platforms/85xx/socrates.c
+++ b/arch/powerpc/platforms/85xx/socrates.c
@@ -29,7 +29,6 @@
#include <asm/machdep.h>
#include <asm/pci-bridge.h>
#include <asm/mpic.h>
-#include <asm/prom.h>
#include <mm/mmu_decl.h>
#include <asm/udbg.h>
diff --git a/arch/powerpc/platforms/85xx/socrates_fpga_pic.c b/arch/powerpc/platforms/85xx/socrates_fpga_pic.c
index 199a137c0ddb..3768c86b9629 100644
--- a/arch/powerpc/platforms/85xx/socrates_fpga_pic.c
+++ b/arch/powerpc/platforms/85xx/socrates_fpga_pic.c
@@ -271,7 +271,7 @@ static const struct irq_domain_ops socrates_fpga_pic_host_ops = {
.xlate = socrates_fpga_pic_host_xlate,
};
-void socrates_fpga_pic_init(struct device_node *pic)
+void __init socrates_fpga_pic_init(struct device_node *pic)
{
unsigned long flags;
int i;
diff --git a/arch/powerpc/platforms/85xx/socrates_fpga_pic.h b/arch/powerpc/platforms/85xx/socrates_fpga_pic.h
index c592b8bc94dd..c50b23794a06 100644
--- a/arch/powerpc/platforms/85xx/socrates_fpga_pic.h
+++ b/arch/powerpc/platforms/85xx/socrates_fpga_pic.h
@@ -6,6 +6,6 @@
#ifndef SOCRATES_FPGA_PIC_H
#define SOCRATES_FPGA_PIC_H
-void socrates_fpga_pic_init(struct device_node *pic);
+void __init socrates_fpga_pic_init(struct device_node *pic);
#endif
diff --git a/arch/powerpc/platforms/85xx/stx_gp3.c b/arch/powerpc/platforms/85xx/stx_gp3.c
index 69e917e3ba1c..6b1fe7bb3a8c 100644
--- a/arch/powerpc/platforms/85xx/stx_gp3.c
+++ b/arch/powerpc/platforms/85xx/stx_gp3.c
@@ -28,7 +28,6 @@
#include <asm/machdep.h>
#include <asm/pci-bridge.h>
#include <asm/mpic.h>
-#include <asm/prom.h>
#include <mm/mmu_decl.h>
#include <asm/udbg.h>
diff --git a/arch/powerpc/platforms/85xx/tqm85xx.c b/arch/powerpc/platforms/85xx/tqm85xx.c
index 95a1a1118a31..d187f4b8bff6 100644
--- a/arch/powerpc/platforms/85xx/tqm85xx.c
+++ b/arch/powerpc/platforms/85xx/tqm85xx.c
@@ -26,7 +26,6 @@
#include <asm/machdep.h>
#include <asm/pci-bridge.h>
#include <asm/mpic.h>
-#include <asm/prom.h>
#include <mm/mmu_decl.h>
#include <asm/udbg.h>
diff --git a/arch/powerpc/platforms/85xx/xes_mpc85xx.c b/arch/powerpc/platforms/85xx/xes_mpc85xx.c
index d54e1ae56997..5836e4ecb7a0 100644
--- a/arch/powerpc/platforms/85xx/xes_mpc85xx.c
+++ b/arch/powerpc/platforms/85xx/xes_mpc85xx.c
@@ -16,13 +16,13 @@
#include <linux/delay.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
+#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/pci-bridge.h>
#include <mm/mmu_decl.h>
-#include <asm/prom.h>
#include <asm/udbg.h>
#include <asm/mpic.h>
@@ -45,7 +45,7 @@ void __init xes_mpc85xx_pic_init(void)
mpic_init(mpic);
}
-static void xes_mpc85xx_configure_l2(void __iomem *l2_base)
+static void __init xes_mpc85xx_configure_l2(void __iomem *l2_base)
{
volatile uint32_t ctl, tmp;
@@ -72,7 +72,7 @@ static void xes_mpc85xx_configure_l2(void __iomem *l2_base)
asm volatile("msync; isync");
}
-static void xes_mpc85xx_fixups(void)
+static void __init xes_mpc85xx_fixups(void)
{
struct device_node *np;
int err;
diff --git a/arch/powerpc/platforms/86xx/Kconfig b/arch/powerpc/platforms/86xx/Kconfig
index 07a9d60c618a..be867abebc83 100644
--- a/arch/powerpc/platforms/86xx/Kconfig
+++ b/arch/powerpc/platforms/86xx/Kconfig
@@ -20,12 +20,6 @@ config MPC8641_HPCN
help
This option enables support for the MPC8641 HPCN board.
-config SBC8641D
- bool "Wind River SBC8641D"
- select DEFAULT_UIMAGE
- help
- This option enables support for the WRS SBC8641D board.
-
config MPC8610_HPCD
bool "Freescale MPC8610 HPCD"
select DEFAULT_UIMAGE
@@ -74,7 +68,7 @@ config MPC8641
select FSL_PCI if PCI
select PPC_UDBG_16550
select MPIC
- default y if MPC8641_HPCN || SBC8641D || GEF_SBC610 || GEF_SBC310 || GEF_PPC9A \
+ default y if MPC8641_HPCN || GEF_SBC610 || GEF_SBC310 || GEF_PPC9A \
|| MVME7100
config MPC8610
diff --git a/arch/powerpc/platforms/86xx/Makefile b/arch/powerpc/platforms/86xx/Makefile
index 2c04449be107..5bbe1475bf26 100644
--- a/arch/powerpc/platforms/86xx/Makefile
+++ b/arch/powerpc/platforms/86xx/Makefile
@@ -6,7 +6,6 @@
obj-y := pic.o common.o
obj-$(CONFIG_SMP) += mpc86xx_smp.o
obj-$(CONFIG_MPC8641_HPCN) += mpc86xx_hpcn.o
-obj-$(CONFIG_SBC8641D) += sbc8641d.o
obj-$(CONFIG_MPC8610_HPCD) += mpc8610_hpcd.o
obj-$(CONFIG_GEF_SBC610) += gef_sbc610.o
obj-$(CONFIG_GEF_SBC310) += gef_sbc310.o
diff --git a/arch/powerpc/platforms/86xx/gef_ppc9a.c b/arch/powerpc/platforms/86xx/gef_ppc9a.c
index 44bbbc535e1d..8e358fa0bc41 100644
--- a/arch/powerpc/platforms/86xx/gef_ppc9a.c
+++ b/arch/powerpc/platforms/86xx/gef_ppc9a.c
@@ -18,12 +18,12 @@
#include <linux/kdev_t.h>
#include <linux/delay.h>
#include <linux/seq_file.h>
+#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/pci-bridge.h>
-#include <asm/prom.h>
#include <mm/mmu_decl.h>
#include <asm/udbg.h>
@@ -180,7 +180,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_USB,
*
* This function is called to determine whether the BSP is compatible with the
* supplied device-tree, which is assumed to be the correct one for the actual
- * board. It is expected thati, in the future, a kernel may support multiple
+ * board. It is expected that, in the future, a kernel may support multiple
* boards.
*/
static int __init gef_ppc9a_probe(void)
diff --git a/arch/powerpc/platforms/86xx/gef_sbc310.c b/arch/powerpc/platforms/86xx/gef_sbc310.c
index 46d6d3d4957a..b5b2733567cb 100644
--- a/arch/powerpc/platforms/86xx/gef_sbc310.c
+++ b/arch/powerpc/platforms/86xx/gef_sbc310.c
@@ -18,12 +18,12 @@
#include <linux/kdev_t.h>
#include <linux/delay.h>
#include <linux/seq_file.h>
+#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/pci-bridge.h>
-#include <asm/prom.h>
#include <mm/mmu_decl.h>
#include <asm/udbg.h>
@@ -167,7 +167,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_USB,
*
* This function is called to determine whether the BSP is compatible with the
* supplied device-tree, which is assumed to be the correct one for the actual
- * board. It is expected thati, in the future, a kernel may support multiple
+ * board. It is expected that, in the future, a kernel may support multiple
* boards.
*/
static int __init gef_sbc310_probe(void)
diff --git a/arch/powerpc/platforms/86xx/gef_sbc610.c b/arch/powerpc/platforms/86xx/gef_sbc610.c
index acf2c6c3c1eb..bb4c8e6b44d0 100644
--- a/arch/powerpc/platforms/86xx/gef_sbc610.c
+++ b/arch/powerpc/platforms/86xx/gef_sbc610.c
@@ -18,12 +18,12 @@
#include <linux/kdev_t.h>
#include <linux/delay.h>
#include <linux/seq_file.h>
+#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/pci-bridge.h>
-#include <asm/prom.h>
#include <mm/mmu_decl.h>
#include <asm/udbg.h>
@@ -157,7 +157,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_USB,
*
* This function is called to determine whether the BSP is compatible with the
* supplied device-tree, which is assumed to be the correct one for the actual
- * board. It is expected thati, in the future, a kernel may support multiple
+ * board. It is expected that, in the future, a kernel may support multiple
* boards.
*/
static int __init gef_sbc610_probe(void)
diff --git a/arch/powerpc/platforms/86xx/mpc8610_hpcd.c b/arch/powerpc/platforms/86xx/mpc8610_hpcd.c
index 7733d0607da2..b593b9afd30a 100644
--- a/arch/powerpc/platforms/86xx/mpc8610_hpcd.c
+++ b/arch/powerpc/platforms/86xx/mpc8610_hpcd.c
@@ -20,12 +20,13 @@
#include <linux/delay.h>
#include <linux/seq_file.h>
#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/fsl/guts.h>
#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/pci-bridge.h>
-#include <asm/prom.h>
#include <mm/mmu_decl.h>
#include <asm/udbg.h>
diff --git a/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c b/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c
index b697918b727d..5294394c9c07 100644
--- a/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c
+++ b/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c
@@ -19,7 +19,6 @@
#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/pci-bridge.h>
-#include <asm/prom.h>
#include <mm/mmu_decl.h>
#include <asm/udbg.h>
#include <asm/swiotlb.h>
@@ -95,12 +94,6 @@ static int __init mpc86xx_hpcn_probe(void)
if (of_machine_is_compatible("fsl,mpc8641hpcn"))
return 1; /* Looks good */
- /* Be nice and don't give silent boot death. Delete this in 2.6.27 */
- if (of_machine_is_compatible("mpc86xx")) {
- pr_warn("WARNING: your dts/dtb is old. You must update before the next kernel release.\n");
- return 1;
- }
-
return 0;
}
diff --git a/arch/powerpc/platforms/86xx/mpc86xx_smp.c b/arch/powerpc/platforms/86xx/mpc86xx_smp.c
index 5b91ea5694e3..8a7e55acf090 100644
--- a/arch/powerpc/platforms/86xx/mpc86xx_smp.c
+++ b/arch/powerpc/platforms/86xx/mpc86xx_smp.c
@@ -10,13 +10,14 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/delay.h>
+#include <linux/pgtable.h>
#include <asm/code-patching.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/pci-bridge.h>
#include <asm/mpic.h>
#include <asm/cacheflush.h>
+#include <asm/inst.h>
#include <sysdev/fsl_soc.h>
@@ -82,7 +83,7 @@ smp_86xx_kick_cpu(int nr)
mdelay(1);
/* Restore the exception vector */
- patch_instruction(vector, save_vector);
+ patch_instruction(vector, ppc_inst(save_vector));
local_irq_restore(flags);
diff --git a/arch/powerpc/platforms/86xx/mvme7100.c b/arch/powerpc/platforms/86xx/mvme7100.c
index ee983613570c..b2cc32a32d0b 100644
--- a/arch/powerpc/platforms/86xx/mvme7100.c
+++ b/arch/powerpc/platforms/86xx/mvme7100.c
@@ -19,6 +19,7 @@
#include <linux/pci.h>
#include <linux/of.h>
+#include <linux/of_fdt.h>
#include <linux/of_platform.h>
#include <linux/of_address.h>
#include <asm/udbg.h>
diff --git a/arch/powerpc/platforms/86xx/sbc8641d.c b/arch/powerpc/platforms/86xx/sbc8641d.c
deleted file mode 100644
index dc23dd383d6e..000000000000
--- a/arch/powerpc/platforms/86xx/sbc8641d.c
+++ /dev/null
@@ -1,87 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * SBC8641D board specific routines
- *
- * Copyright 2008 Wind River Systems Inc.
- *
- * By Paul Gortmaker (see MAINTAINERS for contact information)
- *
- * Based largely on the 8641 HPCN support by Freescale Semiconductor Inc.
- */
-
-#include <linux/stddef.h>
-#include <linux/kernel.h>
-#include <linux/pci.h>
-#include <linux/kdev_t.h>
-#include <linux/delay.h>
-#include <linux/seq_file.h>
-#include <linux/of_platform.h>
-
-#include <asm/time.h>
-#include <asm/machdep.h>
-#include <asm/pci-bridge.h>
-#include <asm/prom.h>
-#include <mm/mmu_decl.h>
-#include <asm/udbg.h>
-
-#include <asm/mpic.h>
-
-#include <sysdev/fsl_pci.h>
-#include <sysdev/fsl_soc.h>
-
-#include "mpc86xx.h"
-
-static void __init
-sbc8641_setup_arch(void)
-{
- if (ppc_md.progress)
- ppc_md.progress("sbc8641_setup_arch()", 0);
-
- printk("SBC8641 board from Wind River\n");
-
-#ifdef CONFIG_SMP
- mpc86xx_smp_init();
-#endif
-
- fsl_pci_assign_primary();
-}
-
-
-static void
-sbc8641_show_cpuinfo(struct seq_file *m)
-{
- uint svid = mfspr(SPRN_SVR);
-
- seq_printf(m, "Vendor\t\t: Wind River Systems\n");
-
- seq_printf(m, "SVR\t\t: 0x%x\n", svid);
-}
-
-
-/*
- * Called very early, device-tree isn't unflattened
- */
-static int __init sbc8641_probe(void)
-{
- if (of_machine_is_compatible("wind,sbc8641"))
- return 1; /* Looks good */
-
- return 0;
-}
-
-machine_arch_initcall(sbc8641, mpc86xx_common_publish_devices);
-
-define_machine(sbc8641) {
- .name = "SBC8641D",
- .probe = sbc8641_probe,
- .setup_arch = sbc8641_setup_arch,
- .init_IRQ = mpc86xx_init_irq,
- .show_cpuinfo = sbc8641_show_cpuinfo,
- .get_irq = mpic_get_irq,
- .time_init = mpc86xx_time_init,
- .calibrate_decr = generic_calibrate_decr,
- .progress = udbg_progress,
-#ifdef CONFIG_PCI
- .pcibios_fixup_bus = fsl_pcibios_fixup_bus,
-#endif
-};
diff --git a/arch/powerpc/platforms/8xx/Kconfig b/arch/powerpc/platforms/8xx/Kconfig
index e0fe670f06f6..60cc5b537a98 100644
--- a/arch/powerpc/platforms/8xx/Kconfig
+++ b/arch/powerpc/platforms/8xx/Kconfig
@@ -98,15 +98,6 @@ menu "MPC8xx CPM Options"
# 8xx specific questions.
comment "Generic MPC8xx Options"
-config 8xx_COPYBACK
- bool "Copy-Back Data Cache (else Writethrough)"
- help
- Saying Y here will cause the cache on an MPC8xx processor to be used
- in Copy-Back mode. If you say N here, it is used in Writethrough
- mode.
-
- If in doubt, say Y here.
-
config 8xx_GPIO
bool "GPIO API Support"
select GPIOLIB
@@ -171,4 +162,38 @@ config UCODE_PATCH
default y
depends on !NO_UCODE_PATCH
+menu "8xx advanced setup"
+ depends on PPC_8xx
+
+config PIN_TLB
+ bool "Pinned Kernel TLBs"
+ depends on ADVANCED_OPTIONS
+ help
+ On the 8xx, we have 32 instruction TLBs and 32 data TLBs. In each
+ table 4 TLBs can be pinned.
+
+ It reduces the number of usable TLBs to 28 (i.e. by 12%). That's the
+ reason why we make it selectable.
+
+ This option does nothing by itself; it just activates the selection of
+ what to pin.
+
+config PIN_TLB_DATA
+ bool "Pinned TLB for DATA"
+ depends on PIN_TLB
+ default y
+ help
+ This pins the first 32 Mbytes of memory with 8M pages.
+
+config PIN_TLB_IMMR
+ bool "Pinned TLB for IMMR"
+ depends on PIN_TLB
+ default y
+ help
+ This pins the IMMR area with a 512kbytes page. In case
+ CONFIG_PIN_TLB_DATA is also selected, it will reduce
+ CONFIG_PIN_TLB_DATA to 24 Mbytes.
+
+endmenu
+
endmenu
diff --git a/arch/powerpc/platforms/8xx/Makefile b/arch/powerpc/platforms/8xx/Makefile
index 27a7c6f828e0..5a098f7d5d31 100644
--- a/arch/powerpc/platforms/8xx/Makefile
+++ b/arch/powerpc/platforms/8xx/Makefile
@@ -3,7 +3,7 @@
# Makefile for the PowerPC 8xx linux kernel.
#
obj-y += m8xx_setup.o machine_check.o pic.o
-obj-$(CONFIG_CPM1) += cpm1.o
+obj-$(CONFIG_CPM1) += cpm1.o cpm1-ic.o
obj-$(CONFIG_UCODE_PATCH) += micropatch.o
obj-$(CONFIG_MPC885ADS) += mpc885ads_setup.o
obj-$(CONFIG_MPC86XADS) += mpc86xads_setup.o
diff --git a/arch/powerpc/platforms/8xx/adder875.c b/arch/powerpc/platforms/8xx/adder875.c
index 651486acb896..10e6e4fe77fc 100644
--- a/arch/powerpc/platforms/8xx/adder875.c
+++ b/arch/powerpc/platforms/8xx/adder875.c
@@ -15,9 +15,9 @@
#include <asm/cpm1.h>
#include <asm/fs_pd.h>
#include <asm/udbg.h>
-#include <asm/prom.h>
#include "mpc8xx.h"
+#include "pic.h"
struct cpm_pin {
int port, pin, flags;
@@ -104,7 +104,7 @@ define_machine(adder875) {
.name = "Adder MPC875",
.probe = adder875_probe,
.setup_arch = adder875_setup,
- .init_IRQ = mpc8xx_pics_init,
+ .init_IRQ = mpc8xx_pic_init,
.get_irq = mpc8xx_get_irq,
.restart = mpc8xx_restart,
.calibrate_decr = generic_calibrate_decr,
diff --git a/arch/powerpc/platforms/8xx/cpm1-ic.c b/arch/powerpc/platforms/8xx/cpm1-ic.c
new file mode 100644
index 000000000000..a18fc7c99f83
--- /dev/null
+++ b/arch/powerpc/platforms/8xx/cpm1-ic.c
@@ -0,0 +1,188 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Interrupt controller for the
+ * Communication Processor Module.
+ * Copyright (c) 1997 Dan error_act (dmalek@jlc.net)
+ */
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
+#include <linux/platform_device.h>
+#include <asm/cpm1.h>
+
+struct cpm_pic_data {
+ cpic8xx_t __iomem *reg;
+ struct irq_domain *host;
+};
+
+static void cpm_mask_irq(struct irq_data *d)
+{
+ struct cpm_pic_data *data = irq_data_get_irq_chip_data(d);
+ unsigned int cpm_vec = (unsigned int)irqd_to_hwirq(d);
+
+ clrbits32(&data->reg->cpic_cimr, (1 << cpm_vec));
+}
+
+static void cpm_unmask_irq(struct irq_data *d)
+{
+ struct cpm_pic_data *data = irq_data_get_irq_chip_data(d);
+ unsigned int cpm_vec = (unsigned int)irqd_to_hwirq(d);
+
+ setbits32(&data->reg->cpic_cimr, (1 << cpm_vec));
+}
+
+static void cpm_end_irq(struct irq_data *d)
+{
+ struct cpm_pic_data *data = irq_data_get_irq_chip_data(d);
+ unsigned int cpm_vec = (unsigned int)irqd_to_hwirq(d);
+
+ out_be32(&data->reg->cpic_cisr, (1 << cpm_vec));
+}
+
+static struct irq_chip cpm_pic = {
+ .name = "CPM PIC",
+ .irq_mask = cpm_mask_irq,
+ .irq_unmask = cpm_unmask_irq,
+ .irq_eoi = cpm_end_irq,
+};
+
+static int cpm_get_irq(struct irq_desc *desc)
+{
+ struct cpm_pic_data *data = irq_desc_get_handler_data(desc);
+ int cpm_vec;
+
+ /*
+ * Get the vector by setting the ACK bit and then reading
+ * the register.
+ */
+ out_be16(&data->reg->cpic_civr, 1);
+ cpm_vec = in_be16(&data->reg->cpic_civr);
+ cpm_vec >>= 11;
+
+ return irq_linear_revmap(data->host, cpm_vec);
+}
+
+static void cpm_cascade(struct irq_desc *desc)
+{
+ generic_handle_irq(cpm_get_irq(desc));
+}
+
+static int cpm_pic_host_map(struct irq_domain *h, unsigned int virq,
+ irq_hw_number_t hw)
+{
+ irq_set_chip_data(virq, h->host_data);
+ irq_set_status_flags(virq, IRQ_LEVEL);
+ irq_set_chip_and_handler(virq, &cpm_pic, handle_fasteoi_irq);
+ return 0;
+}
+
+static const struct irq_domain_ops cpm_pic_host_ops = {
+ .map = cpm_pic_host_map,
+};
+
+static int cpm_pic_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ int irq;
+ struct cpm_pic_data *data;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->reg = devm_ioremap(dev, res->start, resource_size(res));
+ if (!data->reg)
+ return -ENODEV;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ /* Initialize the CPM interrupt controller. */
+ out_be32(&data->reg->cpic_cicr,
+ (CICR_SCD_SCC4 | CICR_SCC_SCC3 | CICR_SCB_SCC2 | CICR_SCA_SCC1) |
+ ((virq_to_hw(irq) / 2) << 13) | CICR_HP_MASK);
+
+ out_be32(&data->reg->cpic_cimr, 0);
+
+ data->host = irq_domain_add_linear(dev->of_node, 64, &cpm_pic_host_ops, data);
+ if (!data->host)
+ return -ENODEV;
+
+ irq_set_handler_data(irq, data);
+ irq_set_chained_handler(irq, cpm_cascade);
+
+ setbits32(&data->reg->cpic_cicr, CICR_IEN);
+
+ return 0;
+}
+
+static const struct of_device_id cpm_pic_match[] = {
+ {
+ .compatible = "fsl,cpm1-pic",
+ }, {
+ .type = "cpm-pic",
+ .compatible = "CPM",
+ }, {},
+};
+
+static struct platform_driver cpm_pic_driver = {
+ .driver = {
+ .name = "cpm-pic",
+ .of_match_table = cpm_pic_match,
+ },
+ .probe = cpm_pic_probe,
+};
+
+static int __init cpm_pic_init(void)
+{
+ return platform_driver_register(&cpm_pic_driver);
+}
+arch_initcall(cpm_pic_init);
+
+/*
+ * The CPM can generate the error interrupt when there is a race condition
+ * between generating and masking interrupts. All we have to do is ACK it
+ * and return. This is a no-op function so we don't need any special
+ * tests in the interrupt handler.
+ */
+static irqreturn_t cpm_error_interrupt(int irq, void *dev)
+{
+ return IRQ_HANDLED;
+}
+
+static int cpm_error_probe(struct platform_device *pdev)
+{
+ int irq;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ return request_irq(irq, cpm_error_interrupt, IRQF_NO_THREAD, "error", NULL);
+}
+
+static const struct of_device_id cpm_error_ids[] = {
+ { .compatible = "fsl,cpm1" },
+ { .type = "cpm" },
+ {},
+};
+
+static struct platform_driver cpm_error_driver = {
+ .driver = {
+ .name = "cpm-error",
+ .of_match_table = cpm_error_ids,
+ },
+ .probe = cpm_error_probe,
+};
+
+static int __init cpm_error_init(void)
+{
+ return platform_driver_register(&cpm_error_driver);
+}
+subsys_initcall(cpm_error_init);
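The new cpm1-ic.c driver above touches the CPIC registers only through the powerpc big-endian MMIO helpers. As a reading aid, setbits32() and clrbits32() are read-modify-write wrappers around in_be32()/out_be32(); a simplified sketch of what they do (the authoritative macros live in the powerpc asm headers) is:

#include <linux/types.h>
#include <asm/io.h>

/* Simplified stand-ins for the bit set/clear helpers used by cpm1-ic.c. */
static inline void sketch_setbits32(u32 __iomem *addr, u32 set)
{
	out_be32(addr, in_be32(addr) | set);
}

static inline void sketch_clrbits32(u32 __iomem *addr, u32 clear)
{
	out_be32(addr, in_be32(addr) & ~clear);
}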
diff --git a/arch/powerpc/platforms/8xx/cpm1.c b/arch/powerpc/platforms/8xx/cpm1.c
index a43ee7d1ff85..bb38c8d8f8de 100644
--- a/arch/powerpc/platforms/8xx/cpm1.c
+++ b/arch/powerpc/platforms/8xx/cpm1.c
@@ -33,13 +33,12 @@
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
+#include <linux/of_irq.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/8xx_immap.h>
#include <asm/cpm1.h>
#include <asm/io.h>
#include <asm/rheap.h>
-#include <asm/prom.h>
#include <asm/cpm.h>
#include <asm/fs_pd.h>
@@ -52,150 +51,6 @@
cpm8xx_t __iomem *cpmp; /* Pointer to comm processor space */
immap_t __iomem *mpc8xx_immr = (void __iomem *)VIRT_IMMR_BASE;
-static cpic8xx_t __iomem *cpic_reg;
-
-static struct irq_domain *cpm_pic_host;
-
-static void cpm_mask_irq(struct irq_data *d)
-{
- unsigned int cpm_vec = (unsigned int)irqd_to_hwirq(d);
-
- clrbits32(&cpic_reg->cpic_cimr, (1 << cpm_vec));
-}
-
-static void cpm_unmask_irq(struct irq_data *d)
-{
- unsigned int cpm_vec = (unsigned int)irqd_to_hwirq(d);
-
- setbits32(&cpic_reg->cpic_cimr, (1 << cpm_vec));
-}
-
-static void cpm_end_irq(struct irq_data *d)
-{
- unsigned int cpm_vec = (unsigned int)irqd_to_hwirq(d);
-
- out_be32(&cpic_reg->cpic_cisr, (1 << cpm_vec));
-}
-
-static struct irq_chip cpm_pic = {
- .name = "CPM PIC",
- .irq_mask = cpm_mask_irq,
- .irq_unmask = cpm_unmask_irq,
- .irq_eoi = cpm_end_irq,
-};
-
-int cpm_get_irq(void)
-{
- int cpm_vec;
-
- /*
- * Get the vector by setting the ACK bit and then reading
- * the register.
- */
- out_be16(&cpic_reg->cpic_civr, 1);
- cpm_vec = in_be16(&cpic_reg->cpic_civr);
- cpm_vec >>= 11;
-
- return irq_linear_revmap(cpm_pic_host, cpm_vec);
-}
-
-static int cpm_pic_host_map(struct irq_domain *h, unsigned int virq,
- irq_hw_number_t hw)
-{
- pr_debug("cpm_pic_host_map(%d, 0x%lx)\n", virq, hw);
-
- irq_set_status_flags(virq, IRQ_LEVEL);
- irq_set_chip_and_handler(virq, &cpm_pic, handle_fasteoi_irq);
- return 0;
-}
-
-/*
- * The CPM can generate the error interrupt when there is a race condition
- * between generating and masking interrupts. All we have to do is ACK it
- * and return. This is a no-op function so we don't need any special
- * tests in the interrupt handler.
- */
-static irqreturn_t cpm_error_interrupt(int irq, void *dev)
-{
- return IRQ_HANDLED;
-}
-
-static struct irqaction cpm_error_irqaction = {
- .handler = cpm_error_interrupt,
- .flags = IRQF_NO_THREAD,
- .name = "error",
-};
-
-static const struct irq_domain_ops cpm_pic_host_ops = {
- .map = cpm_pic_host_map,
-};
-
-unsigned int __init cpm_pic_init(void)
-{
- struct device_node *np = NULL;
- struct resource res;
- unsigned int sirq = 0, hwirq, eirq;
- int ret;
-
- pr_debug("cpm_pic_init\n");
-
- np = of_find_compatible_node(NULL, NULL, "fsl,cpm1-pic");
- if (np == NULL)
- np = of_find_compatible_node(NULL, "cpm-pic", "CPM");
- if (np == NULL) {
- printk(KERN_ERR "CPM PIC init: can not find cpm-pic node\n");
- return sirq;
- }
-
- ret = of_address_to_resource(np, 0, &res);
- if (ret)
- goto end;
-
- cpic_reg = ioremap(res.start, resource_size(&res));
- if (cpic_reg == NULL)
- goto end;
-
- sirq = irq_of_parse_and_map(np, 0);
- if (!sirq)
- goto end;
-
- /* Initialize the CPM interrupt controller. */
- hwirq = (unsigned int)virq_to_hw(sirq);
- out_be32(&cpic_reg->cpic_cicr,
- (CICR_SCD_SCC4 | CICR_SCC_SCC3 | CICR_SCB_SCC2 | CICR_SCA_SCC1) |
- ((hwirq/2) << 13) | CICR_HP_MASK);
-
- out_be32(&cpic_reg->cpic_cimr, 0);
-
- cpm_pic_host = irq_domain_add_linear(np, 64, &cpm_pic_host_ops, NULL);
- if (cpm_pic_host == NULL) {
- printk(KERN_ERR "CPM2 PIC: failed to allocate irq host!\n");
- sirq = 0;
- goto end;
- }
-
- /* Install our own error handler. */
- np = of_find_compatible_node(NULL, NULL, "fsl,cpm1");
- if (np == NULL)
- np = of_find_node_by_type(NULL, "cpm");
- if (np == NULL) {
- printk(KERN_ERR "CPM PIC init: can not find cpm node\n");
- goto end;
- }
-
- eirq = irq_of_parse_and_map(np, 0);
- if (!eirq)
- goto end;
-
- if (setup_irq(eirq, &cpm_error_irqaction))
- printk(KERN_ERR "Could not allocate CPM error IRQ!");
-
- setbits32(&cpic_reg->cpic_cicr, CICR_IEN);
-
-end:
- of_node_put(np);
- return sirq;
-}
void __init cpm_reset(void)
{
@@ -286,6 +141,7 @@ cpm_setbrg(uint brg, uint rate)
out_be32(bp, (((BRG_UART_CLK_DIV16 / rate) - 1) << 1) |
CPM_BRG_EN | CPM_BRG_DIV16);
}
+EXPORT_SYMBOL(cpm_setbrg);
struct cpm_ioport16 {
__be16 dir, par, odr_sor, dat, intr;
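Exporting cpm_setbrg() above makes the baud-rate-generator setup callable from loadable modules; a CPM serial driver built as a module is the likely user, though that is an assumption here. A minimal sketch of a module-side call:

#include <asm/cpm1.h>

/* Hypothetical module code: program one of the CPM baud rate generators. */
static void example_set_uart_baud(unsigned int brg, unsigned int baud)
{
	/* brg selects the generator, baud is the requested line rate */
	cpm_setbrg(brg, baud);
}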
diff --git a/arch/powerpc/platforms/8xx/ep88xc.c b/arch/powerpc/platforms/8xx/ep88xc.c
index ebcf34a14789..b3b22520b435 100644
--- a/arch/powerpc/platforms/8xx/ep88xc.c
+++ b/arch/powerpc/platforms/8xx/ep88xc.c
@@ -20,6 +20,7 @@
#include <asm/cpm1.h>
#include "mpc8xx.h"
+#include "pic.h"
struct cpm_pin {
int port, pin, flags;
@@ -166,7 +167,7 @@ define_machine(ep88xc) {
.name = "Embedded Planet EP88xC",
.probe = ep88xc_probe,
.setup_arch = ep88xc_setup_arch,
- .init_IRQ = mpc8xx_pics_init,
+ .init_IRQ = mpc8xx_pic_init,
.get_irq = mpc8xx_get_irq,
.restart = mpc8xx_restart,
.calibrate_decr = mpc8xx_calibrate_decr,
diff --git a/arch/powerpc/platforms/8xx/m8xx_setup.c b/arch/powerpc/platforms/8xx/m8xx_setup.c
index f1c805c8adbc..24f358f86d16 100644
--- a/arch/powerpc/platforms/8xx/m8xx_setup.c
+++ b/arch/powerpc/platforms/8xx/m8xx_setup.c
@@ -17,10 +17,11 @@
#include <linux/time.h>
#include <linux/rtc.h>
#include <linux/fsl_devices.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
#include <asm/io.h>
#include <asm/8xx_immap.h>
-#include <asm/prom.h>
#include <asm/fs_pd.h>
#include <mm/mmu_decl.h>
@@ -28,9 +29,6 @@
#include "mpc8xx.h"
-extern int cpm_pic_init(void);
-extern int cpm_get_irq(void);
-
/* A place holder for time base interrupts, if they are ever enabled. */
static irqreturn_t timebase_interrupt(int irq, void *dev)
{
@@ -39,12 +37,6 @@ static irqreturn_t timebase_interrupt(int irq, void *dev)
return IRQ_HANDLED;
}
-static struct irqaction tbint_irqaction = {
- .handler = timebase_interrupt,
- .flags = IRQF_NO_THREAD,
- .name = "tbint",
-};
-
/* per-board overridable init_internal_rtc() function. */
void __init __attribute__ ((weak))
init_internal_rtc(void)
@@ -157,7 +149,8 @@ void __init mpc8xx_calibrate_decr(void)
(TBSCR_TBF | TBSCR_TBE));
immr_unmap(sys_tmr2);
- if (setup_irq(virq, &tbint_irqaction))
+ if (request_irq(virq, timebase_interrupt, IRQF_NO_THREAD, "tbint",
+ NULL))
panic("Could not allocate timer IRQ!");
}
@@ -212,28 +205,3 @@ void __noreturn mpc8xx_restart(char *cmd)
in_8(&clk_r->res[0]);
panic("Restart failed\n");
}
-
-static void cpm_cascade(struct irq_desc *desc)
-{
- generic_handle_irq(cpm_get_irq());
-}
-
-/* Initialize the internal interrupt controllers. The number of
- * interrupts supported can vary with the processor type, and the
- * 82xx family can have up to 64.
- * External interrupts can be either edge or level triggered, and
- * need to be initialized by the appropriate driver.
- */
-void __init mpc8xx_pics_init(void)
-{
- int irq;
-
- if (mpc8xx_pic_init()) {
- printk(KERN_ERR "Failed interrupt 8xx controller initialization\n");
- return;
- }
-
- irq = cpm_pic_init();
- if (irq)
- irq_set_chained_handler(irq, cpm_cascade);
-}
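The timebase-interrupt hunk above follows the treewide removal of setup_irq(): the statically defined struct irqaction is dropped and its handler, flags and name are passed straight to request_irq(). A self-contained sketch of the resulting pattern (generic kernel API, not code from this patch):

#include <linux/interrupt.h>

static irqreturn_t example_handler(int irq, void *dev)
{
	return IRQ_HANDLED;
}

static int example_hookup(unsigned int virq)
{
	/* handler, flags and name that used to sit in a struct irqaction */
	return request_irq(virq, example_handler, IRQF_NO_THREAD, "example", NULL);
}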
diff --git a/arch/powerpc/platforms/8xx/machine_check.c b/arch/powerpc/platforms/8xx/machine_check.c
index 88dedf38eccd..656365975895 100644
--- a/arch/powerpc/platforms/8xx/machine_check.c
+++ b/arch/powerpc/platforms/8xx/machine_check.c
@@ -26,7 +26,7 @@ int machine_check_8xx(struct pt_regs *regs)
* to deal with that than having a wart in the mcheck handler.
* -- BenH
*/
- bad_page_fault(regs, regs->dar, SIGBUS);
+ bad_page_fault(regs, SIGBUS);
return 1;
#else
return 0;
diff --git a/arch/powerpc/platforms/8xx/micropatch.c b/arch/powerpc/platforms/8xx/micropatch.c
index c80bd7afd6c5..aef179fcbd4f 100644
--- a/arch/powerpc/platforms/8xx/micropatch.c
+++ b/arch/powerpc/platforms/8xx/micropatch.c
@@ -16,7 +16,6 @@
#include <linux/interrupt.h>
#include <asm/irq.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/8xx_immap.h>
#include <asm/cpm.h>
#include <asm/cpm1.h>
@@ -361,6 +360,17 @@ void __init cpm_load_patch(cpm8xx_t *cp)
if (IS_ENABLED(CONFIG_SMC_UCODE_PATCH)) {
smc_uart_t *smp;
+ if (IS_ENABLED(CONFIG_PPC_EARLY_DEBUG_CPM)) {
+ int i;
+
+ for (i = 0; i < sizeof(*smp); i += 4) {
+ u32 __iomem *src = (u32 __iomem *)&cp->cp_dparam[PROFF_SMC1 + i];
+ u32 __iomem *dst = (u32 __iomem *)&cp->cp_dparam[PROFF_DSP1 + i];
+
+ out_be32(dst, in_be32(src));
+ }
+ }
+
smp = (smc_uart_t *)&cp->cp_dparam[PROFF_SMC1];
out_be16(&smp->smc_rpbase, 0x1ec0);
smp = (smc_uart_t *)&cp->cp_dparam[PROFF_SMC2];
diff --git a/arch/powerpc/platforms/8xx/mpc86xads_setup.c b/arch/powerpc/platforms/8xx/mpc86xads_setup.c
index 8d02f5ff4481..03267e4a44a9 100644
--- a/arch/powerpc/platforms/8xx/mpc86xads_setup.c
+++ b/arch/powerpc/platforms/8xx/mpc86xads_setup.c
@@ -29,6 +29,7 @@
#include "mpc86xads.h"
#include "mpc8xx.h"
+#include "pic.h"
struct cpm_pin {
int port, pin, flags;
@@ -140,7 +141,7 @@ define_machine(mpc86x_ads) {
.name = "MPC86x ADS",
.probe = mpc86xads_probe,
.setup_arch = mpc86xads_setup_arch,
- .init_IRQ = mpc8xx_pics_init,
+ .init_IRQ = mpc8xx_pic_init,
.get_irq = mpc8xx_get_irq,
.restart = mpc8xx_restart,
.calibrate_decr = mpc8xx_calibrate_decr,
diff --git a/arch/powerpc/platforms/8xx/mpc885ads_setup.c b/arch/powerpc/platforms/8xx/mpc885ads_setup.c
index a0c83c1905c6..b1e39f96de00 100644
--- a/arch/powerpc/platforms/8xx/mpc885ads_setup.c
+++ b/arch/powerpc/platforms/8xx/mpc885ads_setup.c
@@ -42,6 +42,7 @@
#include "mpc885ads.h"
#include "mpc8xx.h"
+#include "pic.h"
static u32 __iomem *bcsr, *bcsr5;
@@ -216,7 +217,7 @@ define_machine(mpc885_ads) {
.name = "Freescale MPC885 ADS",
.probe = mpc885ads_probe,
.setup_arch = mpc885ads_setup_arch,
- .init_IRQ = mpc8xx_pics_init,
+ .init_IRQ = mpc8xx_pic_init,
.get_irq = mpc8xx_get_irq,
.restart = mpc8xx_restart,
.calibrate_decr = mpc8xx_calibrate_decr,
diff --git a/arch/powerpc/platforms/8xx/mpc8xx.h b/arch/powerpc/platforms/8xx/mpc8xx.h
index 31cc2ecace42..79fae3324866 100644
--- a/arch/powerpc/platforms/8xx/mpc8xx.h
+++ b/arch/powerpc/platforms/8xx/mpc8xx.h
@@ -15,7 +15,6 @@ extern void __noreturn mpc8xx_restart(char *cmd);
extern void mpc8xx_calibrate_decr(void);
extern int mpc8xx_set_rtc_time(struct rtc_time *tm);
extern void mpc8xx_get_rtc_time(struct rtc_time *tm);
-extern void mpc8xx_pics_init(void);
extern unsigned int mpc8xx_get_irq(void);
#endif /* __MPC8xx_H */
diff --git a/arch/powerpc/platforms/8xx/pic.c b/arch/powerpc/platforms/8xx/pic.c
index f2ba837249d6..ea6b0e523c60 100644
--- a/arch/powerpc/platforms/8xx/pic.c
+++ b/arch/powerpc/platforms/8xx/pic.c
@@ -4,7 +4,8 @@
#include <linux/signal.h>
#include <linux/irq.h>
#include <linux/dma-mapping.h>
-#include <asm/prom.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/8xx_immap.h>
@@ -14,8 +15,6 @@
#define PIC_VEC_SPURRIOUS 15
-extern int cpm_get_irq(struct pt_regs *regs);
-
static struct irq_domain *mpc8xx_pic_host;
static unsigned long mpc8xx_cached_irq_mask;
static sysconf8xx_t __iomem *siu_reg;
@@ -125,7 +124,7 @@ static const struct irq_domain_ops mpc8xx_pic_host_ops = {
.xlate = mpc8xx_pic_host_xlate,
};
-int __init mpc8xx_pic_init(void)
+void __init mpc8xx_pic_init(void)
{
struct resource res;
struct device_node *np;
@@ -136,7 +135,7 @@ int __init mpc8xx_pic_init(void)
np = of_find_node_by_type(NULL, "mpc8xx-pic");
if (np == NULL) {
printk(KERN_ERR "Could not find fsl,pq1-pic node\n");
- return -ENOMEM;
+ return;
}
ret = of_address_to_resource(np, 0, &res);
@@ -144,19 +143,13 @@ int __init mpc8xx_pic_init(void)
goto out;
siu_reg = ioremap(res.start, resource_size(&res));
- if (siu_reg == NULL) {
- ret = -EINVAL;
+ if (!siu_reg)
goto out;
- }
mpc8xx_pic_host = irq_domain_add_linear(np, 64, &mpc8xx_pic_host_ops, NULL);
- if (mpc8xx_pic_host == NULL) {
+ if (!mpc8xx_pic_host)
printk(KERN_ERR "MPC8xx PIC: failed to allocate irq host!\n");
- ret = -ENOMEM;
- }
- ret = 0;
out:
of_node_put(np);
- return ret;
}
diff --git a/arch/powerpc/platforms/8xx/pic.h b/arch/powerpc/platforms/8xx/pic.h
index 9fe00eebdc8b..c70f1b446f94 100644
--- a/arch/powerpc/platforms/8xx/pic.h
+++ b/arch/powerpc/platforms/8xx/pic.h
@@ -4,7 +4,7 @@
#include <linux/irq.h>
#include <linux/interrupt.h>
-int mpc8xx_pic_init(void);
+void mpc8xx_pic_init(void);
unsigned int mpc8xx_get_irq(void);
/*
diff --git a/arch/powerpc/platforms/8xx/tqm8xx_setup.c b/arch/powerpc/platforms/8xx/tqm8xx_setup.c
index 4cea8b1afa44..ffcfd17a5fa3 100644
--- a/arch/powerpc/platforms/8xx/tqm8xx_setup.c
+++ b/arch/powerpc/platforms/8xx/tqm8xx_setup.c
@@ -43,6 +43,7 @@
#include <asm/udbg.h>
#include "mpc8xx.h"
+#include "pic.h"
struct cpm_pin {
int port, pin, flags;
@@ -104,6 +105,9 @@ static void __init init_ioports(void)
if (dnode == NULL)
return;
prop = of_find_property(dnode, "ethernet1", &len);
+
+ of_node_put(dnode);
+
if (prop == NULL)
return;
@@ -142,7 +146,7 @@ define_machine(tqm8xx) {
.name = "TQM8xx",
.probe = tqm8xx_probe,
.setup_arch = tqm8xx_setup_arch,
- .init_IRQ = mpc8xx_pics_init,
+ .init_IRQ = mpc8xx_pic_init,
.get_irq = mpc8xx_get_irq,
.restart = mpc8xx_restart,
.calibrate_decr = mpc8xx_calibrate_decr,
diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig
index 1f8025383caa..d41dad227de8 100644
--- a/arch/powerpc/platforms/Kconfig
+++ b/arch/powerpc/platforms/Kconfig
@@ -20,11 +20,13 @@ source "arch/powerpc/platforms/embedded6xx/Kconfig"
source "arch/powerpc/platforms/44x/Kconfig"
source "arch/powerpc/platforms/40x/Kconfig"
source "arch/powerpc/platforms/amigaone/Kconfig"
+source "arch/powerpc/platforms/book3s/Kconfig"
+source "arch/powerpc/platforms/microwatt/Kconfig"
config KVM_GUEST
bool "KVM Guest support"
select EPAPR_PARAVIRT
- ---help---
+ help
This option enables various optimizations for running under the KVM
hypervisor. Overhead for the kernel when not running inside KVM should
be minimal.
@@ -38,9 +40,9 @@ config EPAPR_PARAVIRT
In case of doubt, say Y
-config PPC_NATIVE
+config PPC_HASH_MMU_NATIVE
bool
- depends on PPC_BOOK3S_32 || PPC64
+ depends on PPC_BOOK3S
help
Support for running natively on the hardware, i.e. without
a hypervisor. This option is not user-selectable but should
@@ -49,6 +51,7 @@ config PPC_NATIVE
config PPC_OF_BOOT_TRAMPOLINE
bool "Support booting from Open Firmware or yaboot"
depends on PPC_BOOK3S_32 || PPC64
+ select RELOCATABLE if PPC64
default y
help
Support from booting from Open Firmware or yaboot using an
@@ -199,21 +202,6 @@ source "drivers/cpuidle/Kconfig"
endmenu
-config PPC601_SYNC_FIX
- bool "Workarounds for PPC601 bugs"
- depends on PPC_BOOK3S_601 && PPC_PMAC
- default y
- help
- Some versions of the PPC601 (the first PowerPC chip) have bugs which
- mean that extra synchronization instructions are required near
- certain instructions, typically those that make major changes to the
- CPU state. These extra instructions reduce performance slightly.
- If you say N here, these extra instructions will not be included,
- resulting in a kernel which will run faster but may not run at all
- on some systems with the PPC601 chip.
-
- If in doubt, say Y here.
-
config TAU
bool "On-chip CPU temperature sensor support"
depends on PPC_BOOK3S_32
@@ -223,30 +211,24 @@ config TAU
temperature within 2-4 degrees Celsius. This option shows the current
on-die temperature in /proc/cpuinfo if the cpu supports it.
- Unfortunately, on some chip revisions, this sensor is very inaccurate
- and in many cases, does not work at all, so don't assume the cpu
- temp is actually what /proc/cpuinfo says it is.
+ Unfortunately, this sensor is very inaccurate when uncalibrated, so
+ don't assume the cpu temp is actually what /proc/cpuinfo says it is.
config TAU_INT
- bool "Interrupt driven TAU driver (DANGEROUS)"
+ bool "Interrupt driven TAU driver (EXPERIMENTAL)"
depends on TAU
- ---help---
+ help
The TAU supports an interrupt driven mode which causes an interrupt
whenever the temperature goes out of range. This is the fastest way
to get notified the temp has exceeded a range. With this option off,
a timer is used to re-check the temperature periodically.
- However, on some cpus it appears that the TAU interrupt hardware
- is buggy and can cause a situation which would lead unexplained hard
- lockups.
-
- Unless you are extending the TAU driver, or enjoy kernel/hardware
- debugging, leave this option off.
+ If in doubt, say N here.
config TAU_AVERAGE
bool "Average high and low temp"
depends on TAU
- ---help---
+ help
The TAU hardware can compare the temperature to an upper and lower
bound. The default behavior is to show both the upper and lower
bound in /proc/cpuinfo. If the range is large, the temperature is
@@ -317,8 +299,4 @@ config MCU_MPC8349EMITX
also register MCU GPIOs with the generic GPIO API, so you'll able
to use MCU pins as GPIOs.
-config XILINX_PCI
- bool "Xilinx PCI host bridge support"
- depends on PCI && XILINX_VIRTEX
-
endmenu
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index 6caedc88474f..0c4eed9aea80 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -2,7 +2,6 @@
config PPC32
bool
default y if !PPC64
- select KASAN_VMALLOC if KASAN && MODULES
config PPC64
bool "64-bit kernel"
@@ -11,52 +10,38 @@ config PPC64
This option selects whether a 32-bit or a 64-bit kernel
will be built.
-config PPC_BOOK3S_32
- bool
-
menu "Processor support"
choice
prompt "Processor Type"
depends on PPC32
help
There are five families of 32 bit PowerPC chips supported.
- The most common ones are the desktop and server CPUs (601, 603,
+ The most common ones are the desktop and server CPUs (603,
604, 740, 750, 74xx) CPUs from Freescale and IBM, with their
embedded 512x/52xx/82xx/83xx/86xx counterparts.
- The other embedded parts, namely 4xx, 8xx, e200 (55xx) and e500
+ The other embedded parts, namely 4xx, 8xx and e500
(85xx) each form a family of their own that is not compatible
with the others.
If unsure, select 52xx/6xx/7xx/74xx/82xx/83xx/86xx.
-config PPC_BOOK3S_6xx
- bool "512x/52xx/6xx/7xx/74xx/82xx/83xx/86xx except 601"
- select PPC_BOOK3S_32
- select PPC_FPU
+config PPC_BOOK3S_32
+ bool "512x/52xx/6xx/7xx/74xx/82xx/83xx/86xx"
+ imply PPC_FPU
select PPC_HAVE_PMU_SUPPORT
- select PPC_HAVE_KUEP
- select PPC_HAVE_KUAP
- select HAVE_ARCH_VMAP_STACK
-
-config PPC_BOOK3S_601
- bool "PowerPC 601"
- select PPC_BOOK3S_32
- select PPC_FPU
- select PPC_HAVE_KUAP
select HAVE_ARCH_VMAP_STACK
config PPC_85xx
bool "Freescale 85xx"
- select E500
+ select PPC_E500
config PPC_8xx
bool "Freescale 8xx"
+ select ARCH_SUPPORTS_HUGETLBFS
select FSL_SOC
- select SYS_SUPPORTS_HUGETLBFS
- select PPC_HAVE_KUEP
- select PPC_HAVE_KUAP
- select PPC_MM_SLICES if HUGETLB_PAGE
+ select PPC_KUEP
select HAVE_ARCH_VMAP_STACK
+ select HUGETLBFS
config 40x
bool "AMCC 40x"
@@ -64,6 +49,7 @@ config 40x
select PPC_UDBG_16550
select 4xx_SOC
select HAVE_PCI
+ select PPC_KUEP if PPC_KUAP
config 44x
bool "AMCC 44x, 46x or 47x"
@@ -72,12 +58,26 @@ config 44x
select 4xx_SOC
select HAVE_PCI
select PHYS_64BIT
-
-config E200
- bool "Freescale e200"
+ select PPC_KUEP
endchoice
+config PPC_BOOK3S_603
+ bool "Support for 603 SW loaded TLB"
+ depends on PPC_BOOK3S_32
+ default y
+ help
+ Provide support for processors based on the 603 cores. Those
+ processors don't have a HASH MMU and provide SW TLB loading.
+
+config PPC_BOOK3S_604
+ bool "Support for 604+ HASH MMU" if PPC_BOOK3S_603
+ depends on PPC_BOOK3S_32
+ default y
+ help
+ Provide support for processors not based on the 603 cores.
+ Those processors have a HASH MMU.
+
choice
prompt "Processor Type"
depends on PPC64
@@ -93,18 +93,26 @@ config PPC_BOOK3S_64
bool "Server processors"
select PPC_FPU
select PPC_HAVE_PMU_SUPPORT
- select SYS_SUPPORTS_HUGETLBFS
select HAVE_ARCH_TRANSPARENT_HUGEPAGE
+ select ARCH_ENABLE_HUGEPAGE_MIGRATION if HUGETLB_PAGE && MIGRATION
+ select ARCH_ENABLE_SPLIT_PMD_PTLOCK
select ARCH_ENABLE_THP_MIGRATION if TRANSPARENT_HUGEPAGE
+ select ARCH_SUPPORTS_HUGETLBFS
select ARCH_SUPPORTS_NUMA_BALANCING
+ select HAVE_MOVE_PMD
+ select HAVE_MOVE_PUD
select IRQ_WORK
- select PPC_MM_SLICES
+ select PPC_64S_HASH_MMU if !PPC_RADIX_MMU
+ select KASAN_VMALLOC if KASAN
config PPC_BOOK3E_64
bool "Embedded processors"
+ select PPC_E500
+ select PPC_E500MC
select PPC_FPU # Make it a choice ?
select PPC_SMP_MUXED_IPI
select PPC_DOORBELL
+ select ZONE_DMA
endchoice
@@ -118,52 +126,80 @@ choice
If unsure, select Generic.
config GENERIC_CPU
- bool "Generic (POWER4 and above)"
- depends on PPC64 && !CPU_LITTLE_ENDIAN
+ bool "Generic (POWER5 and PowerPC 970 and above)"
+ depends on PPC_BOOK3S_64 && !CPU_LITTLE_ENDIAN
+ select PPC_64S_HASH_MMU
config GENERIC_CPU
bool "Generic (POWER8 and above)"
- depends on PPC64 && CPU_LITTLE_ENDIAN
+ depends on PPC_BOOK3S_64 && CPU_LITTLE_ENDIAN
select ARCH_HAS_FAST_MULTIPLIER
+ select PPC_64S_HASH_MMU
-config GENERIC_CPU
+config POWERPC_CPU
bool "Generic 32 bits powerpc"
- depends on PPC32 && !PPC_8xx
+ depends on PPC_BOOK3S_32
config CELL_CPU
bool "Cell Broadband Engine"
depends on PPC_BOOK3S_64 && !CPU_LITTLE_ENDIAN
+ select PPC_64S_HASH_MMU
-config POWER5_CPU
- bool "POWER5"
+config PPC_970_CPU
+ bool "PowerPC 970 (including PowerPC G5)"
depends on PPC_BOOK3S_64 && !CPU_LITTLE_ENDIAN
+ select PPC_64S_HASH_MMU
config POWER6_CPU
bool "POWER6"
depends on PPC_BOOK3S_64 && !CPU_LITTLE_ENDIAN
+ select PPC_64S_HASH_MMU
config POWER7_CPU
bool "POWER7"
depends on PPC_BOOK3S_64
select ARCH_HAS_FAST_MULTIPLIER
+ select PPC_64S_HASH_MMU
config POWER8_CPU
bool "POWER8"
depends on PPC_BOOK3S_64
select ARCH_HAS_FAST_MULTIPLIER
+ select PPC_64S_HASH_MMU
config POWER9_CPU
bool "POWER9"
depends on PPC_BOOK3S_64
select ARCH_HAS_FAST_MULTIPLIER
+config POWER10_CPU
+ bool "POWER10"
+ depends on PPC_BOOK3S_64
+ select ARCH_HAS_FAST_MULTIPLIER
+
config E5500_CPU
bool "Freescale e5500"
- depends on E500
+ depends on PPC64 && PPC_E500
config E6500_CPU
bool "Freescale e6500"
- depends on E500
+ depends on PPC64 && PPC_E500
+
+config 405_CPU
+ bool "40x family"
+ depends on 40x
+
+config 440_CPU
+ bool "440 (44x family)"
+ depends on 44x
+
+config 464_CPU
+ bool "464 (44x family)"
+ depends on 44x
+
+config 476_CPU
+ bool "476 (47x family)"
+ depends on PPC_47x
config 860_CPU
bool "8xx family"
@@ -182,56 +218,86 @@ config G4_CPU
depends on PPC_BOOK3S_32
select ALTIVEC
+config E500_CPU
+ bool "e500 (8540)"
+ depends on PPC_85xx && !PPC_E500MC
+
+config E500MC_CPU
+ bool "e500mc"
+ depends on PPC_85xx && PPC_E500MC
+
+config TOOLCHAIN_DEFAULT_CPU
+ bool "Rely on the toolchain's implicit default CPU"
+ depends on PPC32
+
endchoice
config TARGET_CPU_BOOL
bool
- default !GENERIC_CPU
+ default !GENERIC_CPU && !TOOLCHAIN_DEFAULT_CPU
config TARGET_CPU
string
depends on TARGET_CPU_BOOL
default "cell" if CELL_CPU
- default "power5" if POWER5_CPU
+ default "970" if PPC_970_CPU
default "power6" if POWER6_CPU
default "power7" if POWER7_CPU
default "power8" if POWER8_CPU
default "power9" if POWER9_CPU
+ default "power10" if POWER10_CPU
+ default "405" if 405_CPU
+ default "440" if 440_CPU
+ default "464" if 464_CPU
+ default "476" if 476_CPU
default "860" if 860_CPU
default "e300c2" if E300C2_CPU
default "e300c3" if E300C3_CPU
default "G4" if G4_CPU
+ default "8540" if E500_CPU
+ default "e500mc" if E500MC_CPU
+ default "powerpc" if POWERPC_CPU
config PPC_BOOK3S
def_bool y
depends on PPC_BOOK3S_32 || PPC_BOOK3S_64
-config PPC_BOOK3E
- def_bool y
- depends on PPC_BOOK3E_64
-
-config E500
+config PPC_E500
select FSL_EMB_PERFMON
- select PPC_FSL_BOOK3E
bool
+ select ARCH_SUPPORTS_HUGETLBFS if PHYS_64BIT || PPC64
+ select PPC_SMP_MUXED_IPI
+ select PPC_DOORBELL
+ select PPC_KUEP
config PPC_E500MC
bool "e500mc Support"
select PPC_FPU
select COMMON_CLK
- depends on E500
+ depends on PPC_E500
help
This must be enabled for running on e500mc (and derivatives
such as e5500/e6500), and must be disabled for running on
e500v1 or e500v2.
-config PPC_FPU
+config PPC_FPU_REGS
bool
+
+config PPC_FPU
+ bool "Support for Floating Point Unit (FPU)" if PPC_MPC832x
default y if PPC64
+ select PPC_FPU_REGS
+ help
+ This must be enabled to support the Floating Point Unit.
+ Most 6xx parts have an FPU, but the e300c2 core (mpc832x) doesn't,
+ so when building an embedded kernel for that target you can
+ disable FPU support.
+
+ If unsure say Y.
config FSL_EMB_PERFMON
bool "Freescale Embedded Perfmon"
- depends on E500 || PPC_83xx
+ depends on PPC_E500 || PPC_83xx
help
This is the Performance Monitor support found on the e500 core
and some e300 cores (c3 and c4). Select this only if your
@@ -244,7 +310,7 @@ config FSL_EMB_PERF_EVENT
config FSL_EMB_PERF_EVENT_E500
bool
- depends on FSL_EMB_PERF_EVENT && E500
+ depends on FSL_EMB_PERF_EVENT && PPC_E500
default y
config 4xx
@@ -254,33 +320,24 @@ config 4xx
config BOOKE
bool
- depends on E200 || E500 || 44x || PPC_BOOK3E
+ depends on PPC_E500 || 44x
default y
-config FSL_BOOKE
+config BOOKE_OR_40x
bool
- depends on (E200 || E500) && PPC32
+ depends on BOOKE || 40x
default y
-# this is for common code between PPC32 & PPC64 FSL BOOKE
-config PPC_FSL_BOOK3E
- bool
- select FSL_EMB_PERFMON
- select PPC_SMP_MUXED_IPI
- select SYS_SUPPORTS_HUGETLBFS if PHYS_64BIT || PPC64
- select PPC_DOORBELL
- default y if FSL_BOOKE
-
config PTE_64BIT
bool
- depends on 44x || E500 || PPC_86xx
+ depends on 44x || PPC_E500 || PPC_86xx
default y if PHYS_64BIT
config PHYS_64BIT
- bool 'Large physical address support' if E500 || PPC_86xx
- depends on (44x || E500 || PPC_86xx) && !PPC_83xx && !PPC_82xx
+ bool 'Large physical address support' if PPC_E500 || PPC_86xx
+ depends on (44x || PPC_E500 || PPC_86xx) && !PPC_83xx && !PPC_82xx
select PHYS_ADDR_T_64BIT
- ---help---
+ help
This option enables kernel support for larger than 32-bit physical
addresses. This feature may not be available on all cores.
@@ -292,8 +349,9 @@ config PHYS_64BIT
config ALTIVEC
bool "AltiVec Support"
- depends on PPC_BOOK3S_32 || PPC_BOOK3S_64 || (PPC_E500MC && PPC64)
- ---help---
+ depends on PPC_BOOK3S || (PPC_E500MC && PPC64 && !E5500_CPU)
+ select PPC_FPU
+ help
This option enables kernel support for the Altivec extensions to the
PowerPC processor. The kernel currently supports saving and restoring
altivec registers, and turning on the 'altivec enable' bit so user
@@ -309,7 +367,7 @@ config ALTIVEC
config VSX
bool "VSX Support"
depends on PPC_BOOK3S_64 && ALTIVEC && PPC_FPU
- ---help---
+ help
This option enables kernel support for the Vector Scaler extensions
to the PowerPC processor. The kernel currently supports saving and
@@ -324,13 +382,13 @@ config VSX
config SPE_POSSIBLE
def_bool y
- depends on E200 || (E500 && !PPC_E500MC)
+ depends on PPC_E500 && !PPC_E500MC
config SPE
bool "SPE Support"
depends on SPE_POSSIBLE
default y
- ---help---
+ help
This option enables kernel support for the Signal Processing
Extensions (SPE) to the PowerPC processor. The kernel currently
supports saving and restoring SPE registers, and turning on the
@@ -342,16 +400,25 @@ config SPE
If in doubt, say Y here.
-config ARCH_ENABLE_SPLIT_PMD_PTLOCK
- def_bool y
+config PPC_64S_HASH_MMU
+ bool "Hash MMU Support"
depends on PPC_BOOK3S_64
+ default y
+ help
+ Enable support for the Power ISA Hash style MMU. This is implemented
+ by all IBM Power and other 64-bit Book3S CPUs before ISA v3.0. The
+ OpenPOWER ISA does not mandate the hash MMU and some CPUs do not
+ implement it (e.g., Microwatt).
+
+ Note that POWER9 PowerVM platforms only support the hash
+ MMU. From POWER10 radix is also supported by PowerVM.
+
+ If you're unsure, say Y.
config PPC_RADIX_MMU
bool "Radix MMU Support"
depends on PPC_BOOK3S_64
select ARCH_HAS_GIGANTIC_PAGE
- select PPC_HAVE_KUEP
- select PPC_HAVE_KUAP
default y
help
Enable support for the Power ISA 3.0 Radix style MMU. Currently this
@@ -359,7 +426,8 @@ config PPC_RADIX_MMU
you can probably disable this.
config PPC_RADIX_MMU_DEFAULT
- bool "Default to using the Radix MMU when possible"
+ bool "Default to using the Radix MMU when possible" if PPC_64S_HASH_MMU
+ depends on PPC_BOOK3S_64
depends on PPC_RADIX_MMU
default y
help
@@ -371,24 +439,16 @@ config PPC_RADIX_MMU_DEFAULT
If you're unsure, say Y.
-config PPC_HAVE_KUEP
- bool
-
config PPC_KUEP
- bool "Kernel Userspace Execution Prevention"
- depends on PPC_HAVE_KUEP
- default y
+ bool "Kernel Userspace Execution Prevention" if !40x
+ default y if !40x
help
Enable support for Kernel Userspace Execution Prevention (KUEP)
If you're unsure, say Y.
-config PPC_HAVE_KUAP
- bool
-
config PPC_KUAP
bool "Kernel Userspace Access Protection"
- depends on PPC_HAVE_KUAP
default y
help
Enable support for Kernel Userspace Access Protection (KUAP)
@@ -397,34 +457,30 @@ config PPC_KUAP
config PPC_KUAP_DEBUG
bool "Extra debugging for Kernel Userspace Access Protection"
- depends on PPC_HAVE_KUAP && (PPC_RADIX_MMU || PPC_32)
+ depends on PPC_KUAP
help
Add extra debugging for Kernel Userspace Access Protection (KUAP)
If you're unsure, say N.
-config ARCH_ENABLE_HUGEPAGE_MIGRATION
+config PPC_PKEY
def_bool y
- depends on PPC_BOOK3S_64 && HUGETLB_PAGE && MIGRATION
+ depends on PPC_BOOK3S_64
+ depends on PPC_MEM_KEYS || PPC_KUAP || PPC_KUEP
config PPC_MMU_NOHASH
def_bool y
depends on !PPC_BOOK3S
-config PPC_MMU_NOHASH_32
- def_bool y
- depends on PPC_MMU_NOHASH && PPC32
-
-config PPC_BOOK3E_MMU
- def_bool y
- depends on FSL_BOOKE || PPC_BOOK3E
-
-config PPC_MM_SLICES
- bool
-
config PPC_HAVE_PMU_SUPPORT
bool
+config PMU_SYSFS
+ bool "Create PMU SPRs sysfs file"
+ default n
+ help
+ This option enables sysfs file creation for PMU SPRs like MMCR* and PMC*.
+
config PPC_PERF_CTRS
def_bool y
depends on PERF_EVENTS && PPC_HAVE_PMU_SUPPORT
@@ -437,10 +493,10 @@ config FORCE_SMP
select SMP
config SMP
- depends on PPC_BOOK3S || PPC_BOOK3E || FSL_BOOKE || PPC_47x
+ depends on PPC_BOOK3S || PPC_E500 || PPC_47x
select GENERIC_IRQ_MIGRATION
bool "Symmetric multi-processing support" if !FORCE_SMP
- ---help---
+ help
This enables support for systems with more than one CPU. If you have
a system with only one CPU, say N. If you have a system with more
than one CPU, say Y. Note that the kernel does not currently
@@ -457,15 +513,15 @@ config SMP
If you don't know what to do here, say N.
config NR_CPUS
- int "Maximum number of CPUs (2-8192)"
- range 2 8192
- depends on SMP
+ int "Maximum number of CPUs (2-8192)" if SMP
+ range 2 8192 if SMP
+ default "1" if !SMP
default "32" if PPC64
default "4"
config NOT_COHERENT_CACHE
bool
- depends on 4xx || PPC_8xx || E200 || PPC_MPC512x || \
+ depends on 4xx || PPC_8xx || PPC_MPC512x || \
GAMECUBE_COMMON || AMIGAONE
select ARCH_HAS_DMA_PREP_COHERENT
select ARCH_HAS_SYNC_DMA_FOR_DEVICE
@@ -484,13 +540,12 @@ endmenu
config VDSO32
def_bool y
- depends on PPC32 || CPU_BIG_ENDIAN
+ depends on PPC32 || COMPAT
help
This symbol controls whether we build the 32-bit VDSO. We obviously
want to do that if we're building a 32-bit kernel. If we're building
- a 64-bit kernel then we only want a 32-bit VDSO if we're building for
- big endian. That is because the only little endian configuration we
- support is ppc64le which is 64-bit only.
+ a 64-bit kernel then we only want a 32-bit VDSO if we're also enabling
+ COMPAT.
choice
prompt "Endianness selection"
@@ -519,6 +574,12 @@ config CPU_LITTLE_ENDIAN
endchoice
+config PPC64_ELF_ABI_V1
+ def_bool PPC64 && CPU_BIG_ENDIAN
+
+config PPC64_ELF_ABI_V2
+ def_bool PPC64 && CPU_LITTLE_ENDIAN
+
config PPC64_BOOT_WRAPPER
def_bool n
depends on CPU_LITTLE_ENDIAN
diff --git a/arch/powerpc/platforms/Makefile b/arch/powerpc/platforms/Makefile
index 143d4417f6cc..94470fb27c99 100644
--- a/arch/powerpc/platforms/Makefile
+++ b/arch/powerpc/platforms/Makefile
@@ -22,3 +22,5 @@ obj-$(CONFIG_PPC_CELL) += cell/
obj-$(CONFIG_PPC_PS3) += ps3/
obj-$(CONFIG_EMBEDDED6xx) += embedded6xx/
obj-$(CONFIG_AMIGAONE) += amigaone/
+obj-$(CONFIG_PPC_BOOK3S) += book3s/
+obj-$(CONFIG_PPC_MICROWATT) += microwatt/
diff --git a/arch/powerpc/platforms/amigaone/setup.c b/arch/powerpc/platforms/amigaone/setup.c
index ea5e45e32683..397ce6a40bd0 100644
--- a/arch/powerpc/platforms/amigaone/setup.c
+++ b/arch/powerpc/platforms/amigaone/setup.c
@@ -8,6 +8,7 @@
* Copyright 2003 by Hans-Joerg Frieden and Thomas Frieden
*/
+#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_address.h>
@@ -66,6 +67,12 @@ static int __init amigaone_add_bridge(struct device_node *dev)
void __init amigaone_setup_arch(void)
{
+ if (ppc_md.progress)
+ ppc_md.progress("Linux/PPC "UTS_RELEASE"\n", 0);
+}
+
+static void __init amigaone_discover_phbs(void)
+{
struct device_node *np;
int phb = -ENODEV;
@@ -74,9 +81,6 @@ void __init amigaone_setup_arch(void)
phb = amigaone_add_bridge(np);
BUG_ON(phb != 0);
-
- if (ppc_md.progress)
- ppc_md.progress("Linux/PPC "UTS_RELEASE"\n", 0);
}
void __init amigaone_init_IRQ(void)
@@ -146,7 +150,6 @@ static int __init amigaone_probe(void)
*/
cur_cpu_spec->cpu_features &= ~CPU_FTR_NEED_COHERENT;
- ISA_DMA_THRESHOLD = 0x00ffffff;
DMA_MODE_READ = 0x44;
DMA_MODE_WRITE = 0x48;
@@ -160,6 +163,7 @@ define_machine(amigaone) {
.name = "AmigaOne",
.probe = amigaone_probe,
.setup_arch = amigaone_setup_arch,
+ .discover_phbs = amigaone_discover_phbs,
.show_cpuinfo = amigaone_show_cpuinfo,
.init_IRQ = amigaone_init_IRQ,
.restart = amigaone_restart,
diff --git a/arch/powerpc/platforms/book3s/Kconfig b/arch/powerpc/platforms/book3s/Kconfig
new file mode 100644
index 000000000000..34c931592ef0
--- /dev/null
+++ b/arch/powerpc/platforms/book3s/Kconfig
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: GPL-2.0
+config PPC_VAS
+ bool "IBM Virtual Accelerator Switchboard (VAS)"
+ depends on (PPC_POWERNV || PPC_PSERIES) && PPC_64K_PAGES
+ default y
+ help
+ This enables support for IBM Virtual Accelerator Switchboard (VAS).
+
+ VAS devices are found in POWER9-based and later systems, they
+ provide access to accelerator coprocessors such as NX-GZIP and
+ NX-842. This config allows the kernel to use NX-842 accelerators,
+ and user-mode APIs for the NX-GZIP accelerator on POWER9 PowerNV
+ and POWER10 PowerVM platforms.
+
+ If unsure, say "N".
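The PPC_VAS option added here gates the vas-api.c character device introduced below, which exposes NX accelerators to user space via /dev/crypto/nx-gzip. A hedged user-space sketch of the open/ioctl/mmap sequence that the driver's own comment describes (attribute field names follow their uses in this patch; the authoritative definitions are in uapi/asm/vas-api.h):

#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <asm/vas-api.h>	/* VAS_TX_WIN_OPEN, struct vas_tx_win_open_attr */

/* Open an NX-GZIP send window and map its paste address (sketch only). */
static int open_nx_gzip_window(void **paste_addr)
{
	struct vas_tx_win_open_attr attr = { 0 };
	int fd;

	fd = open("/dev/crypto/nx-gzip", O_RDWR);
	if (fd < 0)
		return -1;

	attr.version = 1;	/* the driver rejects any other value */
	attr.vas_id = -1;	/* assumed "any chip"; check the uapi header */

	if (ioctl(fd, VAS_TX_WIN_OPEN, &attr) < 0) {
		close(fd);
		return -1;
	}

	*paste_addr = mmap(NULL, sysconf(_SC_PAGESIZE), PROT_READ | PROT_WRITE,
			   MAP_SHARED, fd, 0);
	if (*paste_addr == MAP_FAILED) {
		close(fd);
		return -1;
	}

	return fd;	/* closing the fd (or process exit) closes the window */
}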
diff --git a/arch/powerpc/platforms/book3s/Makefile b/arch/powerpc/platforms/book3s/Makefile
new file mode 100644
index 000000000000..e790f1910f61
--- /dev/null
+++ b/arch/powerpc/platforms/book3s/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_PPC_VAS) += vas-api.o
diff --git a/arch/powerpc/platforms/book3s/vas-api.c b/arch/powerpc/platforms/book3s/vas-api.c
new file mode 100644
index 000000000000..40f5ae5e1238
--- /dev/null
+++ b/arch/powerpc/platforms/book3s/vas-api.c
@@ -0,0 +1,636 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * VAS user space API for its accelerators (Only NX-GZIP is supported now)
+ * Copyright (C) 2019 Haren Myneni, IBM Corp
+ */
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/cdev.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/kthread.h>
+#include <linux/sched/signal.h>
+#include <linux/mmu_context.h>
+#include <linux/io.h>
+#include <asm/vas.h>
+#include <uapi/asm/vas-api.h>
+
+/*
+ * The driver creates the device node that can be used as follows:
+ * For NX-GZIP
+ *
+ * fd = open("/dev/crypto/nx-gzip", O_RDWR);
+ * rc = ioctl(fd, VAS_TX_WIN_OPEN, &attr);
+ * paste_addr = mmap(NULL, PAGE_SIZE, prot, MAP_SHARED, fd, 0ULL).
+ * vas_copy(&crb, 0, 1);
+ * vas_paste(paste_addr, 0, 1);
+ * close(fd) or exit process to close window.
+ *
+ * where "vas_copy" and "vas_paste" are defined in copy-paste.h.
+ * copy/paste returns to the user space directly. So refer NX hardware
+ * documentation for exact copy/paste usage and completion / error
+ * conditions.
+ */
+
+/*
+ * Wrapper object for the nx-gzip device - there is just one instance of
+ * this node for the whole system.
+ */
+static struct coproc_dev {
+ struct cdev cdev;
+ struct device *device;
+ char *name;
+ dev_t devt;
+ struct class *class;
+ enum vas_cop_type cop_type;
+ const struct vas_user_win_ops *vops;
+} coproc_device;
+
+struct coproc_instance {
+ struct coproc_dev *coproc;
+ struct vas_window *txwin;
+};
+
+static char *coproc_devnode(struct device *dev, umode_t *mode)
+{
+ return kasprintf(GFP_KERNEL, "crypto/%s", dev_name(dev));
+}
+
+/*
+ * Take reference to pid and mm
+ */
+int get_vas_user_win_ref(struct vas_user_win_ref *task_ref)
+{
+ /*
+ * A window opened by a child thread may not be closed when
+ * it exits. So take a reference to its pid and release it
+ * when the window is freed by the parent thread.
+ * Acquire a reference to the task's pid to make sure
+ * pid will not be re-used - needed only for multithread
+ * applications.
+ */
+ task_ref->pid = get_task_pid(current, PIDTYPE_PID);
+ /*
+ * Acquire a reference to the task's mm.
+ */
+ task_ref->mm = get_task_mm(current);
+ if (!task_ref->mm) {
+ put_pid(task_ref->pid);
+ pr_err("VAS: pid(%d): mm_struct is not found\n",
+ current->pid);
+ return -EPERM;
+ }
+
+ mmgrab(task_ref->mm);
+ mmput(task_ref->mm);
+ /*
+ * The process closes its windows during exit. In the case of a
+ * multithreaded application, a child thread can open a window and
+ * exit without closing it, so take a tgid reference until the
+ * window is closed to make sure the tgid is not reused.
+ */
+ task_ref->tgid = find_get_pid(task_tgid_vnr(current));
+
+ return 0;
+}
+
+/*
+ * Successful return must release the task reference with
+ * put_task_struct
+ */
+static bool ref_get_pid_and_task(struct vas_user_win_ref *task_ref,
+ struct task_struct **tskp, struct pid **pidp)
+{
+ struct task_struct *tsk;
+ struct pid *pid;
+
+ pid = task_ref->pid;
+ tsk = get_pid_task(pid, PIDTYPE_PID);
+ if (!tsk) {
+ pid = task_ref->tgid;
+ tsk = get_pid_task(pid, PIDTYPE_PID);
+ /*
+ * Parent thread (tgid) will be closing window when it
+ * exits. So should not get here.
+ */
+ if (WARN_ON_ONCE(!tsk))
+ return false;
+ }
+
+ /* Return if the task is exiting. */
+ if (tsk->flags & PF_EXITING) {
+ put_task_struct(tsk);
+ return false;
+ }
+
+ *tskp = tsk;
+ *pidp = pid;
+
+ return true;
+}
+
+/*
+ * Update the CSB to indicate a translation error.
+ *
+ * User space will be polling on CSB after the request is issued.
+ * If NX can handle the request without any issues, it updates CSB.
+ * Whereas if NX encounters page fault, the kernel will handle the
+ * fault and update CSB with translation error.
+ *
+ * If we are unable to update the CSB, meaning copy_to_user failed due
+ * to an invalid csb_addr, send a signal to the process.
+ */
+void vas_update_csb(struct coprocessor_request_block *crb,
+ struct vas_user_win_ref *task_ref)
+{
+ struct coprocessor_status_block csb;
+ struct kernel_siginfo info;
+ struct task_struct *tsk;
+ void __user *csb_addr;
+ struct pid *pid;
+ int rc;
+
+ /*
+ * NX user space windows can not be opened for task->mm=NULL
+ * and faults will not be generated for kernel requests.
+ */
+ if (WARN_ON_ONCE(!task_ref->mm))
+ return;
+
+ csb_addr = (void __user *)be64_to_cpu(crb->csb_addr);
+
+ memset(&csb, 0, sizeof(csb));
+ csb.cc = CSB_CC_FAULT_ADDRESS;
+ csb.ce = CSB_CE_TERMINATION;
+ csb.cs = 0;
+ csb.count = 0;
+
+ /*
+ * NX operates on and returns data in BE format, as defined by the CRB
+ * struct. So save fault_storage_addr in BE, just as NX pastes it into
+ * the FIFO, and expect user space to convert it to CPU format.
+ */
+ csb.address = crb->stamp.nx.fault_storage_addr;
+ csb.flags = 0;
+
+ /*
+ * Process closes send window after all pending NX requests are
+ * completed. In multi-thread applications, a child thread can
+ * open a window and can exit without closing it. May be some
+ * requests are pending or this window can be used by other
+ * threads later. We should handle faults if NX encounters
+ * pages faults on these requests. Update CSB with translation
+ * error and fault address. If csb_addr passed by user space is
+ * invalid, send SEGV signal to pid saved in window. If the
+ * child thread is not running, send the signal to tgid.
+ * Parent thread (tgid) will close this window upon its exit.
+ *
+ * pid and mm references are taken when window is opened by
+ * process (pid). So tgid is used only when child thread opens
+ * a window and exits without closing it.
+ */
+
+ if (!ref_get_pid_and_task(task_ref, &tsk, &pid))
+ return;
+
+ kthread_use_mm(task_ref->mm);
+ rc = copy_to_user(csb_addr, &csb, sizeof(csb));
+ /*
+ * User space polls on csb.flags (first byte). So add barrier
+ * then copy first byte with csb flags update.
+ */
+ if (!rc) {
+ csb.flags = CSB_V;
+ /* Make sure update to csb.flags is visible now */
+ smp_mb();
+ rc = copy_to_user(csb_addr, &csb, sizeof(u8));
+ }
+ kthread_unuse_mm(task_ref->mm);
+ put_task_struct(tsk);
+
+ /* Success */
+ if (!rc)
+ return;
+
+
+ pr_debug("Invalid CSB address 0x%p signalling pid(%d)\n",
+ csb_addr, pid_vnr(pid));
+
+ clear_siginfo(&info);
+ info.si_signo = SIGSEGV;
+ info.si_errno = EFAULT;
+ info.si_code = SEGV_MAPERR;
+ info.si_addr = csb_addr;
+ /*
+ * process will be polling on csb.flags after request is sent to
+ * NX. So generally CSB update should not fail except when an
+ * application passes an invalid csb_addr. An error message is
+ * displayed, and it is left to user space whether to ignore or
+ * handle this signal.
+ */
+ rcu_read_lock();
+ rc = kill_pid_info(SIGSEGV, &info, pid);
+ rcu_read_unlock();
+
+ pr_devel("%s(): pid %d kill_proc_info() rc %d\n", __func__,
+ pid_vnr(pid), rc);
+}
+
+void vas_dump_crb(struct coprocessor_request_block *crb)
+{
+ struct data_descriptor_entry *dde;
+ struct nx_fault_stamp *nx;
+
+ dde = &crb->source;
+ pr_devel("SrcDDE: addr 0x%llx, len %d, count %d, idx %d, flags %d\n",
+ be64_to_cpu(dde->address), be32_to_cpu(dde->length),
+ dde->count, dde->index, dde->flags);
+
+ dde = &crb->target;
+ pr_devel("TgtDDE: addr 0x%llx, len %d, count %d, idx %d, flags %d\n",
+ be64_to_cpu(dde->address), be32_to_cpu(dde->length),
+ dde->count, dde->index, dde->flags);
+
+ nx = &crb->stamp.nx;
+ pr_devel("NX Stamp: PSWID 0x%x, FSA 0x%llx, flags 0x%x, FS 0x%x\n",
+ be32_to_cpu(nx->pswid),
+ be64_to_cpu(crb->stamp.nx.fault_storage_addr),
+ nx->flags, nx->fault_status);
+}
+
+static int coproc_open(struct inode *inode, struct file *fp)
+{
+ struct coproc_instance *cp_inst;
+
+ cp_inst = kzalloc(sizeof(*cp_inst), GFP_KERNEL);
+ if (!cp_inst)
+ return -ENOMEM;
+
+ cp_inst->coproc = container_of(inode->i_cdev, struct coproc_dev,
+ cdev);
+ fp->private_data = cp_inst;
+
+ return 0;
+}
+
+static int coproc_ioc_tx_win_open(struct file *fp, unsigned long arg)
+{
+ void __user *uptr = (void __user *)arg;
+ struct vas_tx_win_open_attr uattr;
+ struct coproc_instance *cp_inst;
+ struct vas_window *txwin;
+ int rc;
+
+ cp_inst = fp->private_data;
+
+ /*
+ * Only one window per file descriptor
+ */
+ if (cp_inst->txwin)
+ return -EEXIST;
+
+ rc = copy_from_user(&uattr, uptr, sizeof(uattr));
+ if (rc) {
+ pr_err("%s(): copy_from_user() returns %d\n", __func__, rc);
+ return -EFAULT;
+ }
+
+ if (uattr.version != 1) {
+ pr_err("Invalid window open API version\n");
+ return -EINVAL;
+ }
+
+ if (!cp_inst->coproc->vops || !cp_inst->coproc->vops->open_win) {
+ pr_err("VAS API is not registered\n");
+ return -EACCES;
+ }
+
+ txwin = cp_inst->coproc->vops->open_win(uattr.vas_id, uattr.flags,
+ cp_inst->coproc->cop_type);
+ if (IS_ERR(txwin)) {
+ pr_err("%s() VAS window open failed, %ld\n", __func__,
+ PTR_ERR(txwin));
+ return PTR_ERR(txwin);
+ }
+
+ mutex_init(&txwin->task_ref.mmap_mutex);
+ cp_inst->txwin = txwin;
+
+ return 0;
+}
+
+static int coproc_release(struct inode *inode, struct file *fp)
+{
+ struct coproc_instance *cp_inst = fp->private_data;
+ int rc;
+
+ if (cp_inst->txwin) {
+ if (cp_inst->coproc->vops &&
+ cp_inst->coproc->vops->close_win) {
+ rc = cp_inst->coproc->vops->close_win(cp_inst->txwin);
+ if (rc)
+ return rc;
+ }
+ cp_inst->txwin = NULL;
+ }
+
+ kfree(cp_inst);
+ fp->private_data = NULL;
+
+ /*
+ * We don't know here whether the user has other receive windows
+ * open, so we can't really call clear_thread_tidr().
+ * So, once the process calls set_thread_tidr(), the
+ * TIDR value sticks around until the process exits, resulting
+ * in an extra copy in restore_sprs().
+ */
+
+ return 0;
+}
+
+/*
+ * If the executed instruction that caused the fault was a paste, then
+ * clear regs CR0[EQ], advance NIP, and return 0. Else return error code.
+ */
+static int do_fail_paste(void)
+{
+ struct pt_regs *regs = current->thread.regs;
+ u32 instword;
+
+ if (WARN_ON_ONCE(!regs))
+ return -EINVAL;
+
+ if (WARN_ON_ONCE(!user_mode(regs)))
+ return -EINVAL;
+
+ /*
+ * If we couldn't translate the instruction, the driver should
+ * return success without handling the fault; it will be retried
+ * or the instruction fetch will fault.
+ */
+ if (get_user(instword, (u32 __user *)(regs->nip)))
+ return -EAGAIN;
+
+ /*
+ * Not a paste instruction; the driver may fail the fault.
+ */
+ if ((instword & PPC_INST_PASTE_MASK) != PPC_INST_PASTE)
+ return -ENOENT;
+
+ regs->ccr &= ~0xe0000000; /* Clear CR0[0-2] to fail paste */
+ regs_add_return_ip(regs, 4); /* Emulate the paste */
+
+ return 0;
+}
+
+/*
+ * This fault handler is invoked when the core generates a page fault
+ * on the paste address. This happens if the kernel closes the window
+ * in the hypervisor (on pseries) due to a lost credit, or if the
+ * paste address is not mapped.
+ */
+static vm_fault_t vas_mmap_fault(struct vm_fault *vmf)
+{
+ struct vm_area_struct *vma = vmf->vma;
+ struct file *fp = vma->vm_file;
+ struct coproc_instance *cp_inst = fp->private_data;
+ struct vas_window *txwin;
+ vm_fault_t fault;
+ u64 paste_addr;
+ int ret;
+
+ /*
+ * The window is not open; this fault is unexpected.
+ */
+ if (!cp_inst || !cp_inst->txwin) {
+ pr_err("%s(): Unexpected fault on paste address with TX window closed\n",
+ __func__);
+ return VM_FAULT_SIGBUS;
+ }
+
+ txwin = cp_inst->txwin;
+ /*
+ * When the LPAR loses credits due to core removal or during
+ * migration, the existing mappings for the current paste addresses
+ * are invalidated and the windows are set inactive (zap_page_range
+ * in reconfig_close_windows()).
+ * A new mapping is done later, after migration completes or new
+ * credits become available. So faults continue to be received if
+ * user space issues NX requests.
+ */
+ if (txwin->task_ref.vma != vmf->vma) {
+ pr_err("%s(): No previous mapping with paste address\n",
+ __func__);
+ return VM_FAULT_SIGBUS;
+ }
+
+ mutex_lock(&txwin->task_ref.mmap_mutex);
+ /*
+ * The window may be inactive due to a lost credit (e.g. core
+ * removal with DLPAR). If the window is active again when
+ * the credit is available, map the new paste address at the
+ * window virtual address.
+ */
+ if (txwin->status == VAS_WIN_ACTIVE) {
+ paste_addr = cp_inst->coproc->vops->paste_addr(txwin);
+ if (paste_addr) {
+ fault = vmf_insert_pfn(vma, vma->vm_start,
+ (paste_addr >> PAGE_SHIFT));
+ mutex_unlock(&txwin->task_ref.mmap_mutex);
+ return fault;
+ }
+ }
+ mutex_unlock(&txwin->task_ref.mmap_mutex);
+
+ /*
+ * This fault was received because the actual window was closed,
+ * which can happen during migration or when credits are lost.
+ * Since there is no mapping, return the paste instruction failure
+ * to user space.
+ */
+ ret = do_fail_paste();
+ /*
+ * User space can retry several times until success (needed for
+ * migration), fall back to SW compression, or manage with the
+ * existing open windows if any are available.
+ * By looking at the sysfs interface, it can determine whether these
+ * failures are occurring during migration or core removal:
+ * nr_used_credits > nr_total_credits when credits were lost.
+ */
+ if (!ret || (ret == -EAGAIN))
+ return VM_FAULT_NOPAGE;
+
+ return VM_FAULT_SIGBUS;
+}
+
+static const struct vm_operations_struct vas_vm_ops = {
+ .fault = vas_mmap_fault,
+};
+
+static int coproc_mmap(struct file *fp, struct vm_area_struct *vma)
+{
+ struct coproc_instance *cp_inst = fp->private_data;
+ struct vas_window *txwin;
+ unsigned long pfn;
+ u64 paste_addr;
+ pgprot_t prot;
+ int rc;
+
+ txwin = cp_inst->txwin;
+
+ if ((vma->vm_end - vma->vm_start) > PAGE_SIZE) {
+ pr_debug("%s(): size 0x%zx, PAGE_SIZE 0x%zx\n", __func__,
+ (vma->vm_end - vma->vm_start), PAGE_SIZE);
+ return -EINVAL;
+ }
+
+ /* Ensure instance has an open send window */
+ if (!txwin) {
+ pr_err("%s(): No send window open?\n", __func__);
+ return -EINVAL;
+ }
+
+ if (!cp_inst->coproc->vops || !cp_inst->coproc->vops->paste_addr) {
+ pr_err("%s(): VAS API is not registered\n", __func__);
+ return -EACCES;
+ }
+
+ /*
+ * The initial mmap() is done after the window is opened
+ * with the ioctl. But before mmap(), this window can be closed in
+ * the hypervisor due to a lost credit (core removal on pseries).
+ * So if the window is not active, fail mmap() with -EACCES and
+ * expect user space to reissue mmap() when the window is active
+ * again, or to open a new window when a credit is available.
+ * mmap_mutex protects the paste address mmap() against DLPAR
+ * close/open events and allows mmap() only while the window is
+ * active.
+ */
+ mutex_lock(&txwin->task_ref.mmap_mutex);
+ if (txwin->status != VAS_WIN_ACTIVE) {
+ pr_err("%s(): Window is not active\n", __func__);
+ rc = -EACCES;
+ goto out;
+ }
+
+ paste_addr = cp_inst->coproc->vops->paste_addr(txwin);
+ if (!paste_addr) {
+ pr_err("%s(): Window paste address failed\n", __func__);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ pfn = paste_addr >> PAGE_SHIFT;
+
+ /* flags, page_prot from cxl_mmap(), except we want cacheable */
+ vma->vm_flags |= VM_IO | VM_PFNMAP;
+ vma->vm_page_prot = pgprot_cached(vma->vm_page_prot);
+
+ prot = __pgprot(pgprot_val(vma->vm_page_prot) | _PAGE_DIRTY);
+
+ rc = remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
+ vma->vm_end - vma->vm_start, prot);
+
+ pr_devel("%s(): paste addr %llx at %lx, rc %d\n", __func__,
+ paste_addr, vma->vm_start, rc);
+
+ txwin->task_ref.vma = vma;
+ vma->vm_ops = &vas_vm_ops;
+
+out:
+ mutex_unlock(&txwin->task_ref.mmap_mutex);
+ return rc;
+}
+
+static long coproc_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
+{
+ switch (cmd) {
+ case VAS_TX_WIN_OPEN:
+ return coproc_ioc_tx_win_open(fp, arg);
+ default:
+ return -EINVAL;
+ }
+}
+
+static struct file_operations coproc_fops = {
+ .open = coproc_open,
+ .release = coproc_release,
+ .mmap = coproc_mmap,
+ .unlocked_ioctl = coproc_ioctl,
+};
+
+/*
+ * Only the nx-gzip coprocessor type is supported now, but this API
+ * code can be extended to other coprocessor types later.
+ */
+int vas_register_coproc_api(struct module *mod, enum vas_cop_type cop_type,
+ const char *name,
+ const struct vas_user_win_ops *vops)
+{
+ int rc = -EINVAL;
+ dev_t devno;
+
+ rc = alloc_chrdev_region(&coproc_device.devt, 1, 1, name);
+ if (rc) {
+ pr_err("Unable to allocate coproc major number: %i\n", rc);
+ return rc;
+ }
+
+ pr_devel("%s device allocated, dev [%i,%i]\n", name,
+ MAJOR(coproc_device.devt), MINOR(coproc_device.devt));
+
+ coproc_device.class = class_create(mod, name);
+ if (IS_ERR(coproc_device.class)) {
+ rc = PTR_ERR(coproc_device.class);
+ pr_err("Unable to create %s class %d\n", name, rc);
+ goto err_class;
+ }
+ coproc_device.class->devnode = coproc_devnode;
+ coproc_device.cop_type = cop_type;
+ coproc_device.vops = vops;
+
+ coproc_fops.owner = mod;
+ cdev_init(&coproc_device.cdev, &coproc_fops);
+
+ devno = MKDEV(MAJOR(coproc_device.devt), 0);
+ rc = cdev_add(&coproc_device.cdev, devno, 1);
+ if (rc) {
+ pr_err("cdev_add() failed %d\n", rc);
+ goto err_cdev;
+ }
+
+ coproc_device.device = device_create(coproc_device.class, NULL,
+ devno, NULL, name, MINOR(devno));
+ if (IS_ERR(coproc_device.device)) {
+ rc = PTR_ERR(coproc_device.device);
+ pr_err("Unable to create coproc-%d %d\n", MINOR(devno), rc);
+ goto err;
+ }
+
+ pr_devel("%s: Added dev [%d,%d]\n", __func__, MAJOR(devno),
+ MINOR(devno));
+
+ return 0;
+
+err:
+ cdev_del(&coproc_device.cdev);
+err_cdev:
+ class_destroy(coproc_device.class);
+err_class:
+ unregister_chrdev_region(coproc_device.devt, 1);
+ return rc;
+}
+
+void vas_unregister_coproc_api(void)
+{
+ dev_t devno;
+
+ cdev_del(&coproc_device.cdev);
+ devno = MKDEV(MAJOR(coproc_device.devt), 0);
+ device_destroy(coproc_device.class, devno);
+
+ class_destroy(coproc_device.class);
+ unregister_chrdev_region(coproc_device.devt, 1);
+}
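
To make the flow implemented by coproc_ioc_tx_win_open() and coproc_mmap() above easier to follow, here is a minimal user-space sketch of the nx-gzip usage model described in Documentation/powerpc/vas-api.rst. It assumes the uapi header asm/vas-api.h and the /dev/crypto/nx-gzip device node; the helper name and error handling are illustrative only and are not part of this patch.

	/* Minimal user-space sketch; names marked "assumed" are not defined
	 * by this patch. */
	#include <fcntl.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <sys/mman.h>
	#include <unistd.h>
	#include <asm/vas-api.h>	/* assumed uapi header: VAS_TX_WIN_OPEN */

	static void *open_nx_gzip_window(int *fdp)
	{
		struct vas_tx_win_open_attr attr;
		void *paste_addr;
		int fd;

		fd = open("/dev/crypto/nx-gzip", O_RDWR); /* assumed device node */
		if (fd < 0)
			return NULL;

		memset(&attr, 0, sizeof(attr));
		attr.version = 1;	/* only version 1 is accepted above */
		attr.vas_id = -1;	/* default VAS instance */

		/* One send window per file descriptor (coproc_ioc_tx_win_open()) */
		if (ioctl(fd, VAS_TX_WIN_OPEN, &attr) < 0) {
			close(fd);
			return NULL;
		}

		/*
		 * Map the paste address; coproc_mmap() rejects lengths larger
		 * than PAGE_SIZE and returns -EACCES while the window is
		 * inactive (lost credits).
		 */
		paste_addr = mmap(NULL, getpagesize(), PROT_READ | PROT_WRITE,
				  MAP_SHARED, fd, 0);
		if (paste_addr == MAP_FAILED) {
			close(fd);
			return NULL;
		}

		*fdp = fd;
		return paste_addr;
	}

After this, the application builds a CRB and issues the copy/paste instruction pair to the mapped address. If the paste fails (CR0[EQ] clear, as emulated by do_fail_paste() above), it can retry or fall back to software compression, and it polls the first byte of the CSB for CSB_V before reading the completion code, which is why vas_update_csb() writes that byte last.
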
diff --git a/arch/powerpc/platforms/cell/Kconfig b/arch/powerpc/platforms/cell/Kconfig
index 0f7c8241912b..34669b060f36 100644
--- a/arch/powerpc/platforms/cell/Kconfig
+++ b/arch/powerpc/platforms/cell/Kconfig
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
config PPC_CELL
+ select PPC_64S_HASH_MMU if PPC64
bool
config PPC_CELL_COMMON
@@ -8,7 +9,7 @@ config PPC_CELL_COMMON
select PPC_DCR_MMIO
select PPC_INDIRECT_PIO
select PPC_INDIRECT_MMIO
- select PPC_NATIVE
+ select PPC_HASH_MMU_NATIVE
select PPC_RTAS
select IRQ_EDGE_EOI_HANDLER
@@ -35,6 +36,7 @@ config PPC_IBM_CELL_BLADE
config AXON_MSI
bool
depends on PPC_IBM_CELL_BLADE && PCI_MSI
+ select IRQ_DOMAIN_NOMAP
default y
menu "Cell Broadband Engine options"
@@ -44,6 +46,7 @@ config SPU_FS
tristate "SPU file system"
default m
depends on PPC_CELL
+ depends on COREDUMP
select SPU_BASE
help
The SPU file system is used to access Synergistic Processing
@@ -99,8 +102,3 @@ config CBE_CPUFREQ_SPU_GOVERNOR
the minimal possible frequency.
endmenu
-
-config OPROFILE_CELL
- def_bool y
- depends on PPC_CELL_NATIVE && (OPROFILE = m || OPROFILE = y) && SPU_BASE
-
diff --git a/arch/powerpc/platforms/cell/Makefile b/arch/powerpc/platforms/cell/Makefile
index 10064a33ca96..7ea6692f67e2 100644
--- a/arch/powerpc/platforms/cell/Makefile
+++ b/arch/powerpc/platforms/cell/Makefile
@@ -19,7 +19,6 @@ spu-priv1-$(CONFIG_PPC_CELL_COMMON) += spu_priv1_mmio.o
spu-manage-$(CONFIG_PPC_CELL_COMMON) += spu_manage.o
obj-$(CONFIG_SPU_BASE) += spu_callbacks.o spu_base.o \
- spu_notify.o \
spu_syscalls.o \
$(spu-priv1-y) \
$(spu-manage-y) \
diff --git a/arch/powerpc/platforms/cell/axon_msi.c b/arch/powerpc/platforms/cell/axon_msi.c
index 57c4e0e86c88..5b012abca773 100644
--- a/arch/powerpc/platforms/cell/axon_msi.c
+++ b/arch/powerpc/platforms/cell/axon_msi.c
@@ -12,11 +12,11 @@
#include <linux/export.h>
#include <linux/of_platform.h>
#include <linux/slab.h>
+#include <linux/debugfs.h>
+#include <linux/of_irq.h>
-#include <asm/debugfs.h>
#include <asm/dcr.h>
#include <asm/machdep.h>
-#include <asm/prom.h>
#include "cell.h"
@@ -199,7 +199,6 @@ out_error:
static int setup_msi_msg_address(struct pci_dev *dev, struct msi_msg *msg)
{
struct device_node *dn;
- struct msi_desc *entry;
int len;
const u32 *prop;
@@ -209,10 +208,8 @@ static int setup_msi_msg_address(struct pci_dev *dev, struct msi_msg *msg)
return -ENODEV;
}
- entry = first_pci_msi_entry(dev);
-
for (; dn; dn = of_get_next_parent(dn)) {
- if (entry->msi_attrib.is_64) {
+ if (!dev->no_64bit_msi) {
prop = of_get_property(dn, "msi-address-64", &len);
if (prop)
break;
@@ -226,6 +223,7 @@ static int setup_msi_msg_address(struct pci_dev *dev, struct msi_msg *msg)
if (!prop) {
dev_dbg(&dev->dev,
"axon_msi: no msi-address-(32|64) properties found\n");
+ of_node_put(dn);
return -ENOENT;
}
@@ -265,7 +263,7 @@ static int axon_msi_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
if (rc)
return rc;
- for_each_pci_msi_entry(entry, dev) {
+ msi_for_each_desc(entry, &dev->dev, MSI_DESC_NOTASSOCIATED) {
virq = irq_create_direct_mapping(msic->irq_domain);
if (!virq) {
dev_warn(&dev->dev,
@@ -288,10 +286,7 @@ static void axon_msi_teardown_msi_irqs(struct pci_dev *dev)
dev_dbg(&dev->dev, "axon_msi: tearing down msi irqs\n");
- for_each_pci_msi_entry(entry, dev) {
- if (!entry->irq)
- continue;
-
+ msi_for_each_desc(entry, &dev->dev, MSI_DESC_ASSOCIATED) {
irq_set_msi_desc(entry->irq, NULL);
irq_dispose_mapping(entry->irq);
}
@@ -480,10 +475,6 @@ void axon_msi_debug_setup(struct device_node *dn, struct axon_msic *msic)
snprintf(name, sizeof(name), "msic_%d", of_node_to_nid(dn));
- if (!debugfs_create_file(name, 0600, powerpc_debugfs_root,
- msic, &fops_msic)) {
- pr_devel("axon_msi: debugfs_create_file failed!\n");
- return;
- }
+ debugfs_create_file(name, 0600, arch_debugfs_dir, msic, &fops_msic);
}
#endif /* DEBUG */
diff --git a/arch/powerpc/platforms/cell/cbe_powerbutton.c b/arch/powerpc/platforms/cell/cbe_powerbutton.c
index bda589dfb051..a3ee397486f6 100644
--- a/arch/powerpc/platforms/cell/cbe_powerbutton.c
+++ b/arch/powerpc/platforms/cell/cbe_powerbutton.c
@@ -9,9 +9,9 @@
#include <linux/input.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <asm/pmi.h>
-#include <asm/prom.h>
static struct input_dev *button_dev;
static struct platform_device *button_pdev;
diff --git a/arch/powerpc/platforms/cell/cbe_regs.c b/arch/powerpc/platforms/cell/cbe_regs.c
index 0be212a27254..fb4023f9ea6b 100644
--- a/arch/powerpc/platforms/cell/cbe_regs.c
+++ b/arch/powerpc/platforms/cell/cbe_regs.c
@@ -10,12 +10,12 @@
#include <linux/percpu.h>
#include <linux/types.h>
#include <linux/export.h>
+#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
+#include <linux/pgtable.h>
#include <asm/io.h>
-#include <asm/pgtable.h>
-#include <asm/prom.h>
#include <asm/ptrace.h>
#include <asm/cell-regs.h>
@@ -23,7 +23,7 @@
* Current implementation uses "cpu" nodes. We build our own mapping
* array of cpu numbers to cpu nodes locally for now to allow interrupt
* time code to have a fast path rather than call of_get_cpu_node(). If
- * we implement cpu hotplug, we'll have to install an appropriate norifier
+ * we implement cpu hotplug, we'll have to install an appropriate notifier
* in order to release references to the cpu going away
*/
static struct cbe_regs_map
@@ -165,7 +165,7 @@ u32 cbe_node_to_cpu(int node)
}
EXPORT_SYMBOL_GPL(cbe_node_to_cpu);
-static struct device_node *cbe_get_be_node(int cpu_id)
+static struct device_node *__init cbe_get_be_node(int cpu_id)
{
struct device_node *np;
@@ -182,9 +182,16 @@ static struct device_node *cbe_get_be_node(int cpu_id)
if (WARN_ON_ONCE(!cpu_handle))
return np;
- for (i=0; i<len; i++)
- if (of_find_node_by_phandle(cpu_handle[i]) == of_get_cpu_node(cpu_id, NULL))
+ for (i = 0; i < len; i++) {
+ struct device_node *ch_np = of_find_node_by_phandle(cpu_handle[i]);
+ struct device_node *ci_np = of_get_cpu_node(cpu_id, NULL);
+
+ of_node_put(ch_np);
+ of_node_put(ci_np);
+
+ if (ch_np == ci_np)
return np;
+ }
}
return NULL;
@@ -193,21 +200,30 @@ static struct device_node *cbe_get_be_node(int cpu_id)
static void __init cbe_fill_regs_map(struct cbe_regs_map *map)
{
if(map->be_node) {
- struct device_node *be, *np;
+ struct device_node *be, *np, *parent_np;
be = map->be_node;
- for_each_node_by_type(np, "pervasive")
- if (of_get_parent(np) == be)
+ for_each_node_by_type(np, "pervasive") {
+ parent_np = of_get_parent(np);
+ if (parent_np == be)
map->pmd_regs = of_iomap(np, 0);
+ of_node_put(parent_np);
+ }
- for_each_node_by_type(np, "CBEA-Internal-Interrupt-Controller")
- if (of_get_parent(np) == be)
+ for_each_node_by_type(np, "CBEA-Internal-Interrupt-Controller") {
+ parent_np = of_get_parent(np);
+ if (parent_np == be)
map->iic_regs = of_iomap(np, 2);
+ of_node_put(parent_np);
+ }
- for_each_node_by_type(np, "mic-tm")
- if (of_get_parent(np) == be)
+ for_each_node_by_type(np, "mic-tm") {
+ parent_np = of_get_parent(np);
+ if (parent_np == be)
map->mic_tm_regs = of_iomap(np, 0);
+ of_node_put(parent_np);
+ }
} else {
struct device_node *cpu;
/* That hack must die die die ! */
@@ -261,7 +277,8 @@ void __init cbe_regs_init(void)
of_node_put(cpu);
return;
}
- map->cpu_node = cpu;
+ of_node_put(map->cpu_node);
+ map->cpu_node = of_node_get(cpu);
for_each_possible_cpu(i) {
struct cbe_thread_map *thread = &cbe_thread_map[i];
diff --git a/arch/powerpc/platforms/cell/cbe_thermal.c b/arch/powerpc/platforms/cell/cbe_thermal.c
index 2ece77f49bc3..2f45428e32c8 100644
--- a/arch/powerpc/platforms/cell/cbe_thermal.c
+++ b/arch/powerpc/platforms/cell/cbe_thermal.c
@@ -39,7 +39,6 @@
#include <linux/stringify.h>
#include <asm/spu.h>
#include <asm/io.h>
-#include <asm/prom.h>
#include <asm/cell-regs.h>
#include "spu_priv1_mmio.h"
@@ -255,7 +254,7 @@ static struct attribute *spu_attributes[] = {
NULL,
};
-static struct attribute_group spu_attribute_group = {
+static const struct attribute_group spu_attribute_group = {
.name = "thermal",
.attrs = spu_attributes,
};
diff --git a/arch/powerpc/platforms/cell/cpufreq_spudemand.c b/arch/powerpc/platforms/cell/cpufreq_spudemand.c
index 55b31eadb3c8..ca7849e113d7 100644
--- a/arch/powerpc/platforms/cell/cpufreq_spudemand.c
+++ b/arch/powerpc/platforms/cell/cpufreq_spudemand.c
@@ -126,30 +126,8 @@ static struct cpufreq_governor spu_governor = {
.stop = spu_gov_stop,
.owner = THIS_MODULE,
};
-
-/*
- * module init and destoy
- */
-
-static int __init spu_gov_init(void)
-{
- int ret;
-
- ret = cpufreq_register_governor(&spu_governor);
- if (ret)
- printk(KERN_ERR "registration of governor failed\n");
- return ret;
-}
-
-static void __exit spu_gov_exit(void)
-{
- cpufreq_unregister_governor(&spu_governor);
-}
-
-
-module_init(spu_gov_init);
-module_exit(spu_gov_exit);
+cpufreq_governor_init(spu_governor);
+cpufreq_governor_exit(spu_governor);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christian Krafft <krafft@de.ibm.com>");
-
diff --git a/arch/powerpc/platforms/cell/interrupt.c b/arch/powerpc/platforms/cell/interrupt.c
index 5927ead4aed2..03ee8152ee97 100644
--- a/arch/powerpc/platforms/cell/interrupt.c
+++ b/arch/powerpc/platforms/cell/interrupt.c
@@ -18,15 +18,16 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
+#include <linux/irqdomain.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/types.h>
#include <linux/ioport.h>
#include <linux/kernel_stat.h>
+#include <linux/pgtable.h>
+#include <linux/of_address.h>
#include <asm/io.h>
-#include <asm/pgtable.h>
-#include <asm/prom.h>
#include <asm/ptrace.h>
#include <asm/machdep.h>
#include <asm/cell-regs.h>
@@ -106,13 +107,9 @@ static void iic_ioexc_cascade(struct irq_desc *desc)
out_be64(&node_iic->iic_is, ack);
/* handle them */
for (cascade = 63; cascade >= 0; cascade--)
- if (bits & (0x8000000000000000UL >> cascade)) {
- unsigned int cirq =
- irq_linear_revmap(iic_host,
+ if (bits & (0x8000000000000000UL >> cascade))
+ generic_handle_domain_irq(iic_host,
base | cascade);
- if (cirq)
- generic_handle_irq(cirq);
- }
/* post-ack level interrupts */
ack = bits & ~IIC_ISR_EDGE_MASK;
if (ack)
diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
index ca9ffc1c8685..8c7133039566 100644
--- a/arch/powerpc/platforms/cell/iommu.c
+++ b/arch/powerpc/platforms/cell/iommu.c
@@ -12,8 +12,10 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
#include <linux/notifier.h>
#include <linux/of.h>
+#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/slab.h>
#include <linux/memblock.h>
@@ -253,7 +255,7 @@ static irqreturn_t ioc_interrupt(int irq, void *data)
return IRQ_HANDLED;
}
-static int cell_iommu_find_ioc(int nid, unsigned long *base)
+static int __init cell_iommu_find_ioc(int nid, unsigned long *base)
{
struct device_node *np;
struct resource r;
@@ -293,7 +295,7 @@ static int cell_iommu_find_ioc(int nid, unsigned long *base)
return -ENODEV;
}
-static void cell_iommu_setup_stab(struct cbe_iommu *iommu,
+static void __init cell_iommu_setup_stab(struct cbe_iommu *iommu,
unsigned long dbase, unsigned long dsize,
unsigned long fbase, unsigned long fsize)
{
@@ -313,7 +315,7 @@ static void cell_iommu_setup_stab(struct cbe_iommu *iommu,
memset(iommu->stab, 0, stab_size);
}
-static unsigned long *cell_iommu_alloc_ptab(struct cbe_iommu *iommu,
+static unsigned long *__init cell_iommu_alloc_ptab(struct cbe_iommu *iommu,
unsigned long base, unsigned long size, unsigned long gap_base,
unsigned long gap_size, unsigned long page_shift)
{
@@ -373,7 +375,7 @@ static unsigned long *cell_iommu_alloc_ptab(struct cbe_iommu *iommu,
return ptab;
}
-static void cell_iommu_enable_hardware(struct cbe_iommu *iommu)
+static void __init cell_iommu_enable_hardware(struct cbe_iommu *iommu)
{
int ret;
unsigned long reg, xlate_base;
@@ -413,7 +415,7 @@ static void cell_iommu_enable_hardware(struct cbe_iommu *iommu)
out_be64(iommu->cmd_regs + IOC_IOCmd_Cfg, reg);
}
-static void cell_iommu_setup_hardware(struct cbe_iommu *iommu,
+static void __init cell_iommu_setup_hardware(struct cbe_iommu *iommu,
unsigned long base, unsigned long size)
{
cell_iommu_setup_stab(iommu, base, size, 0, 0);
@@ -486,7 +488,8 @@ cell_iommu_setup_window(struct cbe_iommu *iommu, struct device_node *np,
window->table.it_size = size >> window->table.it_page_shift;
window->table.it_ops = &cell_iommu_ops;
- iommu_init_table(&window->table, iommu->nid, 0, 0);
+ if (!iommu_init_table(&window->table, iommu->nid, 0, 0))
+ panic("Failed to initialize iommu table");
pr_debug("\tioid %d\n", window->ioid);
pr_debug("\tblocksize %ld\n", window->table.it_blocksize);
@@ -581,7 +584,7 @@ static int cell_of_bus_notify(struct notifier_block *nb, unsigned long action,
{
struct device *dev = data;
- /* We are only intereted in device addition */
+ /* We are only interested in device addition */
if (action != BUS_NOTIFY_ADD_DEVICE)
return 0;
@@ -717,8 +720,10 @@ static int __init cell_iommu_init_disabled(void)
cell_disable_iommus();
/* If we have no Axon, we set up the spider DMA magic offset */
- if (of_find_node_by_name(NULL, "axon") == NULL)
+ np = of_find_node_by_name(NULL, "axon");
+ if (!np)
cell_dma_nommu_offset = SPIDER_DMA_OFFSET;
+ of_node_put(np);
/* Now we need to check to see where the memory is mapped
* in PCI space. We assume that all busses use the same dma
@@ -857,7 +862,7 @@ static bool cell_pci_iommu_bypass_supported(struct pci_dev *pdev, u64 mask)
cell_iommu_get_fixed_address(&pdev->dev) != OF_BAD_ADDR;
}
-static void insert_16M_pte(unsigned long addr, unsigned long *ptab,
+static void __init insert_16M_pte(unsigned long addr, unsigned long *ptab,
unsigned long base_pte)
{
unsigned long segment, offset;
@@ -872,7 +877,7 @@ static void insert_16M_pte(unsigned long addr, unsigned long *ptab,
ptab[offset] = base_pte | (__pa(addr) & CBE_IOPTE_RPN_Mask);
}
-static void cell_iommu_setup_fixed_ptab(struct cbe_iommu *iommu,
+static void __init cell_iommu_setup_fixed_ptab(struct cbe_iommu *iommu,
struct device_node *np, unsigned long dbase, unsigned long dsize,
unsigned long fbase, unsigned long fsize)
{
@@ -943,7 +948,7 @@ static int __init cell_iommu_fixed_mapping_init(void)
fbase = max(fbase, dbase + dsize);
}
- fbase = _ALIGN_UP(fbase, 1 << IO_SEGMENT_SHIFT);
+ fbase = ALIGN(fbase, 1 << IO_SEGMENT_SHIFT);
fsize = memblock_phys_mem_size();
if ((fbase + fsize) <= 0x800000000ul)
@@ -963,8 +968,8 @@ static int __init cell_iommu_fixed_mapping_init(void)
hend = hbase + htab_size_bytes;
/* The window must start and end on a segment boundary */
- if ((hbase != _ALIGN_UP(hbase, 1 << IO_SEGMENT_SHIFT)) ||
- (hend != _ALIGN_UP(hend, 1 << IO_SEGMENT_SHIFT))) {
+ if ((hbase != ALIGN(hbase, 1 << IO_SEGMENT_SHIFT)) ||
+ (hend != ALIGN(hend, 1 << IO_SEGMENT_SHIFT))) {
pr_debug("iommu: hash window not segment aligned\n");
return -1;
}
@@ -976,6 +981,7 @@ static int __init cell_iommu_fixed_mapping_init(void)
if (hbase < dbase || (hend > (dbase + dsize))) {
pr_debug("iommu: hash window doesn't fit in"
"real DMA window\n");
+ of_node_put(np);
return -1;
}
}
diff --git a/arch/powerpc/platforms/cell/pervasive.c b/arch/powerpc/platforms/cell/pervasive.c
index 6af3a6e600a7..58d967ee38b3 100644
--- a/arch/powerpc/platforms/cell/pervasive.c
+++ b/arch/powerpc/platforms/cell/pervasive.c
@@ -15,16 +15,16 @@
#include <linux/percpu.h>
#include <linux/types.h>
#include <linux/kallsyms.h>
+#include <linux/pgtable.h>
#include <asm/io.h>
#include <asm/machdep.h>
-#include <asm/prom.h>
-#include <asm/pgtable.h>
#include <asm/reg.h>
#include <asm/cell-regs.h>
#include <asm/cpu_has_feature.h>
#include "pervasive.h"
+#include "ras.h"
static void cbe_power_save(void)
{
@@ -77,6 +77,7 @@ static int cbe_system_reset_exception(struct pt_regs *regs)
switch (regs->msr & SRR1_WAKEMASK) {
case SRR1_WAKEDEC:
set_dec(1);
+ break;
case SRR1_WAKEEE:
/*
* Handle these when interrupts get re-enabled and we take
diff --git a/arch/powerpc/platforms/cell/pervasive.h b/arch/powerpc/platforms/cell/pervasive.h
index c6fccad6caee..0da74ab10716 100644
--- a/arch/powerpc/platforms/cell/pervasive.h
+++ b/arch/powerpc/platforms/cell/pervasive.h
@@ -13,9 +13,6 @@
#define PERVASIVE_H
extern void cbe_pervasive_init(void);
-extern void cbe_system_error_exception(struct pt_regs *regs);
-extern void cbe_maintenance_exception(struct pt_regs *regs);
-extern void cbe_thermal_exception(struct pt_regs *regs);
#ifdef CONFIG_PPC_IBM_CELL_RESETBUTTON
extern int cbe_sysreset_hack(void);
diff --git a/arch/powerpc/platforms/cell/pmu.c b/arch/powerpc/platforms/cell/pmu.c
index 35bbd15582af..b207a7f99be5 100644
--- a/arch/powerpc/platforms/cell/pmu.c
+++ b/arch/powerpc/platforms/cell/pmu.c
@@ -10,6 +10,7 @@
*/
#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
#include <linux/types.h>
#include <linux/export.h>
#include <asm/io.h>
diff --git a/arch/powerpc/platforms/cell/ras.c b/arch/powerpc/platforms/cell/ras.c
index 6ea480539419..8d934ea6270c 100644
--- a/arch/powerpc/platforms/cell/ras.c
+++ b/arch/powerpc/platforms/cell/ras.c
@@ -12,11 +12,11 @@
#include <linux/reboot.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
+#include <linux/of.h>
#include <asm/kexec.h>
#include <asm/reg.h>
#include <asm/io.h>
-#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/rtas.h>
#include <asm/cell-regs.h>
@@ -49,7 +49,7 @@ static void dump_fir(int cpu)
}
-void cbe_system_error_exception(struct pt_regs *regs)
+DEFINE_INTERRUPT_HANDLER(cbe_system_error_exception)
{
int cpu = smp_processor_id();
@@ -58,7 +58,7 @@ void cbe_system_error_exception(struct pt_regs *regs)
dump_stack();
}
-void cbe_maintenance_exception(struct pt_regs *regs)
+DEFINE_INTERRUPT_HANDLER(cbe_maintenance_exception)
{
int cpu = smp_processor_id();
@@ -70,7 +70,7 @@ void cbe_maintenance_exception(struct pt_regs *regs)
dump_stack();
}
-void cbe_thermal_exception(struct pt_regs *regs)
+DEFINE_INTERRUPT_HANDLER(cbe_thermal_exception)
{
int cpu = smp_processor_id();
diff --git a/arch/powerpc/platforms/cell/ras.h b/arch/powerpc/platforms/cell/ras.h
index 6c2e6bc0062e..226dbd48efad 100644
--- a/arch/powerpc/platforms/cell/ras.h
+++ b/arch/powerpc/platforms/cell/ras.h
@@ -2,9 +2,12 @@
#ifndef RAS_H
#define RAS_H
-extern void cbe_system_error_exception(struct pt_regs *regs);
-extern void cbe_maintenance_exception(struct pt_regs *regs);
-extern void cbe_thermal_exception(struct pt_regs *regs);
+#include <asm/interrupt.h>
+
+DECLARE_INTERRUPT_HANDLER(cbe_system_error_exception);
+DECLARE_INTERRUPT_HANDLER(cbe_maintenance_exception);
+DECLARE_INTERRUPT_HANDLER(cbe_thermal_exception);
+
extern void cbe_ras_init(void);
#endif /* RAS_H */
diff --git a/arch/powerpc/platforms/cell/setup.c b/arch/powerpc/platforms/cell/setup.c
index 855eedb8d7d7..47eaf75349f2 100644
--- a/arch/powerpc/platforms/cell/setup.c
+++ b/arch/powerpc/platforms/cell/setup.c
@@ -31,8 +31,6 @@
#include <asm/mmu.h>
#include <asm/processor.h>
#include <asm/io.h>
-#include <asm/pgtable.h>
-#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/pci-bridge.h>
#include <asm/iommu.h>
@@ -169,6 +167,8 @@ static int __init cell_publish_devices(void)
of_platform_device_create(np, NULL, NULL);
}
+ of_node_put(root);
+
/* There is no device for the MIC memory controller, thus we create
* a platform device for it to attach the EDAC driver to.
*/
diff --git a/arch/powerpc/platforms/cell/smp.c b/arch/powerpc/platforms/cell/smp.c
index 85d795d96a27..31ce00b52a32 100644
--- a/arch/powerpc/platforms/cell/smp.c
+++ b/arch/powerpc/platforms/cell/smp.c
@@ -21,14 +21,13 @@
#include <linux/err.h>
#include <linux/device.h>
#include <linux/cpu.h>
+#include <linux/pgtable.h>
#include <asm/ptrace.h>
#include <linux/atomic.h>
#include <asm/irq.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/io.h>
-#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/paca.h>
#include <asm/machdep.h>
@@ -78,9 +77,6 @@ static inline int smp_startup_cpu(unsigned int lcpu)
pcpu = get_hard_smp_processor_id(lcpu);
- /* Fixup atomic count: it exited inside IRQ handler. */
- task_thread_info(paca_ptrs[lcpu]->__current)->preempt_count = 0;
-
/*
* If the RTAS start-cpu token does not exist then presume the
* cpu is already spinning.
diff --git a/arch/powerpc/platforms/cell/spider-pci.c b/arch/powerpc/platforms/cell/spider-pci.c
index 93ea41680f54..e36ebd84f55b 100644
--- a/arch/powerpc/platforms/cell/spider-pci.c
+++ b/arch/powerpc/platforms/cell/spider-pci.c
@@ -8,6 +8,7 @@
#undef DEBUG
#include <linux/kernel.h>
+#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/slab.h>
#include <linux/io.h>
@@ -25,10 +26,9 @@ struct spiderpci_iowa_private {
static void spiderpci_io_flush(struct iowa_bus *bus)
{
struct spiderpci_iowa_private *priv;
- u32 val;
priv = bus->private;
- val = in_be32(priv->regs + SPIDER_PCI_DUMMY_READ);
+ in_be32(priv->regs + SPIDER_PCI_DUMMY_READ);
iosync();
}
@@ -82,7 +82,7 @@ static int __init spiderpci_pci_setup_chip(struct pci_controller *phb,
/*
* On CellBlade, we can't know that which XDR memory is used by
* kmalloc() to allocate dummy_page_va.
- * In order to imporve the performance, the XDR which is used to
+ * In order to improve the performance, the XDR which is used to
* allocate dummy_page_va is the nearest the spider-pci.
* We have to select the CBE which is the nearest the spider-pci
* to allocate memory from the best XDR, but I don't know that
diff --git a/arch/powerpc/platforms/cell/spider-pic.c b/arch/powerpc/platforms/cell/spider-pic.c
index 026b72c0a452..11df737c8c6a 100644
--- a/arch/powerpc/platforms/cell/spider-pic.c
+++ b/arch/powerpc/platforms/cell/spider-pic.c
@@ -10,9 +10,10 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/ioport.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/pgtable.h>
-#include <asm/pgtable.h>
-#include <asm/prom.h>
#include <asm/io.h>
#include "interrupt.h"
@@ -190,16 +191,11 @@ static void spider_irq_cascade(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
struct spider_pic *pic = irq_desc_get_handler_data(desc);
- unsigned int cs, virq;
+ unsigned int cs;
cs = in_be32(pic->regs + TIR_CS) >> 24;
- if (cs == SPIDER_IRQ_INVALID)
- virq = 0;
- else
- virq = irq_linear_revmap(pic->host, cs);
-
- if (virq)
- generic_handle_irq(virq);
+ if (cs != SPIDER_IRQ_INVALID)
+ generic_handle_domain_irq(pic->host, cs);
chip->irq_eoi(&desc->irq_data);
}
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index bc48234443b6..7bd0b563e163 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -24,7 +24,6 @@
#include <asm/spu_priv1.h>
#include <asm/spu_csa.h>
#include <asm/xmon.h>
-#include <asm/prom.h>
#include <asm/kexec.h>
const struct spu_management_ops *spu_management_ops;
@@ -387,7 +386,7 @@ spu_irq_class_2(int irq, void *data)
return stat ? IRQ_HANDLED : IRQ_NONE;
}
-static int spu_request_irqs(struct spu *spu)
+static int __init spu_request_irqs(struct spu *spu)
{
int ret = 0;
@@ -490,7 +489,7 @@ int spu_add_dev_attr(struct device_attribute *attr)
}
EXPORT_SYMBOL_GPL(spu_add_dev_attr);
-int spu_add_dev_attr_group(struct attribute_group *attrs)
+int spu_add_dev_attr_group(const struct attribute_group *attrs)
{
struct spu *spu;
int rc = 0;
@@ -529,7 +528,7 @@ void spu_remove_dev_attr(struct device_attribute *attr)
}
EXPORT_SYMBOL_GPL(spu_remove_dev_attr);
-void spu_remove_dev_attr_group(struct attribute_group *attrs)
+void spu_remove_dev_attr_group(const struct attribute_group *attrs)
{
struct spu *spu;
@@ -540,7 +539,7 @@ void spu_remove_dev_attr_group(struct attribute_group *attrs)
}
EXPORT_SYMBOL_GPL(spu_remove_dev_attr_group);
-static int spu_create_dev(struct spu *spu)
+static int __init spu_create_dev(struct spu *spu)
{
int ret;
@@ -711,7 +710,7 @@ static void crash_kexec_stop_spus(void)
}
}
-static void crash_register_spus(struct list_head *list)
+static void __init crash_register_spus(struct list_head *list)
{
struct spu *spu;
int ret;
diff --git a/arch/powerpc/platforms/cell/spu_callbacks.c b/arch/powerpc/platforms/cell/spu_callbacks.c
index cbee3666da07..e780c14c5733 100644
--- a/arch/powerpc/platforms/cell/spu_callbacks.c
+++ b/arch/powerpc/platforms/cell/spu_callbacks.c
@@ -34,15 +34,15 @@
* mbind, mq_open, ipc, ...
*/
-static void *spu_syscall_table[] = {
-#define __SYSCALL(nr, entry) entry,
+static const syscall_fn spu_syscall_table[] = {
+#define __SYSCALL_WITH_COMPAT(nr, entry, compat) __SYSCALL(nr, entry)
+#define __SYSCALL(nr, entry) [nr] = (void *) entry,
#include <asm/syscall_table_spu.h>
-#undef __SYSCALL
};
long spu_sys_callback(struct spu_syscall_block *s)
{
- long (*syscall)(u64 a1, u64 a2, u64 a3, u64 a4, u64 a5, u64 a6);
+ syscall_fn syscall;
if (s->nr_ret >= ARRAY_SIZE(spu_syscall_table)) {
pr_debug("%s: invalid syscall #%lld", __func__, s->nr_ret);
diff --git a/arch/powerpc/platforms/cell/spu_manage.c b/arch/powerpc/platforms/cell/spu_manage.c
index 8e9ef65240c3..f1ac4c742069 100644
--- a/arch/powerpc/platforms/cell/spu_manage.c
+++ b/arch/powerpc/platforms/cell/spu_manage.c
@@ -16,11 +16,12 @@
#include <linux/io.h>
#include <linux/mutex.h>
#include <linux/device.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <asm/spu.h>
#include <asm/spu_priv1.h>
#include <asm/firmware.h>
-#include <asm/prom.h>
#include "spufs/spufs.h"
#include "interrupt.h"
@@ -186,7 +187,7 @@ err:
return -EINVAL;
}
-static int spu_map_resource(struct spu *spu, int nr,
+static int __init spu_map_resource(struct spu *spu, int nr,
void __iomem** virt, unsigned long *phys)
{
struct device_node *np = spu->devnode;
@@ -361,7 +362,7 @@ static void disable_spu_by_master_run(struct spu_context *ctx)
static int qs20_reg_idxs[QS20_SPES_PER_BE] = { 0, 2, 4, 6, 7, 5, 3, 1 };
static int qs20_reg_memory[QS20_SPES_PER_BE] = { 1, 1, 0, 0, 0, 0, 0, 0 };
-static struct spu *spu_lookup_reg(int node, u32 reg)
+static struct spu *__init spu_lookup_reg(int node, u32 reg)
{
struct spu *spu;
const u32 *spu_reg;
@@ -374,7 +375,7 @@ static struct spu *spu_lookup_reg(int node, u32 reg)
return NULL;
}
-static void init_affinity_qs20_harcoded(void)
+static void __init init_affinity_qs20_harcoded(void)
{
int node, i;
struct spu *last_spu, *spu;
@@ -396,7 +397,7 @@ static void init_affinity_qs20_harcoded(void)
}
}
-static int of_has_vicinity(void)
+static int __init of_has_vicinity(void)
{
struct device_node *dn;
@@ -409,7 +410,7 @@ static int of_has_vicinity(void)
return 0;
}
-static struct spu *devnode_spu(int cbe, struct device_node *dn)
+static struct spu *__init devnode_spu(int cbe, struct device_node *dn)
{
struct spu *spu;
@@ -419,7 +420,7 @@ static struct spu *devnode_spu(int cbe, struct device_node *dn)
return NULL;
}
-static struct spu *
+static struct spu * __init
neighbour_spu(int cbe, struct device_node *target, struct device_node *avoid)
{
struct spu *spu;
@@ -440,7 +441,7 @@ neighbour_spu(int cbe, struct device_node *target, struct device_node *avoid)
return NULL;
}
-static void init_affinity_node(int cbe)
+static void __init init_affinity_node(int cbe)
{
struct spu *spu, *last_spu;
struct device_node *vic_dn, *last_spu_dn;
@@ -457,7 +458,7 @@ static void init_affinity_node(int cbe)
/*
* Walk through each phandle in vicinity property of the spu
- * (tipically two vicinity phandles per spe node)
+ * (typically two vicinity phandles per spe node)
*/
for (i = 0; i < (lenp / sizeof(phandle)); i++) {
if (vic_handles[i] == avoid_ph)
@@ -487,6 +488,8 @@ static void init_affinity_node(int cbe)
avoid_ph = vic_dn->phandle;
}
+ of_node_put(vic_dn);
+
list_add_tail(&spu->aff_list, &last_spu->aff_list);
last_spu = spu;
break;
@@ -494,7 +497,7 @@ static void init_affinity_node(int cbe)
}
}
-static void init_affinity_fw(void)
+static void __init init_affinity_fw(void)
{
int cbe;
diff --git a/arch/powerpc/platforms/cell/spu_notify.c b/arch/powerpc/platforms/cell/spu_notify.c
deleted file mode 100644
index 67870abf3715..000000000000
--- a/arch/powerpc/platforms/cell/spu_notify.c
+++ /dev/null
@@ -1,55 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Move OProfile dependencies from spufs module to the kernel so it
- * can run on non-cell PPC.
- *
- * Copyright (C) IBM 2005
- */
-
-#undef DEBUG
-
-#include <linux/export.h>
-#include <linux/notifier.h>
-#include <asm/spu.h>
-#include "spufs/spufs.h"
-
-static BLOCKING_NOTIFIER_HEAD(spu_switch_notifier);
-
-void spu_switch_notify(struct spu *spu, struct spu_context *ctx)
-{
- blocking_notifier_call_chain(&spu_switch_notifier,
- ctx ? ctx->object_id : 0, spu);
-}
-EXPORT_SYMBOL_GPL(spu_switch_notify);
-
-int spu_switch_event_register(struct notifier_block *n)
-{
- int ret;
- ret = blocking_notifier_chain_register(&spu_switch_notifier, n);
- if (!ret)
- notify_spus_active();
- return ret;
-}
-EXPORT_SYMBOL_GPL(spu_switch_event_register);
-
-int spu_switch_event_unregister(struct notifier_block *n)
-{
- return blocking_notifier_chain_unregister(&spu_switch_notifier, n);
-}
-EXPORT_SYMBOL_GPL(spu_switch_event_unregister);
-
-void spu_set_profile_private_kref(struct spu_context *ctx,
- struct kref *prof_info_kref,
- void (* prof_info_release) (struct kref *kref))
-{
- ctx->prof_priv_kref = prof_info_kref;
- ctx->prof_priv_release = prof_info_release;
-}
-EXPORT_SYMBOL_GPL(spu_set_profile_private_kref);
-
-void *spu_get_profile_private_kref(struct spu_context *ctx)
-{
- return ctx->prof_priv_kref;
-}
-EXPORT_SYMBOL_GPL(spu_get_profile_private_kref);
-
diff --git a/arch/powerpc/platforms/cell/spu_priv1_mmio.c b/arch/powerpc/platforms/cell/spu_priv1_mmio.c
index 0c2e6bb6fe51..d150e3987304 100644
--- a/arch/powerpc/platforms/cell/spu_priv1_mmio.c
+++ b/arch/powerpc/platforms/cell/spu_priv1_mmio.c
@@ -19,7 +19,6 @@
#include <asm/spu.h>
#include <asm/spu_priv1.h>
#include <asm/firmware.h>
-#include <asm/prom.h>
#include "interrupt.h"
#include "spu_priv1_mmio.h"
diff --git a/arch/powerpc/platforms/cell/spufs/.gitignore b/arch/powerpc/platforms/cell/spufs/.gitignore
index a09ee8d84d6c..5f3eb224f653 100644
--- a/arch/powerpc/platforms/cell/spufs/.gitignore
+++ b/arch/powerpc/platforms/cell/spufs/.gitignore
@@ -1,2 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
spu_save_dump.h
spu_restore_dump.h
diff --git a/arch/powerpc/platforms/cell/spufs/coredump.c b/arch/powerpc/platforms/cell/spufs/coredump.c
index 8b3296b62f65..1a587618015c 100644
--- a/arch/powerpc/platforms/cell/spufs/coredump.c
+++ b/arch/powerpc/platforms/cell/spufs/coredump.c
@@ -21,22 +21,6 @@
#include "spufs.h"
-static ssize_t do_coredump_read(int num, struct spu_context *ctx, void *buffer,
- size_t size, loff_t *off)
-{
- u64 data;
- int ret;
-
- if (spufs_coredump_read[num].read)
- return spufs_coredump_read[num].read(ctx, buffer, size, off);
-
- data = spufs_coredump_read[num].get(ctx);
- ret = snprintf(buffer, size, "0x%.16llx", data);
- if (ret >= size)
- return size;
- return ++ret; /* count trailing NULL */
-}
-
static int spufs_ctx_note_size(struct spu_context *ctx, int dfd)
{
int i, sz, total = 0;
@@ -82,13 +66,20 @@ static int match_context(const void *v, struct file *file, unsigned fd)
*/
static struct spu_context *coredump_next_context(int *fd)
{
+ struct spu_context *ctx;
struct file *file;
int n = iterate_fd(current->files, *fd, match_context, NULL);
if (!n)
return NULL;
*fd = n - 1;
- file = fcheck(*fd);
- return SPUFS_I(file_inode(file))->i_ctx;
+
+ rcu_read_lock();
+ file = lookup_fd_rcu(*fd);
+ ctx = SPUFS_I(file_inode(file))->i_ctx;
+ get_spu_context(ctx);
+ rcu_read_unlock();
+
+ return ctx;
}
int spufs_coredump_extra_notes_size(void)
@@ -99,17 +90,23 @@ int spufs_coredump_extra_notes_size(void)
fd = 0;
while ((ctx = coredump_next_context(&fd)) != NULL) {
rc = spu_acquire_saved(ctx);
- if (rc)
+ if (rc) {
+ put_spu_context(ctx);
break;
+ }
+
rc = spufs_ctx_note_size(ctx, fd);
spu_release_saved(ctx);
- if (rc < 0)
+ if (rc < 0) {
+ put_spu_context(ctx);
break;
+ }
size += rc;
/* start searching the next fd next time */
fd++;
+ put_spu_context(ctx);
}
return size;
@@ -118,58 +115,42 @@ int spufs_coredump_extra_notes_size(void)
static int spufs_arch_write_note(struct spu_context *ctx, int i,
struct coredump_params *cprm, int dfd)
{
- loff_t pos = 0;
- int sz, rc, total = 0;
- const int bufsz = PAGE_SIZE;
- char *name;
- char fullname[80], *buf;
+ size_t sz = spufs_coredump_read[i].size;
+ char fullname[80];
struct elf_note en;
- size_t skip;
-
- buf = (void *)get_zeroed_page(GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- name = spufs_coredump_read[i].name;
- sz = spufs_coredump_read[i].size;
+ int ret;
- sprintf(fullname, "SPU/%d/%s", dfd, name);
+ sprintf(fullname, "SPU/%d/%s", dfd, spufs_coredump_read[i].name);
en.n_namesz = strlen(fullname) + 1;
en.n_descsz = sz;
en.n_type = NT_SPU;
if (!dump_emit(cprm, &en, sizeof(en)))
- goto Eio;
-
+ return -EIO;
if (!dump_emit(cprm, fullname, en.n_namesz))
- goto Eio;
-
+ return -EIO;
if (!dump_align(cprm, 4))
- goto Eio;
-
- do {
- rc = do_coredump_read(i, ctx, buf, bufsz, &pos);
- if (rc > 0) {
- if (!dump_emit(cprm, buf, rc))
- goto Eio;
- total += rc;
- }
- } while (rc == bufsz && total < sz);
-
- if (rc < 0)
- goto out;
-
- skip = roundup(cprm->pos - total + sz, 4) - cprm->pos;
- if (!dump_skip(cprm, skip))
- goto Eio;
-
- rc = 0;
-out:
- free_page((unsigned long)buf);
- return rc;
-Eio:
- free_page((unsigned long)buf);
- return -EIO;
+ return -EIO;
+
+ if (spufs_coredump_read[i].dump) {
+ ret = spufs_coredump_read[i].dump(ctx, cprm);
+ if (ret < 0)
+ return ret;
+ } else {
+ char buf[32];
+
+ ret = snprintf(buf, sizeof(buf), "0x%.16llx",
+ spufs_coredump_read[i].get(ctx));
+ if (ret >= sizeof(buf))
+ return sizeof(buf);
+
+ /* count the trailing NULL: */
+ if (!dump_emit(cprm, buf, ret + 1))
+ return -EIO;
+ }
+
+ dump_skip_to(cprm, roundup(cprm->pos - ret + sz, 4));
+ return 0;
}
int spufs_coredump_extra_notes_write(struct coredump_params *cprm)
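
For readability, here is a sketch of the reader-table shape the rework above relies on: each spufs_coredump_read[] entry pairs a note name and size with either a dump() callback (raw data written straight into the core file) or a get() hook (a single value formatted as a 16-digit hex string). The struct layout is inferred from the call sites in this hunk; the example entries are hypothetical and not part of the patch.

	/* Sketch only: layout inferred from the .name/.size/.dump/.get uses
	 * above; the real definitions live in spufs.h and file.c. */
	struct spufs_coredump_reader {
		char *name;
		ssize_t (*dump)(struct spu_context *ctx, struct coredump_params *cprm);
		u64 (*get)(struct spu_context *ctx);
		size_t size;
	};

	/* Hypothetical entries for illustration */
	static const struct spufs_coredump_reader example_readers[] = {
		{ "mem",  spufs_mem_dump,  NULL, LS_SIZE },  /* raw local store */
		{ "regs", spufs_regs_dump, NULL, 128 * 16 }, /* 128 x 128-bit GPRs */
		{ "lslr", NULL, spufs_lslr_get, 19 },        /* "0x%.16llx" + NUL */
	};
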
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
index c0f950a3f4e1..62d90a5e23d1 100644
--- a/arch/powerpc/platforms/cell/spufs/file.c
+++ b/arch/powerpc/platforms/cell/spufs/file.c
@@ -9,6 +9,7 @@
#undef DEBUG
+#include <linux/coredump.h>
#include <linux/fs.h>
#include <linux/ioctl.h>
#include <linux/export.h>
@@ -129,6 +130,14 @@ out:
return ret;
}
+static ssize_t spufs_dump_emit(struct coredump_params *cprm, void *buf,
+ size_t size)
+{
+ if (!dump_emit(cprm, buf, size))
+ return -EIO;
+ return size;
+}
+
#define DEFINE_SPUFS_SIMPLE_ATTRIBUTE(__fops, __get, __set, __fmt) \
static int __fops ## _open(struct inode *inode, struct file *file) \
{ \
@@ -172,12 +181,9 @@ spufs_mem_release(struct inode *inode, struct file *file)
}
static ssize_t
-__spufs_mem_read(struct spu_context *ctx, char __user *buffer,
- size_t size, loff_t *pos)
+spufs_mem_dump(struct spu_context *ctx, struct coredump_params *cprm)
{
- char *local_store = ctx->ops->get_ls(ctx);
- return simple_read_from_buffer(buffer, size, pos, local_store,
- LS_SIZE);
+ return spufs_dump_emit(cprm, ctx->ops->get_ls(ctx), LS_SIZE);
}
static ssize_t
@@ -190,7 +196,8 @@ spufs_mem_read(struct file *file, char __user *buffer,
ret = spu_acquire(ctx);
if (ret)
return ret;
- ret = __spufs_mem_read(ctx, buffer, size, pos);
+ ret = simple_read_from_buffer(buffer, size, pos, ctx->ops->get_ls(ctx),
+ LS_SIZE);
spu_release(ctx);
return ret;
@@ -318,7 +325,7 @@ static vm_fault_t spufs_ps_fault(struct vm_fault *vmf,
return VM_FAULT_SIGBUS;
/*
- * Because we release the mmap_sem, the context may be destroyed while
+ * Because we release the mmap_lock, the context may be destroyed while
* we're in spu_wait. Grab an extra reference so it isn't destroyed
* in the meantime.
*/
@@ -327,8 +334,8 @@ static vm_fault_t spufs_ps_fault(struct vm_fault *vmf,
/*
* We have to wait for context to be loaded before we have
* pages to hand out to the user, but we don't want to wait
- * with the mmap_sem held.
- * It is possible to drop the mmap_sem here, but then we need
+ * with the mmap_lock held.
+ * It is possible to drop the mmap_lock here, but then we need
* to return VM_FAULT_NOPAGE because the mappings may have
 * changed.
*/
@@ -336,11 +343,11 @@ static vm_fault_t spufs_ps_fault(struct vm_fault *vmf,
goto refault;
if (ctx->state == SPU_STATE_SAVED) {
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
spu_context_nospu_trace(spufs_ps_fault__sleep, ctx);
err = spufs_wait(ctx->run_wq, ctx->state == SPU_STATE_RUNNABLE);
spu_context_trace(spufs_ps_fault__wake, ctx, ctx->spu);
- down_read(&current->mm->mmap_sem);
+ mmap_read_lock(current->mm);
} else {
area = ctx->spu->problem_phys + ps_offs;
ret = vmf_insert_pfn(vmf->vma, vmf->address,
@@ -459,12 +466,10 @@ spufs_regs_open(struct inode *inode, struct file *file)
}
static ssize_t
-__spufs_regs_read(struct spu_context *ctx, char __user *buffer,
- size_t size, loff_t *pos)
+spufs_regs_dump(struct spu_context *ctx, struct coredump_params *cprm)
{
- struct spu_lscsa *lscsa = ctx->csa.lscsa;
- return simple_read_from_buffer(buffer, size, pos,
- lscsa->gprs, sizeof lscsa->gprs);
+ return spufs_dump_emit(cprm, ctx->csa.lscsa->gprs,
+ sizeof(ctx->csa.lscsa->gprs));
}
static ssize_t
@@ -482,7 +487,8 @@ spufs_regs_read(struct file *file, char __user *buffer,
ret = spu_acquire_saved(ctx);
if (ret)
return ret;
- ret = __spufs_regs_read(ctx, buffer, size, pos);
+ ret = simple_read_from_buffer(buffer, size, pos, ctx->csa.lscsa->gprs,
+ sizeof(ctx->csa.lscsa->gprs));
spu_release_saved(ctx);
return ret;
}
@@ -517,12 +523,10 @@ static const struct file_operations spufs_regs_fops = {
};
static ssize_t
-__spufs_fpcr_read(struct spu_context *ctx, char __user * buffer,
- size_t size, loff_t * pos)
+spufs_fpcr_dump(struct spu_context *ctx, struct coredump_params *cprm)
{
- struct spu_lscsa *lscsa = ctx->csa.lscsa;
- return simple_read_from_buffer(buffer, size, pos,
- &lscsa->fpcr, sizeof(lscsa->fpcr));
+ return spufs_dump_emit(cprm, &ctx->csa.lscsa->fpcr,
+ sizeof(ctx->csa.lscsa->fpcr));
}
static ssize_t
@@ -535,7 +539,8 @@ spufs_fpcr_read(struct file *file, char __user * buffer,
ret = spu_acquire_saved(ctx);
if (ret)
return ret;
- ret = __spufs_fpcr_read(ctx, buffer, size, pos);
+ ret = simple_read_from_buffer(buffer, size, pos, &ctx->csa.lscsa->fpcr,
+ sizeof(ctx->csa.lscsa->fpcr));
spu_release_saved(ctx);
return ret;
}
@@ -590,17 +595,12 @@ static ssize_t spufs_mbox_read(struct file *file, char __user *buf,
size_t len, loff_t *pos)
{
struct spu_context *ctx = file->private_data;
- u32 mbox_data, __user *udata;
+ u32 mbox_data, __user *udata = (void __user *)buf;
ssize_t count;
if (len < 4)
return -EINVAL;
- if (!access_ok(buf, len))
- return -EFAULT;
-
- udata = (void __user *)buf;
-
count = spu_acquire(ctx);
if (count)
return count;
@@ -616,7 +616,7 @@ static ssize_t spufs_mbox_read(struct file *file, char __user *buf,
* but still need to return the data we have
* read successfully so far.
*/
- ret = __put_user(mbox_data, udata);
+ ret = put_user(mbox_data, udata);
if (ret) {
if (!count)
count = -EFAULT;
@@ -698,17 +698,12 @@ static ssize_t spufs_ibox_read(struct file *file, char __user *buf,
size_t len, loff_t *pos)
{
struct spu_context *ctx = file->private_data;
- u32 ibox_data, __user *udata;
+ u32 ibox_data, __user *udata = (void __user *)buf;
ssize_t count;
if (len < 4)
return -EINVAL;
- if (!access_ok(buf, len))
- return -EFAULT;
-
- udata = (void __user *)buf;
-
count = spu_acquire(ctx);
if (count)
goto out;
@@ -727,7 +722,7 @@ static ssize_t spufs_ibox_read(struct file *file, char __user *buf,
}
/* if we can't write at all, return -EFAULT */
- count = __put_user(ibox_data, udata);
+ count = put_user(ibox_data, udata);
if (count)
goto out_unlock;
@@ -741,7 +736,7 @@ static ssize_t spufs_ibox_read(struct file *file, char __user *buf,
* but still need to return the data we have
* read successfully so far.
*/
- ret = __put_user(ibox_data, udata);
+ ret = put_user(ibox_data, udata);
if (ret)
break;
}
@@ -836,17 +831,13 @@ static ssize_t spufs_wbox_write(struct file *file, const char __user *buf,
size_t len, loff_t *pos)
{
struct spu_context *ctx = file->private_data;
- u32 wbox_data, __user *udata;
+ u32 wbox_data, __user *udata = (void __user *)buf;
ssize_t count;
if (len < 4)
return -EINVAL;
- udata = (void __user *)buf;
- if (!access_ok(buf, len))
- return -EFAULT;
-
- if (__get_user(wbox_data, udata))
+ if (get_user(wbox_data, udata))
return -EFAULT;
count = spu_acquire(ctx);
@@ -873,7 +864,7 @@ static ssize_t spufs_wbox_write(struct file *file, const char __user *buf,
/* write as much as possible */
for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
int ret;
- ret = __get_user(wbox_data, udata);
+ ret = get_user(wbox_data, udata);
if (ret)
break;
@@ -967,28 +958,26 @@ spufs_signal1_release(struct inode *inode, struct file *file)
return 0;
}
-static ssize_t __spufs_signal1_read(struct spu_context *ctx, char __user *buf,
- size_t len, loff_t *pos)
+static ssize_t spufs_signal1_dump(struct spu_context *ctx,
+ struct coredump_params *cprm)
{
- int ret = 0;
- u32 data;
+ if (!ctx->csa.spu_chnlcnt_RW[3])
+ return 0;
+ return spufs_dump_emit(cprm, &ctx->csa.spu_chnldata_RW[3],
+ sizeof(ctx->csa.spu_chnldata_RW[3]));
+}
- if (len < 4)
+static ssize_t __spufs_signal1_read(struct spu_context *ctx, char __user *buf,
+ size_t len)
+{
+ if (len < sizeof(ctx->csa.spu_chnldata_RW[3]))
return -EINVAL;
-
- if (ctx->csa.spu_chnlcnt_RW[3]) {
- data = ctx->csa.spu_chnldata_RW[3];
- ret = 4;
- }
-
- if (!ret)
- goto out;
-
- if (copy_to_user(buf, &data, 4))
+ if (!ctx->csa.spu_chnlcnt_RW[3])
+ return 0;
+ if (copy_to_user(buf, &ctx->csa.spu_chnldata_RW[3],
+ sizeof(ctx->csa.spu_chnldata_RW[3])))
return -EFAULT;
-
-out:
- return ret;
+ return sizeof(ctx->csa.spu_chnldata_RW[3]);
}
static ssize_t spufs_signal1_read(struct file *file, char __user *buf,
@@ -1000,7 +989,7 @@ static ssize_t spufs_signal1_read(struct file *file, char __user *buf,
ret = spu_acquire_saved(ctx);
if (ret)
return ret;
- ret = __spufs_signal1_read(ctx, buf, len, pos);
+ ret = __spufs_signal1_read(ctx, buf, len);
spu_release_saved(ctx);
return ret;
@@ -1104,28 +1093,26 @@ spufs_signal2_release(struct inode *inode, struct file *file)
return 0;
}
-static ssize_t __spufs_signal2_read(struct spu_context *ctx, char __user *buf,
- size_t len, loff_t *pos)
+static ssize_t spufs_signal2_dump(struct spu_context *ctx,
+ struct coredump_params *cprm)
{
- int ret = 0;
- u32 data;
+ if (!ctx->csa.spu_chnlcnt_RW[4])
+ return 0;
+ return spufs_dump_emit(cprm, &ctx->csa.spu_chnldata_RW[4],
+ sizeof(ctx->csa.spu_chnldata_RW[4]));
+}
- if (len < 4)
+static ssize_t __spufs_signal2_read(struct spu_context *ctx, char __user *buf,
+ size_t len)
+{
+ if (len < sizeof(ctx->csa.spu_chnldata_RW[4]))
return -EINVAL;
-
- if (ctx->csa.spu_chnlcnt_RW[4]) {
- data = ctx->csa.spu_chnldata_RW[4];
- ret = 4;
- }
-
- if (!ret)
- goto out;
-
- if (copy_to_user(buf, &data, 4))
+ if (!ctx->csa.spu_chnlcnt_RW[4])
+ return 0;
+ if (copy_to_user(buf, &ctx->csa.spu_chnldata_RW[4],
+ sizeof(ctx->csa.spu_chnldata_RW[4])))
return -EFAULT;
-
-out:
- return ret;
+ return sizeof(ctx->csa.spu_chnldata_RW[4]);
}
static ssize_t spufs_signal2_read(struct file *file, char __user *buf,
@@ -1137,7 +1124,7 @@ static ssize_t spufs_signal2_read(struct file *file, char __user *buf,
ret = spu_acquire_saved(ctx);
if (ret)
return ret;
- ret = __spufs_signal2_read(ctx, buf, len, pos);
+ ret = __spufs_signal2_read(ctx, buf, len);
spu_release_saved(ctx);
return ret;
@@ -1961,38 +1948,36 @@ static const struct file_operations spufs_caps_fops = {
.release = single_release,
};
-static ssize_t __spufs_mbox_info_read(struct spu_context *ctx,
- char __user *buf, size_t len, loff_t *pos)
+static ssize_t spufs_mbox_info_dump(struct spu_context *ctx,
+ struct coredump_params *cprm)
{
- u32 data;
-
- /* EOF if there's no entry in the mbox */
if (!(ctx->csa.prob.mb_stat_R & 0x0000ff))
return 0;
-
- data = ctx->csa.prob.pu_mb_R;
-
- return simple_read_from_buffer(buf, len, pos, &data, sizeof data);
+ return spufs_dump_emit(cprm, &ctx->csa.prob.pu_mb_R,
+ sizeof(ctx->csa.prob.pu_mb_R));
}
static ssize_t spufs_mbox_info_read(struct file *file, char __user *buf,
size_t len, loff_t *pos)
{
- int ret;
struct spu_context *ctx = file->private_data;
-
- if (!access_ok(buf, len))
- return -EFAULT;
+ u32 stat, data;
+ int ret;
ret = spu_acquire_saved(ctx);
if (ret)
return ret;
spin_lock(&ctx->csa.register_lock);
- ret = __spufs_mbox_info_read(ctx, buf, len, pos);
+ stat = ctx->csa.prob.mb_stat_R;
+ data = ctx->csa.prob.pu_mb_R;
spin_unlock(&ctx->csa.register_lock);
spu_release_saved(ctx);
- return ret;
+ /* EOF if there's no entry in the mbox */
+ if (!(stat & 0x0000ff))
+ return 0;
+
+ return simple_read_from_buffer(buf, len, pos, &data, sizeof(data));
}
static const struct file_operations spufs_mbox_info_fops = {
@@ -2001,38 +1986,36 @@ static const struct file_operations spufs_mbox_info_fops = {
.llseek = generic_file_llseek,
};
-static ssize_t __spufs_ibox_info_read(struct spu_context *ctx,
- char __user *buf, size_t len, loff_t *pos)
+static ssize_t spufs_ibox_info_dump(struct spu_context *ctx,
+ struct coredump_params *cprm)
{
- u32 data;
-
- /* EOF if there's no entry in the ibox */
if (!(ctx->csa.prob.mb_stat_R & 0xff0000))
return 0;
-
- data = ctx->csa.priv2.puint_mb_R;
-
- return simple_read_from_buffer(buf, len, pos, &data, sizeof data);
+ return spufs_dump_emit(cprm, &ctx->csa.priv2.puint_mb_R,
+ sizeof(ctx->csa.priv2.puint_mb_R));
}
static ssize_t spufs_ibox_info_read(struct file *file, char __user *buf,
size_t len, loff_t *pos)
{
struct spu_context *ctx = file->private_data;
+ u32 stat, data;
int ret;
- if (!access_ok(buf, len))
- return -EFAULT;
-
ret = spu_acquire_saved(ctx);
if (ret)
return ret;
spin_lock(&ctx->csa.register_lock);
- ret = __spufs_ibox_info_read(ctx, buf, len, pos);
+ stat = ctx->csa.prob.mb_stat_R;
+ data = ctx->csa.priv2.puint_mb_R;
spin_unlock(&ctx->csa.register_lock);
spu_release_saved(ctx);
- return ret;
+ /* EOF if there's no entry in the ibox */
+ if (!(stat & 0xff0000))
+ return 0;
+
+ return simple_read_from_buffer(buf, len, pos, &data, sizeof(data));
}
static const struct file_operations spufs_ibox_info_fops = {
@@ -2041,41 +2024,36 @@ static const struct file_operations spufs_ibox_info_fops = {
.llseek = generic_file_llseek,
};
-static ssize_t __spufs_wbox_info_read(struct spu_context *ctx,
- char __user *buf, size_t len, loff_t *pos)
+static size_t spufs_wbox_info_cnt(struct spu_context *ctx)
{
- int i, cnt;
- u32 data[4];
- u32 wbox_stat;
-
- wbox_stat = ctx->csa.prob.mb_stat_R;
- cnt = 4 - ((wbox_stat & 0x00ff00) >> 8);
- for (i = 0; i < cnt; i++) {
- data[i] = ctx->csa.spu_mailbox_data[i];
- }
+ return (4 - ((ctx->csa.prob.mb_stat_R & 0x00ff00) >> 8)) * sizeof(u32);
+}
- return simple_read_from_buffer(buf, len, pos, &data,
- cnt * sizeof(u32));
+static ssize_t spufs_wbox_info_dump(struct spu_context *ctx,
+ struct coredump_params *cprm)
+{
+ return spufs_dump_emit(cprm, &ctx->csa.spu_mailbox_data,
+ spufs_wbox_info_cnt(ctx));
}
static ssize_t spufs_wbox_info_read(struct file *file, char __user *buf,
size_t len, loff_t *pos)
{
struct spu_context *ctx = file->private_data;
- int ret;
-
- if (!access_ok(buf, len))
- return -EFAULT;
+ u32 data[ARRAY_SIZE(ctx->csa.spu_mailbox_data)];
+ int ret, count;
ret = spu_acquire_saved(ctx);
if (ret)
return ret;
spin_lock(&ctx->csa.register_lock);
- ret = __spufs_wbox_info_read(ctx, buf, len, pos);
+ count = spufs_wbox_info_cnt(ctx);
+ memcpy(&data, &ctx->csa.spu_mailbox_data, sizeof(data));
spin_unlock(&ctx->csa.register_lock);
spu_release_saved(ctx);
- return ret;
+ return simple_read_from_buffer(buf, len, pos, &data,
+ count * sizeof(u32));
}
static const struct file_operations spufs_wbox_info_fops = {
@@ -2084,50 +2062,53 @@ static const struct file_operations spufs_wbox_info_fops = {
.llseek = generic_file_llseek,
};
-static ssize_t __spufs_dma_info_read(struct spu_context *ctx,
- char __user *buf, size_t len, loff_t *pos)
+static void spufs_get_dma_info(struct spu_context *ctx,
+ struct spu_dma_info *info)
{
- struct spu_dma_info info;
- struct mfc_cq_sr *qp, *spuqp;
int i;
- info.dma_info_type = ctx->csa.priv2.spu_tag_status_query_RW;
- info.dma_info_mask = ctx->csa.lscsa->tag_mask.slot[0];
- info.dma_info_status = ctx->csa.spu_chnldata_RW[24];
- info.dma_info_stall_and_notify = ctx->csa.spu_chnldata_RW[25];
- info.dma_info_atomic_command_status = ctx->csa.spu_chnldata_RW[27];
+ info->dma_info_type = ctx->csa.priv2.spu_tag_status_query_RW;
+ info->dma_info_mask = ctx->csa.lscsa->tag_mask.slot[0];
+ info->dma_info_status = ctx->csa.spu_chnldata_RW[24];
+ info->dma_info_stall_and_notify = ctx->csa.spu_chnldata_RW[25];
+ info->dma_info_atomic_command_status = ctx->csa.spu_chnldata_RW[27];
for (i = 0; i < 16; i++) {
- qp = &info.dma_info_command_data[i];
- spuqp = &ctx->csa.priv2.spuq[i];
+ struct mfc_cq_sr *qp = &info->dma_info_command_data[i];
+ struct mfc_cq_sr *spuqp = &ctx->csa.priv2.spuq[i];
qp->mfc_cq_data0_RW = spuqp->mfc_cq_data0_RW;
qp->mfc_cq_data1_RW = spuqp->mfc_cq_data1_RW;
qp->mfc_cq_data2_RW = spuqp->mfc_cq_data2_RW;
qp->mfc_cq_data3_RW = spuqp->mfc_cq_data3_RW;
}
+}
- return simple_read_from_buffer(buf, len, pos, &info,
- sizeof info);
+static ssize_t spufs_dma_info_dump(struct spu_context *ctx,
+ struct coredump_params *cprm)
+{
+ struct spu_dma_info info;
+
+ spufs_get_dma_info(ctx, &info);
+ return spufs_dump_emit(cprm, &info, sizeof(info));
}
static ssize_t spufs_dma_info_read(struct file *file, char __user *buf,
size_t len, loff_t *pos)
{
struct spu_context *ctx = file->private_data;
+ struct spu_dma_info info;
int ret;
- if (!access_ok(buf, len))
- return -EFAULT;
-
ret = spu_acquire_saved(ctx);
if (ret)
return ret;
spin_lock(&ctx->csa.register_lock);
- ret = __spufs_dma_info_read(ctx, buf, len, pos);
+ spufs_get_dma_info(ctx, &info);
spin_unlock(&ctx->csa.register_lock);
spu_release_saved(ctx);
- return ret;
+ return simple_read_from_buffer(buf, len, pos, &info,
+ sizeof(info));
}
static const struct file_operations spufs_dma_info_fops = {
@@ -2136,52 +2117,55 @@ static const struct file_operations spufs_dma_info_fops = {
.llseek = no_llseek,
};
-static ssize_t __spufs_proxydma_info_read(struct spu_context *ctx,
- char __user *buf, size_t len, loff_t *pos)
+static void spufs_get_proxydma_info(struct spu_context *ctx,
+ struct spu_proxydma_info *info)
{
- struct spu_proxydma_info info;
- struct mfc_cq_sr *qp, *puqp;
- int ret = sizeof info;
int i;
- if (len < ret)
- return -EINVAL;
-
- if (!access_ok(buf, len))
- return -EFAULT;
+ info->proxydma_info_type = ctx->csa.prob.dma_querytype_RW;
+ info->proxydma_info_mask = ctx->csa.prob.dma_querymask_RW;
+ info->proxydma_info_status = ctx->csa.prob.dma_tagstatus_R;
- info.proxydma_info_type = ctx->csa.prob.dma_querytype_RW;
- info.proxydma_info_mask = ctx->csa.prob.dma_querymask_RW;
- info.proxydma_info_status = ctx->csa.prob.dma_tagstatus_R;
for (i = 0; i < 8; i++) {
- qp = &info.proxydma_info_command_data[i];
- puqp = &ctx->csa.priv2.puq[i];
+ struct mfc_cq_sr *qp = &info->proxydma_info_command_data[i];
+ struct mfc_cq_sr *puqp = &ctx->csa.priv2.puq[i];
qp->mfc_cq_data0_RW = puqp->mfc_cq_data0_RW;
qp->mfc_cq_data1_RW = puqp->mfc_cq_data1_RW;
qp->mfc_cq_data2_RW = puqp->mfc_cq_data2_RW;
qp->mfc_cq_data3_RW = puqp->mfc_cq_data3_RW;
}
+}
- return simple_read_from_buffer(buf, len, pos, &info,
- sizeof info);
+static ssize_t spufs_proxydma_info_dump(struct spu_context *ctx,
+ struct coredump_params *cprm)
+{
+ struct spu_proxydma_info info;
+
+ spufs_get_proxydma_info(ctx, &info);
+ return spufs_dump_emit(cprm, &info, sizeof(info));
}
static ssize_t spufs_proxydma_info_read(struct file *file, char __user *buf,
size_t len, loff_t *pos)
{
struct spu_context *ctx = file->private_data;
+ struct spu_proxydma_info info;
int ret;
+ if (len < sizeof(info))
+ return -EINVAL;
+
ret = spu_acquire_saved(ctx);
if (ret)
return ret;
spin_lock(&ctx->csa.register_lock);
- ret = __spufs_proxydma_info_read(ctx, buf, len, pos);
+ spufs_get_proxydma_info(ctx, &info);
spin_unlock(&ctx->csa.register_lock);
spu_release_saved(ctx);
- return ret;
+ return simple_read_from_buffer(buf, len, pos, &info,
+ sizeof(info));
}
static const struct file_operations spufs_proxydma_info_fops = {
@@ -2625,23 +2609,23 @@ const struct spufs_tree_descr spufs_dir_debug_contents[] = {
};
const struct spufs_coredump_reader spufs_coredump_read[] = {
- { "regs", __spufs_regs_read, NULL, sizeof(struct spu_reg128[128])},
- { "fpcr", __spufs_fpcr_read, NULL, sizeof(struct spu_reg128) },
+ { "regs", spufs_regs_dump, NULL, sizeof(struct spu_reg128[128])},
+ { "fpcr", spufs_fpcr_dump, NULL, sizeof(struct spu_reg128) },
{ "lslr", NULL, spufs_lslr_get, 19 },
{ "decr", NULL, spufs_decr_get, 19 },
{ "decr_status", NULL, spufs_decr_status_get, 19 },
- { "mem", __spufs_mem_read, NULL, LS_SIZE, },
- { "signal1", __spufs_signal1_read, NULL, sizeof(u32) },
+ { "mem", spufs_mem_dump, NULL, LS_SIZE, },
+ { "signal1", spufs_signal1_dump, NULL, sizeof(u32) },
{ "signal1_type", NULL, spufs_signal1_type_get, 19 },
- { "signal2", __spufs_signal2_read, NULL, sizeof(u32) },
+ { "signal2", spufs_signal2_dump, NULL, sizeof(u32) },
{ "signal2_type", NULL, spufs_signal2_type_get, 19 },
{ "event_mask", NULL, spufs_event_mask_get, 19 },
{ "event_status", NULL, spufs_event_status_get, 19 },
- { "mbox_info", __spufs_mbox_info_read, NULL, sizeof(u32) },
- { "ibox_info", __spufs_ibox_info_read, NULL, sizeof(u32) },
- { "wbox_info", __spufs_wbox_info_read, NULL, 4 * sizeof(u32)},
- { "dma_info", __spufs_dma_info_read, NULL, sizeof(struct spu_dma_info)},
- { "proxydma_info", __spufs_proxydma_info_read,
+ { "mbox_info", spufs_mbox_info_dump, NULL, sizeof(u32) },
+ { "ibox_info", spufs_ibox_info_dump, NULL, sizeof(u32) },
+ { "wbox_info", spufs_wbox_info_dump, NULL, 4 * sizeof(u32)},
+ { "dma_info", spufs_dma_info_dump, NULL, sizeof(struct spu_dma_info)},
+ { "proxydma_info", spufs_proxydma_info_dump,
NULL, sizeof(struct spu_proxydma_info)},
{ "object-id", NULL, spufs_object_id_get, 19 },
{ "npc", NULL, spufs_npc_get, 19 },
diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c
index 25390569e24c..dbcfe361831a 100644
--- a/arch/powerpc/platforms/cell/spufs/inode.c
+++ b/arch/powerpc/platforms/cell/spufs/inode.c
@@ -21,9 +21,10 @@
#include <linux/namei.h>
#include <linux/pagemap.h>
#include <linux/poll.h>
+#include <linux/of.h>
+#include <linux/seq_file.h>
#include <linux/slab.h>
-#include <asm/prom.h>
#include <asm/spu.h>
#include <asm/spu_priv1.h>
#include <linux/uaccess.h>
@@ -91,14 +92,15 @@ out:
}
static int
-spufs_setattr(struct dentry *dentry, struct iattr *attr)
+spufs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
+ struct iattr *attr)
{
struct inode *inode = d_inode(dentry);
if ((attr->ia_valid & ATTR_SIZE) &&
(attr->ia_size != inode->i_size))
return -EINVAL;
- setattr_copy(inode, attr);
+ setattr_copy(&init_user_ns, inode, attr);
mark_inode_dirty(inode);
return 0;
}
@@ -235,10 +237,7 @@ spufs_mkdir(struct inode *dir, struct dentry *dentry, unsigned int flags,
if (!inode)
return -ENOSPC;
- if (dir->i_mode & S_ISGID) {
- inode->i_gid = dir->i_gid;
- inode->i_mode &= S_ISGID;
- }
+ inode_init_owner(&init_user_ns, inode, dir, mode | S_IFDIR);
ctx = alloc_spu_context(SPUFS_I(dir)->i_gang); /* XXX gang */
SPUFS_I(inode)->i_ctx = ctx;
if (!ctx) {
@@ -276,7 +275,7 @@ spufs_mkdir(struct inode *dir, struct dentry *dentry, unsigned int flags,
return ret;
}
-static int spufs_context_open(struct path *path)
+static int spufs_context_open(const struct path *path)
{
int ret;
struct file *filp;
@@ -469,10 +468,7 @@ spufs_mkgang(struct inode *dir, struct dentry *dentry, umode_t mode)
goto out;
ret = 0;
- if (dir->i_mode & S_ISGID) {
- inode->i_gid = dir->i_gid;
- inode->i_mode &= S_ISGID;
- }
+ inode_init_owner(&init_user_ns, inode, dir, mode | S_IFDIR);
gang = alloc_spu_gang();
SPUFS_I(inode)->i_ctx = NULL;
SPUFS_I(inode)->i_gang = gang;
@@ -495,7 +491,7 @@ out:
return ret;
}
-static int spufs_gang_open(struct path *path)
+static int spufs_gang_open(const struct path *path)
{
int ret;
struct file *filp;
@@ -540,7 +536,7 @@ static int spufs_create_gang(struct inode *inode,
static struct file_system_type spufs_type;
-long spufs_create(struct path *path, struct dentry *dentry,
+long spufs_create(const struct path *path, struct dentry *dentry,
unsigned int flags, umode_t mode, struct file *filp)
{
struct inode *dir = d_inode(path->dentry);
@@ -652,7 +648,7 @@ static void spufs_exit_isolated_loader(void)
get_order(isolated_loader_size));
}
-static void
+static void __init
spufs_init_isolated_loader(void)
{
struct device_node *dn;
@@ -664,6 +660,7 @@ spufs_init_isolated_loader(void)
return;
loader = of_get_property(dn, "loader", &size);
+ of_node_put(dn);
if (!loader)
return;
diff --git a/arch/powerpc/platforms/cell/spufs/run.c b/arch/powerpc/platforms/cell/spufs/run.c
index 3f2380f40f99..ce52b87496d2 100644
--- a/arch/powerpc/platforms/cell/spufs/run.c
+++ b/arch/powerpc/platforms/cell/spufs/run.c
@@ -353,7 +353,6 @@ static int spu_process_callback(struct spu_context *ctx)
long spufs_run_spu(struct spu_context *ctx, u32 *npc, u32 *event)
{
int ret;
- struct spu *spu;
u32 status;
if (mutex_lock_interruptible(&ctx->run_mutex))
@@ -386,13 +385,10 @@ long spufs_run_spu(struct spu_context *ctx, u32 *npc, u32 *event)
mutex_lock(&ctx->state_mutex);
break;
}
- spu = ctx->spu;
if (unlikely(test_and_clear_bit(SPU_SCHED_NOTIFY_ACTIVE,
&ctx->sched_flags))) {
- if (!(status & SPU_STATUS_STOPPED_BY_STOP)) {
- spu_switch_notify(spu, ctx);
+ if (!(status & SPU_STATUS_STOPPED_BY_STOP))
continue;
- }
}
spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index f18d5067cd0f..99bd027a7f7c 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -72,7 +72,7 @@ static struct timer_list spuloadavg_timer;
#define DEF_SPU_TIMESLICE (100 * HZ / (1000 * SPUSCHED_TICK))
#define SCALE_PRIO(x, prio) \
- max(x * (MAX_PRIO - prio) / (MAX_USER_PRIO / 2), MIN_SPU_TIMESLICE)
+ max(x * (MAX_PRIO - prio) / (NICE_WIDTH / 2), MIN_SPU_TIMESLICE)
/*
* scale user-nice values [ -20 ... 0 ... 19 ] to time slice values:
@@ -181,9 +181,6 @@ void do_notify_spus_active(void)
/*
* Wake up the active spu_contexts.
- *
- * When the awakened processes see their "notify_active" flag is set,
- * they will call spu_switch_notify().
*/
for_each_online_node(node) {
struct spu *spu;
@@ -239,7 +236,6 @@ static void spu_bind_context(struct spu *spu, struct spu_context *ctx)
spu_switch_log_notify(spu, ctx, SWITCH_LOG_START, 0);
spu_restore(&ctx->csa, spu);
spu->timestamp = jiffies;
- spu_switch_notify(spu, ctx);
ctx->state = SPU_STATE_RUNNABLE;
spuctx_switch_state(ctx, SPU_UTIL_USER);
@@ -344,8 +340,7 @@ static struct spu *aff_ref_location(struct spu_context *ctx, int mem_aff,
static void aff_set_ref_point_location(struct spu_gang *gang)
{
int mem_aff, gs, lowest_offset;
- struct spu_context *ctx;
- struct spu *tmp;
+ struct spu_context *tmp, *ctx;
mem_aff = gang->aff_ref_ctx->flags & SPU_CREATE_AFFINITY_MEM;
lowest_offset = 0;
@@ -440,7 +435,6 @@ static void spu_unbind_context(struct spu *spu, struct spu_context *ctx)
*/
atomic_dec_if_positive(&ctx->gang->aff_sched_count);
- spu_switch_notify(spu, NULL);
spu_unmap_mappings(ctx);
spu_save(&ctx->csa, spu);
spu_switch_log_notify(spu, ctx, SWITCH_LOG_STOP, 0);
@@ -1058,6 +1052,7 @@ void spuctx_switch_state(struct spu_context *ctx,
}
}
+#ifdef CONFIG_PROC_FS
static int show_spu_loadavg(struct seq_file *s, void *private)
{
int a, b, c;
@@ -1079,7 +1074,8 @@ static int show_spu_loadavg(struct seq_file *s, void *private)
atomic_read(&nr_spu_contexts),
idr_get_cursor(&task_active_pid_ns(current)->idr) - 1);
return 0;
-};
+}
+#endif
int __init spu_sched_init(void)
{
diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h
index 413c89afe112..84958487f696 100644
--- a/arch/powerpc/platforms/cell/spufs/spufs.h
+++ b/arch/powerpc/platforms/cell/spufs/spufs.h
@@ -76,7 +76,7 @@ struct spu_context {
struct address_space *mss; /* 'mss' area mappings. */
struct address_space *psmap; /* 'psmap' area mappings. */
struct mutex mapping_lock;
- u64 object_id; /* user space pointer for oprofile */
+ u64 object_id; /* user space pointer for GNU Debugger */
enum { SPU_STATE_RUNNABLE, SPU_STATE_SAVED } state;
struct mutex state_mutex;
@@ -232,7 +232,7 @@ extern const struct spufs_tree_descr spufs_dir_debug_contents[];
extern struct spufs_calls spufs_calls;
struct coredump_params;
long spufs_run_spu(struct spu_context *ctx, u32 *npc, u32 *status);
-long spufs_create(struct path *nd, struct dentry *dentry, unsigned int flags,
+long spufs_create(const struct path *nd, struct dentry *dentry, unsigned int flags,
umode_t mode, struct file *filp);
/* ELF coredump callbacks for writing SPU ELF notes */
extern int spufs_coredump_extra_notes_size(void);
@@ -281,7 +281,6 @@ void spu_del_from_rq(struct spu_context *ctx);
int spu_activate(struct spu_context *ctx, unsigned long flags);
void spu_deactivate(struct spu_context *ctx);
void spu_yield(struct spu_context *ctx);
-void spu_switch_notify(struct spu *spu, struct spu_context *ctx);
void spu_switch_log_notify(struct spu *spu, struct spu_context *ctx,
u32 type, u32 val);
void spu_set_timeslice(struct spu_context *ctx);
@@ -334,16 +333,13 @@ void spufs_stop_callback(struct spu *spu, int irq);
void spufs_mfc_callback(struct spu *spu);
void spufs_dma_callback(struct spu *spu, int type);
-extern struct spu_coredump_calls spufs_coredump_calls;
struct spufs_coredump_reader {
char *name;
- ssize_t (*read)(struct spu_context *ctx,
- char __user *buffer, size_t size, loff_t *pos);
+ ssize_t (*dump)(struct spu_context *ctx, struct coredump_params *cprm);
u64 (*get)(struct spu_context *ctx);
size_t size;
};
extern const struct spufs_coredump_reader spufs_coredump_read[];
-extern int spufs_coredump_num_notes;
extern int spu_init_csa(struct spu_state *csa);
extern void spu_fini_csa(struct spu_state *csa);
diff --git a/arch/powerpc/platforms/cell/spufs/switch.c b/arch/powerpc/platforms/cell/spufs/switch.c
index 5c3f5d088c3b..b41e81b22fdc 100644
--- a/arch/powerpc/platforms/cell/spufs/switch.c
+++ b/arch/powerpc/platforms/cell/spufs/switch.c
@@ -177,7 +177,7 @@ static inline void save_mfc_cntl(struct spu_state *csa, struct spu *spu)
POLL_WHILE_FALSE((in_be64(&priv2->mfc_control_RW) &
MFC_CNTL_SUSPEND_DMA_STATUS_MASK) ==
MFC_CNTL_SUSPEND_COMPLETE);
- /* fall through */
+ fallthrough;
case MFC_CNTL_SUSPEND_COMPLETE:
if (csa)
csa->priv2.mfc_control_RW =
@@ -1657,14 +1657,13 @@ static inline void restore_spu_mb(struct spu_state *csa, struct spu *spu)
static inline void check_ppu_mb_stat(struct spu_state *csa, struct spu *spu)
{
struct spu_problem __iomem *prob = spu->problem;
- u32 dummy = 0;
/* Restore, Step 66:
* If CSA.MB_Stat[P]=0 (mailbox empty) then
* read from the PPU_MB register.
*/
if ((csa->prob.mb_stat_R & 0xFF) == 0) {
- dummy = in_be32(&prob->pu_mb_R);
+ in_be32(&prob->pu_mb_R);
eieio();
}
}
@@ -1672,14 +1671,13 @@ static inline void check_ppu_mb_stat(struct spu_state *csa, struct spu *spu)
static inline void check_ppuint_mb_stat(struct spu_state *csa, struct spu *spu)
{
struct spu_priv2 __iomem *priv2 = spu->priv2;
- u64 dummy = 0UL;
/* Restore, Step 66:
* If CSA.MB_Stat[I]=0 (mailbox empty) then
* read from the PPUINT_MB register.
*/
if ((csa->prob.mb_stat_R & 0xFF0000) == 0) {
- dummy = in_be64(&priv2->puint_mb_R);
+ in_be64(&priv2->puint_mb_R);
eieio();
spu_int_stat_clear(spu, 2, CLASS2_ENABLE_MAILBOX_INTR);
eieio();
diff --git a/arch/powerpc/platforms/chrp/Kconfig b/arch/powerpc/platforms/chrp/Kconfig
index 9b5c5505718a..ff30ed579a39 100644
--- a/arch/powerpc/platforms/chrp/Kconfig
+++ b/arch/powerpc/platforms/chrp/Kconfig
@@ -11,6 +11,6 @@ config PPC_CHRP
select RTAS_ERROR_LOGGING
select PPC_MPC106
select PPC_UDBG_16550
- select PPC_NATIVE
+ select PPC_HASH_MMU_NATIVE
select FORCE_PCI
default y
diff --git a/arch/powerpc/platforms/chrp/chrp.h b/arch/powerpc/platforms/chrp/chrp.h
index a5a7c338caf9..6ff4631d9db4 100644
--- a/arch/powerpc/platforms/chrp/chrp.h
+++ b/arch/powerpc/platforms/chrp/chrp.h
@@ -9,4 +9,3 @@ extern int chrp_set_rtc_time(struct rtc_time *);
extern long chrp_time_init(void);
extern void chrp_find_bridges(void);
-extern void chrp_event_scan(unsigned long);
diff --git a/arch/powerpc/platforms/chrp/nvram.c b/arch/powerpc/platforms/chrp/nvram.c
index e820332b59a0..dab78076fedb 100644
--- a/arch/powerpc/platforms/chrp/nvram.c
+++ b/arch/powerpc/platforms/chrp/nvram.c
@@ -10,7 +10,7 @@
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
-#include <asm/prom.h>
+#include <linux/of.h>
#include <asm/machdep.h>
#include <asm/rtas.h>
#include "chrp.h"
diff --git a/arch/powerpc/platforms/chrp/pci.c b/arch/powerpc/platforms/chrp/pci.c
index b020c757d2bf..6f6598e771ff 100644
--- a/arch/powerpc/platforms/chrp/pci.c
+++ b/arch/powerpc/platforms/chrp/pci.c
@@ -8,12 +8,12 @@
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/init.h>
+#include <linux/pgtable.h>
+#include <linux/of_address.h>
#include <asm/io.h>
-#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/hydra.h>
-#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/sections.h>
#include <asm/pci-bridge.h>
@@ -131,8 +131,7 @@ static struct pci_ops rtas_pci_ops =
volatile struct Hydra __iomem *Hydra = NULL;
-int __init
-hydra_init(void)
+static int __init hydra_init(void)
{
struct device_node *np;
struct resource r;
@@ -314,6 +313,14 @@ chrp_find_bridges(void)
}
}
of_node_put(root);
+
+ /*
+ * "Temporary" fixes for PCI devices.
+ * -- Geert
+ */
+ hydra_init(); /* Mac I/O */
+
+ pci_create_OF_bus_map();
}
/* SL82C105 IDE Control/Status Register */
diff --git a/arch/powerpc/platforms/chrp/pegasos_eth.c b/arch/powerpc/platforms/chrp/pegasos_eth.c
index 485cf5ef73d4..5c4f1a9ca154 100644
--- a/arch/powerpc/platforms/chrp/pegasos_eth.c
+++ b/arch/powerpc/platforms/chrp/pegasos_eth.c
@@ -113,7 +113,7 @@ static struct platform_device *mv643xx_eth_pd_devs[] __initdata = {
static void __iomem *mv643xx_reg_base;
-static int Enable_SRAM(void)
+static int __init Enable_SRAM(void)
{
u32 ALong;
diff --git a/arch/powerpc/platforms/chrp/setup.c b/arch/powerpc/platforms/chrp/setup.c
index fcf6f2342ef4..ec63c0558db6 100644
--- a/arch/powerpc/platforms/chrp/setup.c
+++ b/arch/powerpc/platforms/chrp/setup.c
@@ -32,10 +32,11 @@
#include <linux/root_dev.h>
#include <linux/initrd.h>
#include <linux/timer.h>
+#include <linux/of_address.h>
+#include <linux/of_fdt.h>
+#include <linux/of_irq.h>
#include <asm/io.h>
-#include <asm/pgtable.h>
-#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include <asm/dma.h>
#include <asm/machdep.h>
@@ -252,7 +253,7 @@ static void __noreturn briq_restart(char *cmd)
* Per default, input/output-device points to the keyboard/screen
* If no card is installed, the built-in serial port is used as a fallback.
* But unfortunately, the firmware does not connect /chosen/{stdin,stdout}
- * the the built-in serial node. Instead, a /failsafe node is created.
+ * to the built-in serial node. Instead, a /failsafe node is created.
*/
static __init void chrp_init(void)
{
@@ -335,22 +336,11 @@ static void __init chrp_setup_arch(void)
/* On pegasos, enable the L2 cache if not already done by OF */
pegasos_set_l2cr();
- /* Lookup PCI host bridges */
- chrp_find_bridges();
-
- /*
- * Temporary fixes for PCI devices.
- * -- Geert
- */
- hydra_init(); /* Mac I/O */
-
/*
* Fix the Super I/O configuration
*/
sio_init();
- pci_create_OF_bus_map();
-
/*
* Print the banner, then scroll down so boot progress
* can be printed. -- Cort
@@ -451,13 +441,6 @@ static void __init chrp_find_openpic(void)
of_node_put(np);
}
-#if defined(CONFIG_VT) && defined(CONFIG_INPUT_ADBHID) && defined(CONFIG_XMON)
-static struct irqaction xmon_irqaction = {
- .handler = xmon_irq,
- .name = "XMON break",
-};
-#endif
-
static void __init chrp_find_8259(void)
{
struct device_node *np, *pic = NULL;
@@ -541,8 +524,11 @@ static void __init chrp_init_IRQ(void)
if (of_node_is_type(kbd->parent, "adb"))
break;
of_node_put(kbd);
- if (kbd)
- setup_irq(HYDRA_INT_ADB_NMI, &xmon_irqaction);
+ if (kbd) {
+ if (request_irq(HYDRA_INT_ADB_NMI, xmon_irq, 0, "XMON break",
+ NULL))
+ pr_err("Failed to register XMON break interrupt\n");
+ }
#endif
}
@@ -573,7 +559,6 @@ static int __init chrp_probe(void)
if (strcmp(dtype, "chrp"))
return 0;
- ISA_DMA_THRESHOLD = ~0L;
DMA_MODE_READ = 0x44;
DMA_MODE_WRITE = 0x48;
@@ -588,6 +573,7 @@ define_machine(chrp) {
.name = "CHRP",
.probe = chrp_probe,
.setup_arch = chrp_setup_arch,
+ .discover_phbs = chrp_find_bridges,
.init = chrp_init2,
.show_cpuinfo = chrp_show_cpuinfo,
.init_IRQ = chrp_init_IRQ,
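Two hunks in this setup.c diff belong together: the static struct irqaction plus setup_irq() pairing for the XMON break interrupt is deleted, and chrp_init_IRQ() now calls request_irq() directly, which allocates the irqaction internally and makes a registration failure visible instead of silently ignored. Juxtaposing the two halves shown above (both taken from the hunks, nothing new):

    /* Before: static action registered with setup_irq(), errors ignored. */
    static struct irqaction xmon_irqaction = {
            .handler = xmon_irq,
            .name    = "XMON break",
    };
    setup_irq(HYDRA_INT_ADB_NMI, &xmon_irqaction);

    /* After: request_irq() allocates the action and reports errors. */
    if (request_irq(HYDRA_INT_ADB_NMI, xmon_irq, 0, "XMON break", NULL))
            pr_err("Failed to register XMON break interrupt\n");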
diff --git a/arch/powerpc/platforms/chrp/smp.c b/arch/powerpc/platforms/chrp/smp.c
index f7bb6cb8d1e3..ab95155647a4 100644
--- a/arch/powerpc/platforms/chrp/smp.c
+++ b/arch/powerpc/platforms/chrp/smp.c
@@ -16,15 +16,14 @@
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
+#include <linux/pgtable.h>
#include <asm/ptrace.h>
#include <linux/atomic.h>
#include <asm/irq.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/sections.h>
#include <asm/io.h>
-#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/mpic.h>
diff --git a/arch/powerpc/platforms/chrp/time.c b/arch/powerpc/platforms/chrp/time.c
index acde7bbe0716..d46417e3d8e0 100644
--- a/arch/powerpc/platforms/chrp/time.c
+++ b/arch/powerpc/platforms/chrp/time.c
@@ -21,17 +21,15 @@
#include <linux/init.h>
#include <linux/bcd.h>
#include <linux/ioport.h>
+#include <linux/of_address.h>
#include <asm/io.h>
#include <asm/nvram.h>
-#include <asm/prom.h>
#include <asm/sections.h>
#include <asm/time.h>
#include <platforms/chrp/chrp.h>
-extern spinlock_t rtc_lock;
-
#define NVRAM_AS0 0x74
#define NVRAM_AS1 0x75
#define NVRAM_DATA 0x77
diff --git a/arch/powerpc/platforms/embedded6xx/Kconfig b/arch/powerpc/platforms/embedded6xx/Kconfig
index c1920961f410..c54786f8461e 100644
--- a/arch/powerpc/platforms/embedded6xx/Kconfig
+++ b/arch/powerpc/platforms/embedded6xx/Kconfig
@@ -55,7 +55,7 @@ config MVME5100
select FORCE_PCI
select PPC_INDIRECT_PCI
select PPC_I8259
- select PPC_NATIVE
+ select PPC_HASH_MMU_NATIVE
select PPC_UDBG_16550
help
This option enables support for the Motorola (now Emerson) MVME5100
@@ -71,11 +71,6 @@ config MPC10X_BRIDGE
bool
select PPC_INDIRECT_PCI
-config MV64X60
- bool
- select PPC_INDIRECT_PCI
- select CHECK_CACHE_COHERENCY
-
config GAMECUBE_COMMON
bool
diff --git a/arch/powerpc/platforms/embedded6xx/flipper-pic.c b/arch/powerpc/platforms/embedded6xx/flipper-pic.c
index d39a9213a3e6..609bda2ad5dd 100644
--- a/arch/powerpc/platforms/embedded6xx/flipper-pic.c
+++ b/arch/powerpc/platforms/embedded6xx/flipper-pic.c
@@ -12,6 +12,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/irq.h>
+#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <asm/io.h>
diff --git a/arch/powerpc/platforms/embedded6xx/gamecube.c b/arch/powerpc/platforms/embedded6xx/gamecube.c
index ade928f7ea73..5c2575adcc7e 100644
--- a/arch/powerpc/platforms/embedded6xx/gamecube.c
+++ b/arch/powerpc/platforms/embedded6xx/gamecube.c
@@ -16,7 +16,6 @@
#include <asm/io.h>
#include <asm/machdep.h>
-#include <asm/prom.h>
#include <asm/time.h>
#include <asm/udbg.h>
diff --git a/arch/powerpc/platforms/embedded6xx/hlwd-pic.c b/arch/powerpc/platforms/embedded6xx/hlwd-pic.c
index a1b7f79a8a15..380b4285cce4 100644
--- a/arch/powerpc/platforms/embedded6xx/hlwd-pic.c
+++ b/arch/powerpc/platforms/embedded6xx/hlwd-pic.c
@@ -108,7 +108,6 @@ static const struct irq_domain_ops hlwd_irq_domain_ops = {
static unsigned int __hlwd_pic_get_irq(struct irq_domain *h)
{
void __iomem *io_base = h->host_data;
- int irq;
u32 irq_status;
irq_status = in_be32(io_base + HW_BROADWAY_ICR) &
@@ -116,23 +115,22 @@ static unsigned int __hlwd_pic_get_irq(struct irq_domain *h)
if (irq_status == 0)
return 0; /* no more IRQs pending */
- irq = __ffs(irq_status);
- return irq_linear_revmap(h, irq);
+ return __ffs(irq_status);
}
static void hlwd_pic_irq_cascade(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
struct irq_domain *irq_domain = irq_desc_get_handler_data(desc);
- unsigned int virq;
+ unsigned int hwirq;
raw_spin_lock(&desc->lock);
chip->irq_mask(&desc->irq_data); /* IRQ_LEVEL */
raw_spin_unlock(&desc->lock);
- virq = __hlwd_pic_get_irq(irq_domain);
- if (virq)
- generic_handle_irq(virq);
+ hwirq = __hlwd_pic_get_irq(irq_domain);
+ if (hwirq)
+ generic_handle_domain_irq(irq_domain, hwirq);
else
pr_err("spurious interrupt!\n");
@@ -155,7 +153,7 @@ static void __hlwd_quiesce(void __iomem *io_base)
out_be32(io_base + HW_BROADWAY_ICR, 0xffffffff);
}
-static struct irq_domain *hlwd_pic_init(struct device_node *np)
+static struct irq_domain *__init hlwd_pic_init(struct device_node *np)
{
struct irq_domain *irq_domain;
struct resource res;
@@ -190,7 +188,8 @@ static struct irq_domain *hlwd_pic_init(struct device_node *np)
unsigned int hlwd_pic_get_irq(void)
{
- return __hlwd_pic_get_irq(hlwd_irq_host);
+ unsigned int hwirq = __hlwd_pic_get_irq(hlwd_irq_host);
+ return hwirq ? irq_linear_revmap(hlwd_irq_host, hwirq) : 0;
}
/*
@@ -198,7 +197,7 @@ unsigned int hlwd_pic_get_irq(void)
*
*/
-void hlwd_pic_probe(void)
+void __init hlwd_pic_probe(void)
{
struct irq_domain *host;
struct device_node *np;
@@ -215,6 +214,7 @@ void hlwd_pic_probe(void)
irq_set_chained_handler(cascade_virq,
hlwd_pic_irq_cascade);
hlwd_irq_host = host;
+ of_node_put(np);
break;
}
}
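In the hlwd-pic hunks the cascade handler stops translating the hardware interrupt number itself (irq_linear_revmap() followed by generic_handle_irq()) and instead passes the raw hwirq to generic_handle_domain_irq(), which performs the domain lookup internally; only the exported hlwd_pic_get_irq() keeps an explicit revmap because its callers expect a Linux virq. A hedged sketch of a cascade handler in that style, using the generic chained_irq helpers rather than the open-coded desc->lock dance above (all example_* names are placeholders):

    #include <linux/irq.h>
    #include <linux/irqdomain.h>
    #include <linux/irqchip/chained_irq.h>

    static void example_pic_cascade(struct irq_desc *desc)
    {
            struct irq_chip *chip = irq_desc_get_chip(desc);
            struct irq_domain *domain = irq_desc_get_handler_data(desc);
            unsigned int hwirq;

            chained_irq_enter(chip, desc);          /* mask/ack the parent line */

            hwirq = example_pic_read_pending(domain->host_data); /* placeholder read */
            if (hwirq)
                    generic_handle_domain_irq(domain, hwirq);
            else
                    pr_err("spurious interrupt!\n");

            chained_irq_exit(chip, desc);           /* unmask/eoi the parent line */
    }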
diff --git a/arch/powerpc/platforms/embedded6xx/hlwd-pic.h b/arch/powerpc/platforms/embedded6xx/hlwd-pic.h
index f18eeeef0815..c2fa42e191dc 100644
--- a/arch/powerpc/platforms/embedded6xx/hlwd-pic.h
+++ b/arch/powerpc/platforms/embedded6xx/hlwd-pic.h
@@ -11,7 +11,7 @@
#define __HLWD_PIC_H
extern unsigned int hlwd_pic_get_irq(void);
-extern void hlwd_pic_probe(void);
+void __init hlwd_pic_probe(void);
extern void hlwd_quiesce(void);
#endif
diff --git a/arch/powerpc/platforms/embedded6xx/holly.c b/arch/powerpc/platforms/embedded6xx/holly.c
index d8f2e2c737bb..bebc5a972694 100644
--- a/arch/powerpc/platforms/embedded6xx/holly.c
+++ b/arch/powerpc/platforms/embedded6xx/holly.c
@@ -22,12 +22,13 @@
#include <linux/serial.h>
#include <linux/tty.h>
#include <linux/serial_core.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/extable.h>
#include <asm/time.h>
#include <asm/machdep.h>
-#include <asm/prom.h>
#include <asm/udbg.h>
#include <asm/tsi108.h>
#include <asm/pci-bridge.h>
@@ -50,7 +51,7 @@ static int holly_exclude_device(struct pci_controller *hose, u_char bus,
return PCIBIOS_SUCCESSFUL;
}
-static void holly_remap_bridge(void)
+static void __init holly_remap_bridge(void)
{
u32 lut_val, lut_addr;
int i;
@@ -108,15 +109,13 @@ static void holly_remap_bridge(void)
tsi108_write_reg(TSI108_PCI_P2O_BAR2, 0x0);
}
-static void __init holly_setup_arch(void)
+static void __init holly_init_pci(void)
{
struct device_node *np;
if (ppc_md.progress)
ppc_md.progress("holly_setup_arch():set_bridge", 0);
- tsi108_csr_vir_base = get_vir_csrbase();
-
/* setup PCI host bridge */
holly_remap_bridge();
@@ -124,9 +123,16 @@ static void __init holly_setup_arch(void)
if (np)
tsi108_setup_pci(np, HOLLY_PCI_CFG_PHYS, 1);
+ of_node_put(np);
+
ppc_md.pci_exclude_device = holly_exclude_device;
if (ppc_md.progress)
ppc_md.progress("tsi108: resources set", 0x100);
+}
+
+static void __init holly_setup_arch(void)
+{
+ tsi108_csr_vir_base = get_vir_csrbase();
printk(KERN_INFO "PPC750GX/CL Platform\n");
}
@@ -180,6 +186,9 @@ static void __init holly_init_IRQ(void)
tsi108_pci_int_init(cascade_node);
irq_set_handler_data(cascade_pci_irq, mpic);
irq_set_chained_handler(cascade_pci_irq, tsi108_irq_cascade);
+
+ of_node_put(tsi_pci);
+ of_node_put(cascade_node);
#endif
/* Configure MPIC outputs to CPU0 */
tsi108_write_reg(TSI108_MPIC_OFFSET + 0x30c, 0);
@@ -206,6 +215,7 @@ static void __noreturn holly_restart(char *cmd)
if (bridge) {
prop = of_get_property(bridge, "reg", &size);
addr = of_translate_address(bridge, prop);
+ of_node_put(bridge);
}
addr += (TSI108_PB_OFFSET + 0x414);
@@ -248,8 +258,8 @@ static int ppc750_machine_check_exception(struct pt_regs *regs)
/* Are we prepared to handle this fault */
if ((entry = search_exception_tables(regs->nip)) != NULL) {
tsi108_clear_pci_cfg_error();
- regs->msr |= MSR_RI;
- regs->nip = extable_fixup(entry);
+ regs_set_recoverable(regs);
+ regs_set_return_ip(regs, extable_fixup(entry));
return 1;
}
return 0;
@@ -259,6 +269,7 @@ define_machine(holly){
.name = "PPC750 GX/CL TSI",
.probe = holly_probe,
.setup_arch = holly_setup_arch,
+ .discover_phbs = holly_init_pci,
.init_IRQ = holly_init_IRQ,
.show_cpuinfo = holly_show_cpuinfo,
.get_irq = mpic_get_irq,
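The holly change above is the first of a pattern repeated for linkstation, mpc7448_hpc2, mvme5100, storcenter, maple, pasemi and chrp in the diffs that follow: PCI host-bridge probing moves out of setup_arch() into its own __init function, registered through the ppc_md.discover_phbs hook, which the powerpc core invokes during boot before the PCI buses are scanned. A minimal sketch of the split for a hypothetical board (every example_* identifier and the compatible string are placeholders):

    static void __init example_setup_pci(void)
    {
            struct device_node *np;

            /* Register each PCI host bridge found in the device tree. */
            for_each_compatible_node(np, "pci", "example,pci")
                    example_add_bridge(np);         /* placeholder helper */
    }

    static void __init example_setup_arch(void)
    {
            /* Board setup no longer touches PCI here. */
    }

    define_machine(example) {
            .name          = "Example",
            .probe         = example_probe,         /* placeholder */
            .setup_arch    = example_setup_arch,
            .discover_phbs = example_setup_pci,
    };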
diff --git a/arch/powerpc/platforms/embedded6xx/linkstation.c b/arch/powerpc/platforms/embedded6xx/linkstation.c
index f514d5d28cd4..1830e1ac1f8f 100644
--- a/arch/powerpc/platforms/embedded6xx/linkstation.c
+++ b/arch/powerpc/platforms/embedded6xx/linkstation.c
@@ -15,7 +15,6 @@
#include <linux/of_platform.h>
#include <asm/time.h>
-#include <asm/prom.h>
#include <asm/mpic.h>
#include <asm/pci-bridge.h>
@@ -64,14 +63,17 @@ static int __init linkstation_add_bridge(struct device_node *dev)
static void __init linkstation_setup_arch(void)
{
+ printk(KERN_INFO "BUFFALO Network Attached Storage Series\n");
+ printk(KERN_INFO "(C) 2002-2005 BUFFALO INC.\n");
+}
+
+static void __init linkstation_setup_pci(void)
+{
struct device_node *np;
/* Lookup PCI host bridges */
for_each_compatible_node(np, "pci", "mpc10x-pci")
linkstation_add_bridge(np);
-
- printk(KERN_INFO "BUFFALO Network Attached Storage Series\n");
- printk(KERN_INFO "(C) 2002-2005 BUFFALO INC.\n");
}
/*
@@ -153,6 +155,7 @@ define_machine(linkstation){
.name = "Buffalo Linkstation",
.probe = linkstation_probe,
.setup_arch = linkstation_setup_arch,
+ .discover_phbs = linkstation_setup_pci,
.init_IRQ = linkstation_init_IRQ,
.show_cpuinfo = linkstation_show_cpuinfo,
.get_irq = mpic_get_irq,
diff --git a/arch/powerpc/platforms/embedded6xx/ls_uart.c b/arch/powerpc/platforms/embedded6xx/ls_uart.c
index 9d891bd5df5a..4ecbc55b37c0 100644
--- a/arch/powerpc/platforms/embedded6xx/ls_uart.c
+++ b/arch/powerpc/platforms/embedded6xx/ls_uart.c
@@ -14,8 +14,8 @@
#include <linux/delay.h>
#include <linux/serial_reg.h>
#include <linux/serial_8250.h>
+#include <linux/of.h>
#include <asm/io.h>
-#include <asm/prom.h>
#include <asm/termbits.h>
#include "mpc10x.h"
@@ -124,6 +124,8 @@ static int __init ls_uarts_init(void)
avr_clock = *(u32*)of_get_property(avr, "clock-frequency", &len);
phys_addr = ((u32*)of_get_property(avr, "reg", &len))[0];
+ of_node_put(avr);
+
if (!avr_clock || !phys_addr)
return -EINVAL;
diff --git a/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c b/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c
index 15437abe1f6d..ddf0c652af80 100644
--- a/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c
+++ b/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c
@@ -27,10 +27,10 @@
#include <linux/serial.h>
#include <linux/tty.h>
#include <linux/serial_core.h>
+#include <linux/of_irq.h>
#include <asm/time.h>
#include <asm/machdep.h>
-#include <asm/prom.h>
#include <asm/udbg.h>
#include <asm/tsi108.h>
#include <asm/pci-bridge.h>
@@ -58,16 +58,14 @@ int mpc7448_hpc2_exclude_device(struct pci_controller *hose,
return PCIBIOS_SUCCESSFUL;
}
-static void __init mpc7448_hpc2_setup_arch(void)
+static void __init mpc7448_hpc2_setup_pci(void)
{
+#ifdef CONFIG_PCI
struct device_node *np;
if (ppc_md.progress)
- ppc_md.progress("mpc7448_hpc2_setup_arch():set_bridge", 0);
-
- tsi108_csr_vir_base = get_vir_csrbase();
+ ppc_md.progress("mpc7448_hpc2_setup_pci():set_bridge", 0);
/* setup PCI host bridge */
-#ifdef CONFIG_PCI
for_each_compatible_node(np, "pci", "tsi108-pci")
tsi108_setup_pci(np, MPC7448HPC2_PCI_CFG_PHYS, 0);
@@ -75,6 +73,11 @@ static void __init mpc7448_hpc2_setup_arch(void)
if (ppc_md.progress)
ppc_md.progress("tsi108: resources set", 0x100);
#endif
+}
+
+static void __init mpc7448_hpc2_setup_arch(void)
+{
+ tsi108_csr_vir_base = get_vir_csrbase();
printk(KERN_INFO "MPC7448HPC2 (TAIGA) Platform\n");
printk(KERN_INFO
@@ -132,6 +135,9 @@ static void __init mpc7448_hpc2_init_IRQ(void)
tsi108_pci_int_init(cascade_node);
irq_set_handler_data(cascade_pci_irq, mpic);
irq_set_chained_handler(cascade_pci_irq, tsi108_irq_cascade);
+
+ of_node_put(tsi_pci);
+ of_node_put(cascade_node);
#endif
/* Configure MPIC outputs to CPU0 */
tsi108_write_reg(TSI108_MPIC_OFFSET + 0x30c, 0);
@@ -147,7 +153,8 @@ static void __noreturn mpc7448_hpc2_restart(char *cmd)
local_irq_disable();
/* Set exception prefix high - to the firmware */
- _nmask_and_or_msr(0, MSR_IP);
+ mtmsr(mfmsr() | MSR_IP);
+ isync();
for (;;) ; /* Spin until reset happens */
}
@@ -169,8 +176,8 @@ static int mpc7448_machine_check_exception(struct pt_regs *regs)
/* Are we prepared to handle this fault */
if ((entry = search_exception_tables(regs->nip)) != NULL) {
tsi108_clear_pci_cfg_error();
- regs->msr |= MSR_RI;
- regs->nip = extable_fixup(entry);
+ regs_set_recoverable(regs);
+ regs_set_return_ip(regs, extable_fixup(entry));
return 1;
}
return 0;
@@ -180,6 +187,7 @@ define_machine(mpc7448_hpc2){
.name = "MPC7448 HPC2",
.probe = mpc7448_hpc2_probe,
.setup_arch = mpc7448_hpc2_setup_arch,
+ .discover_phbs = mpc7448_hpc2_setup_pci,
.init_IRQ = mpc7448_hpc2_init_IRQ,
.show_cpuinfo = mpc7448_hpc2_show_cpuinfo,
.get_irq = mpic_get_irq,
diff --git a/arch/powerpc/platforms/embedded6xx/mvme5100.c b/arch/powerpc/platforms/embedded6xx/mvme5100.c
index 1cd488daa0bf..4854cc592cec 100644
--- a/arch/powerpc/platforms/embedded6xx/mvme5100.c
+++ b/arch/powerpc/platforms/embedded6xx/mvme5100.c
@@ -12,12 +12,12 @@
* Author: Stephen Chivers <schivers@csc.com>
*/
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <asm/i8259.h>
#include <asm/pci-bridge.h>
#include <asm/mpic.h>
-#include <asm/prom.h>
#include <mm/mmu_decl.h>
#include <asm/udbg.h>
@@ -154,17 +154,19 @@ static const struct of_device_id mvme5100_of_bus_ids[] __initconst = {
*/
static void __init mvme5100_setup_arch(void)
{
- struct device_node *np;
-
if (ppc_md.progress)
ppc_md.progress("mvme5100_setup_arch()", 0);
- for_each_compatible_node(np, "pci", "hawk-pci")
- mvme5100_add_bridge(np);
-
restart = ioremap(BOARD_MODRST_REG, 4);
}
+static void __init mvme5100_setup_pci(void)
+{
+ struct device_node *np;
+
+ for_each_compatible_node(np, "pci", "hawk-pci")
+ mvme5100_add_bridge(np);
+}
static void mvme5100_show_cpuinfo(struct seq_file *m)
{
@@ -205,6 +207,7 @@ define_machine(mvme5100) {
.name = "MVME5100",
.probe = mvme5100_probe,
.setup_arch = mvme5100_setup_arch,
+ .discover_phbs = mvme5100_setup_pci,
.init_IRQ = mvme5100_pic_init,
.show_cpuinfo = mvme5100_show_cpuinfo,
.get_irq = mpic_get_irq,
diff --git a/arch/powerpc/platforms/embedded6xx/storcenter.c b/arch/powerpc/platforms/embedded6xx/storcenter.c
index ed1914dd34bb..5f16e80b6ed6 100644
--- a/arch/powerpc/platforms/embedded6xx/storcenter.c
+++ b/arch/powerpc/platforms/embedded6xx/storcenter.c
@@ -17,7 +17,6 @@
#include <linux/of_platform.h>
#include <asm/time.h>
-#include <asm/prom.h>
#include <asm/mpic.h>
#include <asm/pci-bridge.h>
@@ -66,13 +65,16 @@ static int __init storcenter_add_bridge(struct device_node *dev)
static void __init storcenter_setup_arch(void)
{
+ printk(KERN_INFO "IOMEGA StorCenter\n");
+}
+
+static void __init storcenter_setup_pci(void)
+{
struct device_node *np;
/* Lookup PCI host bridges */
for_each_compatible_node(np, "pci", "mpc10x-pci")
storcenter_add_bridge(np);
-
- printk(KERN_INFO "IOMEGA StorCenter\n");
}
/*
@@ -101,7 +103,8 @@ static void __noreturn storcenter_restart(char *cmd)
local_irq_disable();
/* Set exception prefix high - to the firmware */
- _nmask_and_or_msr(0, MSR_IP);
+ mtmsr(mfmsr() | MSR_IP);
+ isync();
/* Wait for reset to happen */
for (;;) ;
@@ -116,6 +119,7 @@ define_machine(storcenter){
.name = "IOMEGA StorCenter",
.probe = storcenter_probe,
.setup_arch = storcenter_setup_arch,
+ .discover_phbs = storcenter_setup_pci,
.init_IRQ = storcenter_init_IRQ,
.get_irq = mpic_get_irq,
.restart = storcenter_restart,
diff --git a/arch/powerpc/platforms/embedded6xx/usbgecko_udbg.c b/arch/powerpc/platforms/embedded6xx/usbgecko_udbg.c
index ed45db70a781..e02bdabf358c 100644
--- a/arch/powerpc/platforms/embedded6xx/usbgecko_udbg.c
+++ b/arch/powerpc/platforms/embedded6xx/usbgecko_udbg.c
@@ -7,10 +7,11 @@
* Copyright (C) 2008,2009 Albert Herranz
*/
+#include <linux/of_address.h>
+
#include <mm/mmu_decl.h>
#include <asm/io.h>
-#include <asm/prom.h>
#include <asm/udbg.h>
#include <asm/fixmap.h>
@@ -194,7 +195,7 @@ static int ug_udbg_getc_poll(void)
/*
* Retrieves and prepares the virtual address needed to access the hardware.
*/
-static void __iomem *ug_udbg_setup_exi_io_base(struct device_node *np)
+static void __iomem *__init ug_udbg_setup_exi_io_base(struct device_node *np)
{
void __iomem *exi_io_base = NULL;
phys_addr_t paddr;
@@ -212,7 +213,7 @@ static void __iomem *ug_udbg_setup_exi_io_base(struct device_node *np)
/*
* Checks if a USB Gecko adapter is inserted in any memory card slot.
*/
-static void __iomem *ug_udbg_probe(void __iomem *exi_io_base)
+static void __iomem *__init ug_udbg_probe(void __iomem *exi_io_base)
{
int i;
diff --git a/arch/powerpc/platforms/embedded6xx/wii.c b/arch/powerpc/platforms/embedded6xx/wii.c
index 67e48b0a164e..f4e654a9d4ff 100644
--- a/arch/powerpc/platforms/embedded6xx/wii.c
+++ b/arch/powerpc/platforms/embedded6xx/wii.c
@@ -13,13 +13,11 @@
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/seq_file.h>
+#include <linux/of_address.h>
#include <linux/of_platform.h>
-#include <linux/memblock.h>
-#include <mm/mmu_decl.h>
#include <asm/io.h>
#include <asm/machdep.h>
-#include <asm/prom.h>
#include <asm/time.h>
#include <asm/udbg.h>
@@ -49,19 +47,6 @@
static void __iomem *hw_ctrl;
static void __iomem *hw_gpio;
-static int __init page_aligned(unsigned long x)
-{
- return !(x & (PAGE_SIZE-1));
-}
-
-void __init wii_memory_fixups(void)
-{
- struct memblock_region *p = memblock.memory.regions;
-
- BUG_ON(memblock.memory.cnt != 2);
- BUG_ON(!page_aligned(p[0].base) || !page_aligned(p[1].base));
-}
-
static void __noreturn wii_spin(void)
{
local_irq_disable();
@@ -69,7 +54,7 @@ static void __noreturn wii_spin(void)
cpu_relax();
}
-static void __iomem *wii_ioremap_hw_regs(char *name, char *compatible)
+static void __iomem *__init wii_ioremap_hw_regs(char *name, char *compatible)
{
void __iomem *hw_regs = NULL;
struct device_node *np;
@@ -172,19 +157,6 @@ static void wii_shutdown(void)
flipper_quiesce();
}
-define_machine(wii) {
- .name = "wii",
- .probe = wii_probe,
- .setup_arch = wii_setup_arch,
- .restart = wii_restart,
- .halt = wii_halt,
- .init_IRQ = wii_pic_probe,
- .get_irq = flipper_pic_get_irq,
- .calibrate_decr = generic_calibrate_decr,
- .progress = udbg_progress,
- .machine_shutdown = wii_shutdown,
-};
-
static const struct of_device_id wii_of_bus[] = {
{ .compatible = "nintendo,hollywood", },
{ },
@@ -200,3 +172,15 @@ static int __init wii_device_probe(void)
}
device_initcall(wii_device_probe);
+define_machine(wii) {
+ .name = "wii",
+ .probe = wii_probe,
+ .setup_arch = wii_setup_arch,
+ .restart = wii_restart,
+ .halt = wii_halt,
+ .init_IRQ = wii_pic_probe,
+ .get_irq = flipper_pic_get_irq,
+ .calibrate_decr = generic_calibrate_decr,
+ .progress = udbg_progress,
+ .machine_shutdown = wii_shutdown,
+};
diff --git a/arch/powerpc/platforms/fsl_uli1575.c b/arch/powerpc/platforms/fsl_uli1575.c
index 044a20c1fbde..84afae7a2561 100644
--- a/arch/powerpc/platforms/fsl_uli1575.c
+++ b/arch/powerpc/platforms/fsl_uli1575.c
@@ -10,6 +10,7 @@
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/mc146818rtc.h>
+#include <linux/of_irq.h>
#include <asm/pci-bridge.h>
diff --git a/arch/powerpc/platforms/maple/Kconfig b/arch/powerpc/platforms/maple/Kconfig
index 86ae210bee9a..4c058cc57c90 100644
--- a/arch/powerpc/platforms/maple/Kconfig
+++ b/arch/powerpc/platforms/maple/Kconfig
@@ -9,7 +9,8 @@ config PPC_MAPLE
select GENERIC_TBSYNC
select PPC_UDBG_16550
select PPC_970_NAP
- select PPC_NATIVE
+ select PPC_64S_HASH_MMU
+ select PPC_HASH_MMU_NATIVE
select PPC_RTAS
select MMIO_NVRAM
select ATA_NONSTANDARD if ATA
diff --git a/arch/powerpc/platforms/maple/pci.c b/arch/powerpc/platforms/maple/pci.c
index c86a66d5e998..b911b31717cc 100644
--- a/arch/powerpc/platforms/maple/pci.c
+++ b/arch/powerpc/platforms/maple/pci.c
@@ -12,10 +12,10 @@
#include <linux/string.h>
#include <linux/init.h>
#include <linux/irq.h>
+#include <linux/of_irq.h>
#include <asm/sections.h>
#include <asm/io.h>
-#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/iommu.h>
@@ -34,7 +34,7 @@ static struct pci_controller *u3_agp, *u3_ht, *u4_pcie;
static int __init fixup_one_level_bus_range(struct device_node *node, int higher)
{
- for (; node != 0;node = node->sibling) {
+ for (; node; node = node->sibling) {
const int *bus_range;
const unsigned int *class_code;
int len;
@@ -536,6 +536,9 @@ static int __init maple_add_bridge(struct device_node *dev)
/* Check for legacy IOs */
isa_bridge_find_early(hose);
+ /* create pci_dn's for DT nodes under this PHB */
+ pci_devs_phb_init_dynamic(hose);
+
return 0;
}
diff --git a/arch/powerpc/platforms/maple/setup.c b/arch/powerpc/platforms/maple/setup.c
index 6f019df37916..c26c379e1cc8 100644
--- a/arch/powerpc/platforms/maple/setup.c
+++ b/arch/powerpc/platforms/maple/setup.c
@@ -36,13 +36,12 @@
#include <linux/serial.h>
#include <linux/smp.h>
#include <linux/bitops.h>
+#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/memblock.h>
#include <asm/processor.h>
#include <asm/sections.h>
-#include <asm/prom.h>
-#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/pci-bridge.h>
#include <asm/iommu.h>
@@ -180,9 +179,6 @@ static void __init maple_setup_arch(void)
#ifdef CONFIG_SMP
smp_ops = &maple_smp_ops;
#endif
- /* Lookup PCI hosts */
- maple_pci_init();
-
maple_use_rtas_reboot_and_halt_if_present();
printk(KERN_DEBUG "Using native/NAP idle loop\n");
@@ -291,23 +287,6 @@ static int __init maple_probe(void)
return 1;
}
-define_machine(maple) {
- .name = "Maple",
- .probe = maple_probe,
- .setup_arch = maple_setup_arch,
- .init_IRQ = maple_init_IRQ,
- .pci_irq_fixup = maple_pci_irq_fixup,
- .pci_get_legacy_ide_irq = maple_pci_get_legacy_ide_irq,
- .restart = maple_restart,
- .halt = maple_halt,
- .get_boot_time = maple_get_boot_time,
- .set_rtc_time = maple_set_rtc_time,
- .get_rtc_time = maple_get_rtc_time,
- .calibrate_decr = generic_calibrate_decr,
- .progress = maple_progress,
- .power_save = power4_idle,
-};
-
#ifdef CONFIG_EDAC
/*
* Register a platform device for CPC925 memory controller on
@@ -364,3 +343,21 @@ static int __init maple_cpc925_edac_setup(void)
}
machine_device_initcall(maple, maple_cpc925_edac_setup);
#endif
+
+define_machine(maple) {
+ .name = "Maple",
+ .probe = maple_probe,
+ .setup_arch = maple_setup_arch,
+ .discover_phbs = maple_pci_init,
+ .init_IRQ = maple_init_IRQ,
+ .pci_irq_fixup = maple_pci_irq_fixup,
+ .pci_get_legacy_ide_irq = maple_pci_get_legacy_ide_irq,
+ .restart = maple_restart,
+ .halt = maple_halt,
+ .get_boot_time = maple_get_boot_time,
+ .set_rtc_time = maple_set_rtc_time,
+ .get_rtc_time = maple_get_rtc_time,
+ .calibrate_decr = generic_calibrate_decr,
+ .progress = maple_progress,
+ .power_save = power4_idle,
+};
diff --git a/arch/powerpc/platforms/maple/time.c b/arch/powerpc/platforms/maple/time.c
index 701c4e098fe9..91606411d2e0 100644
--- a/arch/powerpc/platforms/maple/time.c
+++ b/arch/powerpc/platforms/maple/time.c
@@ -19,11 +19,10 @@
#include <linux/interrupt.h>
#include <linux/mc146818rtc.h>
#include <linux/bcd.h>
+#include <linux/of_address.h>
#include <asm/sections.h>
-#include <asm/prom.h>
#include <asm/io.h>
-#include <asm/pgtable.h>
#include <asm/machdep.h>
#include <asm/time.h>
@@ -154,6 +153,7 @@ time64_t __init maple_get_boot_time(void)
maple_rtc_addr);
}
bail:
+ of_node_put(rtcs);
if (maple_rtc_addr == 0) {
maple_rtc_addr = RTC_PORT(0); /* legacy address */
printk(KERN_INFO "Maple: No device node for RTC, assuming "
diff --git a/arch/powerpc/platforms/microwatt/Kconfig b/arch/powerpc/platforms/microwatt/Kconfig
new file mode 100644
index 000000000000..6af443a1db99
--- /dev/null
+++ b/arch/powerpc/platforms/microwatt/Kconfig
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0
+config PPC_MICROWATT
+ depends on PPC_BOOK3S_64 && !SMP
+ bool "Microwatt SoC platform"
+ select PPC_XICS
+ select PPC_ICS_NATIVE
+ select PPC_ICP_NATIVE
+ select PPC_UDBG_16550
+ help
+ This option enables support for FPGA-based Microwatt implementations.
+
diff --git a/arch/powerpc/platforms/microwatt/Makefile b/arch/powerpc/platforms/microwatt/Makefile
new file mode 100644
index 000000000000..116d6d3ad3f0
--- /dev/null
+++ b/arch/powerpc/platforms/microwatt/Makefile
@@ -0,0 +1 @@
+obj-y += setup.o rng.o
diff --git a/arch/powerpc/platforms/microwatt/microwatt.h b/arch/powerpc/platforms/microwatt/microwatt.h
new file mode 100644
index 000000000000..335417e95e66
--- /dev/null
+++ b/arch/powerpc/platforms/microwatt/microwatt.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _MICROWATT_H
+#define _MICROWATT_H
+
+void microwatt_rng_init(void);
+
+#endif /* _MICROWATT_H */
diff --git a/arch/powerpc/platforms/microwatt/rng.c b/arch/powerpc/platforms/microwatt/rng.c
new file mode 100644
index 000000000000..8ece87d005c8
--- /dev/null
+++ b/arch/powerpc/platforms/microwatt/rng.c
@@ -0,0 +1,44 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Derived from arch/powerpc/platforms/powernv/rng.c, which is:
+ * Copyright 2013, Michael Ellerman, IBM Corporation.
+ */
+
+#define pr_fmt(fmt) "microwatt-rng: " fmt
+
+#include <linux/kernel.h>
+#include <linux/smp.h>
+#include <asm/archrandom.h>
+#include <asm/cputable.h>
+#include <asm/machdep.h>
+#include "microwatt.h"
+
+#define DARN_ERR 0xFFFFFFFFFFFFFFFFul
+
+static int microwatt_get_random_darn(unsigned long *v)
+{
+ unsigned long val;
+
+ /* Using DARN with L=1 - 64-bit conditioned random number */
+ asm volatile(PPC_DARN(%0, 1) : "=r"(val));
+
+ if (val == DARN_ERR)
+ return 0;
+
+ *v = val;
+
+ return 1;
+}
+
+void __init microwatt_rng_init(void)
+{
+ unsigned long val;
+ int i;
+
+ for (i = 0; i < 10; i++) {
+ if (microwatt_get_random_darn(&val)) {
+ ppc_md.get_random_seed = microwatt_get_random_darn;
+ return;
+ }
+ }
+}
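The new rng.c probes the DARN instruction a handful of times and, once it returns a valid value, publishes the helper as ppc_md.get_random_seed. Nothing calls that hook directly; on powerpc kernels of this vintage the arch_get_random_seed_long() wrapper in asm/archrandom.h forwards to ppc_md.get_random_seed when a platform has set it (the wrapper name is recalled from that era, not taken from this patch, and later kernels reshuffled the archrandom API). A hedged usage sketch:

    #include <asm/archrandom.h>

    unsigned long seed;

    if (arch_get_random_seed_long(&seed))
            pr_info("hardware seed: 0x%lx\n", seed);
    else
            pr_info("no platform RNG registered\n");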
diff --git a/arch/powerpc/platforms/microwatt/setup.c b/arch/powerpc/platforms/microwatt/setup.c
new file mode 100644
index 000000000000..6b32539395a4
--- /dev/null
+++ b/arch/powerpc/platforms/microwatt/setup.c
@@ -0,0 +1,49 @@
+/*
+ * Microwatt FPGA-based SoC platform setup code.
+ *
+ * Copyright 2020 Paul Mackerras (paulus@ozlabs.org), IBM Corp.
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/stddef.h>
+#include <linux/init.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+
+#include <asm/machdep.h>
+#include <asm/time.h>
+#include <asm/xics.h>
+#include <asm/udbg.h>
+
+#include "microwatt.h"
+
+static void __init microwatt_init_IRQ(void)
+{
+ xics_init();
+}
+
+static int __init microwatt_probe(void)
+{
+ return of_machine_is_compatible("microwatt-soc");
+}
+
+static int __init microwatt_populate(void)
+{
+ return of_platform_default_populate(NULL, NULL, NULL);
+}
+machine_arch_initcall(microwatt, microwatt_populate);
+
+static void __init microwatt_setup_arch(void)
+{
+ microwatt_rng_init();
+}
+
+define_machine(microwatt) {
+ .name = "microwatt",
+ .probe = microwatt_probe,
+ .init_IRQ = microwatt_init_IRQ,
+ .setup_arch = microwatt_setup_arch,
+ .progress = udbg_progress,
+ .calibrate_decr = generic_calibrate_decr,
+};
diff --git a/arch/powerpc/platforms/pasemi/Kconfig b/arch/powerpc/platforms/pasemi/Kconfig
index c52731a7773f..85ae18ddd911 100644
--- a/arch/powerpc/platforms/pasemi/Kconfig
+++ b/arch/powerpc/platforms/pasemi/Kconfig
@@ -5,7 +5,8 @@ config PPC_PASEMI
select MPIC
select FORCE_PCI
select PPC_UDBG_16550
- select PPC_NATIVE
+ select PPC_64S_HASH_MMU
+ select PPC_HASH_MMU_NATIVE
select MPIC_BROKEN_REGREAD
help
This option enables support for PA Semi's PWRficient line
diff --git a/arch/powerpc/platforms/pasemi/dma_lib.c b/arch/powerpc/platforms/pasemi/dma_lib.c
index 270fa3c0d372..1be1f18f6f09 100644
--- a/arch/powerpc/platforms/pasemi/dma_lib.c
+++ b/arch/powerpc/platforms/pasemi/dma_lib.c
@@ -10,6 +10,8 @@
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/sched.h>
#include <asm/pasemi_dma.h>
@@ -375,7 +377,7 @@ int pasemi_dma_alloc_flag(void)
int bit;
retry:
- bit = find_next_bit(flags_free, MAX_FLAGS, 0);
+ bit = find_first_bit(flags_free, MAX_FLAGS);
if (bit >= MAX_FLAGS)
return -ENOSPC;
if (!test_and_clear_bit(bit, flags_free))
@@ -440,7 +442,7 @@ int pasemi_dma_alloc_fun(void)
int bit;
retry:
- bit = find_next_bit(fun_free, MAX_FLAGS, 0);
+ bit = find_first_bit(fun_free, MAX_FLAGS);
if (bit >= MAX_FLAGS)
return -ENOSPC;
if (!test_and_clear_bit(bit, fun_free))
diff --git a/arch/powerpc/platforms/pasemi/idle.c b/arch/powerpc/platforms/pasemi/idle.c
index 1c954c90b0f4..6087c70ed2ef 100644
--- a/arch/powerpc/platforms/pasemi/idle.c
+++ b/arch/powerpc/platforms/pasemi/idle.c
@@ -37,11 +37,12 @@ static int pasemi_system_reset_exception(struct pt_regs *regs)
*/
if (regs->msr & SRR1_WAKEMASK)
- regs->nip = regs->link;
+ regs_set_return_ip(regs, regs->link);
switch (regs->msr & SRR1_WAKEMASK) {
case SRR1_WAKEDEC:
set_dec(1);
+ break;
case SRR1_WAKEEE:
/*
* Handle these when interrupts get re-enabled and we take
@@ -58,7 +59,7 @@ static int pasemi_system_reset_exception(struct pt_regs *regs)
restore_astate(hard_smp_processor_id());
/* everything handled */
- regs->msr |= MSR_RI;
+ regs_set_recoverable(regs);
return 1;
}
diff --git a/arch/powerpc/platforms/pasemi/iommu.c b/arch/powerpc/platforms/pasemi/iommu.c
index b500a6e47e6b..0a38663d44ed 100644
--- a/arch/powerpc/platforms/pasemi/iommu.c
+++ b/arch/powerpc/platforms/pasemi/iommu.c
@@ -11,6 +11,7 @@
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
+#include <linux/of.h>
#include <asm/iommu.h>
#include <asm/machdep.h>
#include <asm/firmware.h>
@@ -146,7 +147,9 @@ static void iommu_table_iobmap_setup(void)
*/
iommu_table_iobmap.it_blocksize = 4;
iommu_table_iobmap.it_ops = &iommu_table_iobmap_ops;
- iommu_init_table(&iommu_table_iobmap, 0, 0, 0);
+ if (!iommu_init_table(&iommu_table_iobmap, 0, 0, 0))
+ panic("Failed to initialize iommu table");
+
pr_debug(" <- %s\n", __func__);
}
diff --git a/arch/powerpc/platforms/pasemi/misc.c b/arch/powerpc/platforms/pasemi/misc.c
index 1cd4ca14b08d..9e9a7e46288a 100644
--- a/arch/powerpc/platforms/pasemi/misc.c
+++ b/arch/powerpc/platforms/pasemi/misc.c
@@ -11,6 +11,7 @@
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/of.h>
+#include <linux/of_irq.h>
#include <linux/i2c.h>
#ifdef CONFIG_I2C_BOARDINFO
@@ -35,8 +36,7 @@ static int __init find_i2c_driver(struct device_node *node,
for (i = 0; i < ARRAY_SIZE(i2c_devices); i++) {
if (!of_device_is_compatible(node, i2c_devices[i].of_device))
continue;
- if (strlcpy(info->type, i2c_devices[i].i2c_type,
- I2C_NAME_SIZE) >= I2C_NAME_SIZE)
+ if (strscpy(info->type, i2c_devices[i].i2c_type, I2C_NAME_SIZE) < 0)
return -ENOMEM;
return 0;
}
@@ -56,8 +56,7 @@ static int __init pasemi_register_i2c_devices(void)
if (!adap_node)
continue;
- node = NULL;
- while ((node = of_get_next_child(adap_node, node))) {
+ for_each_child_of_node(adap_node, node) {
struct i2c_board_info info = {};
const u32 *addr;
int len;
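The strlcpy() to strscpy() hunk above also flips the truncation test, and the reason is the differing return conventions: strlcpy() returns the length of the source string, so overflow was detected by comparing against the buffer size, whereas strscpy() returns the number of bytes copied or -E2BIG when the source does not fit, so a plain negative check is enough. A small illustration (buffer and string are made up):

    char name[8];
    ssize_t n;

    n = strscpy(name, "pasemi-rtc-device", sizeof(name));
    if (n < 0)                      /* n == -E2BIG: source did not fit */
            pr_warn("i2c type name truncated\n");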
diff --git a/arch/powerpc/platforms/pasemi/msi.c b/arch/powerpc/platforms/pasemi/msi.c
index d38944a1e258..dc1846660005 100644
--- a/arch/powerpc/platforms/pasemi/msi.c
+++ b/arch/powerpc/platforms/pasemi/msi.c
@@ -9,9 +9,9 @@
*/
#include <linux/irq.h>
+#include <linux/irqdomain.h>
#include <linux/msi.h>
#include <asm/mpic.h>
-#include <asm/prom.h>
#include <asm/hw_irq.h>
#include <asm/ppc-pci.h>
#include <asm/msi_bitmap.h>
@@ -62,17 +62,12 @@ static void pasemi_msi_teardown_msi_irqs(struct pci_dev *pdev)
pr_debug("pasemi_msi_teardown_msi_irqs, pdev %p\n", pdev);
- for_each_pci_msi_entry(entry, pdev) {
- if (!entry->irq)
- continue;
-
+ msi_for_each_desc(entry, &pdev->dev, MSI_DESC_ASSOCIATED) {
hwirq = virq_to_hw(entry->irq);
irq_set_msi_desc(entry->irq, NULL);
irq_dispose_mapping(entry->irq);
msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap, hwirq, ALLOC_CHUNK);
}
-
- return;
}
static int pasemi_msi_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
@@ -90,7 +85,7 @@ static int pasemi_msi_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
msg.address_hi = 0;
msg.address_lo = PASEMI_MSI_ADDR;
- for_each_pci_msi_entry(entry, pdev) {
+ msi_for_each_desc(entry, &pdev->dev, MSI_DESC_NOTASSOCIATED) {
/* Allocate 16 interrupts for now, since that's the grouping for
* affinity. This can be changed later if it turns out 32 is too
* few MSIs for someone, but restrictions will apply to how the
@@ -135,7 +130,7 @@ static int pasemi_msi_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
return 0;
}
-int mpic_pasemi_msi_init(struct mpic *mpic)
+int __init mpic_pasemi_msi_init(struct mpic *mpic)
{
int rc;
struct pci_controller *phb;
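The msi.c hunks replace the legacy for_each_pci_msi_entry() walker with msi_for_each_desc(), whose filter argument takes over the open-coded checks: MSI_DESC_ASSOCIATED visits only descriptors that already have a Linux interrupt mapped (so the teardown loop drops its "if (!entry->irq) continue;"), while MSI_DESC_NOTASSOCIATED visits those still awaiting setup. A hedged sketch of the resulting teardown shape, with the pasemi-specific bitmap free kept as shown in the diff:

    struct msi_desc *entry;

    msi_for_each_desc(entry, &pdev->dev, MSI_DESC_ASSOCIATED) {
            unsigned int hwirq = virq_to_hw(entry->irq);

            irq_set_msi_desc(entry->irq, NULL);     /* detach the descriptor */
            irq_dispose_mapping(entry->irq);        /* release the Linux irq */
            msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap, hwirq, ALLOC_CHUNK);
    }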
diff --git a/arch/powerpc/platforms/pasemi/pasemi.h b/arch/powerpc/platforms/pasemi/pasemi.h
index 70b56048ed1b..3f277a200fd8 100644
--- a/arch/powerpc/platforms/pasemi/pasemi.h
+++ b/arch/powerpc/platforms/pasemi/pasemi.h
@@ -7,7 +7,7 @@ extern void pas_pci_init(void);
extern void pas_pci_irq_fixup(struct pci_dev *dev);
extern void pas_pci_dma_dev_setup(struct pci_dev *dev);
-extern void __iomem *pasemi_pci_getcfgaddr(struct pci_dev *dev, int offset);
+void __iomem *__init pasemi_pci_getcfgaddr(struct pci_dev *dev, int offset);
extern void __init pasemi_map_registers(void);
diff --git a/arch/powerpc/platforms/pasemi/pci.c b/arch/powerpc/platforms/pasemi/pci.c
index 8779b107d872..f27d31414737 100644
--- a/arch/powerpc/platforms/pasemi/pci.c
+++ b/arch/powerpc/platforms/pasemi/pci.c
@@ -12,6 +12,7 @@
#include <linux/kernel.h>
+#include <linux/of_address.h>
#include <linux/pci.h>
#include <asm/pci-bridge.h>
@@ -269,25 +270,19 @@ static int __init pas_add_bridge(struct device_node *dev)
void __init pas_pci_init(void)
{
- struct device_node *np, *root;
+ struct device_node *np;
int res;
- root = of_find_node_by_path("/");
- if (!root) {
- pr_crit("pas_pci_init: can't find root of device tree\n");
- return;
- }
-
pci_set_flags(PCI_SCAN_ALL_PCIE_DEVS);
- np = of_find_compatible_node(root, NULL, "pasemi,rootbus");
+ np = of_find_compatible_node(of_root, NULL, "pasemi,rootbus");
if (np) {
res = pas_add_bridge(np);
of_node_put(np);
}
}
-void __iomem *pasemi_pci_getcfgaddr(struct pci_dev *dev, int offset)
+void __iomem *__init pasemi_pci_getcfgaddr(struct pci_dev *dev, int offset)
{
struct pci_controller *hose;
diff --git a/arch/powerpc/platforms/pasemi/setup.c b/arch/powerpc/platforms/pasemi/setup.c
index b612474f8f8e..2aef49e04dd4 100644
--- a/arch/powerpc/platforms/pasemi/setup.c
+++ b/arch/powerpc/platforms/pasemi/setup.c
@@ -18,8 +18,8 @@
#include <linux/pci.h>
#include <linux/of_platform.h>
#include <linux/gfp.h>
+#include <linux/irqdomain.h>
-#include <asm/prom.h>
#include <asm/iommu.h>
#include <asm/machdep.h>
#include <asm/i8259.h>
@@ -144,8 +144,6 @@ static void __init pas_setup_arch(void)
/* Setup SMP callback */
smp_ops = &pas_smp_ops;
#endif
- /* Lookup PCI hosts */
- pas_pci_init();
/* Remap SDC register for doing reset */
/* XXXOJN This should maybe come out of the device tree */
@@ -214,7 +212,7 @@ static void sb600_8259_cascade(struct irq_desc *desc)
chip->irq_eoi(&desc->irq_data);
}
-static void nemo_init_IRQ(struct mpic *mpic)
+static void __init nemo_init_IRQ(struct mpic *mpic)
{
struct device_node *np;
int gpio_virq;
@@ -446,6 +444,7 @@ define_machine(pasemi) {
.name = "PA Semi PWRficient",
.probe = pas_probe,
.setup_arch = pas_setup_arch,
+ .discover_phbs = pas_pci_init,
.init_IRQ = pas_init_IRQ,
.get_irq = mpic_get_irq,
.restart = pas_restart,
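
pas_pci_init() is no longer called from pas_setup_arch(); it is hooked up as the machine's .discover_phbs callback instead, which the powerpc core runs later in boot than setup_arch so PHB discovery can use regular memory allocation (that rationale is background knowledge about the hook, not something these hunks state). The shape of the change, sketched with hypothetical names:

	static void __init example_setup_arch(void)
	{
		/* no direct example_pci_init() call here any more */
	}

	define_machine(example) {
		.name          = "Example",
		.probe         = example_probe,
		.setup_arch    = example_setup_arch,
		.discover_phbs = example_pci_init,	/* PHB lookup moved here */
	};
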
diff --git a/arch/powerpc/platforms/powermac/Kconfig b/arch/powerpc/platforms/powermac/Kconfig
index c02d8c503b29..130707ec9f99 100644
--- a/arch/powerpc/platforms/powermac/Kconfig
+++ b/arch/powerpc/platforms/powermac/Kconfig
@@ -6,7 +6,8 @@ config PPC_PMAC
select FORCE_PCI
select PPC_INDIRECT_PCI if PPC32
select PPC_MPC106 if PPC32
- select PPC_NATIVE
+ select PPC_64S_HASH_MMU if PPC64
+ select PPC_HASH_MMU_NATIVE
select ZONE_DMA if PPC32
default y
@@ -24,6 +25,7 @@ config PPC_PMAC32_PSURGE
bool "Support for powersurge upgrade cards" if EXPERT
depends on SMP && PPC32 && PPC_PMAC
select PPC_SMP_MUXED_IPI
+ select IRQ_DOMAIN_NOMAP
default y
help
The powersurge cpu boards can be used in the generation
diff --git a/arch/powerpc/platforms/powermac/Makefile b/arch/powerpc/platforms/powermac/Makefile
index f4247ade71ca..cf85f0662d0d 100644
--- a/arch/powerpc/platforms/powermac/Makefile
+++ b/arch/powerpc/platforms/powermac/Makefile
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
CFLAGS_bootx_init.o += -fPIC
-CFLAGS_bootx_init.o += $(call cc-option, -fno-stack-protector)
+CFLAGS_bootx_init.o += -fno-stack-protector
KASAN_SANITIZE_bootx_init.o := n
diff --git a/arch/powerpc/platforms/powermac/backlight.c b/arch/powerpc/platforms/powermac/backlight.c
index 32224cb489d7..aeb79a8b3e10 100644
--- a/arch/powerpc/platforms/powermac/backlight.c
+++ b/arch/powerpc/platforms/powermac/backlight.c
@@ -15,7 +15,6 @@
#include <linux/pmu.h>
#include <linux/atomic.h>
#include <linux/export.h>
-#include <asm/prom.h>
#include <asm/backlight.h>
#define OLD_BACKLIGHT_MAX 15
diff --git a/arch/powerpc/platforms/powermac/bootx_init.c b/arch/powerpc/platforms/powermac/bootx_init.c
index af309ee99114..72eb99aba40f 100644
--- a/arch/powerpc/platforms/powermac/bootx_init.c
+++ b/arch/powerpc/platforms/powermac/bootx_init.c
@@ -8,6 +8,7 @@
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/init.h>
+#include <linux/of_fdt.h>
#include <generated/utsrelease.h>
#include <asm/sections.h>
#include <asm/prom.h>
@@ -108,7 +109,7 @@ static void * __init bootx_early_getprop(unsigned long base,
#define dt_push_token(token, mem) \
do { \
- *(mem) = _ALIGN_UP(*(mem),4); \
+ *(mem) = ALIGN(*(mem),4); \
*((u32 *)*(mem)) = token; \
*(mem) += 4; \
} while(0)
@@ -150,7 +151,7 @@ static void __init bootx_dt_add_prop(char *name, void *data, int size,
/* push property content */
if (size && data) {
memcpy((void *)*mem_end, data, size);
- *mem_end = _ALIGN_UP(*mem_end + size, 4);
+ *mem_end = ALIGN(*mem_end + size, 4);
}
}
@@ -243,7 +244,7 @@ static void __init bootx_scan_dt_build_strings(unsigned long base,
DBG(" detected display ! adding properties names !\n");
bootx_dt_add_string("linux,boot-display", mem_end);
bootx_dt_add_string("linux,opened", mem_end);
- strlcpy(bootx_disp_path, namep, sizeof(bootx_disp_path));
+ strscpy(bootx_disp_path, namep, sizeof(bootx_disp_path));
}
/* get and store all property names */
@@ -303,7 +304,7 @@ static void __init bootx_scan_dt_build_struct(unsigned long base,
*lp++ = *p;
}
*lp = 0;
- *mem_end = _ALIGN_UP((unsigned long)lp + 1, 4);
+ *mem_end = ALIGN((unsigned long)lp + 1, 4);
/* get and store all properties */
while (*ppp) {
@@ -356,11 +357,11 @@ static unsigned long __init bootx_flatten_dt(unsigned long start)
/* Start using memory after the big blob passed by BootX, get
* some space for the header
*/
- mem_start = mem_end = _ALIGN_UP(((unsigned long)bi) + start, 4);
+ mem_start = mem_end = ALIGN(((unsigned long)bi) + start, 4);
DBG("Boot params header at: %x\n", mem_start);
hdr = (struct boot_param_header *)mem_start;
mem_end += sizeof(struct boot_param_header);
- rsvmap = (u64 *)(_ALIGN_UP(mem_end, 8));
+ rsvmap = (u64 *)(ALIGN(mem_end, 8));
hdr->off_mem_rsvmap = ((unsigned long)rsvmap) - mem_start;
mem_end = ((unsigned long)rsvmap) + 8 * sizeof(u64);
@@ -386,7 +387,7 @@ static unsigned long __init bootx_flatten_dt(unsigned long start)
hdr->dt_strings_size = bootx_dt_strend - bootx_dt_strbase;
/* Build structure */
- mem_end = _ALIGN(mem_end, 16);
+ mem_end = ALIGN(mem_end, 16);
DBG("Building device tree structure at: %x\n", mem_end);
hdr->off_dt_struct = mem_end - mem_start;
bootx_scan_dt_build_struct(base, 4, &mem_end);
@@ -404,7 +405,7 @@ static unsigned long __init bootx_flatten_dt(unsigned long start)
* also bump mem_reserve_cnt to cause further reservations to
* fail since it's too late.
*/
- mem_end = _ALIGN(mem_end, PAGE_SIZE);
+ mem_end = ALIGN(mem_end, PAGE_SIZE);
DBG("End of boot params: %x\n", mem_end);
rsvmap[0] = mem_start;
rsvmap[1] = mem_end;
@@ -433,7 +434,7 @@ static void __init btext_welcome(boot_infos_t *bi)
bootx_printf("\nframe buffer at : 0x%x", bi->dispDeviceBase);
bootx_printf(" (phys), 0x%x", bi->logicalDisplayBase);
bootx_printf(" (log)");
- bootx_printf("\nklimit : 0x%x",(unsigned long)klimit);
+ bootx_printf("\nklimit : 0x%x",(unsigned long)_end);
bootx_printf("\nboot_info at : 0x%x", bi);
__asm__ __volatile__ ("mfmsr %0" : "=r" (flags));
bootx_printf("\nMSR : 0x%x", flags);
diff --git a/arch/powerpc/platforms/powermac/cache.S b/arch/powerpc/platforms/powermac/cache.S
index da69e0fcb4f1..b8ae56e9f414 100644
--- a/arch/powerpc/platforms/powermac/cache.S
+++ b/arch/powerpc/platforms/powermac/cache.S
@@ -48,7 +48,7 @@ flush_disable_75x:
/* Stop DST streams */
BEGIN_FTR_SECTION
- DSSALL
+ PPC_DSSALL
sync
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
@@ -184,6 +184,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
mtlr r10
blr
+_ASM_NOKPROBE_SYMBOL(flush_disable_75x)
/* This code is for 745x processors */
flush_disable_745x:
@@ -196,7 +197,7 @@ flush_disable_745x:
isync
/* Stop prefetch streams */
- DSSALL
+ PPC_DSSALL
sync
/* Disable L2 prefetching */
@@ -351,4 +352,5 @@ END_FTR_SECTION_IFSET(CPU_FTR_L3CR)
mtmsr r11 /* restore DR and EE */
isync
blr
+_ASM_NOKPROBE_SYMBOL(flush_disable_745x)
#endif /* CONFIG_PPC_BOOK3S_32 */
diff --git a/arch/powerpc/platforms/powermac/feature.c b/arch/powerpc/platforms/powermac/feature.c
index 181caa3f6717..0382d20b5619 100644
--- a/arch/powerpc/platforms/powermac/feature.c
+++ b/arch/powerpc/platforms/powermac/feature.c
@@ -31,7 +31,6 @@
#include <asm/keylargo.h>
#include <asm/uninorth.h>
#include <asm/io.h>
-#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/pmac_feature.h>
#include <asm/dbdma.h>
@@ -1465,7 +1464,7 @@ static long g5_i2s_enable(struct device_node *node, long param, long value)
case 2:
if (macio->type == macio_shasta)
break;
- /* fall through */
+ fallthrough;
default:
return -ENODEV;
}
@@ -1530,7 +1529,7 @@ static long g5_reset_cpu(struct device_node *node, long param, long value)
* This takes the second CPU off the bus on dual CPU machines
* running UP
*/
-void g5_phy_disable_cpu1(void)
+void __init g5_phy_disable_cpu1(void)
{
if (uninorth_maj == 3)
UN_OUT(U3_API_PHY_CONFIG_1, 0);
@@ -2633,31 +2632,31 @@ static void __init probe_one_macio(const char *name, const char *compat, int typ
if (!macio_chips[i].of_node)
break;
if (macio_chips[i].of_node == node)
- return;
+ goto out_put;
}
if (i >= MAX_MACIO_CHIPS) {
printk(KERN_ERR "pmac_feature: Please increase MAX_MACIO_CHIPS !\n");
printk(KERN_ERR "pmac_feature: %pOF skipped\n", node);
- return;
+ goto out_put;
}
addrp = of_get_pci_address(node, 0, &size, NULL);
if (addrp == NULL) {
printk(KERN_ERR "pmac_feature: %pOF: can't find base !\n",
node);
- return;
+ goto out_put;
}
addr = of_translate_address(node, addrp);
if (addr == 0) {
printk(KERN_ERR "pmac_feature: %pOF, can't translate base !\n",
node);
- return;
+ goto out_put;
}
base = ioremap(addr, (unsigned long)size);
if (!base) {
printk(KERN_ERR "pmac_feature: %pOF, can't map mac-io chip !\n",
node);
- return;
+ goto out_put;
}
if (type == macio_keylargo || type == macio_keylargo2) {
const u32 *did = of_get_property(node, "device-id", NULL);
@@ -2678,6 +2677,11 @@ static void __init probe_one_macio(const char *name, const char *compat, int typ
macio_chips[i].rev = *revp;
printk(KERN_INFO "Found a %s mac-io controller, rev: %d, mapped at 0x%p\n",
macio_names[type], macio_chips[i].rev, macio_chips[i].base);
+
+ return;
+
+out_put:
+ of_node_put(node);
}
static int __init
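
The probe_one_macio() change above is a device_node reference-count fix: every early return is redirected to a new out_put label so the reference taken when the node was looked up is dropped on failure, while the success path keeps it (the node stays referenced from macio_chips[]). The general pattern, sketched with a made-up compatible string:

	struct device_node *np;
	const u32 *reg;

	np = of_find_compatible_node(NULL, NULL, "example,chip");	/* takes a reference */
	if (!np)
		return;

	reg = of_get_property(np, "reg", NULL);
	if (!reg)
		goto out_put;		/* every failure path must drop the reference */

	/* success: store np somewhere long-lived and keep the reference */
	return;

out_put:
	of_node_put(np);
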
diff --git a/arch/powerpc/platforms/powermac/low_i2c.c b/arch/powerpc/platforms/powermac/low_i2c.c
index bf4be4b53b44..40f3aa432fba 100644
--- a/arch/powerpc/platforms/powermac/low_i2c.c
+++ b/arch/powerpc/platforms/powermac/low_i2c.c
@@ -40,10 +40,10 @@
#include <linux/mutex.h>
#include <linux/i2c.h>
#include <linux/slab.h>
+#include <linux/of_irq.h>
#include <asm/keylargo.h>
#include <asm/uninorth.h>
#include <asm/io.h>
-#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/smu.h>
#include <asm/pmac_pfunc.h>
@@ -582,6 +582,7 @@ static void __init kw_i2c_add(struct pmac_i2c_host_kw *host,
bus->close = kw_i2c_close;
bus->xfer = kw_i2c_xfer;
mutex_init(&bus->mutex);
+ lockdep_register_key(&bus->lock_key);
lockdep_set_class(&bus->mutex, &bus->lock_key);
if (controller == busnode)
bus->flags = pmac_i2c_multibus;
@@ -626,11 +627,11 @@ static void __init kw_i2c_probe(void)
if (parent == NULL)
continue;
chans = parent->name[0] == 'u' ? 2 : 1;
+ of_node_put(parent);
for (i = 0; i < chans; i++)
kw_i2c_add(host, np, np, i);
} else {
- for (child = NULL;
- (child = of_get_next_child(np, child)) != NULL;) {
+ for_each_child_of_node(np, child) {
const u32 *reg = of_get_property(child,
"reg", NULL);
if (reg == NULL)
@@ -811,6 +812,7 @@ static void __init pmu_i2c_probe(void)
bus->hostdata = bus + 1;
bus->xfer = pmu_i2c_xfer;
mutex_init(&bus->mutex);
+ lockdep_register_key(&bus->lock_key);
lockdep_set_class(&bus->mutex, &bus->lock_key);
bus->flags = pmac_i2c_multibus;
list_add(&bus->link, &pmac_i2c_busses);
@@ -934,6 +936,7 @@ static void __init smu_i2c_probe(void)
bus->hostdata = bus + 1;
bus->xfer = smu_i2c_xfer;
mutex_init(&bus->mutex);
+ lockdep_register_key(&bus->lock_key);
lockdep_set_class(&bus->mutex, &bus->lock_key);
bus->flags = 0;
list_add(&bus->link, &pmac_i2c_busses);
@@ -1193,8 +1196,7 @@ static void pmac_i2c_devscan(void (*callback)(struct device_node *dev,
* platform function instance
*/
list_for_each_entry(bus, &pmac_i2c_busses, link) {
- for (np = NULL;
- (np = of_get_next_child(bus->busnode, np)) != NULL;) {
+ for_each_child_of_node(bus->busnode, np) {
struct whitelist_ent *p;
/* If multibus, check if device is on that bus */
if (bus->flags & pmac_i2c_multibus)
@@ -1471,7 +1473,7 @@ int __init pmac_i2c_init(void)
smu_i2c_probe();
#endif
- /* Now add plaform functions for some known devices */
+ /* Now add platform functions for some known devices */
pmac_i2c_devscan(pmac_i2c_dev_create);
return 0;
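
Two recurring cleanups in low_i2c.c: the open-coded of_get_next_child() loops become for_each_child_of_node(), which does the per-iteration get/put itself (a reference is still held on the child if you break out early and keep using it), and each heap-allocated bus now calls lockdep_register_key() on its lock_class_key before lockdep_set_class(), as lockdep requires for dynamically allocated keys. The iterator, sketched with hypothetical names:

	struct device_node *child;

	for_each_child_of_node(parent, child) {
		const u32 *reg = of_get_property(child, "reg", NULL);

		if (!reg)
			continue;	/* refcounting is handled by the iterator */
		/* use 'child' for this iteration */
	}
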
diff --git a/arch/powerpc/platforms/powermac/nvram.c b/arch/powerpc/platforms/powermac/nvram.c
index dc7a5bae8f1c..fe2e0249cbc2 100644
--- a/arch/powerpc/platforms/powermac/nvram.c
+++ b/arch/powerpc/platforms/powermac/nvram.c
@@ -17,9 +17,9 @@
#include <linux/memblock.h>
#include <linux/completion.h>
#include <linux/spinlock.h>
+#include <linux/of_address.h>
#include <asm/sections.h>
#include <asm/io.h>
-#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/nvram.h>
@@ -55,7 +55,7 @@ struct chrp_header {
u8 cksum;
u16 len;
char name[12];
- u8 data[0];
+ u8 data[];
};
struct core99_header {
@@ -71,7 +71,7 @@ struct core99_header {
static int nvram_naddrs;
static volatile unsigned char __iomem *nvram_data;
static int is_core_99;
-static int core99_bank = 0;
+static int core99_bank;
static int nvram_partitions[3];
// XXX Turn that into a sem
static DEFINE_RAW_SPINLOCK(nv_lock);
@@ -258,7 +258,7 @@ static u32 core99_calc_adler(u8 *buffer)
return (high << 16) | low;
}
-static u32 core99_check(u8* datas)
+static u32 __init core99_check(u8 *datas)
{
struct core99_header* hdr99 = (struct core99_header*)datas;
diff --git a/arch/powerpc/platforms/powermac/pci.c b/arch/powerpc/platforms/powermac/pci.c
index e35eaa9cf938..d71359b5331c 100644
--- a/arch/powerpc/platforms/powermac/pci.c
+++ b/arch/powerpc/platforms/powermac/pci.c
@@ -12,11 +12,12 @@
#include <linux/string.h>
#include <linux/init.h>
#include <linux/irq.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <asm/sections.h>
#include <asm/io.h>
-#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/pmac_feature.h>
@@ -850,6 +851,10 @@ static int __init pmac_add_bridge(struct device_node *dev)
/* Fixup "bus-range" OF property */
fixup_bus_range(dev);
+ /* create pci_dn's for DT nodes under this PHB */
+ if (IS_ENABLED(CONFIG_PPC64))
+ pci_devs_phb_init_dynamic(hose);
+
return 0;
}
diff --git a/arch/powerpc/platforms/powermac/pfunc_base.c b/arch/powerpc/platforms/powermac/pfunc_base.c
index 62311e84a423..085e0ad20eba 100644
--- a/arch/powerpc/platforms/powermac/pfunc_base.c
+++ b/arch/powerpc/platforms/powermac/pfunc_base.c
@@ -93,7 +93,7 @@ static struct pmf_handlers macio_gpio_handlers = {
.delay = macio_do_delay,
};
-static void macio_gpio_init_one(struct macio_chip *macio)
+static void __init macio_gpio_init_one(struct macio_chip *macio)
{
struct device_node *gparent, *gp;
@@ -114,7 +114,7 @@ static void macio_gpio_init_one(struct macio_chip *macio)
* Ok, got one, we dont need anything special to track them down, so
* we just create them all
*/
- for (gp = NULL; (gp = of_get_next_child(gparent, gp)) != NULL;) {
+ for_each_child_of_node(gparent, gp) {
const u32 *reg = of_get_property(gp, "reg", NULL);
unsigned long offset;
if (reg == NULL)
@@ -133,9 +133,11 @@ static void macio_gpio_init_one(struct macio_chip *macio)
macio->of_node);
/* And now we run all the init ones */
- for (gp = NULL; (gp = of_get_next_child(gparent, gp)) != NULL;)
+ for_each_child_of_node(gparent, gp)
pmf_do_functions(gp, NULL, 0, PMF_FLAGS_ON_INIT, NULL);
+ of_node_put(gparent);
+
/* Note: We do not at this point implement the "at sleep" or "at wake"
* functions. I yet to find any for GPIOs anyway
*/
@@ -265,7 +267,7 @@ static struct pmf_handlers macio_mmio_handlers = {
.delay = macio_do_delay,
};
-static void macio_mmio_init_one(struct macio_chip *macio)
+static void __init macio_mmio_init_one(struct macio_chip *macio)
{
DBG("Installing MMIO functions for macio %pOF\n",
macio->of_node);
@@ -294,7 +296,7 @@ static struct pmf_handlers unin_mmio_handlers = {
.delay = macio_do_delay,
};
-static void uninorth_install_pfunc(void)
+static void __init uninorth_install_pfunc(void)
{
struct device_node *np;
diff --git a/arch/powerpc/platforms/powermac/pfunc_core.c b/arch/powerpc/platforms/powermac/pfunc_core.c
index 94df0a91b46f..22741ddfd5b2 100644
--- a/arch/powerpc/platforms/powermac/pfunc_core.c
+++ b/arch/powerpc/platforms/powermac/pfunc_core.c
@@ -12,8 +12,8 @@
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/mutex.h>
+#include <linux/of.h>
-#include <asm/prom.h>
#include <asm/pmac_pfunc.h>
/* Debug */
@@ -685,7 +685,7 @@ static int pmf_add_functions(struct pmf_device *dev, void *driverdata)
const int plen = strlen(PP_PREFIX);
int count = 0;
- for (pp = dev->node->properties; pp != 0; pp = pp->next) {
+ for_each_property_of_node(dev->node, pp) {
const char *name;
if (strncmp(pp->name, PP_PREFIX, plen) != 0)
continue;
diff --git a/arch/powerpc/platforms/powermac/pic.c b/arch/powerpc/platforms/powermac/pic.c
index 2e969073473d..8c8d8e0a7d13 100644
--- a/arch/powerpc/platforms/powermac/pic.c
+++ b/arch/powerpc/platforms/powermac/pic.c
@@ -18,12 +18,15 @@
#include <linux/interrupt.h>
#include <linux/syscore_ops.h>
#include <linux/adb.h>
+#include <linux/minmax.h>
#include <linux/pmu.h>
+#include <linux/irqdomain.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <asm/sections.h>
#include <asm/io.h>
#include <asm/smp.h>
-#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include <asm/time.h>
#include <asm/pmac_feature.h>
@@ -250,20 +253,6 @@ static unsigned int pmac_pic_get_irq(void)
return irq_linear_revmap(pmac_pic_host, irq);
}
-#ifdef CONFIG_XMON
-static struct irqaction xmon_action = {
- .handler = xmon_irq,
- .flags = IRQF_NO_THREAD,
- .name = "NMI - XMON"
-};
-#endif
-
-static struct irqaction gatwick_cascade_action = {
- .handler = gatwick_action,
- .flags = IRQF_NO_THREAD,
- .name = "cascade",
-};
-
static int pmac_pic_host_match(struct irq_domain *h, struct device_node *node,
enum irq_domain_bus_token bus_token)
{
@@ -325,11 +314,8 @@ static void __init pmac_pic_probe_oldstyle(void)
/* Check ordering of master & slave */
if (of_device_is_compatible(master, "gatwick")) {
- struct device_node *tmp;
BUG_ON(slave == NULL);
- tmp = master;
- master = slave;
- slave = tmp;
+ swap(master, slave);
}
/* We found a slave */
@@ -384,16 +370,21 @@ static void __init pmac_pic_probe_oldstyle(void)
out_le32(&pmac_irq_hw[i]->enable, 0);
/* Hookup cascade irq */
- if (slave && pmac_irq_cascade)
- setup_irq(pmac_irq_cascade, &gatwick_cascade_action);
+ if (slave && pmac_irq_cascade) {
+ if (request_irq(pmac_irq_cascade, gatwick_action,
+ IRQF_NO_THREAD, "cascade", NULL))
+ pr_err("Failed to register cascade interrupt\n");
+ }
printk(KERN_INFO "irq: System has %d possible interrupts\n", max_irqs);
#ifdef CONFIG_XMON
- setup_irq(irq_create_mapping(NULL, 20), &xmon_action);
+ i = irq_create_mapping(NULL, 20);
+ if (request_irq(i, xmon_irq, IRQF_NO_THREAD, "NMI - XMON", NULL))
+ pr_err("Failed to register NMI-XMON interrupt\n");
#endif
}
-int of_irq_parse_oldworld(struct device_node *device, int index,
+int of_irq_parse_oldworld(const struct device_node *device, int index,
struct of_phandle_args *out_irq)
{
const u32 *ints = NULL;
@@ -441,7 +432,9 @@ static void __init pmac_pic_setup_mpic_nmi(struct mpic *mpic)
nmi_irq = irq_of_parse_and_map(pswitch, 0);
if (nmi_irq) {
mpic_irq_set_priority(nmi_irq, 9);
- setup_irq(nmi_irq, &xmon_action);
+ if (request_irq(nmi_irq, xmon_irq, IRQF_NO_THREAD,
+ "NMI - XMON", NULL))
+ pr_err("Failed to register NMI-XMON interrupt\n");
}
of_node_put(pswitch);
}
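
pic.c drops its statically defined struct irqaction instances and switches to plain request_irq() with the same handlers, flags and names, now checking the return value; this matches the tree-wide move away from setup_irq(). The equivalent pattern, using the XMON case from the hunks above:

	unsigned int virq = irq_create_mapping(NULL, 20);

	if (request_irq(virq, xmon_irq, IRQF_NO_THREAD, "NMI - XMON", NULL))
		pr_err("Failed to register NMI-XMON interrupt\n");
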
diff --git a/arch/powerpc/platforms/powermac/pmac.h b/arch/powerpc/platforms/powermac/pmac.h
index 16a52afdb76e..1b696f352640 100644
--- a/arch/powerpc/platforms/powermac/pmac.h
+++ b/arch/powerpc/platforms/powermac/pmac.h
@@ -5,6 +5,8 @@
#include <linux/pci.h>
#include <linux/irq.h>
+#include <asm/pmac_feature.h>
+
/*
* Declaration for the various functions exported by the
* pmac_* files. Mostly for use by pmac_setup
@@ -14,6 +16,8 @@ struct rtc_time;
extern int pmac_newworld;
+void g5_phy_disable_cpu1(void);
+
extern long pmac_time_init(void);
extern time64_t pmac_get_boot_time(void);
extern void pmac_get_rtc_time(struct rtc_time *);
@@ -27,14 +31,13 @@ extern void pmac_nvram_update(void);
extern unsigned char pmac_nvram_read_byte(int addr);
extern void pmac_nvram_write_byte(int addr, unsigned char val);
extern void pmac_pcibios_after_init(void);
-extern int of_show_percpuinfo(struct seq_file *m, int i);
extern void pmac_setup_pci_dma(void);
extern void pmac_check_ht_link(void);
extern void pmac_setup_smp(void);
extern int psurge_secondary_virq;
-extern void low_cpu_die(void) __attribute__((noreturn));
+extern void low_cpu_offline_self(void) __attribute__((noreturn));
extern int pmac_nvram_init(void);
extern void pmac_pic_init(void);
diff --git a/arch/powerpc/platforms/powermac/setup.c b/arch/powerpc/platforms/powermac/setup.c
index c6d5333729ed..04daa7f0a03c 100644
--- a/arch/powerpc/platforms/powermac/setup.c
+++ b/arch/powerpc/platforms/powermac/setup.c
@@ -50,8 +50,6 @@
#include <asm/reg.h>
#include <asm/sections.h>
-#include <asm/prom.h>
-#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/pci-bridge.h>
#include <asm/ohare.h>
@@ -80,14 +78,8 @@ int pmac_newworld;
static int current_root_goodness = -1;
-extern struct machdep_calls pmac_md;
-
#define DEFAULT_ROOT_DEVICE Root_SDA1 /* sda1 - slightly silly choice */
-#ifdef CONFIG_PPC64
-int sccdbg;
-#endif
-
sys_ctrler_t sys_ctrler = SYS_CTRLER_UNKNOWN;
EXPORT_SYMBOL(sys_ctrler);
@@ -169,7 +161,7 @@ static void pmac_show_cpuinfo(struct seq_file *m)
}
#ifndef CONFIG_ADB_CUDA
-int find_via_cuda(void)
+int __init find_via_cuda(void)
{
struct device_node *dn = of_find_node_by_name(NULL, "via-cuda");
@@ -183,7 +175,7 @@ int find_via_cuda(void)
#endif
#ifndef CONFIG_ADB_PMU
-int find_via_pmu(void)
+int __init find_via_pmu(void)
{
struct device_node *dn = of_find_node_by_name(NULL, "via-pmu");
@@ -197,7 +189,7 @@ int find_via_pmu(void)
#endif
#ifndef CONFIG_PMAC_SMU
-int smu_init(void)
+int __init smu_init(void)
{
/* should check and warn if SMU is present */
return 0;
@@ -285,7 +277,7 @@ static void __init pmac_setup_arch(void)
/* 604, G3, G4 etc. */
loops_per_jiffy = *fp / HZ;
else
- /* 601, 603, etc. */
+ /* 603, etc. */
loops_per_jiffy = *fp / (2 * HZ);
of_node_put(cpu);
break;
@@ -299,9 +291,6 @@ static void __init pmac_setup_arch(void)
of_node_put(ic);
}
- /* Lookup PCI hosts */
- pmac_pci_init();
-
#ifdef CONFIG_PPC32
ohare_init();
l2cr_init();
@@ -331,13 +320,6 @@ static void __init pmac_setup_arch(void)
#endif /* CONFIG_ADB */
}
-#ifdef CONFIG_SCSI
-void note_scsi_host(struct device_node *node, void *host)
-{
-}
-EXPORT_SYMBOL(note_scsi_host);
-#endif
-
static int initializing = 1;
static int pmac_late_init(void)
@@ -586,7 +568,6 @@ static int __init pmac_probe(void)
#ifdef CONFIG_PPC32
/* isa_io_base gets set in pmac_pci_init */
- ISA_DMA_THRESHOLD = ~0L;
DMA_MODE_READ = 1;
DMA_MODE_WRITE = 2;
#endif /* CONFIG_PPC32 */
@@ -602,6 +583,7 @@ define_machine(powermac) {
.name = "PowerMac",
.probe = pmac_probe,
.setup_arch = pmac_setup_arch,
+ .discover_phbs = pmac_pci_init,
.show_cpuinfo = pmac_show_cpuinfo,
.init_IRQ = pmac_pic_init,
.get_irq = NULL, /* changed later */
diff --git a/arch/powerpc/platforms/powermac/sleep.S b/arch/powerpc/platforms/powermac/sleep.S
index bd6085b470b7..d497a60003d2 100644
--- a/arch/powerpc/platforms/powermac/sleep.S
+++ b/arch/powerpc/platforms/powermac/sleep.S
@@ -44,7 +44,8 @@
#define SL_TB 0xa0
#define SL_R2 0xa8
#define SL_CR 0xac
-#define SL_R12 0xb0 /* r12 to r31 */
+#define SL_LR 0xb0
+#define SL_R12 0xb4 /* r12 to r31 */
#define SL_SIZE (SL_R12 + 80)
.section .text
@@ -63,105 +64,107 @@ _GLOBAL(low_sleep_handler)
blr
#else
mflr r0
- stw r0,4(r1)
- stwu r1,-SL_SIZE(r1)
+ lis r11,sleep_storage@ha
+ addi r11,r11,sleep_storage@l
+ stw r0,SL_LR(r11)
mfcr r0
- stw r0,SL_CR(r1)
- stw r2,SL_R2(r1)
- stmw r12,SL_R12(r1)
+ stw r0,SL_CR(r11)
+ stw r1,SL_SP(r11)
+ stw r2,SL_R2(r11)
+ stmw r12,SL_R12(r11)
/* Save MSR & SDR1 */
mfmsr r4
- stw r4,SL_MSR(r1)
+ stw r4,SL_MSR(r11)
mfsdr1 r4
- stw r4,SL_SDR1(r1)
+ stw r4,SL_SDR1(r11)
/* Get a stable timebase and save it */
1: mftbu r4
- stw r4,SL_TB(r1)
+ stw r4,SL_TB(r11)
mftb r5
- stw r5,SL_TB+4(r1)
+ stw r5,SL_TB+4(r11)
mftbu r3
cmpw r3,r4
bne 1b
/* Save SPRGs */
mfsprg r4,0
- stw r4,SL_SPRG0(r1)
+ stw r4,SL_SPRG0(r11)
mfsprg r4,1
- stw r4,SL_SPRG0+4(r1)
+ stw r4,SL_SPRG0+4(r11)
mfsprg r4,2
- stw r4,SL_SPRG0+8(r1)
+ stw r4,SL_SPRG0+8(r11)
mfsprg r4,3
- stw r4,SL_SPRG0+12(r1)
+ stw r4,SL_SPRG0+12(r11)
/* Save BATs */
mfdbatu r4,0
- stw r4,SL_DBAT0(r1)
+ stw r4,SL_DBAT0(r11)
mfdbatl r4,0
- stw r4,SL_DBAT0+4(r1)
+ stw r4,SL_DBAT0+4(r11)
mfdbatu r4,1
- stw r4,SL_DBAT1(r1)
+ stw r4,SL_DBAT1(r11)
mfdbatl r4,1
- stw r4,SL_DBAT1+4(r1)
+ stw r4,SL_DBAT1+4(r11)
mfdbatu r4,2
- stw r4,SL_DBAT2(r1)
+ stw r4,SL_DBAT2(r11)
mfdbatl r4,2
- stw r4,SL_DBAT2+4(r1)
+ stw r4,SL_DBAT2+4(r11)
mfdbatu r4,3
- stw r4,SL_DBAT3(r1)
+ stw r4,SL_DBAT3(r11)
mfdbatl r4,3
- stw r4,SL_DBAT3+4(r1)
+ stw r4,SL_DBAT3+4(r11)
mfibatu r4,0
- stw r4,SL_IBAT0(r1)
+ stw r4,SL_IBAT0(r11)
mfibatl r4,0
- stw r4,SL_IBAT0+4(r1)
+ stw r4,SL_IBAT0+4(r11)
mfibatu r4,1
- stw r4,SL_IBAT1(r1)
+ stw r4,SL_IBAT1(r11)
mfibatl r4,1
- stw r4,SL_IBAT1+4(r1)
+ stw r4,SL_IBAT1+4(r11)
mfibatu r4,2
- stw r4,SL_IBAT2(r1)
+ stw r4,SL_IBAT2(r11)
mfibatl r4,2
- stw r4,SL_IBAT2+4(r1)
+ stw r4,SL_IBAT2+4(r11)
mfibatu r4,3
- stw r4,SL_IBAT3(r1)
+ stw r4,SL_IBAT3(r11)
mfibatl r4,3
- stw r4,SL_IBAT3+4(r1)
+ stw r4,SL_IBAT3+4(r11)
BEGIN_MMU_FTR_SECTION
mfspr r4,SPRN_DBAT4U
- stw r4,SL_DBAT4(r1)
+ stw r4,SL_DBAT4(r11)
mfspr r4,SPRN_DBAT4L
- stw r4,SL_DBAT4+4(r1)
+ stw r4,SL_DBAT4+4(r11)
mfspr r4,SPRN_DBAT5U
- stw r4,SL_DBAT5(r1)
+ stw r4,SL_DBAT5(r11)
mfspr r4,SPRN_DBAT5L
- stw r4,SL_DBAT5+4(r1)
+ stw r4,SL_DBAT5+4(r11)
mfspr r4,SPRN_DBAT6U
- stw r4,SL_DBAT6(r1)
+ stw r4,SL_DBAT6(r11)
mfspr r4,SPRN_DBAT6L
- stw r4,SL_DBAT6+4(r1)
+ stw r4,SL_DBAT6+4(r11)
mfspr r4,SPRN_DBAT7U
- stw r4,SL_DBAT7(r1)
+ stw r4,SL_DBAT7(r11)
mfspr r4,SPRN_DBAT7L
- stw r4,SL_DBAT7+4(r1)
+ stw r4,SL_DBAT7+4(r11)
mfspr r4,SPRN_IBAT4U
- stw r4,SL_IBAT4(r1)
+ stw r4,SL_IBAT4(r11)
mfspr r4,SPRN_IBAT4L
- stw r4,SL_IBAT4+4(r1)
+ stw r4,SL_IBAT4+4(r11)
mfspr r4,SPRN_IBAT5U
- stw r4,SL_IBAT5(r1)
+ stw r4,SL_IBAT5(r11)
mfspr r4,SPRN_IBAT5L
- stw r4,SL_IBAT5+4(r1)
+ stw r4,SL_IBAT5+4(r11)
mfspr r4,SPRN_IBAT6U
- stw r4,SL_IBAT6(r1)
+ stw r4,SL_IBAT6(r11)
mfspr r4,SPRN_IBAT6L
- stw r4,SL_IBAT6+4(r1)
+ stw r4,SL_IBAT6+4(r11)
mfspr r4,SPRN_IBAT7U
- stw r4,SL_IBAT7(r1)
+ stw r4,SL_IBAT7(r11)
mfspr r4,SPRN_IBAT7L
- stw r4,SL_IBAT7+4(r1)
+ stw r4,SL_IBAT7+4(r11)
END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
/* Backup various CPU config stuffs */
@@ -180,9 +183,9 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
lis r5,grackle_wake_up@ha
addi r5,r5,grackle_wake_up@l
tophys(r5,r5)
- stw r5,SL_PC(r1)
+ stw r5,SL_PC(r11)
lis r4,KERNELBASE@h
- tophys(r5,r1)
+ tophys(r5,r11)
addi r5,r5,SL_PC
lis r6,MAGIC@ha
addi r6,r6,MAGIC@l
@@ -194,15 +197,9 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
tophys(r3,r3)
stw r3,0x80(r4)
stw r5,0x84(r4)
- /* Store a pointer to our backup storage into
- * a kernel global
- */
- lis r3,sleep_storage@ha
- addi r3,r3,sleep_storage@l
- stw r5,0(r3)
- .globl low_cpu_die
-low_cpu_die:
+ .globl low_cpu_offline_self
+low_cpu_offline_self:
/* Flush & disable all caches */
bl flush_disable_caches
@@ -244,7 +241,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_SPEC7450)
mtmsr r2
isync
b 1b
-
+_ASM_NOKPROBE_SYMBOL(low_cpu_offline_self)
/*
* Here is the resume code.
*/
@@ -279,9 +276,10 @@ _GLOBAL(core99_wake_up)
lis r3,sleep_storage@ha
addi r3,r3,sleep_storage@l
tophys(r3,r3)
- lwz r1,0(r3)
+ addi r1,r3,SL_PC
/* Pass thru to older resume code ... */
+_ASM_NOKPROBE_SYMBOL(core99_wake_up)
/*
* Here is the resume code for older machines.
* r1 has the physical address of SL_PC(sp).
@@ -293,14 +291,7 @@ grackle_wake_up:
* we do any r1 memory access as we are not sure they
* are in a sane state above the first 256Mb region
*/
- li r0,16 /* load up segment register values */
- mtctr r0 /* for context 0 */
- lis r3,0x2000 /* Ku = 1, VSID = 0 */
- li r4,0
-3: mtsrin r3,r4
- addi r3,r3,0x111 /* increment VSID */
- addis r4,r4,0x1000 /* address of next segment */
- bdnz 3b
+ bl load_segment_registers
sync
isync
@@ -405,13 +396,6 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
blt 1b
sync
- /* restore the MSR and turn on the MMU */
- lwz r3,SL_MSR(r1)
- bl turn_on_mmu
-
- /* get back the stack pointer */
- tovirt(r1,r1)
-
/* Restore TB */
li r3,0
mttbl r3
@@ -425,26 +409,24 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
mtcr r0
lwz r2,SL_R2(r1)
lmw r12,SL_R12(r1)
- addi r1,r1,SL_SIZE
- lwz r0,4(r1)
- mtlr r0
- blr
-turn_on_mmu:
- mflr r4
- tovirt(r4,r4)
+ /* restore the MSR and SP and turn on the MMU and return */
+ lwz r3,SL_MSR(r1)
+ lwz r4,SL_LR(r1)
+ lwz r1,SL_SP(r1)
mtsrr0 r4
mtsrr1 r3
sync
isync
rfi
+_ASM_NOKPROBE_SYMBOL(grackle_wake_up)
#endif /* defined(CONFIG_PM) || defined(CONFIG_CPU_FREQ) */
- .section .data
+ .section .bss
.balign L1_CACHE_BYTES
sleep_storage:
- .long 0
+ .space SL_SIZE
.balign L1_CACHE_BYTES, 0
#endif /* CONFIG_PPC_BOOK3S_32 */
diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
index f95fbdee6efe..5b26a9012d2e 100644
--- a/arch/powerpc/platforms/powermac/smp.c
+++ b/arch/powerpc/platforms/powermac/smp.c
@@ -22,6 +22,7 @@
#include <linux/sched/hotplug.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/init.h>
@@ -30,16 +31,15 @@
#include <linux/hardirq.h>
#include <linux/cpu.h>
#include <linux/compiler.h>
+#include <linux/pgtable.h>
#include <asm/ptrace.h>
#include <linux/atomic.h>
#include <asm/code-patching.h>
#include <asm/irq.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/sections.h>
#include <asm/io.h>
-#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/pmac_feature.h>
@@ -49,6 +49,7 @@
#include <asm/keylargo.h>
#include <asm/pmac_low_i2c.h>
#include <asm/pmac_pfunc.h>
+#include <asm/inst.h>
#include "pmac.h"
@@ -145,6 +146,7 @@ static inline void psurge_clr_ipi(int cpu)
switch(psurge_type) {
case PSURGE_DUAL:
out_8(psurge_sec_intr, ~0);
+ break;
case PSURGE_NONE:
break;
default:
@@ -184,7 +186,7 @@ static const struct irq_domain_ops psurge_host_ops = {
.map = psurge_host_map,
};
-static int psurge_secondary_ipi_init(void)
+static int __init psurge_secondary_ipi_init(void)
{
int rc = -ENOMEM;
@@ -269,10 +271,6 @@ static void __init smp_psurge_probe(void)
int i, ncpus;
struct device_node *dn;
- /* We don't do SMP on the PPC601 -- paulus */
- if (PVR_VER(mfspr(SPRN_PVR)) == 1)
- return;
-
/*
* The powersurge cpu board can be used in the generation
* of powermacs that have a socket for an upgradeable cpu card,
@@ -399,21 +397,19 @@ static int __init smp_psurge_kick_cpu(int nr)
return 0;
}
-static struct irqaction psurge_irqaction = {
- .handler = psurge_ipi_intr,
- .flags = IRQF_PERCPU | IRQF_NO_THREAD,
- .name = "primary IPI",
-};
-
static void __init smp_psurge_setup_cpu(int cpu_nr)
{
+ unsigned long flags = IRQF_PERCPU | IRQF_NO_THREAD;
+ int irq;
+
if (cpu_nr != 0 || !psurge_start)
return;
/* reset the entry point so if we get another intr we won't
* try to startup again */
out_be32(psurge_start, 0x100);
- if (setup_irq(irq_create_mapping(NULL, 30), &psurge_irqaction))
+ irq = irq_create_mapping(NULL, 30);
+ if (request_irq(irq, psurge_ipi_intr, flags, "primary IPI", NULL))
printk(KERN_ERR "Couldn't get primary IPI interrupt");
}
@@ -660,13 +656,13 @@ static void smp_core99_gpio_tb_freeze(int freeze)
#endif /* !CONFIG_PPC64 */
-/* L2 and L3 cache settings to pass from CPU0 to CPU1 on G4 cpus */
-volatile static long int core99_l2_cache;
-volatile static long int core99_l3_cache;
-
static void core99_init_caches(int cpu)
{
#ifndef CONFIG_PPC64
+ /* L2 and L3 cache settings to pass from CPU0 to CPU1 on G4 cpus */
+ static long int core99_l2_cache;
+ static long int core99_l3_cache;
+
if (!cpu_has_feature(CPU_FTR_L2CR))
return;
@@ -715,6 +711,7 @@ static void __init smp_core99_setup(int ncpus)
printk(KERN_INFO "Processor timebase sync using"
" platform function\n");
}
+ of_node_put(cpus);
}
#else /* CONFIG_PPC64 */
@@ -828,7 +825,7 @@ static int smp_core99_kick_cpu(int nr)
mdelay(1);
/* Restore our exception vector */
- patch_instruction(vector, save_vector);
+ patch_instruction(vector, ppc_inst(save_vector));
local_irq_restore(flags);
if (ppc_md.progress) ppc_md.progress("smp_core99_kick_cpu done", 0x347);
@@ -879,8 +876,6 @@ static int smp_core99_cpu_online(unsigned int cpu)
static void __init smp_core99_bringup_done(void)
{
- extern void g5_phy_disable_cpu1(void);
-
/* Close i2c bus if it was used for tb sync */
if (pmac_tb_clock_chip_host)
pmac_i2c_close(pmac_tb_clock_chip_host);
@@ -916,12 +911,14 @@ static int smp_core99_cpu_disable(void)
mpic_cpu_set_priority(0xf);
+ cleanup_cpu_mmu_context();
+
return 0;
}
#ifdef CONFIG_PPC32
-static void pmac_cpu_die(void)
+static void pmac_cpu_offline_self(void)
{
int cpu = smp_processor_id();
@@ -931,12 +928,12 @@ static void pmac_cpu_die(void)
generic_set_cpu_dead(cpu);
smp_wmb();
mb();
- low_cpu_die();
+ low_cpu_offline_self();
}
#else /* CONFIG_PPC32 */
-static void pmac_cpu_die(void)
+static void pmac_cpu_offline_self(void)
{
int cpu = smp_processor_id();
@@ -1021,7 +1018,7 @@ void __init pmac_setup_smp(void)
#endif /* CONFIG_PPC_PMAC32_PSURGE */
#ifdef CONFIG_HOTPLUG_CPU
- ppc_md.cpu_die = pmac_cpu_die;
+ smp_ops->cpu_offline_self = pmac_cpu_offline_self;
#endif
}
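
Two details in the smp.c hunks: the exception-vector restore wraps the saved word in ppc_inst(), since patch_instruction() operates on the ppc_inst instruction abstraction (added to handle prefixed instructions) rather than a bare 32-bit word, and CPU offlining moves from ppc_md.cpu_die to smp_ops->cpu_offline_self. A rough sketch of the patching idiom; the address and the nop are placeholders, and the exact helper types vary between kernel versions:

	u32 *vector = (u32 *)(KERNELBASE + 0x100);	/* hypothetical target */
	u32 save_vector = *vector;			/* remember the original word */

	patch_instruction(vector, ppc_inst(PPC_RAW_NOP()));	/* temporary patch */
	/* ... work that relies on the patched vector ... */
	patch_instruction(vector, ppc_inst(save_vector));	/* restore it */
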
diff --git a/arch/powerpc/platforms/powermac/time.c b/arch/powerpc/platforms/powermac/time.c
index b36ddee17c87..4c5790aff1b5 100644
--- a/arch/powerpc/platforms/powermac/time.c
+++ b/arch/powerpc/platforms/powermac/time.c
@@ -24,11 +24,10 @@
#include <linux/interrupt.h>
#include <linux/hardirq.h>
#include <linux/rtc.h>
+#include <linux/of_address.h>
#include <asm/sections.h>
-#include <asm/prom.h>
#include <asm/io.h>
-#include <asm/pgtable.h>
#include <asm/machdep.h>
#include <asm/time.h>
#include <asm/nvram.h>
diff --git a/arch/powerpc/platforms/powermac/udbg_adb.c b/arch/powerpc/platforms/powermac/udbg_adb.c
index 12158bb4fed7..b4756defd596 100644
--- a/arch/powerpc/platforms/powermac/udbg_adb.c
+++ b/arch/powerpc/platforms/powermac/udbg_adb.c
@@ -7,11 +7,11 @@
#include <linux/adb.h>
#include <linux/pmu.h>
#include <linux/cuda.h>
+#include <linux/of.h>
#include <asm/machdep.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/xmon.h>
-#include <asm/prom.h>
#include <asm/bootx.h>
#include <asm/errno.h>
#include <asm/pmac_feature.h>
diff --git a/arch/powerpc/platforms/powermac/udbg_scc.c b/arch/powerpc/platforms/powermac/udbg_scc.c
index 6b61a18e8db3..1b7c39e841ee 100644
--- a/arch/powerpc/platforms/powermac/udbg_scc.c
+++ b/arch/powerpc/platforms/powermac/udbg_scc.c
@@ -5,10 +5,10 @@
* Copyright (C) 2001-2005 PPC 64 Team, IBM Corp
*/
#include <linux/types.h>
+#include <linux/of.h>
#include <asm/udbg.h>
#include <asm/processor.h>
#include <asm/io.h>
-#include <asm/prom.h>
#include <asm/pmac_feature.h>
extern u8 real_readb(volatile u8 __iomem *addr);
@@ -62,7 +62,7 @@ static unsigned char scc_inittab[] = {
3, 0xc1, /* rx enable, 8 bits */
};
-void udbg_scc_init(int force_scc)
+void __init udbg_scc_init(int force_scc)
{
const u32 *reg;
unsigned long addr;
@@ -80,11 +80,15 @@ void udbg_scc_init(int force_scc)
path = of_get_property(of_chosen, "linux,stdout-path", NULL);
if (path != NULL)
stdout = of_find_node_by_path(path);
- for (ch = NULL; (ch = of_get_next_child(escc, ch)) != NULL;) {
- if (ch == stdout)
+ for_each_child_of_node(escc, ch) {
+ if (ch == stdout) {
+ of_node_put(ch_def);
ch_def = of_node_get(ch);
- if (of_node_name_eq(ch, "ch-a"))
+ }
+ if (of_node_name_eq(ch, "ch-a")) {
+ of_node_put(ch_a);
ch_a = of_node_get(ch);
+ }
}
if (ch_def == NULL && !force_scc)
goto bail;
diff --git a/arch/powerpc/platforms/powernv/Kconfig b/arch/powerpc/platforms/powernv/Kconfig
index 938803eab0ad..ae248a161b43 100644
--- a/arch/powerpc/platforms/powernv/Kconfig
+++ b/arch/powerpc/platforms/powernv/Kconfig
@@ -2,7 +2,7 @@
config PPC_POWERNV
depends on PPC64 && PPC_BOOK3S
bool "IBM PowerNV (Non-Virtualized) platform support"
- select PPC_NATIVE
+ select PPC_HASH_MMU_NATIVE if PPC_64S_HASH_MMU
select PPC_XICS
select PPC_ICP_NATIVE
select PPC_XIVE_NATIVE
@@ -12,7 +12,6 @@ config PPC_POWERNV
select EPAPR_BOOT
select PPC_INDIRECT_PIO
select PPC_UDBG_16550
- select ARCH_RANDOM
select CPU_FREQ
select PPC_DOORBELL
select MMU_NOTIFIER
@@ -20,32 +19,18 @@ config PPC_POWERNV
default y
config OPAL_PRD
- tristate 'OPAL PRD driver'
+ tristate "OPAL PRD driver"
depends on PPC_POWERNV
help
This enables the opal-prd driver, a facility to run processor
recovery diagnostics on OpenPower machines
config PPC_MEMTRACE
- bool "Enable removal of RAM from kernel mappings for tracing"
- depends on PPC_POWERNV && MEMORY_HOTREMOVE
+ bool "Enable runtime allocation of RAM for tracing"
+ depends on PPC_POWERNV && MEMORY_HOTPLUG && CONTIG_ALLOC
help
- Enabling this option allows for the removal of memory (RAM)
- from the kernel mappings to be used for hardware tracing.
-
-config PPC_VAS
- bool "IBM Virtual Accelerator Switchboard (VAS)"
- depends on PPC_POWERNV && PPC_64K_PAGES
- default y
- help
- This enables support for IBM Virtual Accelerator Switchboard (VAS).
-
- VAS allows accelerators in co-processors like NX-GZIP and NX-842
- to be accessible to kernel subsystems and user processes.
-
- VAS adapters are found in POWER9 based systems.
-
- If unsure, say N.
+ Enabling this option allows for runtime allocation of memory (RAM)
+ for hardware tracing.
config SCOM_DEBUGFS
bool "Expose SCOM controllers via debugfs"
diff --git a/arch/powerpc/platforms/powernv/Makefile b/arch/powerpc/platforms/powernv/Makefile
index c0f8120045c3..19f0fc5c6f1b 100644
--- a/arch/powerpc/platforms/powernv/Makefile
+++ b/arch/powerpc/platforms/powernv/Makefile
@@ -1,4 +1,13 @@
# SPDX-License-Identifier: GPL-2.0
+
+# nothing that deals with real mode is safe to KASAN
+# in particular, idle code runs a bunch of things in real mode
+KASAN_SANITIZE_idle.o := n
+KASAN_SANITIZE_pci-ioda.o := n
+KASAN_SANITIZE_pci-ioda-tce.o := n
+# pnv_machine_check_early
+KASAN_SANITIZE_setup.o := n
+
obj-y += setup.o opal-call.o opal-wrappers.o opal.o opal-async.o
obj-y += idle.o opal-rtc.o opal-nvram.o opal-lpc.o opal-flash.o
obj-y += rng.o opal-elog.o opal-dump.o opal-sysparam.o opal-sensor.o
@@ -10,14 +19,15 @@ obj-$(CONFIG_SMP) += smp.o subcore.o subcore-asm.o
obj-$(CONFIG_FA_DUMP) += opal-fadump.o
obj-$(CONFIG_PRESERVE_FA_DUMP) += opal-fadump.o
obj-$(CONFIG_OPAL_CORE) += opal-core.o
-obj-$(CONFIG_PCI) += pci.o pci-ioda.o npu-dma.o pci-ioda-tce.o
+obj-$(CONFIG_PCI) += pci.o pci-ioda.o pci-ioda-tce.o
+obj-$(CONFIG_PCI_IOV) += pci-sriov.o
obj-$(CONFIG_CXL_BASE) += pci-cxl.o
obj-$(CONFIG_EEH) += eeh-powernv.o
obj-$(CONFIG_MEMORY_FAILURE) += opal-memory-errors.o
obj-$(CONFIG_OPAL_PRD) += opal-prd.o
obj-$(CONFIG_PERF_EVENTS) += opal-imc.o
obj-$(CONFIG_PPC_MEMTRACE) += memtrace.o
-obj-$(CONFIG_PPC_VAS) += vas.o vas-window.o vas-debug.o
+obj-$(CONFIG_PPC_VAS) += vas.o vas-window.o vas-debug.o vas-fault.o
obj-$(CONFIG_OCXL_BASE) += ocxl.o
obj-$(CONFIG_SCOM_DEBUGFS) += opal-xscom.o
obj-$(CONFIG_PPC_SECURE_BOOT) += opal-secvar.o
diff --git a/arch/powerpc/platforms/powernv/eeh-powernv.c b/arch/powerpc/platforms/powernv/eeh-powernv.c
index 6f300ab7f0e9..a83cb679dd59 100644
--- a/arch/powerpc/platforms/powernv/eeh-powernv.c
+++ b/arch/powerpc/platforms/powernv/eeh-powernv.c
@@ -11,6 +11,7 @@
#include <linux/export.h>
#include <linux/init.h>
#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
#include <linux/list.h>
#include <linux/msi.h>
#include <linux/of.h>
@@ -38,65 +39,10 @@
static int eeh_event_irq = -EINVAL;
-void pnv_pcibios_bus_add_device(struct pci_dev *pdev)
+static void pnv_pcibios_bus_add_device(struct pci_dev *pdev)
{
- struct pci_dn *pdn = pci_get_pdn(pdev);
-
- if (!pdn || eeh_has_flag(EEH_FORCE_DISABLED))
- return;
-
dev_dbg(&pdev->dev, "EEH: Setting up device\n");
- eeh_add_device_early(pdn);
- eeh_add_device_late(pdev);
- eeh_sysfs_add_device(pdev);
-}
-
-static int pnv_eeh_init(void)
-{
- struct pci_controller *hose;
- struct pnv_phb *phb;
- int max_diag_size = PNV_PCI_DIAG_BUF_SIZE;
-
- if (!firmware_has_feature(FW_FEATURE_OPAL)) {
- pr_warn("%s: OPAL is required !\n",
- __func__);
- return -EINVAL;
- }
-
- /* Set probe mode */
- eeh_add_flag(EEH_PROBE_MODE_DEV);
-
- /*
- * P7IOC blocks PCI config access to frozen PE, but PHB3
- * doesn't do that. So we have to selectively enable I/O
- * prior to collecting error log.
- */
- list_for_each_entry(hose, &hose_list, list_node) {
- phb = hose->private_data;
-
- if (phb->model == PNV_PHB_MODEL_P7IOC)
- eeh_add_flag(EEH_ENABLE_IO_FOR_LOG);
-
- if (phb->diag_data_size > max_diag_size)
- max_diag_size = phb->diag_data_size;
-
- /*
- * PE#0 should be regarded as valid by EEH core
- * if it's not the reserved one. Currently, we
- * have the reserved PE#255 and PE#127 for PHB3
- * and P7IOC separately. So we should regard
- * PE#0 as valid for PHB3 and P7IOC.
- */
- if (phb->ioda.reserved_pe_idx != 0)
- eeh_add_flag(EEH_VALID_PE_ZERO);
-
- break;
- }
-
- eeh_set_pe_aux_size(max_diag_size);
- ppc_md.pcibios_bus_add_device = pnv_pcibios_bus_add_device;
-
- return 0;
+ eeh_probe_device(pdev);
}
static irqreturn_t pnv_eeh_event(int irq, void *data)
@@ -142,7 +88,7 @@ static ssize_t pnv_eeh_ei_write(struct file *filp,
return -EINVAL;
/* Retrieve PE */
- pe = eeh_pe_get(hose, pe_no, 0);
+ pe = eeh_pe_get(hose, pe_no);
if (!pe)
return -ENODEV;
@@ -197,7 +143,7 @@ PNV_EEH_DBGFS_ENTRY(inbB, 0xE10);
#endif /* CONFIG_DEBUG_FS */
-void pnv_eeh_enable_phbs(void)
+static void pnv_eeh_enable_phbs(void)
{
struct pci_controller *hose;
struct pnv_phb *phb;
@@ -345,28 +291,41 @@ static int pnv_eeh_find_ecap(struct pci_dn *pdn, int cap)
return 0;
}
+static struct eeh_pe *pnv_eeh_get_upstream_pe(struct pci_dev *pdev)
+{
+ struct pci_controller *hose = pdev->bus->sysdata;
+ struct pnv_phb *phb = hose->private_data;
+ struct pci_dev *parent = pdev->bus->self;
+
+#ifdef CONFIG_PCI_IOV
+ /* for VFs we use the PF's PE as the upstream PE */
+ if (pdev->is_virtfn)
+ parent = pdev->physfn;
+#endif
+
+ /* otherwise use the PE of our parent bridge */
+ if (parent) {
+ struct pnv_ioda_pe *ioda_pe = pnv_ioda_get_pe(parent);
+
+ return eeh_pe_get(phb->hose, ioda_pe->pe_number);
+ }
+
+ return NULL;
+}
+
/**
* pnv_eeh_probe - Do probe on PCI device
- * @pdn: PCI device node
- * @data: unused
+ * @pdev: pci_dev to probe
*
- * When EEH module is installed during system boot, all PCI devices
- * are checked one by one to see if it supports EEH. The function
- * is introduced for the purpose. By default, EEH has been enabled
- * on all PCI devices. That's to say, we only need do necessary
- * initialization on the corresponding eeh device and create PE
- * accordingly.
- *
- * It's notable that's unsafe to retrieve the EEH device through
- * the corresponding PCI device. During the PCI device hotplug, which
- * was possiblly triggered by EEH core, the binding between EEH device
- * and the PCI device isn't built yet.
+ * Create, or find the existing, eeh_dev for this pci_dev.
*/
-static void *pnv_eeh_probe(struct pci_dn *pdn, void *data)
+static struct eeh_dev *pnv_eeh_probe(struct pci_dev *pdev)
{
+ struct pci_dn *pdn = pci_get_pdn(pdev);
struct pci_controller *hose = pdn->phb;
struct pnv_phb *phb = hose->private_data;
struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
+ struct eeh_pe *upstream_pe;
uint32_t pcie_flags;
int ret;
int config_addr = (pdn->busno << 8) | (pdn->devfn);
@@ -380,20 +339,27 @@ static void *pnv_eeh_probe(struct pci_dn *pdn, void *data)
if (!edev || edev->pe)
return NULL;
+ /* already configured? */
+ if (edev->pdev) {
+ pr_debug("%s: found existing edev for %04x:%02x:%02x.%01x\n",
+ __func__, hose->global_number, config_addr >> 8,
+ PCI_SLOT(config_addr), PCI_FUNC(config_addr));
+ return edev;
+ }
+
/* Skip for PCI-ISA bridge */
- if ((pdn->class_code >> 8) == PCI_CLASS_BRIDGE_ISA)
+ if ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
return NULL;
eeh_edev_dbg(edev, "Probing device\n");
/* Initialize eeh device */
- edev->class_code = pdn->class_code;
edev->mode &= 0xFFFFFF00;
edev->pcix_cap = pnv_eeh_find_cap(pdn, PCI_CAP_ID_PCIX);
edev->pcie_cap = pnv_eeh_find_cap(pdn, PCI_CAP_ID_EXP);
edev->af_cap = pnv_eeh_find_cap(pdn, PCI_CAP_ID_AF);
edev->aer_cap = pnv_eeh_find_ecap(pdn, PCI_EXT_CAP_ID_ERR);
- if ((edev->class_code >> 8) == PCI_CLASS_BRIDGE_PCI) {
+ if ((pdev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
edev->mode |= EEH_DEV_BRIDGE;
if (edev->pcie_cap) {
pnv_pci_cfg_read(pdn, edev->pcie_cap + PCI_EXP_FLAGS,
@@ -408,8 +374,10 @@ static void *pnv_eeh_probe(struct pci_dn *pdn, void *data)
edev->pe_config_addr = phb->ioda.pe_rmap[config_addr];
+ upstream_pe = pnv_eeh_get_upstream_pe(pdev);
+
/* Create PE */
- ret = eeh_add_to_parent_pe(edev);
+ ret = eeh_pe_tree_insert(edev, upstream_pe);
if (ret) {
eeh_edev_warn(edev, "Failed to add device to PE (code %d)\n", ret);
return NULL;
@@ -423,7 +391,7 @@ static void *pnv_eeh_probe(struct pci_dn *pdn, void *data)
* should be blocked until PE reset. MMIO access is dropped
* by hardware certainly. In order to drop PCI config requests,
* one more flag (EEH_PE_CFG_RESTRICTED) is introduced, which
- * will be checked in the backend for PE state retrival. If
+ * will be checked in the backend for PE state retrieval. If
* the PE becomes frozen for the first time and the flag has
* been set for the PE, we will set EEH_PE_CFG_BLOCKED for
* that PE to block its config space.
@@ -471,7 +439,7 @@ static void *pnv_eeh_probe(struct pci_dn *pdn, void *data)
eeh_edev_dbg(edev, "EEH enabled on device\n");
- return NULL;
+ return edev;
}
/**
@@ -544,18 +512,6 @@ static int pnv_eeh_set_option(struct eeh_pe *pe, int option)
return 0;
}
-/**
- * pnv_eeh_get_pe_addr - Retrieve PE address
- * @pe: EEH PE
- *
- * Retrieve the PE address according to the given tranditional
- * PCI BDF (Bus/Device/Function) address.
- */
-static int pnv_eeh_get_pe_addr(struct eeh_pe *pe)
-{
- return pe->addr;
-}
-
static void pnv_eeh_get_phb_diag(struct eeh_pe *pe)
{
struct pnv_phb *phb = pe->phb->private_data;
@@ -859,32 +815,32 @@ static int __pnv_eeh_bridge_reset(struct pci_dev *dev, int option)
case EEH_RESET_HOT:
/* Don't report linkDown event */
if (aer) {
- eeh_ops->read_config(pdn, aer + PCI_ERR_UNCOR_MASK,
+ eeh_ops->read_config(edev, aer + PCI_ERR_UNCOR_MASK,
4, &ctrl);
ctrl |= PCI_ERR_UNC_SURPDN;
- eeh_ops->write_config(pdn, aer + PCI_ERR_UNCOR_MASK,
+ eeh_ops->write_config(edev, aer + PCI_ERR_UNCOR_MASK,
4, ctrl);
}
- eeh_ops->read_config(pdn, PCI_BRIDGE_CONTROL, 2, &ctrl);
+ eeh_ops->read_config(edev, PCI_BRIDGE_CONTROL, 2, &ctrl);
ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
- eeh_ops->write_config(pdn, PCI_BRIDGE_CONTROL, 2, ctrl);
+ eeh_ops->write_config(edev, PCI_BRIDGE_CONTROL, 2, ctrl);
msleep(EEH_PE_RST_HOLD_TIME);
break;
case EEH_RESET_DEACTIVATE:
- eeh_ops->read_config(pdn, PCI_BRIDGE_CONTROL, 2, &ctrl);
+ eeh_ops->read_config(edev, PCI_BRIDGE_CONTROL, 2, &ctrl);
ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
- eeh_ops->write_config(pdn, PCI_BRIDGE_CONTROL, 2, ctrl);
+ eeh_ops->write_config(edev, PCI_BRIDGE_CONTROL, 2, ctrl);
msleep(EEH_PE_RST_SETTLE_TIME);
/* Continue reporting linkDown event */
if (aer) {
- eeh_ops->read_config(pdn, aer + PCI_ERR_UNCOR_MASK,
+ eeh_ops->read_config(edev, aer + PCI_ERR_UNCOR_MASK,
4, &ctrl);
ctrl &= ~PCI_ERR_UNC_SURPDN;
- eeh_ops->write_config(pdn, aer + PCI_ERR_UNCOR_MASK,
+ eeh_ops->write_config(edev, aer + PCI_ERR_UNCOR_MASK,
4, ctrl);
}
@@ -953,11 +909,12 @@ void pnv_pci_reset_secondary_bus(struct pci_dev *dev)
static void pnv_eeh_wait_for_pending(struct pci_dn *pdn, const char *type,
int pos, u16 mask)
{
+ struct eeh_dev *edev = pdn->edev;
int i, status = 0;
/* Wait for Transaction Pending bit to be cleared */
for (i = 0; i < 4; i++) {
- eeh_ops->read_config(pdn, pos, 2, &status);
+ eeh_ops->read_config(edev, pos, 2, &status);
if (!(status & mask))
return;
@@ -978,7 +935,7 @@ static int pnv_eeh_do_flr(struct pci_dn *pdn, int option)
if (WARN_ON(!edev->pcie_cap))
return -ENOTTY;
- eeh_ops->read_config(pdn, edev->pcie_cap + PCI_EXP_DEVCAP, 4, &reg);
+ eeh_ops->read_config(edev, edev->pcie_cap + PCI_EXP_DEVCAP, 4, &reg);
if (!(reg & PCI_EXP_DEVCAP_FLR))
return -ENOTTY;
@@ -988,18 +945,18 @@ static int pnv_eeh_do_flr(struct pci_dn *pdn, int option)
pnv_eeh_wait_for_pending(pdn, "",
edev->pcie_cap + PCI_EXP_DEVSTA,
PCI_EXP_DEVSTA_TRPND);
- eeh_ops->read_config(pdn, edev->pcie_cap + PCI_EXP_DEVCTL,
+ eeh_ops->read_config(edev, edev->pcie_cap + PCI_EXP_DEVCTL,
4, &reg);
reg |= PCI_EXP_DEVCTL_BCR_FLR;
- eeh_ops->write_config(pdn, edev->pcie_cap + PCI_EXP_DEVCTL,
+ eeh_ops->write_config(edev, edev->pcie_cap + PCI_EXP_DEVCTL,
4, reg);
msleep(EEH_PE_RST_HOLD_TIME);
break;
case EEH_RESET_DEACTIVATE:
- eeh_ops->read_config(pdn, edev->pcie_cap + PCI_EXP_DEVCTL,
+ eeh_ops->read_config(edev, edev->pcie_cap + PCI_EXP_DEVCTL,
4, &reg);
reg &= ~PCI_EXP_DEVCTL_BCR_FLR;
- eeh_ops->write_config(pdn, edev->pcie_cap + PCI_EXP_DEVCTL,
+ eeh_ops->write_config(edev, edev->pcie_cap + PCI_EXP_DEVCTL,
4, reg);
msleep(EEH_PE_RST_SETTLE_TIME);
break;
@@ -1016,7 +973,7 @@ static int pnv_eeh_do_af_flr(struct pci_dn *pdn, int option)
if (WARN_ON(!edev->af_cap))
return -ENOTTY;
- eeh_ops->read_config(pdn, edev->af_cap + PCI_AF_CAP, 1, &cap);
+ eeh_ops->read_config(edev, edev->af_cap + PCI_AF_CAP, 1, &cap);
if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR))
return -ENOTTY;
@@ -1025,18 +982,18 @@ static int pnv_eeh_do_af_flr(struct pci_dn *pdn, int option)
case EEH_RESET_FUNDAMENTAL:
/*
* Wait for Transaction Pending bit to clear. A word-aligned
- * test is used, so we use the conrol offset rather than status
+ * test is used, so we use the control offset rather than status
* and shift the test bit to match.
*/
pnv_eeh_wait_for_pending(pdn, "AF",
edev->af_cap + PCI_AF_CTRL,
PCI_AF_STATUS_TP << 8);
- eeh_ops->write_config(pdn, edev->af_cap + PCI_AF_CTRL,
+ eeh_ops->write_config(edev, edev->af_cap + PCI_AF_CTRL,
1, PCI_AF_CTRL_FLR);
msleep(EEH_PE_RST_HOLD_TIME);
break;
case EEH_RESET_DEACTIVATE:
- eeh_ops->write_config(pdn, edev->af_cap + PCI_AF_CTRL, 1, 0);
+ eeh_ops->write_config(edev, edev->af_cap + PCI_AF_CTRL, 1, 0);
msleep(EEH_PE_RST_SETTLE_TIME);
break;
}
@@ -1092,7 +1049,7 @@ static int pnv_eeh_reset(struct eeh_pe *pe, int option)
* frozen state during PE reset. However, the good idea here from
* benh is to keep frozen state before we get PE reset done completely
* (until BAR restore). With the frozen state, HW drops illegal IO
- * or MMIO access, which can incur recrusive frozen PE during PE
+ * or MMIO access, which can incur recursive frozen PE during PE
* reset. The side effect is that EEH core has to clear the frozen
* state explicitly after BAR restore.
*/
@@ -1139,8 +1096,8 @@ static int pnv_eeh_reset(struct eeh_pe *pe, int option)
* bus is behind a hotplug slot and it will use the slot provided
* reset methods to prevent spurious hotplug events during the reset.
*
- * Fundemental resets need to be handled internally to EEH since the
- * PCI core doesn't really have a concept of a fundemental reset,
+ * Fundamental resets need to be handled internally to EEH since the
+ * PCI core doesn't really have a concept of a fundamental reset,
* mainly because there's no standard way to generate one. Only a
* few devices require an FRESET so it should be fine.
*/
@@ -1270,9 +1227,11 @@ static inline bool pnv_eeh_cfg_blocked(struct pci_dn *pdn)
return false;
}
-static int pnv_eeh_read_config(struct pci_dn *pdn,
+static int pnv_eeh_read_config(struct eeh_dev *edev,
int where, int size, u32 *val)
{
+ struct pci_dn *pdn = eeh_dev_to_pdn(edev);
+
if (!pdn)
return PCIBIOS_DEVICE_NOT_FOUND;
@@ -1284,9 +1243,11 @@ static int pnv_eeh_read_config(struct pci_dn *pdn,
return pnv_pci_cfg_read(pdn, where, size, val);
}
-static int pnv_eeh_write_config(struct pci_dn *pdn,
+static int pnv_eeh_write_config(struct eeh_dev *edev,
int where, int size, u32 val)
{
+ struct pci_dn *pdn = eeh_dev_to_pdn(edev);
+
if (!pdn)
return PCIBIOS_DEVICE_NOT_FOUND;
@@ -1398,7 +1359,7 @@ static int pnv_eeh_get_pe(struct pci_controller *hose,
}
/* Find the PE according to PE# */
- dev_pe = eeh_pe_get(hose, pe_no, 0);
+ dev_pe = eeh_pe_get(hose, pe_no);
if (!dev_pe)
return -EEXIST;
@@ -1640,34 +1601,24 @@ static int pnv_eeh_next_error(struct eeh_pe **pe)
return ret;
}
-static int pnv_eeh_restore_config(struct pci_dn *pdn)
+static int pnv_eeh_restore_config(struct eeh_dev *edev)
{
- struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
struct pnv_phb *phb;
s64 ret = 0;
- int config_addr = (pdn->busno << 8) | (pdn->devfn);
if (!edev)
return -EEXIST;
- /*
- * We have to restore the PCI config space after reset since the
- * firmware can't see SRIOV VFs.
- *
- * FIXME: The MPS, error routing rules, timeout setting are worthy
- * to be exported by firmware in extendible way.
- */
- if (edev->physfn) {
- ret = eeh_restore_vf_config(pdn);
- } else {
- phb = pdn->phb->private_data;
- ret = opal_pci_reinit(phb->opal_id,
- OPAL_REINIT_PCI_DEV, config_addr);
- }
+ if (edev->physfn)
+ return 0;
+
+ phb = edev->controller->private_data;
+ ret = opal_pci_reinit(phb->opal_id,
+ OPAL_REINIT_PCI_DEV, edev->bdfn);
if (ret) {
pr_warn("%s: Can't reinit PCI dev 0x%x (%lld)\n",
- __func__, config_addr, ret);
+ __func__, edev->bdfn, ret);
return -EIO;
}
@@ -1676,10 +1627,8 @@ static int pnv_eeh_restore_config(struct pci_dn *pdn)
static struct eeh_ops pnv_eeh_ops = {
.name = "powernv",
- .init = pnv_eeh_init,
.probe = pnv_eeh_probe,
.set_option = pnv_eeh_set_option,
- .get_pe_addr = pnv_eeh_get_pe_addr,
.get_state = pnv_eeh_get_state,
.reset = pnv_eeh_reset,
.get_log = pnv_eeh_get_log,
@@ -1692,24 +1641,6 @@ static struct eeh_ops pnv_eeh_ops = {
.notify_resume = NULL
};
-#ifdef CONFIG_PCI_IOV
-static void pnv_pci_fixup_vf_mps(struct pci_dev *pdev)
-{
- struct pci_dn *pdn = pci_get_pdn(pdev);
- int parent_mps;
-
- if (!pdev->is_virtfn)
- return;
-
- /* Synchronize MPS for VF and PF */
- parent_mps = pcie_get_mps(pdev->physfn);
- if ((128 << pdev->pcie_mpss) >= parent_mps)
- pcie_set_mps(pdev, parent_mps);
- pdn->mps = pcie_get_mps(pdev);
-}
-DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pnv_pci_fixup_vf_mps);
-#endif /* CONFIG_PCI_IOV */
-
/**
* eeh_powernv_init - Register platform dependent EEH operations
*
@@ -1718,9 +1649,44 @@ DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pnv_pci_fixup_vf_mps);
*/
static int __init eeh_powernv_init(void)
{
+ int max_diag_size = PNV_PCI_DIAG_BUF_SIZE;
+ struct pci_controller *hose;
+ struct pnv_phb *phb;
int ret = -EINVAL;
- ret = eeh_ops_register(&pnv_eeh_ops);
+ if (!firmware_has_feature(FW_FEATURE_OPAL)) {
+ pr_warn("%s: OPAL is required !\n", __func__);
+ return -EINVAL;
+ }
+
+ /* Set probe mode */
+ eeh_add_flag(EEH_PROBE_MODE_DEV);
+
+ /*
+ * P7IOC blocks PCI config access to frozen PE, but PHB3
+ * doesn't do that. So we have to selectively enable I/O
+ * prior to collecting error log.
+ */
+ list_for_each_entry(hose, &hose_list, list_node) {
+ phb = hose->private_data;
+
+ if (phb->model == PNV_PHB_MODEL_P7IOC)
+ eeh_add_flag(EEH_ENABLE_IO_FOR_LOG);
+
+ if (phb->diag_data_size > max_diag_size)
+ max_diag_size = phb->diag_data_size;
+
+ break;
+ }
+
+ /*
+ * eeh_init() allocates the eeh_pe and its aux data buf so the
+ * size needs to be set before calling eeh_init().
+ */
+ eeh_set_pe_aux_size(max_diag_size);
+ ppc_md.pcibios_bus_add_device = pnv_pcibios_bus_add_device;
+
+ ret = eeh_init(&pnv_eeh_ops);
if (!ret)
pr_info("EEH: PowerNV platform initialized\n");
else
@@ -1728,4 +1694,4 @@ static int __init eeh_powernv_init(void)
return ret;
}
-machine_early_initcall(powernv, eeh_powernv_init);
+machine_arch_initcall(powernv, eeh_powernv_init);
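
The EEH rework visible in this file changes the eeh_ops contract: .probe takes a pci_dev and returns the eeh_dev, the config-space accessors take an eeh_dev instead of a pci_dn, .get_pe_addr disappears, and registration plus initialisation collapse into one eeh_init(&pnv_eeh_ops) call made from an arch initcall, by which point the PCI controllers exist. A sketch of the resulting ops-table shape, reusing the signatures shown above with placeholder names:

	static struct eeh_dev *example_eeh_probe(struct pci_dev *pdev);
	static int example_eeh_read_config(struct eeh_dev *edev, int where,
					   int size, u32 *val);
	static int example_eeh_write_config(struct eeh_dev *edev, int where,
					    int size, u32 val);

	static struct eeh_ops example_eeh_ops = {
		.name		= "example",
		.probe		= example_eeh_probe,
		.read_config	= example_eeh_read_config,
		.write_config	= example_eeh_write_config,
		/* remaining callbacks unchanged in shape */
	};
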
diff --git a/arch/powerpc/platforms/powernv/idle.c b/arch/powerpc/platforms/powernv/idle.c
index 78599bca66c2..841cb7f31f4f 100644
--- a/arch/powerpc/platforms/powernv/idle.c
+++ b/arch/powerpc/platforms/powernv/idle.c
@@ -12,8 +12,8 @@
#include <linux/device.h>
#include <linux/cpu.h>
-#include <asm/asm-prototypes.h>
#include <asm/firmware.h>
+#include <asm/interrupt.h>
#include <asm/machdep.h>
#include <asm/opal.h>
#include <asm/cputhreads.h>
@@ -48,7 +48,7 @@ static bool default_stop_found;
* First stop state levels when SPR and TB loss can occur.
*/
static u64 pnv_first_tb_loss_level = MAX_STOP_STATE + 1;
-static u64 pnv_first_spr_loss_level = MAX_STOP_STATE + 1;
+static u64 deep_spr_loss_state = MAX_STOP_STATE + 1;
/*
* psscr value and mask of the deepest stop idle state.
@@ -61,7 +61,7 @@ static bool deepest_stop_found;
static unsigned long power7_offline_type;
-static int pnv_save_sprs_for_deep_states(void)
+static int __init pnv_save_sprs_for_deep_states(void)
{
int cpu;
int rc;
@@ -73,9 +73,6 @@ static int pnv_save_sprs_for_deep_states(void)
*/
uint64_t lpcr_val = mfspr(SPRN_LPCR);
uint64_t hid0_val = mfspr(SPRN_HID0);
- uint64_t hid1_val = mfspr(SPRN_HID1);
- uint64_t hid4_val = mfspr(SPRN_HID4);
- uint64_t hid5_val = mfspr(SPRN_HID5);
uint64_t hmeer_val = mfspr(SPRN_HMEER);
uint64_t msr_val = MSR_IDLE;
uint64_t psscr_val = pnv_deepest_stop_psscr_val;
@@ -115,8 +112,11 @@ static int pnv_save_sprs_for_deep_states(void)
if (rc != 0)
return rc;
- /* Only p8 needs to set extra HID regiters */
+ /* Only p8 needs to set extra HID registers */
if (!cpu_has_feature(CPU_FTR_ARCH_300)) {
+ uint64_t hid1_val = mfspr(SPRN_HID1);
+ uint64_t hid4_val = mfspr(SPRN_HID4);
+ uint64_t hid5_val = mfspr(SPRN_HID5);
rc = opal_slw_set_reg(pir, SPRN_HID1, hid1_val);
if (rc != 0)
@@ -145,9 +145,13 @@ EXPORT_SYMBOL_GPL(pnv_get_supported_cpuidle_states);
static void pnv_fastsleep_workaround_apply(void *info)
{
+ int cpu = smp_processor_id();
int rc;
int *err = info;
+ if (cpu_first_thread_sibling(cpu) != cpu)
+ return;
+
rc = opal_config_cpu_idle_state(OPAL_CONFIG_IDLE_FASTSLEEP,
OPAL_CONFIG_IDLE_APPLY);
if (rc)
@@ -174,7 +178,6 @@ static ssize_t store_fastsleep_workaround_applyonce(struct device *dev,
struct device_attribute *attr, const char *buf,
size_t count)
{
- cpumask_t primary_thread_mask;
int err;
u8 val;
@@ -198,12 +201,9 @@ static ssize_t store_fastsleep_workaround_applyonce(struct device *dev,
*/
power7_fastsleep_workaround_exit = false;
- get_online_cpus();
- primary_thread_mask = cpu_online_cores_map();
- on_each_cpu_mask(&primary_thread_mask,
- pnv_fastsleep_workaround_apply,
- &err, 1);
- put_online_cpus();
+ cpus_read_lock();
+ on_each_cpu(pnv_fastsleep_workaround_apply, &err, 1);
+ cpus_read_unlock();
if (err) {
pr_err("fastsleep_workaround_applyonce change failed while running pnv_fastsleep_workaround_apply");
goto fail;
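
The hunk above drops the cpu_online_cores_map() plumbing: the workaround is now queued on every online CPU under cpus_read_lock(), and the callback itself skips secondary SMT threads. A minimal sketch of that pattern, with a hypothetical do_core_work() standing in for the OPAL call (it is not a real kernel API):

	static void run_once_per_core(void *info)
	{
		int cpu = smp_processor_id();

		/* Only the first SMT thread of each core does the work */
		if (cpu_first_thread_sibling(cpu) != cpu)
			return;

		do_core_work(info);		/* placeholder, not a real kernel API */
	}

	static void apply_to_all_cores(void *info)
	{
		cpus_read_lock();		/* keep the online CPU mask stable */
		on_each_cpu(run_once_per_core, info, 1);
		cpus_read_unlock();
	}
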
@@ -305,8 +305,8 @@ struct p7_sprs {
/* per thread SPRs that get lost in shallow states */
u64 amr;
u64 iamr;
- u64 amor;
u64 uamor;
+ /* amor is restored to constant ~0 */
};
static unsigned long power7_idle_insn(unsigned long type)
@@ -377,7 +377,6 @@ static unsigned long power7_idle_insn(unsigned long type)
if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
sprs.amr = mfspr(SPRN_AMR);
sprs.iamr = mfspr(SPRN_IAMR);
- sprs.amor = mfspr(SPRN_AMOR);
sprs.uamor = mfspr(SPRN_UAMOR);
}
@@ -396,7 +395,7 @@ static unsigned long power7_idle_insn(unsigned long type)
*/
mtspr(SPRN_AMR, sprs.amr);
mtspr(SPRN_IAMR, sprs.iamr);
- mtspr(SPRN_AMOR, sprs.amor);
+ mtspr(SPRN_AMOR, ~0);
mtspr(SPRN_UAMOR, sprs.uamor);
}
}
@@ -491,12 +490,14 @@ subcore_woken:
mtspr(SPRN_SPRG3, local_paca->sprg_vdso);
+#ifdef CONFIG_PPC_64S_HASH_MMU
/*
* The SLB has to be restored here, but it sometimes still
* contains entries, so the __ variant must be used to prevent
* multi hits.
*/
__slb_restore_bolted_realmode();
+#endif
return srr1;
}
@@ -565,7 +566,7 @@ void power7_idle_type(unsigned long type)
irq_set_pending_from_srr1(srr1);
}
-void power7_idle(void)
+static void power7_idle(void)
{
if (!powersave_nap)
return;
@@ -588,7 +589,7 @@ struct p9_sprs {
u64 purr;
u64 spurr;
u64 dscr;
- u64 wort;
+ u64 ciabr;
u64 mmcra;
u32 mmcr0;
@@ -602,7 +603,7 @@ struct p9_sprs {
u64 uamor;
};
-static unsigned long power9_idle_stop(unsigned long psscr, bool mmu_on)
+static unsigned long power9_idle_stop(unsigned long psscr)
{
int cpu = raw_smp_processor_id();
int first = cpu_first_thread_sibling(cpu);
@@ -611,14 +612,13 @@ static unsigned long power9_idle_stop(unsigned long psscr, bool mmu_on)
unsigned long srr1;
unsigned long pls;
unsigned long mmcr0 = 0;
+ unsigned long mmcra = 0;
struct p9_sprs sprs = {}; /* avoid false used-uninitialised */
bool sprs_saved = false;
if (!(psscr & (PSSCR_EC|PSSCR_ESL))) {
/* EC=ESL=0 case */
- BUG_ON(!mmu_on);
-
/*
* Wake synchronously. SRESET via xscom may still cause
* a 0x100 powersave wakeup with SRR1 reason!
@@ -657,7 +657,8 @@ static unsigned long power9_idle_stop(unsigned long psscr, bool mmu_on)
*/
mmcr0 = mfspr(SPRN_MMCR0);
}
- if ((psscr & PSSCR_RL_MASK) >= pnv_first_spr_loss_level) {
+
+ if ((psscr & PSSCR_RL_MASK) >= deep_spr_loss_state) {
sprs.lpcr = mfspr(SPRN_LPCR);
sprs.hfscr = mfspr(SPRN_HFSCR);
sprs.fscr = mfspr(SPRN_FSCR);
@@ -665,7 +666,7 @@ static unsigned long power9_idle_stop(unsigned long psscr, bool mmu_on)
sprs.purr = mfspr(SPRN_PURR);
sprs.spurr = mfspr(SPRN_SPURR);
sprs.dscr = mfspr(SPRN_DSCR);
- sprs.wort = mfspr(SPRN_WORT);
+ sprs.ciabr = mfspr(SPRN_CIABR);
sprs.mmcra = mfspr(SPRN_MMCRA);
sprs.mmcr0 = mfspr(SPRN_MMCR0);
@@ -685,7 +686,6 @@ static unsigned long power9_idle_stop(unsigned long psscr, bool mmu_on)
sprs.amr = mfspr(SPRN_AMR);
sprs.iamr = mfspr(SPRN_IAMR);
- sprs.amor = mfspr(SPRN_AMOR);
sprs.uamor = mfspr(SPRN_UAMOR);
srr1 = isa300_idle_stop_mayloss(psscr); /* go idle */
@@ -700,15 +700,13 @@ static unsigned long power9_idle_stop(unsigned long psscr, bool mmu_on)
WARN_ON_ONCE(mfmsr() & (MSR_IR|MSR_DR));
if ((srr1 & SRR1_WAKESTATE) != SRR1_WS_NOLOSS) {
- unsigned long mmcra;
-
/*
* We don't need an isync after the mtsprs here because the
* upcoming mtmsrd is execution synchronizing.
*/
mtspr(SPRN_AMR, sprs.amr);
mtspr(SPRN_IAMR, sprs.iamr);
- mtspr(SPRN_AMOR, sprs.amor);
+ mtspr(SPRN_AMOR, ~0);
mtspr(SPRN_UAMOR, sprs.uamor);
/*
@@ -741,7 +739,7 @@ static unsigned long power9_idle_stop(unsigned long psscr, bool mmu_on)
* just always test PSSCR for SPR/TB state loss.
*/
pls = (psscr & PSSCR_PLS) >> PSSCR_PLS_SHIFT;
- if (likely(pls < pnv_first_spr_loss_level)) {
+ if (likely(pls < deep_spr_loss_state)) {
if (sprs_saved)
atomic_stop_thread_idle();
goto out;
@@ -784,7 +782,7 @@ core_woken:
mtspr(SPRN_PURR, sprs.purr);
mtspr(SPRN_SPURR, sprs.spurr);
mtspr(SPRN_DSCR, sprs.dscr);
- mtspr(SPRN_WORT, sprs.wort);
+ mtspr(SPRN_CIABR, sprs.ciabr);
mtspr(SPRN_MMCRA, sprs.mmcra);
mtspr(SPRN_MMCR0, sprs.mmcr0);
@@ -799,78 +797,10 @@ core_woken:
__slb_restore_bolted_realmode();
out:
- if (mmu_on)
- mtmsr(MSR_KERNEL);
-
- return srr1;
-}
-
-#ifdef CONFIG_HOTPLUG_CPU
-static unsigned long power9_offline_stop(unsigned long psscr)
-{
- unsigned long srr1;
-
-#ifndef CONFIG_KVM_BOOK3S_HV_POSSIBLE
- __ppc64_runlatch_off();
- srr1 = power9_idle_stop(psscr, true);
- __ppc64_runlatch_on();
-#else
- /*
- * Tell KVM we're entering idle.
- * This does not have to be done in real mode because the P9 MMU
- * is independent per-thread. Some steppings share radix/hash mode
- * between threads, but in that case KVM has a barrier sync in real
- * mode before and after switching between radix and hash.
- *
- * kvm_start_guest must still be called in real mode though, hence
- * the false argument.
- */
- local_paca->kvm_hstate.hwthread_state = KVM_HWTHREAD_IN_IDLE;
-
- __ppc64_runlatch_off();
- srr1 = power9_idle_stop(psscr, false);
- __ppc64_runlatch_on();
-
- local_paca->kvm_hstate.hwthread_state = KVM_HWTHREAD_IN_KERNEL;
- /* Order setting hwthread_state vs. testing hwthread_req */
- smp_mb();
- if (local_paca->kvm_hstate.hwthread_req)
- srr1 = idle_kvm_start_guest(srr1);
mtmsr(MSR_KERNEL);
-#endif
return srr1;
}
-#endif
-
-void power9_idle_type(unsigned long stop_psscr_val,
- unsigned long stop_psscr_mask)
-{
- unsigned long psscr;
- unsigned long srr1;
-
- if (!prep_irq_for_idle_irqsoff())
- return;
-
- psscr = mfspr(SPRN_PSSCR);
- psscr = (psscr & ~stop_psscr_mask) | stop_psscr_val;
-
- __ppc64_runlatch_off();
- srr1 = power9_idle_stop(psscr, true);
- __ppc64_runlatch_on();
-
- fini_irq_for_idle_irqsoff();
-
- irq_set_pending_from_srr1(srr1);
-}
-
-/*
- * Used for ppc_md.power_save which needs a function with no parameters
- */
-void power9_idle(void)
-{
- power9_idle_type(pnv_default_stop_val, pnv_default_stop_mask);
-}
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
/*
@@ -944,6 +874,165 @@ void pnv_power9_force_smt4_release(void)
EXPORT_SYMBOL_GPL(pnv_power9_force_smt4_release);
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
+struct p10_sprs {
+ /*
+ * SPRs that get lost in shallow states:
+ *
+ * P10 loses CR, LR, CTR, FPSCR, VSCR, XER, TAR, SPRG2, and HSPRG1
+ * isa300 idle routines restore CR, LR.
+ * CTR is volatile
+ * idle thread doesn't use FP or VEC
+ * kernel doesn't use TAR
+ * HSPRG1 is only live in HV interrupt entry
+ * SPRG2 is only live in KVM guests, KVM handles it.
+ */
+};
+
+static unsigned long power10_idle_stop(unsigned long psscr)
+{
+ int cpu = raw_smp_processor_id();
+ int first = cpu_first_thread_sibling(cpu);
+ unsigned long *state = &paca_ptrs[first]->idle_state;
+ unsigned long core_thread_mask = (1UL << threads_per_core) - 1;
+ unsigned long srr1;
+ unsigned long pls;
+// struct p10_sprs sprs = {}; /* avoid false used-uninitialised */
+ bool sprs_saved = false;
+
+ if (!(psscr & (PSSCR_EC|PSSCR_ESL))) {
+ /* EC=ESL=0 case */
+
+ /*
+ * Wake synchronously. SRESET via xscom may still cause
+ * a 0x100 powersave wakeup with SRR1 reason!
+ */
+ srr1 = isa300_idle_stop_noloss(psscr); /* go idle */
+ if (likely(!srr1))
+ return 0;
+
+ /*
+ * Registers not saved, can't recover!
+ * This would be a hardware bug
+ */
+ BUG_ON((srr1 & SRR1_WAKESTATE) != SRR1_WS_NOLOSS);
+
+ goto out;
+ }
+
+ /* EC=ESL=1 case */
+ if ((psscr & PSSCR_RL_MASK) >= deep_spr_loss_state) {
+ /* XXX: save SPRs for deep state loss here. */
+
+ sprs_saved = true;
+
+ atomic_start_thread_idle();
+ }
+
+ srr1 = isa300_idle_stop_mayloss(psscr); /* go idle */
+
+ psscr = mfspr(SPRN_PSSCR);
+
+ WARN_ON_ONCE(!srr1);
+ WARN_ON_ONCE(mfmsr() & (MSR_IR|MSR_DR));
+
+ if (unlikely((srr1 & SRR1_WAKEMASK_P8) == SRR1_WAKEHMI))
+ hmi_exception_realmode(NULL);
+
+ /*
+ * On POWER10, SRR1 bits do not match exactly as expected.
+ * SRR1_WS_GPRLOSS (10b) can also result in SPR loss, so
+ * just always test PSSCR for SPR/TB state loss.
+ */
+ pls = (psscr & PSSCR_PLS) >> PSSCR_PLS_SHIFT;
+ if (likely(pls < deep_spr_loss_state)) {
+ if (sprs_saved)
+ atomic_stop_thread_idle();
+ goto out;
+ }
+
+ /* HV state loss */
+ BUG_ON(!sprs_saved);
+
+ atomic_lock_thread_idle();
+
+ if ((*state & core_thread_mask) != 0)
+ goto core_woken;
+
+ /* XXX: restore per-core SPRs here */
+
+ if (pls >= pnv_first_tb_loss_level) {
+ /* TB loss */
+ if (opal_resync_timebase() != OPAL_SUCCESS)
+ BUG();
+ }
+
+ /*
+ * isync after restoring shared SPRs and before unlocking. Unlock
+ * only contains hwsync which does not necessarily do the right
+ * thing for SPRs.
+ */
+ isync();
+
+core_woken:
+ atomic_unlock_and_stop_thread_idle();
+
+ /* XXX: restore per-thread SPRs here */
+
+ if (!radix_enabled())
+ __slb_restore_bolted_realmode();
+
+out:
+ mtmsr(MSR_KERNEL);
+
+ return srr1;
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+static unsigned long arch300_offline_stop(unsigned long psscr)
+{
+ unsigned long srr1;
+
+ if (cpu_has_feature(CPU_FTR_ARCH_31))
+ srr1 = power10_idle_stop(psscr);
+ else
+ srr1 = power9_idle_stop(psscr);
+
+ return srr1;
+}
+#endif
+
+void arch300_idle_type(unsigned long stop_psscr_val,
+ unsigned long stop_psscr_mask)
+{
+ unsigned long psscr;
+ unsigned long srr1;
+
+ if (!prep_irq_for_idle_irqsoff())
+ return;
+
+ psscr = mfspr(SPRN_PSSCR);
+ psscr = (psscr & ~stop_psscr_mask) | stop_psscr_val;
+
+ __ppc64_runlatch_off();
+ if (cpu_has_feature(CPU_FTR_ARCH_31))
+ srr1 = power10_idle_stop(psscr);
+ else
+ srr1 = power9_idle_stop(psscr);
+ __ppc64_runlatch_on();
+
+ fini_irq_for_idle_irqsoff();
+
+ irq_set_pending_from_srr1(srr1);
+}
+
+/*
+ * Used for ppc_md.power_save which needs a function with no parameters
+ */
+static void arch300_idle(void)
+{
+ arch300_idle_type(pnv_default_stop_val, pnv_default_stop_mask);
+}
+
#ifdef CONFIG_HOTPLUG_CPU
void pnv_program_cpu_hotplug_lpcr(unsigned int cpu, u64 lpcr_val)
@@ -977,7 +1066,7 @@ unsigned long pnv_cpu_offline(unsigned int cpu)
psscr = mfspr(SPRN_PSSCR);
psscr = (psscr & ~pnv_deepest_stop_psscr_mask) |
pnv_deepest_stop_psscr_val;
- srr1 = power9_offline_stop(psscr);
+ srr1 = arch300_offline_stop(psscr);
} else if (cpu_has_feature(CPU_FTR_ARCH_206) && power7_offline_type) {
srr1 = power7_offline();
} else {
@@ -1033,7 +1122,7 @@ unsigned long pnv_cpu_offline(unsigned int cpu)
* stop instruction
*/
-int validate_psscr_val_mask(u64 *psscr_val, u64 *psscr_mask, u32 flags)
+int __init validate_psscr_val_mask(u64 *psscr_val, u64 *psscr_mask, u32 flags)
{
int err = 0;
@@ -1075,11 +1164,15 @@ int validate_psscr_val_mask(u64 *psscr_val, u64 *psscr_mask, u32 flags)
* @dt_idle_states: Number of idle state entries
* Returns 0 on success
*/
-static void __init pnv_power9_idle_init(void)
+static void __init pnv_arch300_idle_init(void)
{
u64 max_residency_ns = 0;
int i;
+ /* stop is not really architected, we only have p9,p10 drivers */
+ if (!pvr_version_is(PVR_POWER10) && !pvr_version_is(PVR_POWER9))
+ return;
+
/*
* pnv_deepest_stop_{val,mask} should be set to values corresponding to
* the deepest stop state.
@@ -1088,31 +1181,36 @@ static void __init pnv_power9_idle_init(void)
* the deepest loss-less (OPAL_PM_STOP_INST_FAST) stop state.
*/
pnv_first_tb_loss_level = MAX_STOP_STATE + 1;
- pnv_first_spr_loss_level = MAX_STOP_STATE + 1;
+ deep_spr_loss_state = MAX_STOP_STATE + 1;
for (i = 0; i < nr_pnv_idle_states; i++) {
int err;
struct pnv_idle_states_t *state = &pnv_idle_states[i];
u64 psscr_rl = state->psscr_val & PSSCR_RL_MASK;
+ /* No deep loss driver implemented for POWER10 yet */
+ if (pvr_version_is(PVR_POWER10) &&
+ state->flags & (OPAL_PM_TIMEBASE_STOP|OPAL_PM_LOSE_FULL_CONTEXT))
+ continue;
+
if ((state->flags & OPAL_PM_TIMEBASE_STOP) &&
(pnv_first_tb_loss_level > psscr_rl))
pnv_first_tb_loss_level = psscr_rl;
if ((state->flags & OPAL_PM_LOSE_FULL_CONTEXT) &&
- (pnv_first_spr_loss_level > psscr_rl))
- pnv_first_spr_loss_level = psscr_rl;
+ (deep_spr_loss_state > psscr_rl))
+ deep_spr_loss_state = psscr_rl;
/*
* The idle code does not deal with TB loss occurring
* in a shallower state than SPR loss, so force it to
* behave like SPRs are lost if TB is lost. POWER9 would
- * never encouter this, but a POWER8 core would if it
+ * never encounter this, but a POWER8 core would if it
* implemented the stop instruction. So this is for forward
* compatibility.
*/
if ((state->flags & OPAL_PM_TIMEBASE_STOP) &&
- (pnv_first_spr_loss_level > psscr_rl))
- pnv_first_spr_loss_level = psscr_rl;
+ (deep_spr_loss_state > psscr_rl))
+ deep_spr_loss_state = psscr_rl;
err = validate_psscr_val_mask(&state->psscr_val,
&state->psscr_mask,
@@ -1144,7 +1242,7 @@ static void __init pnv_power9_idle_init(void)
if (unlikely(!default_stop_found)) {
pr_warn("cpuidle-powernv: No suitable default stop state found. Disabling platform idle.\n");
} else {
- ppc_md.power_save = power9_idle;
+ ppc_md.power_save = arch300_idle;
pr_info("cpuidle-powernv: Default stop: psscr = 0x%016llx,mask=0x%016llx\n",
pnv_default_stop_val, pnv_default_stop_mask);
}
@@ -1158,7 +1256,7 @@ static void __init pnv_power9_idle_init(void)
}
pr_info("cpuidle-powernv: First stop level that may lose SPRs = 0x%llx\n",
- pnv_first_spr_loss_level);
+ deep_spr_loss_state);
pr_info("cpuidle-powernv: First stop level that may lose timebase = 0x%llx\n",
pnv_first_tb_loss_level);
@@ -1206,7 +1304,7 @@ static void __init pnv_probe_idle_states(void)
}
if (cpu_has_feature(CPU_FTR_ARCH_300))
- pnv_power9_idle_init();
+ pnv_arch300_idle_init();
for (i = 0; i < nr_pnv_idle_states; i++)
supported_cpuidle_states |= pnv_idle_states[i].flags;
@@ -1218,7 +1316,7 @@ static void __init pnv_probe_idle_states(void)
* which is the number of cpuidle states discovered through device-tree.
*/
-static int pnv_parse_cpuidle_dt(void)
+static int __init pnv_parse_cpuidle_dt(void)
{
struct device_node *np;
int nr_idle_states, i;
@@ -1270,14 +1368,14 @@ static int pnv_parse_cpuidle_dt(void)
/* Read residencies */
if (of_property_read_u32_array(np, "ibm,cpu-idle-state-residency-ns",
temp_u32, nr_idle_states)) {
- pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-latencies-ns in DT\n");
+ pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-residency-ns in DT\n");
rc = -EINVAL;
goto out;
}
for (i = 0; i < nr_idle_states; i++)
pnv_idle_states[i].residency_ns = temp_u32[i];
- /* For power9 */
+ /* For power9 and later */
if (cpu_has_feature(CPU_FTR_ARCH_300)) {
/* Read pm_crtl_val */
if (of_property_read_u64_array(np, "ibm,cpu-idle-state-psscr",
@@ -1313,7 +1411,7 @@ static int pnv_parse_cpuidle_dt(void)
goto out;
}
for (i = 0; i < nr_idle_states; i++)
- strlcpy(pnv_idle_states[i].name, temp_string[i],
+ strscpy(pnv_idle_states[i].name, temp_string[i],
PNV_IDLE_NAME_LEN);
nr_pnv_idle_states = nr_idle_states;
rc = 0;
@@ -1321,6 +1419,7 @@ out:
kfree(temp_u32);
kfree(temp_u64);
kfree(temp_string);
+ of_node_put(np);
return rc;
}
@@ -1340,8 +1439,8 @@ static int __init pnv_init_idle_states(void)
if (!cpu_has_feature(CPU_FTR_ARCH_300)) {
/* P7/P8 nap */
p->thread_idle_state = PNV_THREAD_RUNNING;
- } else {
- /* P9 stop */
+ } else if (pvr_version_is(PVR_POWER9)) {
+ /* P9 stop workarounds */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
p->requested_psscr = 0;
atomic_set(&p->dont_stop, 0);
diff --git a/arch/powerpc/platforms/powernv/memtrace.c b/arch/powerpc/platforms/powernv/memtrace.c
index eb2e75dac369..877720c64515 100644
--- a/arch/powerpc/platforms/powernv/memtrace.c
+++ b/arch/powerpc/platforms/powernv/memtrace.c
@@ -18,7 +18,7 @@
#include <linux/memory_hotplug.h>
#include <linux/numa.h>
#include <asm/machdep.h>
-#include <asm/debugfs.h>
+#include <asm/cacheflush.h>
/* This enables us to keep track of the memory removed from each node. */
struct memtrace_entry {
@@ -30,6 +30,7 @@ struct memtrace_entry {
char name[16];
};
+static DEFINE_MUTEX(memtrace_mutex);
static u64 memtrace_size;
static struct memtrace_entry *memtrace_array;
@@ -44,90 +45,102 @@ static ssize_t memtrace_read(struct file *filp, char __user *ubuf,
return simple_read_from_buffer(ubuf, count, ppos, ent->mem, ent->size);
}
+static int memtrace_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct memtrace_entry *ent = filp->private_data;
+
+ if (ent->size < vma->vm_end - vma->vm_start)
+ return -EINVAL;
+
+ if (vma->vm_pgoff << PAGE_SHIFT >= ent->size)
+ return -EINVAL;
+
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ return remap_pfn_range(vma, vma->vm_start, PHYS_PFN(ent->start) + vma->vm_pgoff,
+ vma->vm_end - vma->vm_start, vma->vm_page_prot);
+}
+
static const struct file_operations memtrace_fops = {
.llseek = default_llseek,
.read = memtrace_read,
.open = simple_open,
+ .mmap = memtrace_mmap,
};
-static int check_memblock_online(struct memory_block *mem, void *arg)
-{
- if (mem->state != MEM_ONLINE)
- return -1;
-
- return 0;
-}
-
-static int change_memblock_state(struct memory_block *mem, void *arg)
+#define FLUSH_CHUNK_SIZE SZ_1G
+/**
+ * flush_dcache_range_chunked(): Write any modified data cache blocks out to
+ * memory and invalidate them, in chunks of up to FLUSH_CHUNK_SIZE
+ * Does not invalidate the corresponding instruction cache blocks.
+ *
+ * @start: the start address
+ * @stop: the stop address (exclusive)
+ * @chunk: the max size of the chunks
+ */
+static void flush_dcache_range_chunked(unsigned long start, unsigned long stop,
+ unsigned long chunk)
{
- unsigned long state = (unsigned long)arg;
-
- mem->state = state;
+ unsigned long i;
- return 0;
+ for (i = start; i < stop; i += chunk) {
+ flush_dcache_range(i, min(stop, i + chunk));
+ cond_resched();
+ }
}
-/* called with device_hotplug_lock held */
-static bool memtrace_offline_pages(u32 nid, u64 start_pfn, u64 nr_pages)
+static void memtrace_clear_range(unsigned long start_pfn,
+ unsigned long nr_pages)
{
- const unsigned long start = PFN_PHYS(start_pfn);
- const unsigned long size = PFN_PHYS(nr_pages);
+ unsigned long pfn;
- if (walk_memory_blocks(start, size, NULL, check_memblock_online))
- return false;
-
- walk_memory_blocks(start, size, (void *)MEM_GOING_OFFLINE,
- change_memblock_state);
-
- if (offline_pages(start_pfn, nr_pages)) {
- walk_memory_blocks(start, size, (void *)MEM_ONLINE,
- change_memblock_state);
- return false;
+ /* As HIGHMEM does not apply, use clear_page() directly. */
+ for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) {
+ if (IS_ALIGNED(pfn, PAGES_PER_SECTION))
+ cond_resched();
+ clear_page(__va(PFN_PHYS(pfn)));
}
-
- walk_memory_blocks(start, size, (void *)MEM_OFFLINE,
- change_memblock_state);
-
-
- return true;
+ /*
+ * Before we go ahead and use this range as cache inhibited range
+ * flush the cache.
+ */
+ flush_dcache_range_chunked((unsigned long)pfn_to_kaddr(start_pfn),
+ (unsigned long)pfn_to_kaddr(start_pfn + nr_pages),
+ FLUSH_CHUNK_SIZE);
}
static u64 memtrace_alloc_node(u32 nid, u64 size)
{
- u64 start_pfn, end_pfn, nr_pages, pfn;
- u64 base_pfn;
- u64 bytes = memory_block_size_bytes();
+ const unsigned long nr_pages = PHYS_PFN(size);
+ unsigned long pfn, start_pfn;
+ struct page *page;
- if (!node_spanned_pages(nid))
+ /*
+ * Trace memory needs to be aligned to the size, which is guaranteed
+ * by alloc_contig_pages().
+ */
+ page = alloc_contig_pages(nr_pages, GFP_KERNEL | __GFP_THISNODE |
+ __GFP_NOWARN, nid, NULL);
+ if (!page)
return 0;
+ start_pfn = page_to_pfn(page);
- start_pfn = node_start_pfn(nid);
- end_pfn = node_end_pfn(nid);
- nr_pages = size >> PAGE_SHIFT;
-
- /* Trace memory needs to be aligned to the size */
- end_pfn = round_down(end_pfn - nr_pages, nr_pages);
-
- lock_device_hotplug();
- for (base_pfn = end_pfn; base_pfn > start_pfn; base_pfn -= nr_pages) {
- if (memtrace_offline_pages(nid, base_pfn, nr_pages) == true) {
- /*
- * Remove memory in memory block size chunks so that
- * iomem resources are always split to the same size and
- * we never try to remove memory that spans two iomem
- * resources.
- */
- end_pfn = base_pfn + nr_pages;
- for (pfn = base_pfn; pfn < end_pfn; pfn += bytes>> PAGE_SHIFT) {
- __remove_memory(nid, pfn << PAGE_SHIFT, bytes);
- }
- unlock_device_hotplug();
- return base_pfn << PAGE_SHIFT;
- }
- }
- unlock_device_hotplug();
+ /*
+ * Clear the range while we still have a linear mapping.
+ *
+ * TODO: use __GFP_ZERO with alloc_contig_pages() once supported.
+ */
+ memtrace_clear_range(start_pfn, nr_pages);
- return 0;
+ /*
+ * Set pages PageOffline(), to indicate that nobody (e.g., hibernation,
+ * dumping, ...) should be touching these pages.
+ */
+ for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++)
+ __SetPageOffline(pfn_to_page(pfn));
+
+ arch_remove_linear_mapping(PFN_PHYS(start_pfn), size);
+
+ return PFN_PHYS(start_pfn);
}
static int memtrace_init_regions_runtime(u64 size)
@@ -187,14 +200,9 @@ static int memtrace_init_debugfs(void)
snprintf(ent->name, 16, "%08x", ent->nid);
dir = debugfs_create_dir(ent->name, memtrace_debugfs_dir);
- if (!dir) {
- pr_err("Failed to create debugfs directory for node %d\n",
- ent->nid);
- return -1;
- }
ent->dir = dir;
- debugfs_create_file("trace", 0400, dir, ent, &memtrace_fops);
+ debugfs_create_file_unsafe("trace", 0600, dir, ent, &memtrace_fops);
debugfs_create_x64("start", 0400, dir, &ent->start);
debugfs_create_x64("size", 0400, dir, &ent->size);
}
@@ -202,16 +210,30 @@ static int memtrace_init_debugfs(void)
return ret;
}
-static int online_mem_block(struct memory_block *mem, void *arg)
+static int memtrace_free(int nid, u64 start, u64 size)
{
- return device_online(&mem->dev);
+ struct mhp_params params = { .pgprot = PAGE_KERNEL };
+ const unsigned long nr_pages = PHYS_PFN(size);
+ const unsigned long start_pfn = PHYS_PFN(start);
+ unsigned long pfn;
+ int ret;
+
+ ret = arch_create_linear_mapping(nid, start, size, &params);
+ if (ret)
+ return ret;
+
+ for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++)
+ __ClearPageOffline(pfn_to_page(pfn));
+
+ free_contig_range(start_pfn, nr_pages);
+ return 0;
}
/*
- * Iterate through the chunks of memory we have removed from the kernel
- * and attempt to add them back to the kernel.
+ * Iterate through the chunks of memory we allocated and attempt to expose
+ * them back to the kernel.
*/
-static int memtrace_online(void)
+static int memtrace_free_regions(void)
{
int i, ret = 0;
struct memtrace_entry *ent;
@@ -219,7 +241,7 @@ static int memtrace_online(void)
for (i = memtrace_array_nr - 1; i >= 0; i--) {
ent = &memtrace_array[i];
- /* We have onlined this chunk previously */
+ /* We have freed this chunk previously */
if (ent->nid == NUMA_NO_NODE)
continue;
@@ -229,36 +251,25 @@ static int memtrace_online(void)
ent->mem = 0;
}
- if (add_memory(ent->nid, ent->start, ent->size)) {
- pr_err("Failed to add trace memory to node %d\n",
+ if (memtrace_free(ent->nid, ent->start, ent->size)) {
+ pr_err("Failed to free trace memory on node %d\n",
ent->nid);
ret += 1;
continue;
}
/*
- * If kernel isn't compiled with the auto online option
- * we need to online the memory ourselves.
- */
- if (!memhp_auto_online) {
- lock_device_hotplug();
- walk_memory_blocks(ent->start, ent->size, NULL,
- online_mem_block);
- unlock_device_hotplug();
- }
-
- /*
- * Memory was added successfully so clean up references to it
- * so on reentry we can tell that this chunk was added.
+ * Memory was freed successfully so clean up references to it
+ * so on reentry we can tell that this chunk was freed.
*/
debugfs_remove_recursive(ent->dir);
- pr_info("Added trace memory back to node %d\n", ent->nid);
+ pr_info("Freed trace memory back on node %d\n", ent->nid);
ent->size = ent->start = ent->nid = NUMA_NO_NODE;
}
if (ret)
return ret;
- /* If all chunks of memory were added successfully, reset globals */
+ /* If all chunks of memory were freed successfully, reset globals */
kfree(memtrace_array);
memtrace_array = NULL;
memtrace_size = 0;
@@ -268,6 +279,7 @@ static int memtrace_online(void)
static int memtrace_enable_set(void *data, u64 val)
{
+ int rc = -EAGAIN;
u64 bytes;
/*
@@ -280,25 +292,29 @@ static int memtrace_enable_set(void *data, u64 val)
return -EINVAL;
}
- /* Re-add/online previously removed/offlined memory */
- if (memtrace_size) {
- if (memtrace_online())
- return -EAGAIN;
- }
+ mutex_lock(&memtrace_mutex);
- if (!val)
- return 0;
+ /* Free all previously allocated memory. */
+ if (memtrace_size && memtrace_free_regions())
+ goto out_unlock;
+
+ if (!val) {
+ rc = 0;
+ goto out_unlock;
+ }
- /* Offline and remove memory */
+ /* Allocate memory. */
if (memtrace_init_regions_runtime(val))
- return -EINVAL;
+ goto out_unlock;
if (memtrace_init_debugfs())
- return -EINVAL;
+ goto out_unlock;
memtrace_size = val;
-
- return 0;
+ rc = 0;
+out_unlock:
+ mutex_unlock(&memtrace_mutex);
+ return rc;
}
static int memtrace_enable_get(void *data, u64 *val)
@@ -313,9 +329,7 @@ DEFINE_SIMPLE_ATTRIBUTE(memtrace_init_fops, memtrace_enable_get,
static int memtrace_init(void)
{
memtrace_debugfs_dir = debugfs_create_dir("memtrace",
- powerpc_debugfs_root);
- if (!memtrace_debugfs_dir)
- return -1;
+ arch_debugfs_dir);
debugfs_create_file("enable", 0600, memtrace_debugfs_dir,
NULL, &memtrace_init_fops);
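
With the memory-hotplug path gone, trace memory is carved out with alloc_contig_pages() and handed to user space through debugfs, including the new mmap handler. A minimal user-space sketch, assuming debugfs is mounted at /sys/kernel/debug and using the %08x per-node directory name that memtrace_init_debugfs() generates (node 0 here):

	#include <fcntl.h>
	#include <string.h>
	#include <sys/mman.h>
	#include <unistd.h>

	int main(void)
	{
		const char *enable = "/sys/kernel/debug/powerpc/memtrace/enable";
		/* example path: directory name is the node id printed as %08x */
		const char *trace  = "/sys/kernel/debug/powerpc/memtrace/00000000/trace";
		const char *size   = "0x10000000";	/* 256MiB per node */
		size_t len = 0x10000000;
		void *buf;
		int fd;

		fd = open(enable, O_WRONLY);
		if (fd < 0 || write(fd, size, strlen(size)) < 0)
			return 1;
		close(fd);

		fd = open(trace, O_RDONLY);
		if (fd < 0)
			return 1;

		/* memtrace_mmap() maps the region cache-inhibited */
		buf = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);
		if (buf == MAP_FAILED)
			return 1;

		/* ... point the hardware trace unit at the region, then read it ... */

		munmap(buf, len);
		close(fd);
		return 0;
	}

Writing a size to "enable" allocates one such chunk per online node; writing 0 frees them again via memtrace_free_regions().
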
diff --git a/arch/powerpc/platforms/powernv/npu-dma.c b/arch/powerpc/platforms/powernv/npu-dma.c
deleted file mode 100644
index b95b9e3c4c98..000000000000
--- a/arch/powerpc/platforms/powernv/npu-dma.c
+++ /dev/null
@@ -1,630 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * This file implements the DMA operations for NVLink devices. The NPU
- * devices all point to the same iommu table as the parent PCI device.
- *
- * Copyright Alistair Popple, IBM Corporation 2015.
- */
-
-#include <linux/mmu_notifier.h>
-#include <linux/mmu_context.h>
-#include <linux/of.h>
-#include <linux/pci.h>
-#include <linux/memblock.h>
-#include <linux/sizes.h>
-
-#include <asm/debugfs.h>
-#include <asm/powernv.h>
-#include <asm/opal.h>
-
-#include "pci.h"
-
-static struct pci_dev *get_pci_dev(struct device_node *dn)
-{
- struct pci_dn *pdn = PCI_DN(dn);
- struct pci_dev *pdev;
-
- pdev = pci_get_domain_bus_and_slot(pci_domain_nr(pdn->phb->bus),
- pdn->busno, pdn->devfn);
-
- /*
- * pci_get_domain_bus_and_slot() increased the reference count of
- * the PCI device, but callers don't need that actually as the PE
- * already holds a reference to the device. Since callers aren't
- * aware of the reference count change, call pci_dev_put() now to
- * avoid leaks.
- */
- if (pdev)
- pci_dev_put(pdev);
-
- return pdev;
-}
-
-/* Given a NPU device get the associated PCI device. */
-struct pci_dev *pnv_pci_get_gpu_dev(struct pci_dev *npdev)
-{
- struct device_node *dn;
- struct pci_dev *gpdev;
-
- if (WARN_ON(!npdev))
- return NULL;
-
- if (WARN_ON(!npdev->dev.of_node))
- return NULL;
-
- /* Get assoicated PCI device */
- dn = of_parse_phandle(npdev->dev.of_node, "ibm,gpu", 0);
- if (!dn)
- return NULL;
-
- gpdev = get_pci_dev(dn);
- of_node_put(dn);
-
- return gpdev;
-}
-EXPORT_SYMBOL(pnv_pci_get_gpu_dev);
-
-/* Given the real PCI device get a linked NPU device. */
-struct pci_dev *pnv_pci_get_npu_dev(struct pci_dev *gpdev, int index)
-{
- struct device_node *dn;
- struct pci_dev *npdev;
-
- if (WARN_ON(!gpdev))
- return NULL;
-
- /* Not all PCI devices have device-tree nodes */
- if (!gpdev->dev.of_node)
- return NULL;
-
- /* Get assoicated PCI device */
- dn = of_parse_phandle(gpdev->dev.of_node, "ibm,npu", index);
- if (!dn)
- return NULL;
-
- npdev = get_pci_dev(dn);
- of_node_put(dn);
-
- return npdev;
-}
-EXPORT_SYMBOL(pnv_pci_get_npu_dev);
-
-#ifdef CONFIG_IOMMU_API
-/*
- * Returns the PE assoicated with the PCI device of the given
- * NPU. Returns the linked pci device if pci_dev != NULL.
- */
-static struct pnv_ioda_pe *get_gpu_pci_dev_and_pe(struct pnv_ioda_pe *npe,
- struct pci_dev **gpdev)
-{
- struct pnv_phb *phb;
- struct pci_controller *hose;
- struct pci_dev *pdev;
- struct pnv_ioda_pe *pe;
- struct pci_dn *pdn;
-
- pdev = pnv_pci_get_gpu_dev(npe->pdev);
- if (!pdev)
- return NULL;
-
- pdn = pci_get_pdn(pdev);
- if (WARN_ON(!pdn || pdn->pe_number == IODA_INVALID_PE))
- return NULL;
-
- hose = pci_bus_to_host(pdev->bus);
- phb = hose->private_data;
- pe = &phb->ioda.pe_array[pdn->pe_number];
-
- if (gpdev)
- *gpdev = pdev;
-
- return pe;
-}
-
-static long pnv_npu_unset_window(struct iommu_table_group *table_group,
- int num);
-
-static long pnv_npu_set_window(struct iommu_table_group *table_group, int num,
- struct iommu_table *tbl)
-{
- struct pnv_ioda_pe *npe = container_of(table_group, struct pnv_ioda_pe,
- table_group);
- struct pnv_phb *phb = npe->phb;
- int64_t rc;
- const unsigned long size = tbl->it_indirect_levels ?
- tbl->it_level_size : tbl->it_size;
- const __u64 start_addr = tbl->it_offset << tbl->it_page_shift;
- const __u64 win_size = tbl->it_size << tbl->it_page_shift;
- int num2 = (num == 0) ? 1 : 0;
-
- /* NPU has just one TVE so if there is another table, remove it first */
- if (npe->table_group.tables[num2])
- pnv_npu_unset_window(&npe->table_group, num2);
-
- pe_info(npe, "Setting up window %llx..%llx pg=%lx\n",
- start_addr, start_addr + win_size - 1,
- IOMMU_PAGE_SIZE(tbl));
-
- rc = opal_pci_map_pe_dma_window(phb->opal_id,
- npe->pe_number,
- npe->pe_number,
- tbl->it_indirect_levels + 1,
- __pa(tbl->it_base),
- size << 3,
- IOMMU_PAGE_SIZE(tbl));
- if (rc) {
- pe_err(npe, "Failed to configure TCE table, err %lld\n", rc);
- return rc;
- }
- pnv_pci_ioda2_tce_invalidate_entire(phb, false);
-
- /* Add the table to the list so its TCE cache will get invalidated */
- pnv_pci_link_table_and_group(phb->hose->node, num,
- tbl, &npe->table_group);
-
- return 0;
-}
-
-static long pnv_npu_unset_window(struct iommu_table_group *table_group, int num)
-{
- struct pnv_ioda_pe *npe = container_of(table_group, struct pnv_ioda_pe,
- table_group);
- struct pnv_phb *phb = npe->phb;
- int64_t rc;
-
- if (!npe->table_group.tables[num])
- return 0;
-
- pe_info(npe, "Removing DMA window\n");
-
- rc = opal_pci_map_pe_dma_window(phb->opal_id, npe->pe_number,
- npe->pe_number,
- 0/* levels */, 0/* table address */,
- 0/* table size */, 0/* page size */);
- if (rc) {
- pe_err(npe, "Unmapping failed, ret = %lld\n", rc);
- return rc;
- }
- pnv_pci_ioda2_tce_invalidate_entire(phb, false);
-
- pnv_pci_unlink_table_and_group(npe->table_group.tables[num],
- &npe->table_group);
-
- return 0;
-}
-
-/* Switch ownership from platform code to external user (e.g. VFIO) */
-static void pnv_npu_take_ownership(struct iommu_table_group *table_group)
-{
- struct pnv_ioda_pe *npe = container_of(table_group, struct pnv_ioda_pe,
- table_group);
- struct pnv_phb *phb = npe->phb;
- int64_t rc;
- struct pci_dev *gpdev = NULL;
-
- /*
- * Note: NPU has just a single TVE in the hardware which means that
- * while used by the kernel, it can have either 32bit window or
- * DMA bypass but never both. So we deconfigure 32bit window only
- * if it was enabled at the moment of ownership change.
- */
- if (npe->table_group.tables[0]) {
- pnv_npu_unset_window(&npe->table_group, 0);
- return;
- }
-
- /* Disable bypass */
- rc = opal_pci_map_pe_dma_window_real(phb->opal_id,
- npe->pe_number, npe->pe_number,
- 0 /* bypass base */, 0);
- if (rc) {
- pe_err(npe, "Failed to disable bypass, err %lld\n", rc);
- return;
- }
- pnv_pci_ioda2_tce_invalidate_entire(npe->phb, false);
-
- get_gpu_pci_dev_and_pe(npe, &gpdev);
- if (gpdev)
- pnv_npu2_unmap_lpar_dev(gpdev);
-}
-
-static void pnv_npu_release_ownership(struct iommu_table_group *table_group)
-{
- struct pnv_ioda_pe *npe = container_of(table_group, struct pnv_ioda_pe,
- table_group);
- struct pci_dev *gpdev = NULL;
-
- get_gpu_pci_dev_and_pe(npe, &gpdev);
- if (gpdev)
- pnv_npu2_map_lpar_dev(gpdev, 0, MSR_DR | MSR_PR | MSR_HV);
-}
-
-static struct iommu_table_group_ops pnv_pci_npu_ops = {
- .set_window = pnv_npu_set_window,
- .unset_window = pnv_npu_unset_window,
- .take_ownership = pnv_npu_take_ownership,
- .release_ownership = pnv_npu_release_ownership,
-};
-#endif /* !CONFIG_IOMMU_API */
-
-/*
- * NPU2 ATS
- */
-/* Maximum possible number of ATSD MMIO registers per NPU */
-#define NV_NMMU_ATSD_REGS 8
-#define NV_NPU_MAX_PE_NUM 16
-
-/*
- * A compound NPU IOMMU group which might consist of 1 GPU + 2xNPUs (POWER8) or
- * up to 3 x (GPU + 2xNPUs) (POWER9).
- */
-struct npu_comp {
- struct iommu_table_group table_group;
- int pe_num;
- struct pnv_ioda_pe *pe[NV_NPU_MAX_PE_NUM];
-};
-
-/* An NPU descriptor, valid for POWER9 only */
-struct npu {
- int index;
- struct npu_comp npucomp;
-};
-
-#ifdef CONFIG_IOMMU_API
-static long pnv_npu_peers_create_table_userspace(
- struct iommu_table_group *table_group,
- int num, __u32 page_shift, __u64 window_size, __u32 levels,
- struct iommu_table **ptbl)
-{
- struct npu_comp *npucomp = container_of(table_group, struct npu_comp,
- table_group);
-
- if (!npucomp->pe_num || !npucomp->pe[0] ||
- !npucomp->pe[0]->table_group.ops ||
- !npucomp->pe[0]->table_group.ops->create_table)
- return -EFAULT;
-
- return npucomp->pe[0]->table_group.ops->create_table(
- &npucomp->pe[0]->table_group, num, page_shift,
- window_size, levels, ptbl);
-}
-
-static long pnv_npu_peers_set_window(struct iommu_table_group *table_group,
- int num, struct iommu_table *tbl)
-{
- int i, j;
- long ret = 0;
- struct npu_comp *npucomp = container_of(table_group, struct npu_comp,
- table_group);
-
- for (i = 0; i < npucomp->pe_num; ++i) {
- struct pnv_ioda_pe *pe = npucomp->pe[i];
-
- if (!pe->table_group.ops->set_window)
- continue;
-
- ret = pe->table_group.ops->set_window(&pe->table_group,
- num, tbl);
- if (ret)
- break;
- }
-
- if (ret) {
- for (j = 0; j < i; ++j) {
- struct pnv_ioda_pe *pe = npucomp->pe[j];
-
- if (!pe->table_group.ops->unset_window)
- continue;
-
- ret = pe->table_group.ops->unset_window(
- &pe->table_group, num);
- if (ret)
- break;
- }
- } else {
- table_group->tables[num] = iommu_tce_table_get(tbl);
- }
-
- return ret;
-}
-
-static long pnv_npu_peers_unset_window(struct iommu_table_group *table_group,
- int num)
-{
- int i, j;
- long ret = 0;
- struct npu_comp *npucomp = container_of(table_group, struct npu_comp,
- table_group);
-
- for (i = 0; i < npucomp->pe_num; ++i) {
- struct pnv_ioda_pe *pe = npucomp->pe[i];
-
- WARN_ON(npucomp->table_group.tables[num] !=
- table_group->tables[num]);
- if (!npucomp->table_group.tables[num])
- continue;
-
- if (!pe->table_group.ops->unset_window)
- continue;
-
- ret = pe->table_group.ops->unset_window(&pe->table_group, num);
- if (ret)
- break;
- }
-
- if (ret) {
- for (j = 0; j < i; ++j) {
- struct pnv_ioda_pe *pe = npucomp->pe[j];
-
- if (!npucomp->table_group.tables[num])
- continue;
-
- if (!pe->table_group.ops->set_window)
- continue;
-
- ret = pe->table_group.ops->set_window(&pe->table_group,
- num, table_group->tables[num]);
- if (ret)
- break;
- }
- } else if (table_group->tables[num]) {
- iommu_tce_table_put(table_group->tables[num]);
- table_group->tables[num] = NULL;
- }
-
- return ret;
-}
-
-static void pnv_npu_peers_take_ownership(struct iommu_table_group *table_group)
-{
- int i;
- struct npu_comp *npucomp = container_of(table_group, struct npu_comp,
- table_group);
-
- for (i = 0; i < npucomp->pe_num; ++i) {
- struct pnv_ioda_pe *pe = npucomp->pe[i];
-
- if (!pe->table_group.ops->take_ownership)
- continue;
- pe->table_group.ops->take_ownership(&pe->table_group);
- }
-}
-
-static void pnv_npu_peers_release_ownership(
- struct iommu_table_group *table_group)
-{
- int i;
- struct npu_comp *npucomp = container_of(table_group, struct npu_comp,
- table_group);
-
- for (i = 0; i < npucomp->pe_num; ++i) {
- struct pnv_ioda_pe *pe = npucomp->pe[i];
-
- if (!pe->table_group.ops->release_ownership)
- continue;
- pe->table_group.ops->release_ownership(&pe->table_group);
- }
-}
-
-static struct iommu_table_group_ops pnv_npu_peers_ops = {
- .get_table_size = pnv_pci_ioda2_get_table_size,
- .create_table = pnv_npu_peers_create_table_userspace,
- .set_window = pnv_npu_peers_set_window,
- .unset_window = pnv_npu_peers_unset_window,
- .take_ownership = pnv_npu_peers_take_ownership,
- .release_ownership = pnv_npu_peers_release_ownership,
-};
-
-static void pnv_comp_attach_table_group(struct npu_comp *npucomp,
- struct pnv_ioda_pe *pe)
-{
- if (WARN_ON(npucomp->pe_num == NV_NPU_MAX_PE_NUM))
- return;
-
- npucomp->pe[npucomp->pe_num] = pe;
- ++npucomp->pe_num;
-}
-
-struct iommu_table_group *pnv_try_setup_npu_table_group(struct pnv_ioda_pe *pe)
-{
- struct iommu_table_group *table_group;
- struct npu_comp *npucomp;
- struct pci_dev *gpdev = NULL;
- struct pci_controller *hose;
- struct pci_dev *npdev = NULL;
-
- list_for_each_entry(gpdev, &pe->pbus->devices, bus_list) {
- npdev = pnv_pci_get_npu_dev(gpdev, 0);
- if (npdev)
- break;
- }
-
- if (!npdev)
- /* It is not an NPU attached device, skip */
- return NULL;
-
- hose = pci_bus_to_host(npdev->bus);
-
- if (hose->npu) {
- table_group = &hose->npu->npucomp.table_group;
-
- if (!table_group->group) {
- table_group->ops = &pnv_npu_peers_ops;
- iommu_register_group(table_group,
- hose->global_number,
- pe->pe_number);
- }
- } else {
- /* Create a group for 1 GPU and attached NPUs for POWER8 */
- pe->npucomp = kzalloc(sizeof(*pe->npucomp), GFP_KERNEL);
- table_group = &pe->npucomp->table_group;
- table_group->ops = &pnv_npu_peers_ops;
- iommu_register_group(table_group, hose->global_number,
- pe->pe_number);
- }
-
- /* Steal capabilities from a GPU PE */
- table_group->max_dynamic_windows_supported =
- pe->table_group.max_dynamic_windows_supported;
- table_group->tce32_start = pe->table_group.tce32_start;
- table_group->tce32_size = pe->table_group.tce32_size;
- table_group->max_levels = pe->table_group.max_levels;
- if (!table_group->pgsizes)
- table_group->pgsizes = pe->table_group.pgsizes;
-
- npucomp = container_of(table_group, struct npu_comp, table_group);
- pnv_comp_attach_table_group(npucomp, pe);
-
- return table_group;
-}
-
-struct iommu_table_group *pnv_npu_compound_attach(struct pnv_ioda_pe *pe)
-{
- struct iommu_table_group *table_group;
- struct npu_comp *npucomp;
- struct pci_dev *gpdev = NULL;
- struct pci_dev *npdev;
- struct pnv_ioda_pe *gpe = get_gpu_pci_dev_and_pe(pe, &gpdev);
-
- WARN_ON(!(pe->flags & PNV_IODA_PE_DEV));
- if (!gpe)
- return NULL;
-
- /*
- * IODA2 bridges get this set up from pci_controller_ops::setup_bridge
- * but NPU bridges do not have this hook defined so we do it here.
- * We do not setup other table group parameters as they won't be used
- * anyway - NVLink bridges are subordinate PEs.
- */
- pe->table_group.ops = &pnv_pci_npu_ops;
-
- table_group = iommu_group_get_iommudata(
- iommu_group_get(&gpdev->dev));
-
- /*
- * On P9 NPU PHB and PCI PHB support different page sizes,
- * keep only matching. We expect here that NVLink bridge PE pgsizes is
- * initialized by the caller.
- */
- table_group->pgsizes &= pe->table_group.pgsizes;
- npucomp = container_of(table_group, struct npu_comp, table_group);
- pnv_comp_attach_table_group(npucomp, pe);
-
- list_for_each_entry(npdev, &pe->phb->hose->bus->devices, bus_list) {
- struct pci_dev *gpdevtmp = pnv_pci_get_gpu_dev(npdev);
-
- if (gpdevtmp != gpdev)
- continue;
-
- iommu_add_device(table_group, &npdev->dev);
- }
-
- return table_group;
-}
-#endif /* CONFIG_IOMMU_API */
-
-int pnv_npu2_init(struct pci_controller *hose)
-{
- static int npu_index;
- struct npu *npu;
- int ret;
-
- npu = kzalloc(sizeof(*npu), GFP_KERNEL);
- if (!npu)
- return -ENOMEM;
-
- npu_index++;
- if (WARN_ON(npu_index >= NV_MAX_NPUS)) {
- ret = -ENOSPC;
- goto fail_exit;
- }
- npu->index = npu_index;
- hose->npu = npu;
-
- return 0;
-
-fail_exit:
- kfree(npu);
- return ret;
-}
-
-int pnv_npu2_map_lpar_dev(struct pci_dev *gpdev, unsigned int lparid,
- unsigned long msr)
-{
- int ret;
- struct pci_dev *npdev = pnv_pci_get_npu_dev(gpdev, 0);
- struct pci_controller *hose;
- struct pnv_phb *nphb;
-
- if (!npdev)
- return -ENODEV;
-
- hose = pci_bus_to_host(npdev->bus);
- nphb = hose->private_data;
-
- dev_dbg(&gpdev->dev, "Map LPAR opalid=%llu lparid=%u\n",
- nphb->opal_id, lparid);
- /*
- * Currently we only support radix and non-zero LPCR only makes sense
- * for hash tables so skiboot expects the LPCR parameter to be a zero.
- */
- ret = opal_npu_map_lpar(nphb->opal_id, pci_dev_id(gpdev), lparid,
- 0 /* LPCR bits */);
- if (ret) {
- dev_err(&gpdev->dev, "Error %d mapping device to LPAR\n", ret);
- return ret;
- }
-
- dev_dbg(&gpdev->dev, "init context opalid=%llu msr=%lx\n",
- nphb->opal_id, msr);
- ret = opal_npu_init_context(nphb->opal_id, 0/*__unused*/, msr,
- pci_dev_id(gpdev));
- if (ret < 0)
- dev_err(&gpdev->dev, "Failed to init context: %d\n", ret);
- else
- ret = 0;
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(pnv_npu2_map_lpar_dev);
-
-void pnv_npu2_map_lpar(struct pnv_ioda_pe *gpe, unsigned long msr)
-{
- struct pci_dev *gpdev;
-
- list_for_each_entry(gpdev, &gpe->pbus->devices, bus_list)
- pnv_npu2_map_lpar_dev(gpdev, 0, msr);
-}
-
-int pnv_npu2_unmap_lpar_dev(struct pci_dev *gpdev)
-{
- int ret;
- struct pci_dev *npdev = pnv_pci_get_npu_dev(gpdev, 0);
- struct pci_controller *hose;
- struct pnv_phb *nphb;
-
- if (!npdev)
- return -ENODEV;
-
- hose = pci_bus_to_host(npdev->bus);
- nphb = hose->private_data;
-
- dev_dbg(&gpdev->dev, "destroy context opalid=%llu\n",
- nphb->opal_id);
- ret = opal_npu_destroy_context(nphb->opal_id, 0/*__unused*/,
- pci_dev_id(gpdev));
- if (ret < 0) {
- dev_err(&gpdev->dev, "Failed to destroy context: %d\n", ret);
- return ret;
- }
-
- /* Set LPID to 0 anyway, just to be safe */
- dev_dbg(&gpdev->dev, "Map LPAR opalid=%llu lparid=0\n", nphb->opal_id);
- ret = opal_npu_map_lpar(nphb->opal_id, pci_dev_id(gpdev), 0 /*LPID*/,
- 0 /* LPCR bits */);
- if (ret)
- dev_err(&gpdev->dev, "Error %d mapping device to LPAR\n", ret);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(pnv_npu2_unmap_lpar_dev);
diff --git a/arch/powerpc/platforms/powernv/ocxl.c b/arch/powerpc/platforms/powernv/ocxl.c
index 8c65aacda9c8..629067781cec 100644
--- a/arch/powerpc/platforms/powernv/ocxl.c
+++ b/arch/powerpc/platforms/powernv/ocxl.c
@@ -2,7 +2,6 @@
// Copyright 2017 IBM Corp.
#include <asm/pnv-ocxl.h>
#include <asm/opal.h>
-#include <asm/xive.h>
#include <misc/ocxl-config.h>
#include "pci.h"
@@ -108,7 +107,8 @@ static int get_max_afu_index(struct pci_dev *dev, int *afu_idx)
int pos;
u32 val;
- pos = find_dvsec_from_pos(dev, OCXL_DVSEC_FUNC_ID, 0);
+ pos = pci_find_dvsec_capability(dev, PCI_VENDOR_ID_IBM,
+ OCXL_DVSEC_FUNC_ID);
if (!pos)
return -ESRCH;
@@ -289,7 +289,7 @@ int pnv_ocxl_get_pasid_count(struct pci_dev *dev, int *count)
* be used by a function depends on how many functions exist
* on the device. The NPU needs to be configured to know how
* many bits are available to PASIDs and how many are to be
- * used by the function BDF indentifier.
+ * used by the function BDF identifier.
*
* We only support one AFU-carrying function for now.
*/
@@ -478,38 +478,121 @@ EXPORT_SYMBOL_GPL(pnv_ocxl_spa_release);
int pnv_ocxl_spa_remove_pe_from_cache(void *platform_data, int pe_handle)
{
struct spa_data *data = (struct spa_data *) platform_data;
- int rc;
- rc = opal_npu_spa_clear_cache(data->phb_opal_id, data->bdfn, pe_handle);
- return rc;
+ return opal_npu_spa_clear_cache(data->phb_opal_id, data->bdfn, pe_handle);
}
EXPORT_SYMBOL_GPL(pnv_ocxl_spa_remove_pe_from_cache);
-int pnv_ocxl_alloc_xive_irq(u32 *irq, u64 *trigger_addr)
+int pnv_ocxl_map_lpar(struct pci_dev *dev, uint64_t lparid,
+ uint64_t lpcr, void __iomem **arva)
{
- __be64 flags, trigger_page;
- s64 rc;
- u32 hwirq;
-
- hwirq = xive_native_alloc_irq();
- if (!hwirq)
- return -ENOENT;
-
- rc = opal_xive_get_irq_info(hwirq, &flags, NULL, &trigger_page, NULL,
- NULL);
- if (rc || !trigger_page) {
- xive_native_free_irq(hwirq);
- return -ENOENT;
+ struct pci_controller *hose = pci_bus_to_host(dev->bus);
+ struct pnv_phb *phb = hose->private_data;
+ u64 mmio_atsd;
+ int rc;
+
+ /* ATSD physical address.
+ * ATSD LAUNCH register: write access initiates a shoot down to
+ * initiate the TLB Invalidate command.
+ */
+ rc = of_property_read_u64_index(hose->dn, "ibm,mmio-atsd",
+ 0, &mmio_atsd);
+ if (rc) {
+ dev_info(&dev->dev, "No available ATSD found\n");
+ return rc;
+ }
+
+ /* Assign a register set to a Logical Partition and MMIO ATSD
+ * LPARID register to the required value.
+ */
+ rc = opal_npu_map_lpar(phb->opal_id, pci_dev_id(dev),
+ lparid, lpcr);
+ if (rc) {
+ dev_err(&dev->dev, "Error mapping device to LPAR: %d\n", rc);
+ return rc;
+ }
+
+ *arva = ioremap(mmio_atsd, 24);
+ if (!(*arva)) {
+ dev_warn(&dev->dev, "ioremap failed - mmio_atsd: %#llx\n", mmio_atsd);
+ rc = -ENOMEM;
}
- *irq = hwirq;
- *trigger_addr = be64_to_cpu(trigger_page);
- return 0;
+ return rc;
+}
+EXPORT_SYMBOL_GPL(pnv_ocxl_map_lpar);
+
+void pnv_ocxl_unmap_lpar(void __iomem *arva)
+{
+ iounmap(arva);
}
-EXPORT_SYMBOL_GPL(pnv_ocxl_alloc_xive_irq);
+EXPORT_SYMBOL_GPL(pnv_ocxl_unmap_lpar);
-void pnv_ocxl_free_xive_irq(u32 irq)
+void pnv_ocxl_tlb_invalidate(void __iomem *arva,
+ unsigned long pid,
+ unsigned long addr,
+ unsigned long page_size)
{
- xive_native_free_irq(irq);
+ unsigned long timeout = jiffies + (HZ * PNV_OCXL_ATSD_TIMEOUT);
+ u64 val = 0ull;
+ int pend;
+ u8 size;
+
+ if (!(arva))
+ return;
+
+ if (addr) {
+ /* load Abbreviated Virtual Address register with
+ * the necessary value
+ */
+ val |= FIELD_PREP(PNV_OCXL_ATSD_AVA_AVA, addr >> (63-51));
+ out_be64(arva + PNV_OCXL_ATSD_AVA, val);
+ }
+
+ /* Write access initiates a shoot down to initiate the
+ * TLB Invalidate command
+ */
+ val = PNV_OCXL_ATSD_LNCH_R;
+ val |= FIELD_PREP(PNV_OCXL_ATSD_LNCH_RIC, 0b10);
+ if (addr)
+ val |= FIELD_PREP(PNV_OCXL_ATSD_LNCH_IS, 0b00);
+ else {
+ val |= FIELD_PREP(PNV_OCXL_ATSD_LNCH_IS, 0b01);
+ val |= PNV_OCXL_ATSD_LNCH_OCAPI_SINGLETON;
+ }
+ val |= PNV_OCXL_ATSD_LNCH_PRS;
+ /* Actual Page Size to be invalidated
+ * 000 4KB
+ * 101 64KB
+ * 001 2MB
+ * 010 1GB
+ */
+ size = 0b101;
+ if (page_size == 0x1000)
+ size = 0b000;
+ if (page_size == 0x200000)
+ size = 0b001;
+ if (page_size == 0x40000000)
+ size = 0b010;
+ val |= FIELD_PREP(PNV_OCXL_ATSD_LNCH_AP, size);
+ val |= FIELD_PREP(PNV_OCXL_ATSD_LNCH_PID, pid);
+ out_be64(arva + PNV_OCXL_ATSD_LNCH, val);
+
+ /* Poll the ATSD status register to determine when the
+ * TLB Invalidate has been completed.
+ */
+ val = in_be64(arva + PNV_OCXL_ATSD_STAT);
+ pend = val >> 63;
+
+ while (pend) {
+ if (time_after_eq(jiffies, timeout)) {
+ pr_err("%s - Timeout while reading XTS MMIO ATSD status register (val=%#llx, pidr=0x%lx)\n",
+ __func__, val, pid);
+ return;
+ }
+ cpu_relax();
+ val = in_be64(arva + PNV_OCXL_ATSD_STAT);
+ pend = val >> 63;
+ }
}
-EXPORT_SYMBOL_GPL(pnv_ocxl_free_xive_irq);
+EXPORT_SYMBOL_GPL(pnv_ocxl_tlb_invalidate);
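
The XIVE IRQ helpers are replaced by an ATSD-based API: pnv_ocxl_map_lpar() hands back an ioremapped register window (arva) and pnv_ocxl_tlb_invalidate() drives the launch/status registers. A minimal caller-side sketch, assuming dev is the OpenCAPI PCI function and pid is the address-space PID (PASID) being invalidated; the LPAR id and LPCR arguments are placeholders for whatever the calling driver owns:

	/* Sketch only: error handling trimmed, values are illustrative. */
	static int example_atsd_flush(struct pci_dev *dev, unsigned long pid,
				      unsigned long ea)
	{
		void __iomem *arva;
		int rc;

		rc = pnv_ocxl_map_lpar(dev, mfspr(SPRN_LPID), 0 /* LPCR */, &arva);
		if (rc)
			return rc;

		/* Shoot down a single 64K translation for this PID ... */
		pnv_ocxl_tlb_invalidate(arva, pid, ea, SZ_64K);

		/* ... or pass addr == 0 to flush everything for the PID */
		pnv_ocxl_tlb_invalidate(arva, pid, 0, 0);

		pnv_ocxl_unmap_lpar(arva);
		return 0;
	}
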
diff --git a/arch/powerpc/platforms/powernv/opal-async.c b/arch/powerpc/platforms/powernv/opal-async.c
index 1656e8965d6b..c094fdf5825c 100644
--- a/arch/powerpc/platforms/powernv/opal-async.c
+++ b/arch/powerpc/platforms/powernv/opal-async.c
@@ -104,7 +104,7 @@ static int __opal_async_release_token(int token)
*/
case ASYNC_TOKEN_DISPATCHED:
opal_async_tokens[token].state = ASYNC_TOKEN_ABANDONED;
- /* Fall through */
+ fallthrough;
default:
rc = 1;
}
diff --git a/arch/powerpc/platforms/powernv/opal-call.c b/arch/powerpc/platforms/powernv/opal-call.c
index 5cd0f52d258f..f812c74c61e5 100644
--- a/arch/powerpc/platforms/powernv/opal-call.c
+++ b/arch/powerpc/platforms/powernv/opal-call.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/percpu.h>
#include <linux/jump_label.h>
+#include <asm/interrupt.h>
#include <asm/opal-api.h>
#include <asm/trace.h>
#include <asm/asm-prototypes.h>
@@ -100,6 +101,9 @@ static int64_t opal_call(int64_t a0, int64_t a1, int64_t a2, int64_t a3,
bool mmu = (msr & (MSR_IR|MSR_DR));
int64_t ret;
+ /* OPAL call / firmware may use SRR and/or HSRR */
+ srr_regs_clobbered();
+
msr &= ~MSR_EE;
if (unlikely(!mmu))
@@ -267,8 +271,6 @@ OPAL_CALL(opal_xive_get_queue_state, OPAL_XIVE_GET_QUEUE_STATE);
OPAL_CALL(opal_xive_set_queue_state, OPAL_XIVE_SET_QUEUE_STATE);
OPAL_CALL(opal_xive_get_vp_state, OPAL_XIVE_GET_VP_STATE);
OPAL_CALL(opal_signal_system_reset, OPAL_SIGNAL_SYSTEM_RESET);
-OPAL_CALL(opal_npu_init_context, OPAL_NPU_INIT_CONTEXT);
-OPAL_CALL(opal_npu_destroy_context, OPAL_NPU_DESTROY_CONTEXT);
OPAL_CALL(opal_npu_map_lpar, OPAL_NPU_MAP_LPAR);
OPAL_CALL(opal_imc_counters_init, OPAL_IMC_COUNTERS_INIT);
OPAL_CALL(opal_imc_counters_start, OPAL_IMC_COUNTERS_START);
diff --git a/arch/powerpc/platforms/powernv/opal-core.c b/arch/powerpc/platforms/powernv/opal-core.c
index ed895d82c048..bb7657115f1d 100644
--- a/arch/powerpc/platforms/powernv/opal-core.c
+++ b/arch/powerpc/platforms/powernv/opal-core.c
@@ -71,6 +71,7 @@ static LIST_HEAD(opalcore_list);
static struct opalcore_config *oc_conf;
static const struct opal_mpipl_fadump *opalc_metadata;
static const struct opal_mpipl_fadump *opalc_cpu_metadata;
+static struct kobject *mpipl_kobj;
/*
* Set crashing CPU's signal to SIGUSR1. if the kernel is triggered
@@ -88,7 +89,7 @@ static inline int is_opalcore_usable(void)
return (oc_conf && oc_conf->opalcorebuf != NULL) ? 1 : 0;
}
-static Elf64_Word *append_elf64_note(Elf64_Word *buf, char *name,
+static Elf64_Word *__init append_elf64_note(Elf64_Word *buf, char *name,
u32 type, void *data,
size_t data_len)
{
@@ -107,19 +108,19 @@ static Elf64_Word *append_elf64_note(Elf64_Word *buf, char *name,
return buf;
}
-static void fill_prstatus(struct elf_prstatus *prstatus, int pir,
+static void __init fill_prstatus(struct elf_prstatus *prstatus, int pir,
struct pt_regs *regs)
{
memset(prstatus, 0, sizeof(struct elf_prstatus));
- elf_core_copy_kernel_regs(&(prstatus->pr_reg), regs);
+ elf_core_copy_regs(&(prstatus->pr_reg), regs);
/*
* Overload PID with PIR value.
* As a PIR value could also be '0', add an offset of '100'
* to every PIR to avoid misinterpretations in GDB.
*/
- prstatus->pr_pid = cpu_to_be32(100 + pir);
- prstatus->pr_ppid = cpu_to_be32(1);
+ prstatus->common.pr_pid = cpu_to_be32(100 + pir);
+ prstatus->common.pr_ppid = cpu_to_be32(1);
/*
* Indicate SIGUSR1 for crash initiated from kernel.
@@ -129,11 +130,11 @@ static void fill_prstatus(struct elf_prstatus *prstatus, int pir,
short sig;
sig = kernel_initiated ? SIGUSR1 : SIGTERM;
- prstatus->pr_cursig = cpu_to_be16(sig);
+ prstatus->common.pr_cursig = cpu_to_be16(sig);
}
}
-static Elf64_Word *auxv_to_elf64_notes(Elf64_Word *buf,
+static Elf64_Word *__init auxv_to_elf64_notes(Elf64_Word *buf,
u64 opal_boot_entry)
{
Elf64_Off *bufp = (Elf64_Off *)oc_conf->auxv_buf;
@@ -347,6 +348,8 @@ static int __init create_opalcore(void)
if (!dn || ret)
pr_warn("WARNING: Failed to read OPAL base & entry values\n");
+ of_node_put(dn);
+
/* Use count to keep track of the program headers */
count = 0;
@@ -428,7 +431,7 @@ static void opalcore_cleanup(void)
return;
/* Remove OPAL core sysfs file */
- sysfs_remove_bin_file(opal_kobj, &opal_core_attr);
+ sysfs_remove_bin_file(mpipl_kobj, &opal_core_attr);
oc_conf->ptload_phdr = NULL;
oc_conf->ptload_cnt = 0;
@@ -509,7 +512,7 @@ static void __init opalcore_config_init(void)
idx = be32_to_cpu(opalc_metadata->region_cnt);
if (idx > MAX_PT_LOAD_CNT) {
pr_warn("WARNING: OPAL regions count (%d) adjusted to limit (%d)",
- MAX_PT_LOAD_CNT, idx);
+ idx, MAX_PT_LOAD_CNT);
idx = MAX_PT_LOAD_CNT;
}
for (i = 0; i < idx; i++) {
@@ -563,9 +566,9 @@ error_out:
of_node_put(np);
}
-static ssize_t fadump_release_opalcore_store(struct kobject *kobj,
- struct kobj_attribute *attr,
- const char *buf, size_t count)
+static ssize_t release_core_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
{
int input = -1;
@@ -589,9 +592,23 @@ static ssize_t fadump_release_opalcore_store(struct kobject *kobj,
return count;
}
-static struct kobj_attribute opalcore_rel_attr = __ATTR(fadump_release_opalcore,
- 0200, NULL,
- fadump_release_opalcore_store);
+static struct kobj_attribute opalcore_rel_attr = __ATTR_WO(release_core);
+
+static struct attribute *mpipl_attr[] = {
+ &opalcore_rel_attr.attr,
+ NULL,
+};
+
+static struct bin_attribute *mpipl_bin_attr[] = {
+ &opal_core_attr,
+ NULL,
+
+};
+
+static const struct attribute_group mpipl_group = {
+ .attrs = mpipl_attr,
+ .bin_attrs = mpipl_bin_attr,
+};
static int __init opalcore_init(void)
{
@@ -609,7 +626,7 @@ static int __init opalcore_init(void)
* then capture the dump.
*/
if (!(is_opalcore_usable())) {
- pr_err("Failed to export /sys/firmware/opal/core\n");
+ pr_err("Failed to export /sys/firmware/opal/mpipl/core\n");
opalcore_cleanup();
return rc;
}
@@ -617,18 +634,28 @@ static int __init opalcore_init(void)
/* Set OPAL core file size */
opal_core_attr.size = oc_conf->opalcore_size;
+ mpipl_kobj = kobject_create_and_add("mpipl", opal_kobj);
+ if (!mpipl_kobj) {
+ pr_err("unable to create mpipl kobject\n");
+ return -ENOMEM;
+ }
+
/* Export OPAL core sysfs file */
- rc = sysfs_create_bin_file(opal_kobj, &opal_core_attr);
- if (rc != 0) {
- pr_err("Failed to export /sys/firmware/opal/core\n");
+ rc = sysfs_create_group(mpipl_kobj, &mpipl_group);
+ if (rc) {
+ pr_err("mpipl sysfs group creation failed (%d)", rc);
opalcore_cleanup();
return rc;
}
-
- rc = sysfs_create_file(kernel_kobj, &opalcore_rel_attr.attr);
+ /* The /sys/firmware/opal/core is moved to /sys/firmware/opal/mpipl/
+ * directory, need to create symlink at old location to maintain
+ * backward compatibility.
+ */
+ rc = compat_only_sysfs_link_entry_to_kobj(opal_kobj, mpipl_kobj,
+ "core", NULL);
if (rc) {
- pr_warn("unable to create sysfs file fadump_release_opalcore (%d)\n",
- rc);
+ pr_err("unable to create core symlink (%d)\n", rc);
+ return rc;
}
return 0;
diff --git a/arch/powerpc/platforms/powernv/opal-dump.c b/arch/powerpc/platforms/powernv/opal-dump.c
index 543c816fa99e..16c5860f1372 100644
--- a/arch/powerpc/platforms/powernv/opal-dump.c
+++ b/arch/powerpc/platforms/powernv/opal-dump.c
@@ -88,9 +88,14 @@ static ssize_t dump_ack_store(struct dump_obj *dump_obj,
const char *buf,
size_t count)
{
- dump_send_ack(dump_obj->id);
- sysfs_remove_file_self(&dump_obj->kobj, &attr->attr);
- kobject_put(&dump_obj->kobj);
+ /*
+ * Try to self remove this attribute. If we are successful,
+ * delete the kobject itself.
+ */
+ if (sysfs_remove_file_self(&dump_obj->kobj, &attr->attr)) {
+ dump_send_ack(dump_obj->id);
+ kobject_put(&dump_obj->kobj);
+ }
return count;
}
@@ -145,7 +150,7 @@ static struct attribute *initiate_attrs[] = {
NULL,
};
-static struct attribute_group initiate_attr_group = {
+static const struct attribute_group initiate_attr_group = {
.attrs = initiate_attrs,
};
@@ -203,11 +208,12 @@ static struct attribute *dump_default_attrs[] = {
&ack_attribute.attr,
NULL,
};
+ATTRIBUTE_GROUPS(dump_default);
static struct kobj_type dump_ktype = {
.sysfs_ops = &dump_sysfs_ops,
.release = &dump_release,
- .default_attrs = dump_default_attrs,
+ .default_groups = dump_default_groups,
};
static int64_t dump_read_info(uint32_t *dump_id, uint32_t *dump_size, uint32_t *dump_type)
@@ -318,15 +324,14 @@ static ssize_t dump_attr_read(struct file *filep, struct kobject *kobj,
return count;
}
-static struct dump_obj *create_dump_obj(uint32_t id, size_t size,
- uint32_t type)
+static void create_dump_obj(uint32_t id, size_t size, uint32_t type)
{
struct dump_obj *dump;
int rc;
dump = kzalloc(sizeof(*dump), GFP_KERNEL);
if (!dump)
- return NULL;
+ return;
dump->kobj.kset = dump_kset;
@@ -346,21 +351,39 @@ static struct dump_obj *create_dump_obj(uint32_t id, size_t size,
rc = kobject_add(&dump->kobj, NULL, "0x%x-0x%x", type, id);
if (rc) {
kobject_put(&dump->kobj);
- return NULL;
+ return;
}
+ /*
+ * As soon as the sysfs file for this dump is created/activated there is
+ * a chance the opal_errd daemon (or any userspace) might read and
+ * acknowledge the dump before kobject_uevent() is called. If that
+ * happens then there is a potential race between
+ * dump_ack_store->kobject_put() and kobject_uevent() which leads to a
+ * use-after-free of a kernfs object resulting in a kernel crash.
+ *
+ * To avoid that, we need to take a reference on behalf of the bin file,
+ * so that our reference remains valid while we call kobject_uevent().
+ * We then drop our reference before exiting the function, leaving the
+ * bin file to drop the last reference (if it hasn't already).
+ */
+
+ /* Take a reference for the bin file */
+ kobject_get(&dump->kobj);
rc = sysfs_create_bin_file(&dump->kobj, &dump->dump_attr);
- if (rc) {
+ if (rc == 0) {
+ kobject_uevent(&dump->kobj, KOBJ_ADD);
+
+ pr_info("%s: New platform dump. ID = 0x%x Size %u\n",
+ __func__, dump->id, dump->size);
+ } else {
+ /* Drop reference count taken for bin file */
kobject_put(&dump->kobj);
- return NULL;
}
- pr_info("%s: New platform dump. ID = 0x%x Size %u\n",
- __func__, dump->id, dump->size);
-
- kobject_uevent(&dump->kobj, KOBJ_ADD);
-
- return dump;
+ /* Drop our reference */
+ kobject_put(&dump->kobj);
+ return;
}
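
The comment above describes the fix: userspace may acknowledge (and thus release) the dump object as soon as its bin file appears, so the creator holds its own reference across kobject_uevent() and drops it on the way out. On the ack side, sysfs_remove_file_self() returns true only for the writer that actually removed the file, so only one concurrent acker performs the kobject_put(). A condensed sketch of the creation-side lifecycle, with placeholder names:

	/* Sketch: keep a reference alive until after the uevent is sent. */
	static void demo_publish(struct kobject *kobj, struct bin_attribute *bin)
	{
		/* Reference held on behalf of the bin file / uevent window */
		kobject_get(kobj);

		if (sysfs_create_bin_file(kobj, bin) == 0)
			kobject_uevent(kobj, KOBJ_ADD);	/* safe: we still hold a ref */
		else
			kobject_put(kobj);		/* undo the extra reference */

		/* Drop the creator's reference; the ack path drops the last one */
		kobject_put(kobj);
	}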
static irqreturn_t process_dump(int irq, void *data)
@@ -397,7 +420,7 @@ void __init opal_platform_dump_init(void)
int rc;
int dump_irq;
- /* ELOG not supported by firmware */
+ /* Dump not supported by firmware */
if (!opal_check_token(OPAL_DUMP_READ))
return;
diff --git a/arch/powerpc/platforms/powernv/opal-elog.c b/arch/powerpc/platforms/powernv/opal-elog.c
index 62ef7ad995da..554fdd7f88b8 100644
--- a/arch/powerpc/platforms/powernv/opal-elog.c
+++ b/arch/powerpc/platforms/powernv/opal-elog.c
@@ -72,9 +72,14 @@ static ssize_t elog_ack_store(struct elog_obj *elog_obj,
const char *buf,
size_t count)
{
- opal_send_ack_elog(elog_obj->id);
- sysfs_remove_file_self(&elog_obj->kobj, &attr->attr);
- kobject_put(&elog_obj->kobj);
+ /*
+ * Try to self remove this attribute. If we are successful,
+ * delete the kobject itself.
+ */
+ if (sysfs_remove_file_self(&elog_obj->kobj, &attr->attr)) {
+ opal_send_ack_elog(elog_obj->id);
+ kobject_put(&elog_obj->kobj);
+ }
return count;
}
@@ -139,11 +144,12 @@ static struct attribute *elog_default_attrs[] = {
&ack_attribute.attr,
NULL,
};
+ATTRIBUTE_GROUPS(elog_default);
static struct kobj_type elog_ktype = {
.sysfs_ops = &elog_sysfs_ops,
.release = &elog_release,
- .default_attrs = elog_default_attrs,
+ .default_groups = elog_default_groups,
};
/* Maximum size of a single log on FSP is 16KB */
@@ -166,8 +172,8 @@ static ssize_t raw_attr_read(struct file *filep, struct kobject *kobj,
opal_rc = opal_read_elog(__pa(elog->buffer),
elog->size, elog->id);
if (opal_rc != OPAL_SUCCESS) {
- pr_err("ELOG: log read failed for log-id=%llx\n",
- elog->id);
+ pr_err_ratelimited("ELOG: log read failed for log-id=%llx\n",
+ elog->id);
kfree(elog->buffer);
elog->buffer = NULL;
return -EIO;
@@ -179,14 +185,14 @@ static ssize_t raw_attr_read(struct file *filep, struct kobject *kobj,
return count;
}
-static struct elog_obj *create_elog_obj(uint64_t id, size_t size, uint64_t type)
+static void create_elog_obj(uint64_t id, size_t size, uint64_t type)
{
struct elog_obj *elog;
int rc;
elog = kzalloc(sizeof(*elog), GFP_KERNEL);
if (!elog)
- return NULL;
+ return;
elog->kobj.kset = elog_kset;
@@ -219,18 +225,37 @@ static struct elog_obj *create_elog_obj(uint64_t id, size_t size, uint64_t type)
rc = kobject_add(&elog->kobj, NULL, "0x%llx", id);
if (rc) {
kobject_put(&elog->kobj);
- return NULL;
+ return;
}
+ /*
+ * As soon as the sysfs file for this elog is created/activated there is
+ * a chance the opal_errd daemon (or any userspace) might read and
+ * acknowledge the elog before kobject_uevent() is called. If that
+ * happens then there is a potential race between
+ * elog_ack_store->kobject_put() and kobject_uevent() which leads to a
+ * use-after-free of a kernfs object resulting in a kernel crash.
+ *
+ * To avoid that, we need to take a reference on behalf of the bin file,
+ * so that our reference remains valid while we call kobject_uevent().
+ * We then drop our reference before exiting the function, leaving the
+ * bin file to drop the last reference (if it hasn't already).
+ */
+
+ /* Take a reference for the bin file */
+ kobject_get(&elog->kobj);
rc = sysfs_create_bin_file(&elog->kobj, &elog->raw_attr);
- if (rc) {
+ if (rc == 0) {
+ kobject_uevent(&elog->kobj, KOBJ_ADD);
+ } else {
+ /* Drop the reference taken for the bin file */
kobject_put(&elog->kobj);
- return NULL;
}
- kobject_uevent(&elog->kobj, KOBJ_ADD);
+ /* Drop our reference */
+ kobject_put(&elog->kobj);
- return elog;
+ return;
}
static irqreturn_t elog_event(int irq, void *data)
diff --git a/arch/powerpc/platforms/powernv/opal-fadump.c b/arch/powerpc/platforms/powernv/opal-fadump.c
index d361d37d975f..964f464b1b0e 100644
--- a/arch/powerpc/platforms/powernv/opal-fadump.c
+++ b/arch/powerpc/platforms/powernv/opal-fadump.c
@@ -60,7 +60,7 @@ void __init opal_fadump_dt_scan(struct fw_dump *fadump_conf, u64 node)
addr = be64_to_cpu(addr);
pr_debug("Kernel metadata addr: %llx\n", addr);
opal_fdm_active = (void *)addr;
- if (opal_fdm_active->registered_regions == 0)
+ if (be16_to_cpu(opal_fdm_active->registered_regions) == 0)
return;
ret = opal_mpipl_query_tag(OPAL_MPIPL_TAG_BOOT_MEM, &addr);
@@ -95,24 +95,24 @@ static int opal_fadump_unregister(struct fw_dump *fadump_conf);
static void opal_fadump_update_config(struct fw_dump *fadump_conf,
const struct opal_fadump_mem_struct *fdm)
{
- pr_debug("Boot memory regions count: %d\n", fdm->region_cnt);
+ pr_debug("Boot memory regions count: %d\n", be16_to_cpu(fdm->region_cnt));
/*
* The destination address of the first boot memory region is the
* destination address of boot memory regions.
*/
- fadump_conf->boot_mem_dest_addr = fdm->rgn[0].dest;
+ fadump_conf->boot_mem_dest_addr = be64_to_cpu(fdm->rgn[0].dest);
pr_debug("Destination address of boot memory regions: %#016llx\n",
fadump_conf->boot_mem_dest_addr);
- fadump_conf->fadumphdr_addr = fdm->fadumphdr_addr;
+ fadump_conf->fadumphdr_addr = be64_to_cpu(fdm->fadumphdr_addr);
}
/*
* This function is called in the capture kernel to get configuration details
* from metadata setup by the first kernel.
*/
-static void opal_fadump_get_config(struct fw_dump *fadump_conf,
+static void __init opal_fadump_get_config(struct fw_dump *fadump_conf,
const struct opal_fadump_mem_struct *fdm)
{
unsigned long base, size, last_end, hole_size;
@@ -126,9 +126,9 @@ static void opal_fadump_get_config(struct fw_dump *fadump_conf,
fadump_conf->boot_memory_size = 0;
pr_debug("Boot memory regions:\n");
- for (i = 0; i < fdm->region_cnt; i++) {
- base = fdm->rgn[i].src;
- size = fdm->rgn[i].size;
+ for (i = 0; i < be16_to_cpu(fdm->region_cnt); i++) {
+ base = be64_to_cpu(fdm->rgn[i].src);
+ size = be64_to_cpu(fdm->rgn[i].size);
pr_debug("\t[%03d] base: 0x%lx, size: 0x%lx\n", i, base, size);
fadump_conf->boot_mem_addr[i] = base;
@@ -143,7 +143,7 @@ static void opal_fadump_get_config(struct fw_dump *fadump_conf,
* Start address of reserve dump area (permanent reservation) for
* re-registering FADump after dump capture.
*/
- fadump_conf->reserve_dump_area_start = fdm->rgn[0].dest;
+ fadump_conf->reserve_dump_area_start = be64_to_cpu(fdm->rgn[0].dest);
/*
* Rarely, but it can so happen that system crashes before all
@@ -155,13 +155,14 @@ static void opal_fadump_get_config(struct fw_dump *fadump_conf,
* Hope the memory that could not be preserved only has pages
* that are usually filtered out while saving the vmcore.
*/
- if (fdm->region_cnt > fdm->registered_regions) {
+ if (be16_to_cpu(fdm->region_cnt) > be16_to_cpu(fdm->registered_regions)) {
pr_warn("Not all memory regions were saved!!!\n");
pr_warn(" Unsaved memory regions:\n");
- i = fdm->registered_regions;
- while (i < fdm->region_cnt) {
+ i = be16_to_cpu(fdm->registered_regions);
+ while (i < be16_to_cpu(fdm->region_cnt)) {
pr_warn("\t[%03d] base: 0x%llx, size: 0x%llx\n",
- i, fdm->rgn[i].src, fdm->rgn[i].size);
+ i, be64_to_cpu(fdm->rgn[i].src),
+ be64_to_cpu(fdm->rgn[i].size));
i++;
}
@@ -170,7 +171,7 @@ static void opal_fadump_get_config(struct fw_dump *fadump_conf,
}
fadump_conf->boot_mem_top = (fadump_conf->boot_memory_size + hole_size);
- fadump_conf->boot_mem_regs_cnt = fdm->region_cnt;
+ fadump_conf->boot_mem_regs_cnt = be16_to_cpu(fdm->region_cnt);
opal_fadump_update_config(fadump_conf, fdm);
}
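
Most of this file's changes annotate the firmware-shared metadata as big-endian (__be16/__be64) and convert at every access. A small sketch of the convention, using a hypothetical structure rather than the real opal_fadump_mem_struct:

	#include <linux/types.h>
	#include <asm/byteorder.h>

	/* Layout shared with firmware: fields stay big-endian in memory */
	struct demo_fw_region {
		__be64 src;
		__be64 size;
	} __packed;

	struct demo_fw_meta {
		__be16 region_cnt;
		struct demo_fw_region rgn[4];
	} __packed;

	static u64 demo_total_size(const struct demo_fw_meta *m)
	{
		u16 cnt = be16_to_cpu(m->region_cnt);	/* convert on every read */
		u64 total = 0;
		int i;

		for (i = 0; i < cnt; i++)
			total += be64_to_cpu(m->rgn[i].size);
		return total;
	}

	static void demo_add_region(struct demo_fw_meta *m, u64 src, u64 size)
	{
		u16 cnt = be16_to_cpu(m->region_cnt);

		m->rgn[cnt].src  = cpu_to_be64(src);	/* convert on every write */
		m->rgn[cnt].size = cpu_to_be64(size);
		m->region_cnt = cpu_to_be16(cnt + 1);
	}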
@@ -178,35 +179,38 @@ static void opal_fadump_get_config(struct fw_dump *fadump_conf,
static void opal_fadump_init_metadata(struct opal_fadump_mem_struct *fdm)
{
fdm->version = OPAL_FADUMP_VERSION;
- fdm->region_cnt = 0;
- fdm->registered_regions = 0;
- fdm->fadumphdr_addr = 0;
+ fdm->region_cnt = cpu_to_be16(0);
+ fdm->registered_regions = cpu_to_be16(0);
+ fdm->fadumphdr_addr = cpu_to_be64(0);
}
static u64 opal_fadump_init_mem_struct(struct fw_dump *fadump_conf)
{
u64 addr = fadump_conf->reserve_dump_area_start;
+ u16 reg_cnt;
int i;
opal_fdm = __va(fadump_conf->kernel_metadata);
opal_fadump_init_metadata(opal_fdm);
/* Boot memory regions */
+ reg_cnt = be16_to_cpu(opal_fdm->region_cnt);
for (i = 0; i < fadump_conf->boot_mem_regs_cnt; i++) {
- opal_fdm->rgn[i].src = fadump_conf->boot_mem_addr[i];
- opal_fdm->rgn[i].dest = addr;
- opal_fdm->rgn[i].size = fadump_conf->boot_mem_sz[i];
+ opal_fdm->rgn[i].src = cpu_to_be64(fadump_conf->boot_mem_addr[i]);
+ opal_fdm->rgn[i].dest = cpu_to_be64(addr);
+ opal_fdm->rgn[i].size = cpu_to_be64(fadump_conf->boot_mem_sz[i]);
- opal_fdm->region_cnt++;
+ reg_cnt++;
addr += fadump_conf->boot_mem_sz[i];
}
+ opal_fdm->region_cnt = cpu_to_be16(reg_cnt);
/*
- * Kernel metadata is passed to f/w and retrieved in capture kerenl.
+ * Kernel metadata is passed to f/w and retrieved in capture kernel.
* So, use it to save fadump header address instead of calculating it.
*/
- opal_fdm->fadumphdr_addr = (opal_fdm->rgn[0].dest +
- fadump_conf->boot_memory_size);
+ opal_fdm->fadumphdr_addr = cpu_to_be64(be64_to_cpu(opal_fdm->rgn[0].dest) +
+ fadump_conf->boot_memory_size);
opal_fadump_update_config(fadump_conf, opal_fdm);
@@ -269,18 +273,21 @@ static u64 opal_fadump_get_bootmem_min(void)
static int opal_fadump_register(struct fw_dump *fadump_conf)
{
s64 rc = OPAL_PARAMETER;
+ u16 registered_regs;
int i, err = -EIO;
- for (i = 0; i < opal_fdm->region_cnt; i++) {
+ registered_regs = be16_to_cpu(opal_fdm->registered_regions);
+ for (i = 0; i < be16_to_cpu(opal_fdm->region_cnt); i++) {
rc = opal_mpipl_update(OPAL_MPIPL_ADD_RANGE,
- opal_fdm->rgn[i].src,
- opal_fdm->rgn[i].dest,
- opal_fdm->rgn[i].size);
+ be64_to_cpu(opal_fdm->rgn[i].src),
+ be64_to_cpu(opal_fdm->rgn[i].dest),
+ be64_to_cpu(opal_fdm->rgn[i].size));
if (rc != OPAL_SUCCESS)
break;
- opal_fdm->registered_regions++;
+ registered_regs++;
}
+ opal_fdm->registered_regions = cpu_to_be16(registered_regs);
switch (rc) {
case OPAL_SUCCESS:
@@ -291,7 +298,8 @@ static int opal_fadump_register(struct fw_dump *fadump_conf)
case OPAL_RESOURCE:
/* If MAX regions limit in f/w is hit, warn and proceed. */
pr_warn("%d regions could not be registered for MPIPL as MAX limit is reached!\n",
- (opal_fdm->region_cnt - opal_fdm->registered_regions));
+ (be16_to_cpu(opal_fdm->region_cnt) -
+ be16_to_cpu(opal_fdm->registered_regions)));
fadump_conf->dump_registered = 1;
err = 0;
break;
@@ -312,7 +320,7 @@ static int opal_fadump_register(struct fw_dump *fadump_conf)
* If some regions were registered before OPAL_MPIPL_ADD_RANGE
* OPAL call failed, unregister all regions.
*/
- if ((err < 0) && (opal_fdm->registered_regions > 0))
+ if ((err < 0) && (be16_to_cpu(opal_fdm->registered_regions) > 0))
opal_fadump_unregister(fadump_conf);
return err;
@@ -328,7 +336,7 @@ static int opal_fadump_unregister(struct fw_dump *fadump_conf)
return -EIO;
}
- opal_fdm->registered_regions = 0;
+ opal_fdm->registered_regions = cpu_to_be16(0);
fadump_conf->dump_registered = 0;
return 0;
}
@@ -563,25 +571,26 @@ static void opal_fadump_region_show(struct fw_dump *fadump_conf,
else
fdm_ptr = opal_fdm;
- for (i = 0; i < fdm_ptr->region_cnt; i++) {
+ for (i = 0; i < be16_to_cpu(fdm_ptr->region_cnt); i++) {
/*
* Only regions that are registered for MPIPL
* would have dump data.
*/
if ((fadump_conf->dump_active) &&
- (i < fdm_ptr->registered_regions))
- dumped_bytes = fdm_ptr->rgn[i].size;
+ (i < be16_to_cpu(fdm_ptr->registered_regions)))
+ dumped_bytes = be64_to_cpu(fdm_ptr->rgn[i].size);
seq_printf(m, "DUMP: Src: %#016llx, Dest: %#016llx, ",
- fdm_ptr->rgn[i].src, fdm_ptr->rgn[i].dest);
+ be64_to_cpu(fdm_ptr->rgn[i].src),
+ be64_to_cpu(fdm_ptr->rgn[i].dest));
seq_printf(m, "Size: %#llx, Dumped: %#llx bytes\n",
- fdm_ptr->rgn[i].size, dumped_bytes);
+ be64_to_cpu(fdm_ptr->rgn[i].size), dumped_bytes);
}
- /* Dump is active. Show reserved area start address. */
+ /* Dump is active. Show preserved area start address. */
if (fadump_conf->dump_active) {
- seq_printf(m, "\nMemory above %#016lx is reserved for saving crash dump\n",
- fadump_conf->reserve_dump_area_start);
+ seq_printf(m, "\nMemory above %#016llx is reserved for saving crash dump\n",
+ fadump_conf->boot_mem_top);
}
}
@@ -624,6 +633,7 @@ void __init opal_fadump_dt_scan(struct fw_dump *fadump_conf, u64 node)
{
const __be32 *prop;
unsigned long dn;
+ __be64 be_addr;
u64 addr = 0;
int i, len;
s64 ret;
@@ -671,7 +681,7 @@ void __init opal_fadump_dt_scan(struct fw_dump *fadump_conf, u64 node)
* Firmware supports 32-bit field for size. Align it to PAGE_SIZE
* and request firmware to copy multiple kernel boot memory regions.
*/
- fadump_conf->max_copy_size = _ALIGN_DOWN(U32_MAX, PAGE_SIZE);
+ fadump_conf->max_copy_size = ALIGN_DOWN(U32_MAX, PAGE_SIZE);
/*
* Check if dump has been initiated on last reboot.
@@ -680,13 +690,13 @@ void __init opal_fadump_dt_scan(struct fw_dump *fadump_conf, u64 node)
if (!prop)
return;
- ret = opal_mpipl_query_tag(OPAL_MPIPL_TAG_KERNEL, &addr);
- if ((ret != OPAL_SUCCESS) || !addr) {
+ ret = opal_mpipl_query_tag(OPAL_MPIPL_TAG_KERNEL, &be_addr);
+ if ((ret != OPAL_SUCCESS) || !be_addr) {
pr_err("Failed to get Kernel metadata (%lld)\n", ret);
return;
}
- addr = be64_to_cpu(addr);
+ addr = be64_to_cpu(be_addr);
pr_debug("Kernel metadata addr: %llx\n", addr);
opal_fdm_active = __va(addr);
@@ -697,14 +707,14 @@ void __init opal_fadump_dt_scan(struct fw_dump *fadump_conf, u64 node)
}
/* Kernel regions not registered with f/w for MPIPL */
- if (opal_fdm_active->registered_regions == 0) {
+ if (be16_to_cpu(opal_fdm_active->registered_regions) == 0) {
opal_fdm_active = NULL;
return;
}
- ret = opal_mpipl_query_tag(OPAL_MPIPL_TAG_CPU, &addr);
- if (addr) {
- addr = be64_to_cpu(addr);
+ ret = opal_mpipl_query_tag(OPAL_MPIPL_TAG_CPU, &be_addr);
+ if (be_addr) {
+ addr = be64_to_cpu(be_addr);
pr_debug("CPU metadata addr: %llx\n", addr);
opal_cpu_metadata = __va(addr);
}
diff --git a/arch/powerpc/platforms/powernv/opal-fadump.h b/arch/powerpc/platforms/powernv/opal-fadump.h
index f1e9ecf548c5..3f715efb0aa6 100644
--- a/arch/powerpc/platforms/powernv/opal-fadump.h
+++ b/arch/powerpc/platforms/powernv/opal-fadump.h
@@ -31,14 +31,14 @@
* OPAL FADump kernel metadata
*
* The address of this structure will be registered with f/w for retrieving
- * and processing during crash dump.
+ * in the capture kernel to process the crash dump.
*/
struct opal_fadump_mem_struct {
u8 version;
u8 reserved[3];
- u16 region_cnt; /* number of regions */
- u16 registered_regions; /* Regions registered for MPIPL */
- u64 fadumphdr_addr;
+ __be16 region_cnt; /* number of regions */
+ __be16 registered_regions; /* Regions registered for MPIPL */
+ __be64 fadumphdr_addr;
struct opal_mpipl_region rgn[FADUMP_MAX_MEM_REGS];
} __packed;
@@ -135,7 +135,7 @@ static inline void opal_fadump_read_regs(char *bufp, unsigned int regs_cnt,
for (i = 0; i < regs_cnt; i++, bufp += reg_entry_size) {
reg_entry = (struct hdat_fadump_reg_entry *)bufp;
val = (cpu_endian ? be64_to_cpu(reg_entry->reg_val) :
- reg_entry->reg_val);
+ (u64)(reg_entry->reg_val));
opal_fadump_set_regval_regnum(regs,
be32_to_cpu(reg_entry->reg_type),
be32_to_cpu(reg_entry->reg_num),
diff --git a/arch/powerpc/platforms/powernv/opal-flash.c b/arch/powerpc/platforms/powernv/opal-flash.c
index 7e7d38b17420..d5ea04e8e4c5 100644
--- a/arch/powerpc/platforms/powernv/opal-flash.c
+++ b/arch/powerpc/platforms/powernv/opal-flash.c
@@ -512,7 +512,7 @@ static struct attribute *image_op_attrs[] = {
NULL /* need to NULL terminate the list of attributes */
};
-static struct attribute_group image_op_attr_group = {
+static const struct attribute_group image_op_attr_group = {
.attrs = image_op_attrs,
};
@@ -520,6 +520,10 @@ void __init opal_flash_update_init(void)
{
int ret;
+ /* Firmware update is not supported by firmware */
+ if (!opal_check_token(OPAL_FLASH_VALIDATE))
+ return;
+
/* Allocate validate image buffer */
validate_flash_data.buf = kzalloc(VALIDATE_BUF_SIZE, GFP_KERNEL);
if (!validate_flash_data.buf) {
diff --git a/arch/powerpc/platforms/powernv/opal-hmi.c b/arch/powerpc/platforms/powernv/opal-hmi.c
index 3e1f064a18db..f0c1830deb51 100644
--- a/arch/powerpc/platforms/powernv/opal-hmi.c
+++ b/arch/powerpc/platforms/powernv/opal-hmi.c
@@ -213,6 +213,8 @@ static void print_hmi_event_info(struct OpalHMIEvent *hmi_evt)
"A hypervisor resource error occurred",
"CAPP recovery process is in progress",
};
+ static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
+ DEFAULT_RATELIMIT_BURST);
/* Print things out */
if (hmi_evt->version < OpalHMIEvt_V1) {
@@ -240,19 +242,22 @@ static void print_hmi_event_info(struct OpalHMIEvent *hmi_evt)
break;
}
- printk("%s%s Hypervisor Maintenance interrupt [%s]\n",
- level, sevstr,
- hmi_evt->disposition == OpalHMI_DISPOSITION_RECOVERED ?
- "Recovered" : "Not recovered");
- error_info = hmi_evt->type < ARRAY_SIZE(hmi_error_types) ?
- hmi_error_types[hmi_evt->type]
- : "Unknown";
- printk("%s Error detail: %s\n", level, error_info);
- printk("%s HMER: %016llx\n", level, be64_to_cpu(hmi_evt->hmer));
- if ((hmi_evt->type == OpalHMI_ERROR_TFAC) ||
- (hmi_evt->type == OpalHMI_ERROR_TFMR_PARITY))
- printk("%s TFMR: %016llx\n", level,
+ if (hmi_evt->severity != OpalHMI_SEV_NO_ERROR || __ratelimit(&rs)) {
+ printk("%s%s Hypervisor Maintenance interrupt [%s]\n",
+ level, sevstr,
+ hmi_evt->disposition == OpalHMI_DISPOSITION_RECOVERED ?
+ "Recovered" : "Not recovered");
+ error_info = hmi_evt->type < ARRAY_SIZE(hmi_error_types) ?
+ hmi_error_types[hmi_evt->type]
+ : "Unknown";
+ printk("%s Error detail: %s\n", level, error_info);
+ printk("%s HMER: %016llx\n", level,
+ be64_to_cpu(hmi_evt->hmer));
+ if ((hmi_evt->type == OpalHMI_ERROR_TFAC) ||
+ (hmi_evt->type == OpalHMI_ERROR_TFMR_PARITY))
+ printk("%s TFMR: %016llx\n", level,
be64_to_cpu(hmi_evt->tfmr));
+ }
if (hmi_evt->version < OpalHMIEvt_V2)
return;
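
The HMI change wraps the informational printks in a static ratelimit state so benign, recoverable events cannot flood the log, while anything with a real severity is always printed. A sketch of the primitive (function and message are invented):

	#include <linux/ratelimit.h>
	#include <linux/printk.h>

	static void demo_report(bool serious)
	{
		static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
					      DEFAULT_RATELIMIT_BURST);

		/* Serious events bypass the limiter; routine chatter is throttled. */
		if (serious || __ratelimit(&rs))
			pr_info("demo: event observed (serious=%d)\n", serious);
	}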
diff --git a/arch/powerpc/platforms/powernv/opal-imc.c b/arch/powerpc/platforms/powernv/opal-imc.c
index 000b350d4060..348a8cdaecd6 100644
--- a/arch/powerpc/platforms/powernv/opal-imc.c
+++ b/arch/powerpc/platforms/powernv/opal-imc.c
@@ -13,11 +13,11 @@
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/crash_dump.h>
+#include <linux/debugfs.h>
#include <asm/opal.h>
#include <asm/io.h>
#include <asm/imc-pmu.h>
#include <asm/cputhreads.h>
-#include <asm/debugfs.h>
static struct dentry *imc_debugfs_parent;
@@ -35,11 +35,10 @@ static int imc_mem_set(void *data, u64 val)
}
DEFINE_DEBUGFS_ATTRIBUTE(fops_imc_x64, imc_mem_get, imc_mem_set, "0x%016llx\n");
-static struct dentry *imc_debugfs_create_x64(const char *name, umode_t mode,
- struct dentry *parent, u64 *value)
+static void imc_debugfs_create_x64(const char *name, umode_t mode,
+ struct dentry *parent, u64 *value)
{
- return debugfs_create_file_unsafe(name, mode, parent,
- value, &fops_imc_x64);
+ debugfs_create_file_unsafe(name, mode, parent, value, &fops_imc_x64);
}
/*
@@ -57,10 +56,7 @@ static void export_imc_mode_and_cmd(struct device_node *node,
u32 cb_offset;
struct imc_mem_info *ptr = pmu_ptr->mem_info;
- imc_debugfs_parent = debugfs_create_dir("imc", powerpc_debugfs_root);
-
- if (!imc_debugfs_parent)
- return;
+ imc_debugfs_parent = debugfs_create_dir("imc", arch_debugfs_dir);
if (of_property_read_u32(node, "cb_offset", &cb_offset))
cb_offset = IMC_CNTL_BLK_OFFSET;
@@ -69,21 +65,15 @@ static void export_imc_mode_and_cmd(struct device_node *node,
loc = (u64)(ptr->vbase) + cb_offset;
imc_mode_addr = (u64 *)(loc + IMC_CNTL_BLK_MODE_OFFSET);
sprintf(mode, "imc_mode_%d", (u32)(ptr->id));
- if (!imc_debugfs_create_x64(mode, 0600, imc_debugfs_parent,
- imc_mode_addr))
- goto err;
+ imc_debugfs_create_x64(mode, 0600, imc_debugfs_parent,
+ imc_mode_addr);
imc_cmd_addr = (u64 *)(loc + IMC_CNTL_BLK_CMD_OFFSET);
sprintf(cmd, "imc_cmd_%d", (u32)(ptr->id));
- if (!imc_debugfs_create_x64(cmd, 0600, imc_debugfs_parent,
- imc_cmd_addr))
- goto err;
+ imc_debugfs_create_x64(cmd, 0600, imc_debugfs_parent,
+ imc_cmd_addr);
ptr++;
}
- return;
-
-err:
- debugfs_remove_recursive(imc_debugfs_parent);
}
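
The IMC hunks follow the current debugfs convention: the creation helpers handle errors internally and callers are not expected to check return values, which is why the wrapper becomes void and the err/debugfs_remove_recursive() path disappears. A short sketch of DEFINE_DEBUGFS_ATTRIBUTE with a hypothetical counter:

	#include <linux/debugfs.h>

	static u64 demo_counter;

	static int demo_get(void *data, u64 *val)
	{
		*val = *(u64 *)data;
		return 0;
	}

	static int demo_set(void *data, u64 val)
	{
		*(u64 *)data = val;
		return 0;
	}
	DEFINE_DEBUGFS_ATTRIBUTE(fops_demo, demo_get, demo_set, "0x%016llx\n");

	static void demo_debugfs_init(struct dentry *parent)
	{
		/* No return-value checks: debugfs failures are intentionally ignored */
		debugfs_create_file_unsafe("demo_counter", 0600, parent,
					   &demo_counter, &fops_demo);
	}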
/*
@@ -196,7 +186,7 @@ static void disable_nest_pmu_counters(void)
int nid, cpu;
const struct cpumask *l_cpumask;
- get_online_cpus();
+ cpus_read_lock();
for_each_node_with_cpus(nid) {
l_cpumask = cpumask_of_node(nid);
cpu = cpumask_first_and(l_cpumask, cpu_online_mask);
@@ -205,25 +195,25 @@ static void disable_nest_pmu_counters(void)
opal_imc_counters_stop(OPAL_IMC_COUNTERS_NEST,
get_hard_smp_processor_id(cpu));
}
- put_online_cpus();
+ cpus_read_unlock();
}
static void disable_core_pmu_counters(void)
{
- cpumask_t cores_map;
int cpu, rc;
- get_online_cpus();
+ cpus_read_lock();
/* Disable the IMC Core functions */
- cores_map = cpu_online_cores_map();
- for_each_cpu(cpu, &cores_map) {
+ for_each_online_cpu(cpu) {
+ if (cpu_first_thread_sibling(cpu) != cpu)
+ continue;
rc = opal_imc_counters_stop(OPAL_IMC_COUNTERS_CORE,
get_hard_smp_processor_id(cpu));
if (rc)
pr_err("%s: Failed to stop Core (cpu = %d)\n",
- __FUNCTION__, cpu);
+ __func__, cpu);
}
- put_online_cpus();
+ cpus_read_unlock();
}
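
get_online_cpus()/put_online_cpus() are replaced by their current names cpus_read_lock()/cpus_read_unlock(), and the per-core loop now walks online CPUs while skipping secondary SMT threads instead of building a core mask. A sketch of that loop shape; the per-core callback is hypothetical:

	#include <linux/cpu.h>
	#include <asm/cputhreads.h>

	static void demo_stop_counter_on_core(int cpu) { /* per-core action */ }

	static void demo_for_each_core(void)
	{
		int cpu;

		cpus_read_lock();	/* hold off CPU hotplug while walking the mask */
		for_each_online_cpu(cpu) {
			/* act once per core: skip non-primary SMT siblings */
			if (cpu_first_thread_sibling(cpu) != cpu)
				continue;
			demo_stop_counter_on_core(cpu);
		}
		cpus_read_unlock();
	}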
int get_max_nest_dev(void)
@@ -278,14 +268,7 @@ static int opal_imc_counters_probe(struct platform_device *pdev)
domain = IMC_DOMAIN_THREAD;
break;
case IMC_TYPE_TRACE:
- /*
- * FIXME. Using trace_imc events to monitor application
- * or KVM thread performance can cause a checkstop
- * (system crash).
- * Disable it for now.
- */
- pr_info_once("IMC: disabling trace_imc PMU\n");
- domain = -1;
+ domain = IMC_DOMAIN_TRACE;
break;
default:
pr_warn("IMC Unknown Device type \n");
diff --git a/arch/powerpc/platforms/powernv/opal-irqchip.c b/arch/powerpc/platforms/powernv/opal-irqchip.c
index c164419e254d..d55652b5f6fa 100644
--- a/arch/powerpc/platforms/powernv/opal-irqchip.c
+++ b/arch/powerpc/platforms/powernv/opal-irqchip.c
@@ -46,18 +46,15 @@ void opal_handle_events(void)
e = READ_ONCE(last_outstanding_events) & opal_event_irqchip.mask;
again:
while (e) {
- int virq, hwirq;
+ int hwirq;
hwirq = fls64(e) - 1;
e &= ~BIT_ULL(hwirq);
local_irq_disable();
- virq = irq_find_mapping(opal_event_irqchip.domain, hwirq);
- if (virq) {
- irq_enter();
- generic_handle_irq(virq);
- irq_exit();
- }
+ irq_enter();
+ generic_handle_domain_irq(opal_event_irqchip.domain, hwirq);
+ irq_exit();
local_irq_enable();
cond_resched();
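
generic_handle_domain_irq() folds the irq_find_mapping() + generic_handle_irq() pair into one call that resolves the hwirq within the domain and invokes its handler. A sketch of a demux loop in that style; the domain and event word are placeholders:

	#include <linux/irq.h>
	#include <linux/irqdomain.h>
	#include <linux/hardirq.h>
	#include <linux/bitops.h>

	static void demo_handle_events(struct irq_domain *domain, u64 events)
	{
		while (events) {
			int hwirq = fls64(events) - 1;	/* highest pending bit */

			events &= ~BIT_ULL(hwirq);

			local_irq_disable();
			irq_enter();
			/* Maps hwirq to its Linux interrupt and runs the handler */
			generic_handle_domain_irq(domain, hwirq);
			irq_exit();
			local_irq_enable();
		}
	}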
diff --git a/arch/powerpc/platforms/powernv/opal-lpc.c b/arch/powerpc/platforms/powernv/opal-lpc.c
index 608569082ba0..d129d6d45a50 100644
--- a/arch/powerpc/platforms/powernv/opal-lpc.c
+++ b/arch/powerpc/platforms/powernv/opal-lpc.c
@@ -10,13 +10,13 @@
#include <linux/bug.h>
#include <linux/io.h>
#include <linux/slab.h>
+#include <linux/debugfs.h>
#include <asm/machdep.h>
#include <asm/firmware.h>
#include <asm/opal.h>
#include <asm/prom.h>
#include <linux/uaccess.h>
-#include <asm/debugfs.h>
#include <asm/isa-bridge.h>
static int opal_lpc_chip_id = -1;
@@ -197,7 +197,7 @@ static ssize_t lpc_debug_read(struct file *filp, char __user *ubuf,
/*
* Select access size based on count and alignment and
- * access type. IO and MEM only support byte acceses,
+ * access type. IO and MEM only support byte accesses,
* FW supports all 3.
*/
len = 1;
@@ -371,7 +371,7 @@ static int opal_lpc_init_debugfs(void)
if (opal_lpc_chip_id < 0)
return -ENODEV;
- root = debugfs_create_dir("lpc", powerpc_debugfs_root);
+ root = debugfs_create_dir("lpc", arch_debugfs_dir);
rc |= opal_lpc_debugfs_create_type(root, "io", OPAL_LPC_IO);
rc |= opal_lpc_debugfs_create_type(root, "mem", OPAL_LPC_MEM);
@@ -396,6 +396,7 @@ void __init opal_lpc_init(void)
if (!of_get_property(np, "primary", NULL))
continue;
opal_lpc_chip_id = of_get_ibm_chip_id(np);
+ of_node_put(np);
break;
}
if (opal_lpc_chip_id < 0)
diff --git a/arch/powerpc/platforms/powernv/opal-memory-errors.c b/arch/powerpc/platforms/powernv/opal-memory-errors.c
index 1e8e17df9ce8..a1754a28265d 100644
--- a/arch/powerpc/platforms/powernv/opal-memory-errors.c
+++ b/arch/powerpc/platforms/powernv/opal-memory-errors.c
@@ -82,7 +82,7 @@ static DECLARE_WORK(mem_error_work, mem_error_handler);
/*
* opal_memory_err_event - notifier handler that queues up the opal message
- * to be preocessed later.
+ * to be processed later.
*/
static int opal_memory_err_event(struct notifier_block *nb,
unsigned long msg_type, void *msg)
diff --git a/arch/powerpc/platforms/powernv/opal-msglog.c b/arch/powerpc/platforms/powernv/opal-msglog.c
index d26da19a611f..22d6efe17b0d 100644
--- a/arch/powerpc/platforms/powernv/opal-msglog.c
+++ b/arch/powerpc/platforms/powernv/opal-msglog.c
@@ -12,6 +12,8 @@
#include <linux/types.h>
#include <asm/barrier.h>
+#include "powernv.h"
+
/* OPAL in-memory console. Defined in OPAL source at core/console.c */
struct memcons {
__be64 magic;
@@ -103,7 +105,7 @@ static struct bin_attribute opal_msglog_attr = {
.read = opal_msglog_read
};
-struct memcons *memcons_init(struct device_node *node, const char *mc_prop_name)
+struct memcons *__init memcons_init(struct device_node *node, const char *mc_prop_name)
{
u64 mcaddr;
struct memcons *mc;
@@ -131,7 +133,7 @@ out_err:
return NULL;
}
-u32 memcons_get_size(struct memcons *mc)
+u32 __init memcons_get_size(struct memcons *mc)
{
return be32_to_cpu(mc->ibuf_size) + be32_to_cpu(mc->obuf_size);
}
diff --git a/arch/powerpc/platforms/powernv/opal-power.c b/arch/powerpc/platforms/powernv/opal-power.c
index 2a3717fc24ea..db99ffcb7b82 100644
--- a/arch/powerpc/platforms/powernv/opal-power.c
+++ b/arch/powerpc/platforms/powernv/opal-power.c
@@ -53,7 +53,7 @@ static bool detect_epow(void)
}
/* Check for existing EPOW, DPO events */
-static bool poweroff_pending(void)
+static bool __init poweroff_pending(void)
{
int rc;
__be64 opal_dpo_timeout;
diff --git a/arch/powerpc/platforms/powernv/opal-powercap.c b/arch/powerpc/platforms/powernv/opal-powercap.c
index c16d44f6f1d1..7bfe4cbeb35a 100644
--- a/arch/powerpc/platforms/powernv/opal-powercap.c
+++ b/arch/powerpc/platforms/powernv/opal-powercap.c
@@ -129,7 +129,7 @@ out_token:
return ret;
}
-static void powercap_add_attr(int handle, const char *name,
+static void __init powercap_add_attr(int handle, const char *name,
struct powercap_attr *attr)
{
attr->handle = handle;
@@ -153,7 +153,7 @@ void __init opal_powercap_init(void)
pcaps = kcalloc(of_get_child_count(powercap), sizeof(*pcaps),
GFP_KERNEL);
if (!pcaps)
- return;
+ goto out_put_powercap;
powercap_kobj = kobject_create_and_add("powercap", opal_kobj);
if (!powercap_kobj) {
@@ -226,6 +226,7 @@ void __init opal_powercap_init(void)
}
i++;
}
+ of_node_put(powercap);
return;
@@ -236,6 +237,9 @@ out_pcaps_pattrs:
kfree(pcaps[i].pg.name);
}
kobject_put(powercap_kobj);
+ of_node_put(node);
out_pcaps:
kfree(pcaps);
+out_put_powercap:
+ of_node_put(powercap);
}
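
Several of these init paths gain of_node_put() calls: of_find_node_by_path() and friends return a device node with an elevated refcount, and every exit path, including early errors, has to drop it. A minimal sketch of the idiom; the node path is invented:

	#include <linux/of.h>

	static int demo_count_children(void)
	{
		struct device_node *parent, *child;
		int count = 0;

		parent = of_find_node_by_path("/ibm,opal/demo");	/* takes a reference */
		if (!parent)
			return -ENODEV;

		for_each_child_of_node(parent, child)	/* manages child references itself */
			count++;

		of_node_put(parent);	/* drop the reference on every return path */
		return count;
	}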
diff --git a/arch/powerpc/platforms/powernv/opal-prd.c b/arch/powerpc/platforms/powernv/opal-prd.c
index 45f4223a790f..113bdb151f68 100644
--- a/arch/powerpc/platforms/powernv/opal-prd.c
+++ b/arch/powerpc/platforms/powernv/opal-prd.c
@@ -24,7 +24,7 @@
#include <linux/uaccess.h>
-/**
+/*
* The msg member must be at the end of the struct, as it's followed by the
* message data.
*/
@@ -105,7 +105,6 @@ static int opal_prd_mmap(struct file *file, struct vm_area_struct *vma)
{
size_t addr, size;
pgprot_t page_prot;
- int rc;
pr_devel("opal_prd_mmap(0x%016lx, 0x%016lx, 0x%lx, 0x%lx)\n",
vma->vm_start, vma->vm_end, vma->vm_pgoff,
@@ -121,10 +120,8 @@ static int opal_prd_mmap(struct file *file, struct vm_area_struct *vma)
page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
size, vma->vm_page_prot);
- rc = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, size,
+ return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, size,
page_prot);
-
- return rc;
}
static bool opal_msg_queue_empty(void)
@@ -372,6 +369,12 @@ static struct notifier_block opal_prd_event_nb = {
.priority = 0,
};
+static struct notifier_block opal_prd_event_nb2 = {
+ .notifier_call = opal_prd_msg_notifier,
+ .next = NULL,
+ .priority = 0,
+};
+
static int opal_prd_probe(struct platform_device *pdev)
{
int rc;
@@ -393,9 +396,10 @@ static int opal_prd_probe(struct platform_device *pdev)
return rc;
}
- rc = opal_message_notifier_register(OPAL_MSG_PRD2, &opal_prd_event_nb);
+ rc = opal_message_notifier_register(OPAL_MSG_PRD2, &opal_prd_event_nb2);
if (rc) {
pr_err("Couldn't register PRD2 event notifier\n");
+ opal_message_notifier_unregister(OPAL_MSG_PRD, &opal_prd_event_nb);
return rc;
}
@@ -404,6 +408,8 @@ static int opal_prd_probe(struct platform_device *pdev)
pr_err("failed to register miscdev\n");
opal_message_notifier_unregister(OPAL_MSG_PRD,
&opal_prd_event_nb);
+ opal_message_notifier_unregister(OPAL_MSG_PRD2,
+ &opal_prd_event_nb2);
return rc;
}
@@ -414,6 +420,7 @@ static int opal_prd_remove(struct platform_device *pdev)
{
misc_deregister(&opal_prd_dev);
opal_message_notifier_unregister(OPAL_MSG_PRD, &opal_prd_event_nb);
+ opal_message_notifier_unregister(OPAL_MSG_PRD2, &opal_prd_event_nb2);
return 0;
}
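
The PRD changes register a second, separate notifier block for OPAL_MSG_PRD2 (one notifier_block cannot sit on two chains at once) and unwind earlier registrations whenever a later step fails. The probe/remove ordering, condensed into a sketch that reuses the blocks named in the hunks but is not the patch's code:

	/* Sketch of the register/unwind ordering used above. */
	static int demo_probe(void)
	{
		int rc;

		rc = opal_message_notifier_register(OPAL_MSG_PRD, &opal_prd_event_nb);
		if (rc)
			return rc;

		rc = opal_message_notifier_register(OPAL_MSG_PRD2, &opal_prd_event_nb2);
		if (rc) {
			/* undo the first registration before bailing out */
			opal_message_notifier_unregister(OPAL_MSG_PRD, &opal_prd_event_nb);
			return rc;
		}
		return 0;
	}

	static void demo_remove(void)
	{
		opal_message_notifier_unregister(OPAL_MSG_PRD, &opal_prd_event_nb);
		opal_message_notifier_unregister(OPAL_MSG_PRD2, &opal_prd_event_nb2);
	}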
diff --git a/arch/powerpc/platforms/powernv/opal-psr.c b/arch/powerpc/platforms/powernv/opal-psr.c
index 69d7e75950d1..6441e17b6996 100644
--- a/arch/powerpc/platforms/powernv/opal-psr.c
+++ b/arch/powerpc/platforms/powernv/opal-psr.c
@@ -135,7 +135,7 @@ void __init opal_psr_init(void)
psr_attrs = kcalloc(of_get_child_count(psr), sizeof(*psr_attrs),
GFP_KERNEL);
if (!psr_attrs)
- return;
+ goto out_put_psr;
psr_kobj = kobject_create_and_add("psr", opal_kobj);
if (!psr_kobj) {
@@ -162,10 +162,14 @@ void __init opal_psr_init(void)
}
i++;
}
+ of_node_put(psr);
return;
out_kobj:
+ of_node_put(node);
kobject_put(psr_kobj);
out:
kfree(psr_attrs);
+out_put_psr:
+ of_node_put(psr);
}
diff --git a/arch/powerpc/platforms/powernv/opal-rtc.c b/arch/powerpc/platforms/powernv/opal-rtc.c
index 44d7dacb33a2..a9bcf9217e64 100644
--- a/arch/powerpc/platforms/powernv/opal-rtc.c
+++ b/arch/powerpc/platforms/powernv/opal-rtc.c
@@ -18,7 +18,7 @@
#include <asm/firmware.h>
#include <asm/machdep.h>
-static void opal_to_tm(u32 y_m_d, u64 h_m_s_ms, struct rtc_time *tm)
+static void __init opal_to_tm(u32 y_m_d, u64 h_m_s_ms, struct rtc_time *tm)
{
tm->tm_year = ((bcd2bin(y_m_d >> 24) * 100) +
bcd2bin((y_m_d >> 16) & 0xff)) - 1900;
diff --git a/arch/powerpc/platforms/powernv/opal-sensor-groups.c b/arch/powerpc/platforms/powernv/opal-sensor-groups.c
index f8ae1fb0c102..9944376b115c 100644
--- a/arch/powerpc/platforms/powernv/opal-sensor-groups.c
+++ b/arch/powerpc/platforms/powernv/opal-sensor-groups.c
@@ -126,7 +126,7 @@ static void add_attr(int handle, struct sg_attr *attr, int index)
attr->attr.store = ops_info[index].store;
}
-static int add_attr_group(const __be32 *ops, int len, struct sensor_group *sg,
+static int __init add_attr_group(const __be32 *ops, int len, struct sensor_group *sg,
u32 handle)
{
int i, j;
@@ -144,7 +144,7 @@ static int add_attr_group(const __be32 *ops, int len, struct sensor_group *sg,
return sysfs_create_group(sg_kobj, &sg->sg);
}
-static int get_nr_attrs(const __be32 *ops, int len)
+static int __init get_nr_attrs(const __be32 *ops, int len)
{
int i, j;
int nr_attrs = 0;
@@ -170,7 +170,7 @@ void __init opal_sensor_groups_init(void)
sgs = kcalloc(of_get_child_count(sg), sizeof(*sgs), GFP_KERNEL);
if (!sgs)
- return;
+ goto out_sg_put;
sg_kobj = kobject_create_and_add("sensor_groups", opal_kobj);
if (!sg_kobj) {
@@ -222,6 +222,7 @@ void __init opal_sensor_groups_init(void)
}
i++;
}
+ of_node_put(sg);
return;
@@ -231,6 +232,9 @@ out_sgs_sgattrs:
kfree(sgs[i].sg.attrs);
}
kobject_put(sg_kobj);
+ of_node_put(node);
out_sgs:
kfree(sgs);
+out_sg_put:
+ of_node_put(sg);
}
diff --git a/arch/powerpc/platforms/powernv/opal-tracepoints.c b/arch/powerpc/platforms/powernv/opal-tracepoints.c
index f16a43540e30..91b36541b9e5 100644
--- a/arch/powerpc/platforms/powernv/opal-tracepoints.c
+++ b/arch/powerpc/platforms/powernv/opal-tracepoints.c
@@ -2,7 +2,6 @@
#include <linux/percpu.h>
#include <linux/jump_label.h>
#include <asm/trace.h>
-#include <asm/asm-prototypes.h>
#ifdef CONFIG_JUMP_LABEL
struct static_key opal_tracepoint_key = STATIC_KEY_INIT;
diff --git a/arch/powerpc/platforms/powernv/opal-wrappers.S b/arch/powerpc/platforms/powernv/opal-wrappers.S
index e5acc33b3b20..0ed95f753416 100644
--- a/arch/powerpc/platforms/powernv/opal-wrappers.S
+++ b/arch/powerpc/platforms/powernv/opal-wrappers.S
@@ -57,7 +57,7 @@ opal_return:
.long 0xa64b7b7d /* mthsrr1 r11 */
.long 0x2402004c /* hrfid */
#endif
- ld r2,PACATOC(r13)
+ LOAD_PACA_TOC()
ld r0,PPC_LR_STKOFF(r1)
mtlr r0
blr
diff --git a/arch/powerpc/platforms/powernv/opal-xscom.c b/arch/powerpc/platforms/powernv/opal-xscom.c
index fd510d961b8c..6b4eed2ef4fa 100644
--- a/arch/powerpc/platforms/powernv/opal-xscom.c
+++ b/arch/powerpc/platforms/powernv/opal-xscom.c
@@ -14,11 +14,11 @@
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
+#include <linux/debugfs.h>
#include <asm/machdep.h>
#include <asm/firmware.h>
#include <asm/opal.h>
-#include <asm/debugfs.h>
#include <asm/prom.h>
static u64 opal_scom_unmangle(u64 addr)
@@ -189,7 +189,7 @@ static int scom_debug_init(void)
if (!firmware_has_feature(FW_FEATURE_OPAL))
return 0;
- root = debugfs_create_dir("scom", powerpc_debugfs_root);
+ root = debugfs_create_dir("scom", arch_debugfs_dir);
if (!root)
return -1;
diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c
index 2b3dfd0b6cdd..cdf3838f08d3 100644
--- a/arch/powerpc/platforms/powernv/opal.c
+++ b/arch/powerpc/platforms/powernv/opal.c
@@ -73,7 +73,7 @@ static struct task_struct *kopald_tsk;
static struct opal_msg *opal_msg;
static u32 opal_msg_size __ro_after_init;
-void opal_configure_cores(void)
+void __init opal_configure_cores(void)
{
u64 reinit_flags = 0;
@@ -588,7 +588,7 @@ static int opal_recover_mce(struct pt_regs *regs,
{
int recovered = 0;
- if (!(regs->msr & MSR_RI)) {
+ if (regs_is_unrecoverable(regs)) {
/* If MSR_RI isn't set, we cannot recover */
pr_err("Machine check interrupt unrecoverable: MSR(RI=0)\n");
recovered = 0;
@@ -624,7 +624,7 @@ static int opal_recover_mce(struct pt_regs *regs,
*/
recovered = 0;
} else {
- die("Machine check", regs, SIGBUS);
+ die_mce("Machine check", regs, SIGBUS);
recovered = 1;
}
}
@@ -731,7 +731,7 @@ int opal_hmi_exception_early2(struct pt_regs *regs)
return 1;
}
-/* HMI exception handler called in virtual mode during check_irq_replay. */
+/* HMI exception handler called in virtual mode when irqs are next enabled. */
int opal_handle_hmi_exception(struct pt_regs *regs)
{
/*
@@ -773,13 +773,13 @@ bool opal_mce_check_early_recovery(struct pt_regs *regs)
* Setup regs->nip to rfi into fixup address.
*/
if (recover_addr)
- regs->nip = recover_addr;
+ regs_set_return_ip(regs, recover_addr);
out:
return !!recover_addr;
}
-static int opal_sysfs_init(void)
+static int __init opal_sysfs_init(void)
{
opal_kobj = kobject_create_and_add("opal", firmware_kobj);
if (!opal_kobj) {
@@ -811,6 +811,10 @@ static int opal_add_one_export(struct kobject *parent, const char *export_name,
goto out;
attr = kzalloc(sizeof(*attr), GFP_KERNEL);
+ if (!attr) {
+ rc = -ENOMEM;
+ goto out;
+ }
name = kstrdup(export_name, GFP_KERNEL);
if (!name) {
rc = -ENOMEM;
@@ -888,6 +892,7 @@ static void opal_export_attrs(void)
kobj = kobject_create_and_add("exports", opal_kobj);
if (!kobj) {
pr_warn("kobject_create_and_add() of exports failed\n");
+ of_node_put(np);
return;
}
@@ -933,7 +938,7 @@ static void __init opal_dump_region_init(void)
"rc = %d\n", rc);
}
-static void opal_pdev_init(const char *compatible)
+static void __init opal_pdev_init(const char *compatible)
{
struct device_node *np;
@@ -948,6 +953,8 @@ static void __init opal_imc_init_dev(void)
np = of_find_compatible_node(NULL, NULL, IMC_DTB_COMPAT);
if (np)
of_platform_device_create(np, NULL, NULL);
+
+ of_node_put(np);
}
static int kopald(void *unused)
@@ -977,7 +984,7 @@ void opal_wake_poller(void)
wake_up_process(kopald_tsk);
}
-static void opal_init_heartbeat(void)
+static void __init opal_init_heartbeat(void)
{
/* Old firwmware, we assume the HVC heartbeat is sufficient */
if (of_property_read_u32(opal_node, "ibm,heartbeat-ms",
diff --git a/arch/powerpc/platforms/powernv/pci-cxl.c b/arch/powerpc/platforms/powernv/pci-cxl.c
index 8c739c94ed28..7e419de71db8 100644
--- a/arch/powerpc/platforms/powernv/pci-cxl.c
+++ b/arch/powerpc/platforms/powernv/pci-cxl.c
@@ -4,6 +4,7 @@
*/
#include <linux/module.h>
+#include <misc/cxl-base.h>
#include <asm/pnv-pci.h>
#include <asm/opal.h>
@@ -150,25 +151,3 @@ int pnv_cxl_ioda_msi_setup(struct pci_dev *dev, unsigned int hwirq,
return 0;
}
EXPORT_SYMBOL(pnv_cxl_ioda_msi_setup);
-
-#if IS_MODULE(CONFIG_CXL)
-static inline int get_cxl_module(void)
-{
- struct module *cxl_module;
-
- mutex_lock(&module_mutex);
-
- cxl_module = find_module("cxl");
- if (cxl_module)
- __module_get(cxl_module);
-
- mutex_unlock(&module_mutex);
-
- if (!cxl_module)
- return -ENODEV;
-
- return 0;
-}
-#else
-static inline int get_cxl_module(void) { return 0; }
-#endif
diff --git a/arch/powerpc/platforms/powernv/pci-ioda-tce.c b/arch/powerpc/platforms/powernv/pci-ioda-tce.c
index 5dc6847d5f4c..e96324502db0 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda-tce.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda-tce.c
@@ -17,6 +17,34 @@
#include <asm/tce.h>
#include "pci.h"
+unsigned long pnv_ioda_parse_tce_sizes(struct pnv_phb *phb)
+{
+ struct pci_controller *hose = phb->hose;
+ struct device_node *dn = hose->dn;
+ unsigned long mask = 0;
+ int i, rc, count;
+ u32 val;
+
+ count = of_property_count_u32_elems(dn, "ibm,supported-tce-sizes");
+ if (count <= 0) {
+ mask = SZ_4K | SZ_64K;
+ /* Add 16M for POWER8 by default */
+ if (cpu_has_feature(CPU_FTR_ARCH_207S) &&
+ !cpu_has_feature(CPU_FTR_ARCH_300))
+ mask |= SZ_16M | SZ_256M;
+ return mask;
+ }
+
+ for (i = 0; i < count; i++) {
+ rc = of_property_read_u32_index(dn, "ibm,supported-tce-sizes",
+ i, &val);
+ if (rc == 0)
+ mask |= 1ULL << val;
+ }
+
+ return mask;
+}
+
void pnv_pci_setup_iommu_table(struct iommu_table *tbl,
void *tce_mem, u64 tce_size,
u64 dma_offset, unsigned int page_shift)
@@ -117,8 +145,7 @@ int pnv_tce_build(struct iommu_table *tbl, long index, long npages,
#ifdef CONFIG_IOMMU_API
int pnv_tce_xchg(struct iommu_table *tbl, long index,
- unsigned long *hpa, enum dma_data_direction *direction,
- bool alloc)
+ unsigned long *hpa, enum dma_data_direction *direction)
{
u64 proto_tce = iommu_direction_to_tce_perm(*direction);
unsigned long newtce = *hpa | proto_tce, oldtce;
@@ -136,9 +163,9 @@ int pnv_tce_xchg(struct iommu_table *tbl, long index,
}
if (!ptce) {
- ptce = pnv_tce(tbl, false, idx, alloc);
+ ptce = pnv_tce(tbl, false, idx, true);
if (!ptce)
- return alloc ? H_HARDWARE : H_TOO_HARD;
+ return -ENOMEM;
}
if (newtce & TCE_PCI_WRITE)
@@ -352,6 +379,8 @@ void pnv_pci_unlink_table_and_group(struct iommu_table *tbl,
/* Remove link to a group from table's list of attached groups */
found = false;
+
+ rcu_read_lock();
list_for_each_entry_rcu(tgl, &tbl->it_group_list, next) {
if (tgl->table_group == table_group) {
list_del_rcu(&tgl->next);
@@ -360,6 +389,8 @@ void pnv_pci_unlink_table_and_group(struct iommu_table *tbl,
break;
}
}
+ rcu_read_unlock();
+
if (WARN_ON(!found))
return;
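
pnv_pci_unlink_table_and_group() walks an RCU-protected list, so the traversal needs either the update-side lock or rcu_read_lock(); the hunk adds the latter around list_for_each_entry_rcu(). A generic reader-side sketch with hypothetical structures:

	#include <linux/rculist.h>

	struct demo_link {
		struct list_head next;
		int id;
	};

	static LIST_HEAD(demo_list);

	static bool demo_find(int id)
	{
		struct demo_link *l;
		bool found = false;

		rcu_read_lock();	/* list_for_each_entry_rcu() must run under RCU protection */
		list_for_each_entry_rcu(l, &demo_list, next) {
			if (l->id == id) {
				found = true;
				break;
			}
		}
		rcu_read_unlock();

		return found;
	}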
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index 22c22cd7bd82..5c144c05cbfd 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -20,10 +20,12 @@
#include <linux/iommu.h>
#include <linux/rculist.h>
#include <linux/sizes.h>
+#include <linux/debugfs.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <asm/sections.h>
#include <asm/io.h>
-#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/msi_bitmap.h>
@@ -32,10 +34,10 @@
#include <asm/iommu.h>
#include <asm/tce.h>
#include <asm/xics.h>
-#include <asm/debugfs.h>
#include <asm/firmware.h>
#include <asm/pnv-pci.h>
#include <asm/mmzone.h>
+#include <asm/xive.h>
#include <misc/cxl-base.h>
@@ -47,10 +49,10 @@
#define PNV_IODA1_M64_SEGS 8 /* Segments per M64 BAR */
#define PNV_IODA1_DMA32_SEGSIZE 0x10000000
-static const char * const pnv_phb_names[] = { "IODA1", "IODA2", "NPU_NVLINK",
- "NPU_OCAPI" };
+static const char * const pnv_phb_names[] = { "IODA1", "IODA2", "NPU_OCAPI" };
static void pnv_pci_ioda2_set_bypass(struct pnv_ioda_pe *pe, bool enable);
+static void pnv_pci_configure_bus(struct pci_bus *bus);
void pe_level_printk(const struct pnv_ioda_pe *pe, const char *level,
const char *fmt, ...)
@@ -65,7 +67,7 @@ void pe_level_printk(const struct pnv_ioda_pe *pe, const char *level,
vaf.va = &args;
if (pe->flags & PNV_IODA_PE_DEV)
- strlcpy(pfix, dev_name(&pe->pdev->dev), sizeof(pfix));
+ strscpy(pfix, dev_name(&pe->pdev->dev), sizeof(pfix));
else if (pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL))
sprintf(pfix, "%04x:%02x ",
pci_domain_nr(pe->pbus), pe->pbus->number);
@@ -114,32 +116,13 @@ static int __init pci_reset_phbs_setup(char *str)
early_param("ppc_pci_reset_phbs", pci_reset_phbs_setup);
-static inline bool pnv_pci_is_m64(struct pnv_phb *phb, struct resource *r)
-{
- /*
- * WARNING: We cannot rely on the resource flags. The Linux PCI
- * allocation code sometimes decides to put a 64-bit prefetchable
- * BAR in the 32-bit window, so we have to compare the addresses.
- *
- * For simplicity we only test resource start.
- */
- return (r->start >= phb->ioda.m64_base &&
- r->start < (phb->ioda.m64_base + phb->ioda.m64_size));
-}
-
-static inline bool pnv_pci_is_m64_flags(unsigned long resource_flags)
-{
- unsigned long flags = (IORESOURCE_MEM_64 | IORESOURCE_PREFETCH);
-
- return (resource_flags & flags) == flags;
-}
-
static struct pnv_ioda_pe *pnv_ioda_init_pe(struct pnv_phb *phb, int pe_no)
{
s64 rc;
phb->ioda.pe_array[pe_no].phb = phb;
phb->ioda.pe_array[pe_no].pe_number = pe_no;
+ phb->ioda.pe_array[pe_no].dma_setup_done = false;
/*
* Clear the PE frozen state as it might be put into frozen state
@@ -163,35 +146,58 @@ static void pnv_ioda_reserve_pe(struct pnv_phb *phb, int pe_no)
return;
}
+ mutex_lock(&phb->ioda.pe_alloc_mutex);
if (test_and_set_bit(pe_no, phb->ioda.pe_alloc))
pr_debug("%s: PE %x was reserved on PHB#%x\n",
__func__, pe_no, phb->hose->global_number);
+ mutex_unlock(&phb->ioda.pe_alloc_mutex);
pnv_ioda_init_pe(phb, pe_no);
}
-static struct pnv_ioda_pe *pnv_ioda_alloc_pe(struct pnv_phb *phb)
+struct pnv_ioda_pe *pnv_ioda_alloc_pe(struct pnv_phb *phb, int count)
{
- long pe;
+ struct pnv_ioda_pe *ret = NULL;
+ int run = 0, pe, i;
+
+ mutex_lock(&phb->ioda.pe_alloc_mutex);
+ /* scan backwards for a run of @count cleared bits */
for (pe = phb->ioda.total_pe_num - 1; pe >= 0; pe--) {
- if (!test_and_set_bit(pe, phb->ioda.pe_alloc))
- return pnv_ioda_init_pe(phb, pe);
+ if (test_bit(pe, phb->ioda.pe_alloc)) {
+ run = 0;
+ continue;
+ }
+
+ run++;
+ if (run == count)
+ break;
+ }
+ if (run != count)
+ goto out;
+
+ for (i = pe; i < pe + count; i++) {
+ set_bit(i, phb->ioda.pe_alloc);
+ pnv_ioda_init_pe(phb, i);
}
+ ret = &phb->ioda.pe_array[pe];
- return NULL;
+out:
+ mutex_unlock(&phb->ioda.pe_alloc_mutex);
+ return ret;
}
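
pnv_ioda_alloc_pe() now takes a count and scans the PE bitmap backwards for a contiguous run of free entries under pe_alloc_mutex, so callers can get consecutive PE numbers. The run-finding idea in isolation, sketched with invented names and a fixed-size bitmap:

	#include <linux/bitmap.h>
	#include <linux/mutex.h>

	static DEFINE_MUTEX(demo_lock);
	static DECLARE_BITMAP(demo_alloc, 256);

	/* Return the first index of a run of @count free slots, or -1. */
	static int demo_alloc_run(int total, int count)
	{
		int i, idx, run = 0, start = -1;

		mutex_lock(&demo_lock);
		for (idx = total - 1; idx >= 0; idx--) {
			if (test_bit(idx, demo_alloc)) {
				run = 0;
				continue;
			}
			if (++run == count) {	/* idx .. idx+count-1 are all free */
				start = idx;
				break;
			}
		}
		if (start >= 0)
			for (i = start; i < start + count; i++)
				set_bit(i, demo_alloc);
		mutex_unlock(&demo_lock);

		return start;
	}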
-static void pnv_ioda_free_pe(struct pnv_ioda_pe *pe)
+void pnv_ioda_free_pe(struct pnv_ioda_pe *pe)
{
struct pnv_phb *phb = pe->phb;
unsigned int pe_num = pe->pe_number;
WARN_ON(pe->pdev);
- WARN_ON(pe->npucomp); /* NPUs for nvlink are not supposed to be freed */
- kfree(pe->npucomp);
memset(pe, 0, sizeof(struct pnv_ioda_pe));
+
+ mutex_lock(&phb->ioda.pe_alloc_mutex);
clear_bit(pe_num, phb->ioda.pe_alloc);
+ mutex_unlock(&phb->ioda.pe_alloc_mutex);
}
/* The default M64 BAR is shared by all PEs */
@@ -251,8 +257,7 @@ fail:
static void pnv_ioda_reserve_dev_m64_pe(struct pci_dev *pdev,
unsigned long *pe_bitmap)
{
- struct pci_controller *hose = pci_bus_to_host(pdev->bus);
- struct pnv_phb *phb = hose->private_data;
+ struct pnv_phb *phb = pci_bus_to_pnvhb(pdev->bus);
struct resource *r;
resource_size_t base, sgsz, start, end;
int segno, i;
@@ -264,8 +269,8 @@ static void pnv_ioda_reserve_dev_m64_pe(struct pci_dev *pdev,
if (!r->parent || !pnv_pci_is_m64(phb, r))
continue;
- start = _ALIGN_DOWN(r->start - base, sgsz);
- end = _ALIGN_UP(r->end - base, sgsz);
+ start = ALIGN_DOWN(r->start - base, sgsz);
+ end = ALIGN(r->end - base, sgsz);
for (segno = start / sgsz; segno < end / sgsz; segno++) {
if (pe_bitmap)
set_bit(segno, pe_bitmap);
@@ -310,6 +315,28 @@ static int pnv_ioda1_init_m64(struct pnv_phb *phb)
}
}
+ for (index = 0; index < phb->ioda.total_pe_num; index++) {
+ int64_t rc;
+
+ /*
+ * P7IOC supports M64DT, which helps mapping M64 segment
+ * to one particular PE#. However, PHB3 has fixed mapping
+ * between M64 segment and PE#. In order to have same logic
+ * for P7IOC and PHB3, we enforce fixed mapping between M64
+ * segment and PE# on P7IOC.
+ */
+ rc = opal_pci_map_pe_mmio_window(phb->opal_id,
+ index, OPAL_M64_WINDOW_TYPE,
+ index / PNV_IODA1_M64_SEGS,
+ index % PNV_IODA1_M64_SEGS);
+ if (rc != OPAL_SUCCESS) {
+ pr_warn("%s: Error %lld mapping M64 for PHB#%x-PE#%x\n",
+ __func__, rc, phb->hose->global_number,
+ index);
+ goto fail;
+ }
+ }
+
/*
* Exclude the segments for reserved and root bus PE, which
* are first or last two PEs.
@@ -350,8 +377,7 @@ static void pnv_ioda_reserve_m64_pe(struct pci_bus *bus,
static struct pnv_ioda_pe *pnv_ioda_pick_m64_pe(struct pci_bus *bus, bool all)
{
- struct pci_controller *hose = pci_bus_to_host(bus);
- struct pnv_phb *phb = hose->private_data;
+ struct pnv_phb *phb = pci_bus_to_pnvhb(bus);
struct pnv_ioda_pe *master_pe, *pe;
unsigned long size, *pe_alloc;
int i;
@@ -361,7 +387,7 @@ static struct pnv_ioda_pe *pnv_ioda_pick_m64_pe(struct pci_bus *bus, bool all)
return NULL;
/* Allocate bitmap */
- size = _ALIGN_UP(phb->ioda.total_pe_num / 8, sizeof(unsigned long));
+ size = ALIGN(phb->ioda.total_pe_num / 8, sizeof(unsigned long));
pe_alloc = kzalloc(size, GFP_KERNEL);
if (!pe_alloc) {
pr_warn("%s: Out of memory !\n",
@@ -402,26 +428,6 @@ static struct pnv_ioda_pe *pnv_ioda_pick_m64_pe(struct pci_bus *bus, bool all)
pe->master = master_pe;
list_add_tail(&pe->list, &master_pe->slaves);
}
-
- /*
- * P7IOC supports M64DT, which helps mapping M64 segment
- * to one particular PE#. However, PHB3 has fixed mapping
- * between M64 segment and PE#. In order to have same logic
- * for P7IOC and PHB3, we enforce fixed mapping between M64
- * segment and PE# on P7IOC.
- */
- if (phb->type == PNV_PHB_IODA1) {
- int64_t rc;
-
- rc = opal_pci_map_pe_mmio_window(phb->opal_id,
- pe->pe_number, OPAL_M64_WINDOW_TYPE,
- pe->pe_number / PNV_IODA1_M64_SEGS,
- pe->pe_number % PNV_IODA1_M64_SEGS);
- if (rc != OPAL_SUCCESS)
- pr_warn("%s: Error %lld mapping M64 for PHB#%x-PE#%x\n",
- __func__, rc, phb->hose->global_number,
- pe->pe_number);
- }
}
kfree(pe_alloc);
@@ -660,10 +666,19 @@ static int pnv_ioda_get_pe_state(struct pnv_phb *phb, int pe_no)
return state;
}
+struct pnv_ioda_pe *pnv_pci_bdfn_to_pe(struct pnv_phb *phb, u16 bdfn)
+{
+ int pe_number = phb->ioda.pe_rmap[bdfn];
+
+ if (pe_number == IODA_INVALID_PE)
+ return NULL;
+
+ return &phb->ioda.pe_array[pe_number];
+}
+
struct pnv_ioda_pe *pnv_ioda_get_pe(struct pci_dev *dev)
{
- struct pci_controller *hose = pci_bus_to_host(dev->bus);
- struct pnv_phb *phb = hose->private_data;
+ struct pnv_phb *phb = pci_bus_to_pnvhb(dev->bus);
struct pci_dn *pdn = pci_get_pdn(dev);
if (!pdn)
@@ -805,7 +820,7 @@ static void pnv_ioda_unset_peltv(struct pnv_phb *phb,
pe_warn(pe, "OPAL error %lld remove self from PELTV\n", rc);
}
-static int pnv_ioda_deconfigure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe)
+int pnv_ioda_deconfigure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe)
{
struct pci_dev *parent;
uint8_t bcomp, dcomp, fcomp;
@@ -859,7 +874,7 @@ static int pnv_ioda_deconfigure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe)
* Release from all parents PELT-V. NPUs don't have a PELTV
* table
*/
- if (phb->type != PNV_PHB_NPU_NVLINK && phb->type != PNV_PHB_NPU_OCAPI)
+ if (phb->type != PNV_PHB_NPU_OCAPI)
pnv_ioda_unset_peltv(phb, pe, parent);
rc = opal_pci_set_pe(phb->opal_id, pe->pe_number, pe->rid,
@@ -876,9 +891,8 @@ static int pnv_ioda_deconfigure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe)
return 0;
}
-static int pnv_ioda_configure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe)
+int pnv_ioda_configure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe)
{
- struct pci_dev *parent;
uint8_t bcomp, dcomp, fcomp;
long rc, rid_end, rid;
@@ -888,7 +902,6 @@ static int pnv_ioda_configure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe)
dcomp = OPAL_IGNORE_RID_DEVICE_NUMBER;
fcomp = OPAL_IGNORE_RID_FUNCTION_NUMBER;
- parent = pe->pbus->self;
if (pe->flags & PNV_IODA_PE_BUS_ALL)
count = resource_size(&pe->pbus->busn_res);
else
@@ -909,12 +922,6 @@ static int pnv_ioda_configure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe)
}
rid_end = pe->rid + (count << 8);
} else {
-#ifdef CONFIG_PCI_IOV
- if (pe->flags & PNV_IODA_PE_VF)
- parent = pe->parent_dev;
- else
-#endif /* CONFIG_PCI_IOV */
- parent = pe->pdev->bus->self;
bcomp = OpalPciBusAll;
dcomp = OPAL_COMPARE_RID_DEVICE_NUMBER;
fcomp = OPAL_COMPARE_RID_FUNCTION_NUMBER;
@@ -938,7 +945,7 @@ static int pnv_ioda_configure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe)
* Configure PELTV. NPUs don't have a PELTV table so skip
* configuration on them.
*/
- if (phb->type != PNV_PHB_NPU_NVLINK && phb->type != PNV_PHB_NPU_OCAPI)
+ if (phb->type != PNV_PHB_NPU_OCAPI)
pnv_ioda_set_peltv(phb, pe, true);
/* Setup reverse map */
@@ -971,95 +978,9 @@ out:
return 0;
}
-#ifdef CONFIG_PCI_IOV
-static int pnv_pci_vf_resource_shift(struct pci_dev *dev, int offset)
-{
- struct pci_dn *pdn = pci_get_pdn(dev);
- int i;
- struct resource *res, res2;
- resource_size_t size;
- u16 num_vfs;
-
- if (!dev->is_physfn)
- return -EINVAL;
-
- /*
- * "offset" is in VFs. The M64 windows are sized so that when they
- * are segmented, each segment is the same size as the IOV BAR.
- * Each segment is in a separate PE, and the high order bits of the
- * address are the PE number. Therefore, each VF's BAR is in a
- * separate PE, and changing the IOV BAR start address changes the
- * range of PEs the VFs are in.
- */
- num_vfs = pdn->num_vfs;
- for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
- res = &dev->resource[i + PCI_IOV_RESOURCES];
- if (!res->flags || !res->parent)
- continue;
-
- /*
- * The actual IOV BAR range is determined by the start address
- * and the actual size for num_vfs VFs BAR. This check is to
- * make sure that after shifting, the range will not overlap
- * with another device.
- */
- size = pci_iov_resource_size(dev, i + PCI_IOV_RESOURCES);
- res2.flags = res->flags;
- res2.start = res->start + (size * offset);
- res2.end = res2.start + (size * num_vfs) - 1;
-
- if (res2.end > res->end) {
- dev_err(&dev->dev, "VF BAR%d: %pR would extend past %pR (trying to enable %d VFs shifted by %d)\n",
- i, &res2, res, num_vfs, offset);
- return -EBUSY;
- }
- }
-
- /*
- * Since M64 BAR shares segments among all possible 256 PEs,
- * we have to shift the beginning of PF IOV BAR to make it start from
- * the segment which belongs to the PE number assigned to the first VF.
- * This creates a "hole" in the /proc/iomem which could be used for
- * allocating other resources so we reserve this area below and
- * release when IOV is released.
- */
- for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
- res = &dev->resource[i + PCI_IOV_RESOURCES];
- if (!res->flags || !res->parent)
- continue;
-
- size = pci_iov_resource_size(dev, i + PCI_IOV_RESOURCES);
- res2 = *res;
- res->start += size * offset;
-
- dev_info(&dev->dev, "VF BAR%d: %pR shifted to %pR (%sabling %d VFs shifted by %d)\n",
- i, &res2, res, (offset > 0) ? "En" : "Dis",
- num_vfs, offset);
-
- if (offset < 0) {
- devm_release_resource(&dev->dev, &pdn->holes[i]);
- memset(&pdn->holes[i], 0, sizeof(pdn->holes[i]));
- }
-
- pci_update_resource(dev, i + PCI_IOV_RESOURCES);
-
- if (offset > 0) {
- pdn->holes[i].start = res2.start;
- pdn->holes[i].end = res2.start + size * offset - 1;
- pdn->holes[i].flags = IORESOURCE_BUS;
- pdn->holes[i].name = "pnv_iov_reserved";
- devm_request_resource(&dev->dev, res->parent,
- &pdn->holes[i]);
- }
- }
- return 0;
-}
-#endif /* CONFIG_PCI_IOV */
-
static struct pnv_ioda_pe *pnv_ioda_setup_dev_PE(struct pci_dev *dev)
{
- struct pci_controller *hose = pci_bus_to_host(dev->bus);
- struct pnv_phb *phb = hose->private_data;
+ struct pnv_phb *phb = pci_bus_to_pnvhb(dev->bus);
struct pci_dn *pdn = pci_get_pdn(dev);
struct pnv_ioda_pe *pe;
@@ -1071,7 +992,7 @@ static struct pnv_ioda_pe *pnv_ioda_setup_dev_PE(struct pci_dev *dev)
if (pdn->pe_number != IODA_INVALID_PE)
return NULL;
- pe = pnv_ioda_alloc_pe(phb);
+ pe = pnv_ioda_alloc_pe(phb, 1);
if (!pe) {
pr_warn("%s: Not enough PE# available, disabling device\n",
pci_name(dev));
@@ -1080,8 +1001,7 @@ static struct pnv_ioda_pe *pnv_ioda_setup_dev_PE(struct pci_dev *dev)
/* NOTE: We don't get a reference for the pointer in the PE
* data structure, both the device and PE structures should be
- * destroyed at the same time. However, removing nvlink
- * devices will need some work.
+ * destroyed at the same time.
*
* At some point we want to remove the PDN completely anyways
*/
@@ -1110,34 +1030,6 @@ static struct pnv_ioda_pe *pnv_ioda_setup_dev_PE(struct pci_dev *dev)
return pe;
}
-static void pnv_ioda_setup_same_PE(struct pci_bus *bus, struct pnv_ioda_pe *pe)
-{
- struct pci_dev *dev;
-
- list_for_each_entry(dev, &bus->devices, bus_list) {
- struct pci_dn *pdn = pci_get_pdn(dev);
-
- if (pdn == NULL) {
- pr_warn("%s: No device node associated with device !\n",
- pci_name(dev));
- continue;
- }
-
- /*
- * In partial hotplug case, the PCI device might be still
- * associated with the PE and needn't attach it to the PE
- * again.
- */
- if (pdn->pe_number != IODA_INVALID_PE)
- continue;
-
- pe->device_count++;
- pdn->pe_number = pe->pe_number;
- if ((pe->flags & PNV_IODA_PE_BUS_ALL) && dev->subordinate)
- pnv_ioda_setup_same_PE(dev->subordinate, pe);
- }
-}
-
/*
* There're 2 types of PCI bus sensitive PEs: One that is compromised of
* single PCI bus. Another one that contains the primary PCI bus and its
@@ -1146,8 +1038,7 @@ static void pnv_ioda_setup_same_PE(struct pci_bus *bus, struct pnv_ioda_pe *pe)
*/
static struct pnv_ioda_pe *pnv_ioda_setup_bus_PE(struct pci_bus *bus, bool all)
{
- struct pci_controller *hose = pci_bus_to_host(bus);
- struct pnv_phb *phb = hose->private_data;
+ struct pnv_phb *phb = pci_bus_to_pnvhb(bus);
struct pnv_ioda_pe *pe = NULL;
unsigned int pe_num;
@@ -1156,15 +1047,13 @@ static struct pnv_ioda_pe *pnv_ioda_setup_bus_PE(struct pci_bus *bus, bool all)
* We should reuse it instead of allocating a new one.
*/
pe_num = phb->ioda.pe_rmap[bus->number << 8];
- if (pe_num != IODA_INVALID_PE) {
+ if (WARN_ON(pe_num != IODA_INVALID_PE)) {
pe = &phb->ioda.pe_array[pe_num];
- pnv_ioda_setup_same_PE(bus, pe);
return NULL;
}
/* PE number for root bus should have been reserved */
- if (pci_is_root_bus(bus) &&
- phb->ioda.root_pe_idx != IODA_INVALID_PE)
+ if (pci_is_root_bus(bus))
pe = &phb->ioda.pe_array[phb->ioda.root_pe_idx];
/* Check if PE is determined by M64 */
@@ -1173,7 +1062,7 @@ static struct pnv_ioda_pe *pnv_ioda_setup_bus_PE(struct pci_bus *bus, bool all)
/* The PE number isn't pinned by M64 */
if (!pe)
- pe = pnv_ioda_alloc_pe(phb);
+ pe = pnv_ioda_alloc_pe(phb, 1);
if (!pe) {
pr_warn("%s: Not enough PE# available for PCI bus %04x:%02x\n",
@@ -1202,589 +1091,72 @@ static struct pnv_ioda_pe *pnv_ioda_setup_bus_PE(struct pci_bus *bus, bool all)
return NULL;
}
- /* Associate it with all child devices */
- pnv_ioda_setup_same_PE(bus, pe);
-
/* Put PE to the list */
list_add_tail(&pe->list, &phb->ioda.pe_list);
return pe;
}
-static struct pnv_ioda_pe *pnv_ioda_setup_npu_PE(struct pci_dev *npu_pdev)
-{
- int pe_num, found_pe = false, rc;
- long rid;
- struct pnv_ioda_pe *pe;
- struct pci_dev *gpu_pdev;
- struct pci_dn *npu_pdn;
- struct pci_controller *hose = pci_bus_to_host(npu_pdev->bus);
- struct pnv_phb *phb = hose->private_data;
-
- /*
- * Intentionally leak a reference on the npu device (for
- * nvlink only; this is not an opencapi path) to make sure it
- * never goes away, as it's been the case all along and some
- * work is needed otherwise.
- */
- pci_dev_get(npu_pdev);
-
- /*
- * Due to a hardware errata PE#0 on the NPU is reserved for
- * error handling. This means we only have three PEs remaining
- * which need to be assigned to four links, implying some
- * links must share PEs.
- *
- * To achieve this we assign PEs such that NPUs linking the
- * same GPU get assigned the same PE.
- */
- gpu_pdev = pnv_pci_get_gpu_dev(npu_pdev);
- for (pe_num = 0; pe_num < phb->ioda.total_pe_num; pe_num++) {
- pe = &phb->ioda.pe_array[pe_num];
- if (!pe->pdev)
- continue;
-
- if (pnv_pci_get_gpu_dev(pe->pdev) == gpu_pdev) {
- /*
- * This device has the same peer GPU so should
- * be assigned the same PE as the existing
- * peer NPU.
- */
- dev_info(&npu_pdev->dev,
- "Associating to existing PE %x\n", pe_num);
- npu_pdn = pci_get_pdn(npu_pdev);
- rid = npu_pdev->bus->number << 8 | npu_pdn->devfn;
- npu_pdn->pe_number = pe_num;
- phb->ioda.pe_rmap[rid] = pe->pe_number;
- pe->device_count++;
-
- /* Map the PE to this link */
- rc = opal_pci_set_pe(phb->opal_id, pe_num, rid,
- OpalPciBusAll,
- OPAL_COMPARE_RID_DEVICE_NUMBER,
- OPAL_COMPARE_RID_FUNCTION_NUMBER,
- OPAL_MAP_PE);
- WARN_ON(rc != OPAL_SUCCESS);
- found_pe = true;
- break;
- }
- }
-
- if (!found_pe)
- /*
- * Could not find an existing PE so allocate a new
- * one.
- */
- return pnv_ioda_setup_dev_PE(npu_pdev);
- else
- return pe;
-}
-
-static void pnv_ioda_setup_npu_PEs(struct pci_bus *bus)
-{
- struct pci_dev *pdev;
-
- list_for_each_entry(pdev, &bus->devices, bus_list)
- pnv_ioda_setup_npu_PE(pdev);
-}
-
-static void pnv_pci_ioda_setup_PEs(void)
-{
- struct pci_controller *hose;
- struct pnv_phb *phb;
- struct pnv_ioda_pe *pe;
-
- list_for_each_entry(hose, &hose_list, list_node) {
- phb = hose->private_data;
- if (phb->type == PNV_PHB_NPU_NVLINK) {
- /* PE#0 is needed for error reporting */
- pnv_ioda_reserve_pe(phb, 0);
- pnv_ioda_setup_npu_PEs(hose->bus);
- if (phb->model == PNV_PHB_MODEL_NPU2)
- WARN_ON_ONCE(pnv_npu2_init(hose));
- }
- }
- list_for_each_entry(hose, &hose_list, list_node) {
- phb = hose->private_data;
- if (phb->type != PNV_PHB_IODA2)
- continue;
-
- list_for_each_entry(pe, &phb->ioda.pe_list, list)
- pnv_npu2_map_lpar(pe, MSR_DR | MSR_PR | MSR_HV);
- }
-}
-
-#ifdef CONFIG_PCI_IOV
-static int pnv_pci_vf_release_m64(struct pci_dev *pdev, u16 num_vfs)
-{
- struct pci_bus *bus;
- struct pci_controller *hose;
- struct pnv_phb *phb;
- struct pci_dn *pdn;
- int i, j;
- int m64_bars;
-
- bus = pdev->bus;
- hose = pci_bus_to_host(bus);
- phb = hose->private_data;
- pdn = pci_get_pdn(pdev);
-
- if (pdn->m64_single_mode)
- m64_bars = num_vfs;
- else
- m64_bars = 1;
-
- for (i = 0; i < PCI_SRIOV_NUM_BARS; i++)
- for (j = 0; j < m64_bars; j++) {
- if (pdn->m64_map[j][i] == IODA_INVALID_M64)
- continue;
- opal_pci_phb_mmio_enable(phb->opal_id,
- OPAL_M64_WINDOW_TYPE, pdn->m64_map[j][i], 0);
- clear_bit(pdn->m64_map[j][i], &phb->ioda.m64_bar_alloc);
- pdn->m64_map[j][i] = IODA_INVALID_M64;
- }
-
- kfree(pdn->m64_map);
- return 0;
-}
-
-static int pnv_pci_vf_assign_m64(struct pci_dev *pdev, u16 num_vfs)
-{
- struct pci_bus *bus;
- struct pci_controller *hose;
- struct pnv_phb *phb;
- struct pci_dn *pdn;
- unsigned int win;
- struct resource *res;
- int i, j;
- int64_t rc;
- int total_vfs;
- resource_size_t size, start;
- int pe_num;
- int m64_bars;
-
- bus = pdev->bus;
- hose = pci_bus_to_host(bus);
- phb = hose->private_data;
- pdn = pci_get_pdn(pdev);
- total_vfs = pci_sriov_get_totalvfs(pdev);
-
- if (pdn->m64_single_mode)
- m64_bars = num_vfs;
- else
- m64_bars = 1;
-
- pdn->m64_map = kmalloc_array(m64_bars,
- sizeof(*pdn->m64_map),
- GFP_KERNEL);
- if (!pdn->m64_map)
- return -ENOMEM;
- /* Initialize the m64_map to IODA_INVALID_M64 */
- for (i = 0; i < m64_bars ; i++)
- for (j = 0; j < PCI_SRIOV_NUM_BARS; j++)
- pdn->m64_map[i][j] = IODA_INVALID_M64;
-
-
- for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
- res = &pdev->resource[i + PCI_IOV_RESOURCES];
- if (!res->flags || !res->parent)
- continue;
-
- for (j = 0; j < m64_bars; j++) {
- do {
- win = find_next_zero_bit(&phb->ioda.m64_bar_alloc,
- phb->ioda.m64_bar_idx + 1, 0);
-
- if (win >= phb->ioda.m64_bar_idx + 1)
- goto m64_failed;
- } while (test_and_set_bit(win, &phb->ioda.m64_bar_alloc));
-
- pdn->m64_map[j][i] = win;
-
- if (pdn->m64_single_mode) {
- size = pci_iov_resource_size(pdev,
- PCI_IOV_RESOURCES + i);
- start = res->start + size * j;
- } else {
- size = resource_size(res);
- start = res->start;
- }
-
- /* Map the M64 here */
- if (pdn->m64_single_mode) {
- pe_num = pdn->pe_num_map[j];
- rc = opal_pci_map_pe_mmio_window(phb->opal_id,
- pe_num, OPAL_M64_WINDOW_TYPE,
- pdn->m64_map[j][i], 0);
- }
-
- rc = opal_pci_set_phb_mem_window(phb->opal_id,
- OPAL_M64_WINDOW_TYPE,
- pdn->m64_map[j][i],
- start,
- 0, /* unused */
- size);
-
-
- if (rc != OPAL_SUCCESS) {
- dev_err(&pdev->dev, "Failed to map M64 window #%d: %lld\n",
- win, rc);
- goto m64_failed;
- }
-
- if (pdn->m64_single_mode)
- rc = opal_pci_phb_mmio_enable(phb->opal_id,
- OPAL_M64_WINDOW_TYPE, pdn->m64_map[j][i], 2);
- else
- rc = opal_pci_phb_mmio_enable(phb->opal_id,
- OPAL_M64_WINDOW_TYPE, pdn->m64_map[j][i], 1);
-
- if (rc != OPAL_SUCCESS) {
- dev_err(&pdev->dev, "Failed to enable M64 window #%d: %llx\n",
- win, rc);
- goto m64_failed;
- }
- }
- }
- return 0;
-
-m64_failed:
- pnv_pci_vf_release_m64(pdev, num_vfs);
- return -EBUSY;
-}
-
-static long pnv_pci_ioda2_unset_window(struct iommu_table_group *table_group,
- int num);
-
-static void pnv_pci_ioda2_release_dma_pe(struct pci_dev *dev, struct pnv_ioda_pe *pe)
-{
- struct iommu_table *tbl;
- int64_t rc;
-
- tbl = pe->table_group.tables[0];
- rc = pnv_pci_ioda2_unset_window(&pe->table_group, 0);
- if (rc)
- pe_warn(pe, "OPAL error %lld release DMA window\n", rc);
-
- pnv_pci_ioda2_set_bypass(pe, false);
- if (pe->table_group.group) {
- iommu_group_put(pe->table_group.group);
- BUG_ON(pe->table_group.group);
- }
- iommu_tce_table_put(tbl);
-}
-
-static void pnv_ioda_release_vf_PE(struct pci_dev *pdev)
-{
- struct pci_bus *bus;
- struct pci_controller *hose;
- struct pnv_phb *phb;
- struct pnv_ioda_pe *pe, *pe_n;
- struct pci_dn *pdn;
-
- bus = pdev->bus;
- hose = pci_bus_to_host(bus);
- phb = hose->private_data;
- pdn = pci_get_pdn(pdev);
-
- if (!pdev->is_physfn)
- return;
-
- list_for_each_entry_safe(pe, pe_n, &phb->ioda.pe_list, list) {
- if (pe->parent_dev != pdev)
- continue;
-
- pnv_pci_ioda2_release_dma_pe(pdev, pe);
-
- /* Remove from list */
- mutex_lock(&phb->ioda.pe_list_mutex);
- list_del(&pe->list);
- mutex_unlock(&phb->ioda.pe_list_mutex);
-
- pnv_ioda_deconfigure_pe(phb, pe);
-
- pnv_ioda_free_pe(pe);
- }
-}
-
-void pnv_pci_sriov_disable(struct pci_dev *pdev)
-{
- struct pci_bus *bus;
- struct pci_controller *hose;
- struct pnv_phb *phb;
- struct pnv_ioda_pe *pe;
- struct pci_dn *pdn;
- u16 num_vfs, i;
-
- bus = pdev->bus;
- hose = pci_bus_to_host(bus);
- phb = hose->private_data;
- pdn = pci_get_pdn(pdev);
- num_vfs = pdn->num_vfs;
-
- /* Release VF PEs */
- pnv_ioda_release_vf_PE(pdev);
-
- if (phb->type == PNV_PHB_IODA2) {
- if (!pdn->m64_single_mode)
- pnv_pci_vf_resource_shift(pdev, -*pdn->pe_num_map);
-
- /* Release M64 windows */
- pnv_pci_vf_release_m64(pdev, num_vfs);
-
- /* Release PE numbers */
- if (pdn->m64_single_mode) {
- for (i = 0; i < num_vfs; i++) {
- if (pdn->pe_num_map[i] == IODA_INVALID_PE)
- continue;
-
- pe = &phb->ioda.pe_array[pdn->pe_num_map[i]];
- pnv_ioda_free_pe(pe);
- }
- } else
- bitmap_clear(phb->ioda.pe_alloc, *pdn->pe_num_map, num_vfs);
- /* Releasing pe_num_map */
- kfree(pdn->pe_num_map);
- }
-}
-
-static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,
+static void pnv_pci_ioda1_setup_dma_pe(struct pnv_phb *phb,
struct pnv_ioda_pe *pe);
-#ifdef CONFIG_IOMMU_API
-static void pnv_ioda_setup_bus_iommu_group(struct pnv_ioda_pe *pe,
- struct iommu_table_group *table_group, struct pci_bus *bus);
-
-#endif
-static void pnv_ioda_setup_vf_PE(struct pci_dev *pdev, u16 num_vfs)
-{
- struct pci_bus *bus;
- struct pci_controller *hose;
- struct pnv_phb *phb;
- struct pnv_ioda_pe *pe;
- int pe_num;
- u16 vf_index;
- struct pci_dn *pdn;
-
- bus = pdev->bus;
- hose = pci_bus_to_host(bus);
- phb = hose->private_data;
- pdn = pci_get_pdn(pdev);
-
- if (!pdev->is_physfn)
- return;
-
- /* Reserve PE for each VF */
- for (vf_index = 0; vf_index < num_vfs; vf_index++) {
- int vf_devfn = pci_iov_virtfn_devfn(pdev, vf_index);
- int vf_bus = pci_iov_virtfn_bus(pdev, vf_index);
- struct pci_dn *vf_pdn;
-
- if (pdn->m64_single_mode)
- pe_num = pdn->pe_num_map[vf_index];
- else
- pe_num = *pdn->pe_num_map + vf_index;
-
- pe = &phb->ioda.pe_array[pe_num];
- pe->pe_number = pe_num;
- pe->phb = phb;
- pe->flags = PNV_IODA_PE_VF;
- pe->pbus = NULL;
- pe->parent_dev = pdev;
- pe->mve_number = -1;
- pe->rid = (vf_bus << 8) | vf_devfn;
- pe_info(pe, "VF %04d:%02d:%02d.%d associated with PE#%x\n",
- hose->global_number, pdev->bus->number,
- PCI_SLOT(vf_devfn), PCI_FUNC(vf_devfn), pe_num);
-
- if (pnv_ioda_configure_pe(phb, pe)) {
- /* XXX What do we do here ? */
- pnv_ioda_free_pe(pe);
- pe->pdev = NULL;
- continue;
- }
-
- /* Put PE to the list */
- mutex_lock(&phb->ioda.pe_list_mutex);
- list_add_tail(&pe->list, &phb->ioda.pe_list);
- mutex_unlock(&phb->ioda.pe_list_mutex);
-
- /* associate this pe to it's pdn */
- list_for_each_entry(vf_pdn, &pdn->parent->child_list, list) {
- if (vf_pdn->busno == vf_bus &&
- vf_pdn->devfn == vf_devfn) {
- vf_pdn->pe_number = pe_num;
- break;
- }
- }
-
- pnv_pci_ioda2_setup_dma_pe(phb, pe);
-#ifdef CONFIG_IOMMU_API
- iommu_register_group(&pe->table_group,
- pe->phb->hose->global_number, pe->pe_number);
- pnv_ioda_setup_bus_iommu_group(pe, &pe->table_group, NULL);
-#endif
- }
-}
-
-int pnv_pci_sriov_enable(struct pci_dev *pdev, u16 num_vfs)
+static void pnv_pci_ioda_dma_dev_setup(struct pci_dev *pdev)
{
- struct pci_bus *bus;
- struct pci_controller *hose;
- struct pnv_phb *phb;
- struct pnv_ioda_pe *pe;
- struct pci_dn *pdn;
- int ret;
- u16 i;
-
- bus = pdev->bus;
- hose = pci_bus_to_host(bus);
- phb = hose->private_data;
- pdn = pci_get_pdn(pdev);
-
- if (phb->type == PNV_PHB_IODA2) {
- if (!pdn->vfs_expanded) {
- dev_info(&pdev->dev, "don't support this SRIOV device"
- " with non 64bit-prefetchable IOV BAR\n");
- return -ENOSPC;
- }
-
- /*
- * When M64 BARs functions in Single PE mode, the number of VFs
- * could be enabled must be less than the number of M64 BARs.
- */
- if (pdn->m64_single_mode && num_vfs > phb->ioda.m64_bar_idx) {
- dev_info(&pdev->dev, "Not enough M64 BAR for VFs\n");
- return -EBUSY;
- }
-
- /* Allocating pe_num_map */
- if (pdn->m64_single_mode)
- pdn->pe_num_map = kmalloc_array(num_vfs,
- sizeof(*pdn->pe_num_map),
- GFP_KERNEL);
- else
- pdn->pe_num_map = kmalloc(sizeof(*pdn->pe_num_map), GFP_KERNEL);
-
- if (!pdn->pe_num_map)
- return -ENOMEM;
-
- if (pdn->m64_single_mode)
- for (i = 0; i < num_vfs; i++)
- pdn->pe_num_map[i] = IODA_INVALID_PE;
+ struct pnv_phb *phb = pci_bus_to_pnvhb(pdev->bus);
+ struct pci_dn *pdn = pci_get_pdn(pdev);
+ struct pnv_ioda_pe *pe;
- /* Calculate available PE for required VFs */
- if (pdn->m64_single_mode) {
- for (i = 0; i < num_vfs; i++) {
- pe = pnv_ioda_alloc_pe(phb);
- if (!pe) {
- ret = -EBUSY;
- goto m64_failed;
- }
+ /* Check if the BDFN for this device is associated with a PE yet */
+ pe = pnv_pci_bdfn_to_pe(phb, pdev->devfn | (pdev->bus->number << 8));
+ if (!pe) {
+ /* VF PEs should be pre-configured in pnv_pci_sriov_enable() */
+ if (WARN_ON(pdev->is_virtfn))
+ return;
- pdn->pe_num_map[i] = pe->pe_number;
- }
- } else {
- mutex_lock(&phb->ioda.pe_alloc_mutex);
- *pdn->pe_num_map = bitmap_find_next_zero_area(
- phb->ioda.pe_alloc, phb->ioda.total_pe_num,
- 0, num_vfs, 0);
- if (*pdn->pe_num_map >= phb->ioda.total_pe_num) {
- mutex_unlock(&phb->ioda.pe_alloc_mutex);
- dev_info(&pdev->dev, "Failed to enable VF%d\n", num_vfs);
- kfree(pdn->pe_num_map);
- return -EBUSY;
- }
- bitmap_set(phb->ioda.pe_alloc, *pdn->pe_num_map, num_vfs);
- mutex_unlock(&phb->ioda.pe_alloc_mutex);
- }
- pdn->num_vfs = num_vfs;
+ pnv_pci_configure_bus(pdev->bus);
+ pe = pnv_pci_bdfn_to_pe(phb, pdev->devfn | (pdev->bus->number << 8));
+ pci_info(pdev, "Configured PE#%x\n", pe ? pe->pe_number : 0xfffff);
- /* Assign M64 window accordingly */
- ret = pnv_pci_vf_assign_m64(pdev, num_vfs);
- if (ret) {
- dev_info(&pdev->dev, "Not enough M64 window resources\n");
- goto m64_failed;
- }
/*
- * When using one M64 BAR to map one IOV BAR, we need to shift
- * the IOV BAR according to the PE# allocated to the VFs.
- * Otherwise, the PE# for the VF will conflict with others.
+ * If we can't setup the IODA PE something has gone horribly
+ * wrong and we can't enable DMA for the device.
*/
- if (!pdn->m64_single_mode) {
- ret = pnv_pci_vf_resource_shift(pdev, *pdn->pe_num_map);
- if (ret)
- goto m64_failed;
- }
+ if (WARN_ON(!pe))
+ return;
+ } else {
+ pci_info(pdev, "Added to existing PE#%x\n", pe->pe_number);
}
- /* Setup VF PEs */
- pnv_ioda_setup_vf_PE(pdev, num_vfs);
-
- return 0;
-
-m64_failed:
- if (pdn->m64_single_mode) {
- for (i = 0; i < num_vfs; i++) {
- if (pdn->pe_num_map[i] == IODA_INVALID_PE)
- continue;
-
- pe = &phb->ioda.pe_array[pdn->pe_num_map[i]];
- pnv_ioda_free_pe(pe);
- }
- } else
- bitmap_clear(phb->ioda.pe_alloc, *pdn->pe_num_map, num_vfs);
-
- /* Releasing pe_num_map */
- kfree(pdn->pe_num_map);
-
- return ret;
-}
-
-int pnv_pcibios_sriov_disable(struct pci_dev *pdev)
-{
- pnv_pci_sriov_disable(pdev);
-
- /* Release PCI data */
- remove_sriov_vf_pdns(pdev);
- return 0;
-}
-
-int pnv_pcibios_sriov_enable(struct pci_dev *pdev, u16 num_vfs)
-{
- /* Allocate PCI data */
- add_sriov_vf_pdns(pdev);
-
- return pnv_pci_sriov_enable(pdev, num_vfs);
-}
-#endif /* CONFIG_PCI_IOV */
-
-static void pnv_pci_ioda_dma_dev_setup(struct pci_dev *pdev)
-{
- struct pci_controller *hose = pci_bus_to_host(pdev->bus);
- struct pnv_phb *phb = hose->private_data;
- struct pci_dn *pdn = pci_get_pdn(pdev);
- struct pnv_ioda_pe *pe;
-
/*
- * The function can be called while the PE#
- * hasn't been assigned. Do nothing for the
- * case.
+ * We assume that bridges *probably* don't need to do any DMA so we can
+ * skip allocating a TCE table, etc unless we get a non-bridge device.
*/
- if (!pdn || pdn->pe_number == IODA_INVALID_PE)
- return;
+ if (!pe->dma_setup_done && !pci_is_bridge(pdev)) {
+ switch (phb->type) {
+ case PNV_PHB_IODA1:
+ pnv_pci_ioda1_setup_dma_pe(phb, pe);
+ break;
+ case PNV_PHB_IODA2:
+ pnv_pci_ioda2_setup_dma_pe(phb, pe);
+ break;
+ default:
+ pr_warn("%s: No DMA for PHB#%x (type %d)\n",
+ __func__, phb->hose->global_number, phb->type);
+ }
+ }
+
+ if (pdn)
+ pdn->pe_number = pe->pe_number;
+ pe->device_count++;
- pe = &phb->ioda.pe_array[pdn->pe_number];
WARN_ON(get_dma_ops(&pdev->dev) != &dma_iommu_ops);
pdev->dev.archdata.dma_offset = pe->tce_bypass_base;
set_iommu_table_base(&pdev->dev, pe->table_group.tables[0]);
- /*
- * Note: iommu_add_device() will fail here as
- * for physical PE: the device is already added by now;
- * for virtual PE: sysfs entries are not ready yet and
- * tce_iommu_bus_notifier will add the device to a group later.
- */
+
+ /* PEs with a DMA weight of zero won't have a group */
+ if (pe->table_group.group)
+ iommu_add_device(&pe->table_group, &pdev->dev);
}
/*
@@ -1859,8 +1231,7 @@ err:
static bool pnv_pci_ioda_iommu_bypass_supported(struct pci_dev *pdev,
u64 dma_mask)
{
- struct pci_controller *hose = pci_bus_to_host(pdev->bus);
- struct pnv_phb *phb = hose->private_data;
+ struct pnv_phb *phb = pci_bus_to_pnvhb(pdev->bus);
struct pci_dn *pdn = pci_get_pdn(pdev);
struct pnv_ioda_pe *pe;
@@ -1897,35 +1268,20 @@ static bool pnv_pci_ioda_iommu_bypass_supported(struct pci_dev *pdev,
return false;
}
-static void pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe, struct pci_bus *bus)
+static inline __be64 __iomem *pnv_ioda_get_inval_reg(struct pnv_phb *phb)
{
- struct pci_dev *dev;
-
- list_for_each_entry(dev, &bus->devices, bus_list) {
- set_iommu_table_base(&dev->dev, pe->table_group.tables[0]);
- dev->dev.archdata.dma_offset = pe->tce_bypass_base;
-
- if ((pe->flags & PNV_IODA_PE_BUS_ALL) && dev->subordinate)
- pnv_ioda_setup_bus_dma(pe, dev->subordinate);
- }
-}
-
-static inline __be64 __iomem *pnv_ioda_get_inval_reg(struct pnv_phb *phb,
- bool real_mode)
-{
- return real_mode ? (__be64 __iomem *)(phb->regs_phys + 0x210) :
- (phb->regs + 0x210);
+ return phb->regs + 0x210;
}
static void pnv_pci_p7ioc_tce_invalidate(struct iommu_table *tbl,
- unsigned long index, unsigned long npages, bool rm)
+ unsigned long index, unsigned long npages)
{
struct iommu_table_group_link *tgl = list_first_entry_or_null(
&tbl->it_group_list, struct iommu_table_group_link,
next);
struct pnv_ioda_pe *pe = container_of(tgl->table_group,
struct pnv_ioda_pe, table_group);
- __be64 __iomem *invalidate = pnv_ioda_get_inval_reg(pe->phb, rm);
+ __be64 __iomem *invalidate = pnv_ioda_get_inval_reg(pe->phb);
unsigned long start, end, inc;
start = __pa(((__be64 *)tbl->it_base) + index - tbl->it_offset);
@@ -1940,11 +1296,7 @@ static void pnv_pci_p7ioc_tce_invalidate(struct iommu_table *tbl,
mb(); /* Ensure above stores are visible */
while (start <= end) {
- if (rm)
- __raw_rm_writeq_be(start, invalidate);
- else
- __raw_writeq_be(start, invalidate);
-
+ __raw_writeq_be(start, invalidate);
start += inc;
}
@@ -1963,7 +1315,7 @@ static int pnv_ioda1_tce_build(struct iommu_table *tbl, long index,
attrs);
if (!ret)
- pnv_pci_p7ioc_tce_invalidate(tbl, index, npages, false);
+ pnv_pci_p7ioc_tce_invalidate(tbl, index, npages);
return ret;
}
@@ -1971,10 +1323,9 @@ static int pnv_ioda1_tce_build(struct iommu_table *tbl, long index,
#ifdef CONFIG_IOMMU_API
/* Common for IODA1 and IODA2 */
static int pnv_ioda_tce_xchg_no_kill(struct iommu_table *tbl, long index,
- unsigned long *hpa, enum dma_data_direction *direction,
- bool realmode)
+ unsigned long *hpa, enum dma_data_direction *direction)
{
- return pnv_tce_xchg(tbl, index, hpa, direction, !realmode);
+ return pnv_tce_xchg(tbl, index, hpa, direction);
}
#endif
@@ -1983,7 +1334,7 @@ static void pnv_ioda1_tce_free(struct iommu_table *tbl, long index,
{
pnv_tce_free(tbl, index, npages);
- pnv_pci_p7ioc_tce_invalidate(tbl, index, npages, false);
+ pnv_pci_p7ioc_tce_invalidate(tbl, index, npages);
}
static struct iommu_table_ops pnv_ioda1_iommu_ops = {
@@ -2001,33 +1352,21 @@ static struct iommu_table_ops pnv_ioda1_iommu_ops = {
#define PHB3_TCE_KILL_INVAL_PE PPC_BIT(1)
#define PHB3_TCE_KILL_INVAL_ONE PPC_BIT(2)
-static void pnv_pci_phb3_tce_invalidate_entire(struct pnv_phb *phb, bool rm)
-{
- __be64 __iomem *invalidate = pnv_ioda_get_inval_reg(phb, rm);
- const unsigned long val = PHB3_TCE_KILL_INVAL_ALL;
-
- mb(); /* Ensure previous TCE table stores are visible */
- if (rm)
- __raw_rm_writeq_be(val, invalidate);
- else
- __raw_writeq_be(val, invalidate);
-}
-
static inline void pnv_pci_phb3_tce_invalidate_pe(struct pnv_ioda_pe *pe)
{
/* 01xb - invalidate TCEs that match the specified PE# */
- __be64 __iomem *invalidate = pnv_ioda_get_inval_reg(pe->phb, false);
+ __be64 __iomem *invalidate = pnv_ioda_get_inval_reg(pe->phb);
unsigned long val = PHB3_TCE_KILL_INVAL_PE | (pe->pe_number & 0xFF);
mb(); /* Ensure above stores are visible */
__raw_writeq_be(val, invalidate);
}
-static void pnv_pci_phb3_tce_invalidate(struct pnv_ioda_pe *pe, bool rm,
+static void pnv_pci_phb3_tce_invalidate(struct pnv_ioda_pe *pe,
unsigned shift, unsigned long index,
unsigned long npages)
{
- __be64 __iomem *invalidate = pnv_ioda_get_inval_reg(pe->phb, rm);
+ __be64 __iomem *invalidate = pnv_ioda_get_inval_reg(pe->phb);
unsigned long start, end, inc;
/* We'll invalidate DMA address in PE scope */
@@ -2042,10 +1381,7 @@ static void pnv_pci_phb3_tce_invalidate(struct pnv_ioda_pe *pe, bool rm,
mb();
while (start <= end) {
- if (rm)
- __raw_rm_writeq_be(start, invalidate);
- else
- __raw_writeq_be(start, invalidate);
+ __raw_writeq_be(start, invalidate);
start += inc;
}
}
@@ -2062,7 +1398,7 @@ static inline void pnv_pci_ioda2_tce_invalidate_pe(struct pnv_ioda_pe *pe)
}
static void pnv_pci_ioda2_tce_invalidate(struct iommu_table *tbl,
- unsigned long index, unsigned long npages, bool rm)
+ unsigned long index, unsigned long npages)
{
struct iommu_table_group_link *tgl;
@@ -2072,22 +1408,8 @@ static void pnv_pci_ioda2_tce_invalidate(struct iommu_table *tbl,
struct pnv_phb *phb = pe->phb;
unsigned int shift = tbl->it_page_shift;
- /*
- * NVLink1 can use the TCE kill register directly as
- * it's the same as PHB3. NVLink2 is different and
- * should go via the OPAL call.
- */
- if (phb->model == PNV_PHB_MODEL_NPU) {
- /*
- * The NVLink hardware does not support TCE kill
- * per TCE entry so we have to invalidate
- * the entire cache for it.
- */
- pnv_pci_phb3_tce_invalidate_entire(phb, rm);
- continue;
- }
if (phb->model == PNV_PHB_MODEL_PHB3 && phb->regs)
- pnv_pci_phb3_tce_invalidate(pe, rm, shift,
+ pnv_pci_phb3_tce_invalidate(pe, shift,
index, npages);
else
opal_pci_tce_kill(phb->opal_id,
@@ -2097,14 +1419,6 @@ static void pnv_pci_ioda2_tce_invalidate(struct iommu_table *tbl,
}
}
-void pnv_pci_ioda2_tce_invalidate_entire(struct pnv_phb *phb, bool rm)
-{
- if (phb->model == PNV_PHB_MODEL_NPU || phb->model == PNV_PHB_MODEL_PHB3)
- pnv_pci_phb3_tce_invalidate_entire(phb, rm);
- else
- opal_pci_tce_kill(phb->opal_id, OPAL_PCI_TCE_KILL, 0, 0, 0, 0);
-}
-
static int pnv_ioda2_tce_build(struct iommu_table *tbl, long index,
long npages, unsigned long uaddr,
enum dma_data_direction direction,
@@ -2114,7 +1428,7 @@ static int pnv_ioda2_tce_build(struct iommu_table *tbl, long index,
attrs);
if (!ret)
- pnv_pci_ioda2_tce_invalidate(tbl, index, npages, false);
+ pnv_pci_ioda2_tce_invalidate(tbl, index, npages);
return ret;
}
@@ -2124,7 +1438,7 @@ static void pnv_ioda2_tce_free(struct iommu_table *tbl, long index,
{
pnv_tce_free(tbl, index, npages);
- pnv_pci_ioda2_tce_invalidate(tbl, index, npages, false);
+ pnv_pci_ioda2_tce_invalidate(tbl, index, npages);
}
static struct iommu_table_ops pnv_ioda2_iommu_ops = {
@@ -2295,11 +1609,11 @@ found:
tbl->it_ops = &pnv_ioda1_iommu_ops;
pe->table_group.tce32_start = tbl->it_offset << tbl->it_page_shift;
pe->table_group.tce32_size = tbl->it_size << tbl->it_page_shift;
- iommu_init_table(tbl, phb->hose->node, 0, 0);
-
- if (pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL))
- pnv_ioda_setup_bus_dma(pe, pe->pbus);
+ tbl->it_index = (phb->hose->global_number << 16) | pe->pe_number;
+ if (!iommu_init_table(tbl, phb->hose->node, 0, 0))
+ panic("Failed to initialize iommu table");
+ pe->dma_setup_done = true;
return;
fail:
/* XXX Failure: Try to fallback to 64-bit only ? */
@@ -2465,16 +1779,17 @@ static long pnv_pci_ioda2_setup_default_config(struct pnv_ioda_pe *pe)
res_start = pe->phb->ioda.m32_pci_base >> tbl->it_page_shift;
res_end = min(window_size, SZ_4G) >> tbl->it_page_shift;
}
- iommu_init_table(tbl, pe->phb->hose->node, res_start, res_end);
- rc = pnv_pci_ioda2_set_window(&pe->table_group, 0, tbl);
+ tbl->it_index = (pe->phb->hose->global_number << 16) | pe->pe_number;
+ if (iommu_init_table(tbl, pe->phb->hose->node, res_start, res_end))
+ rc = pnv_pci_ioda2_set_window(&pe->table_group, 0, tbl);
+ else
+ rc = -ENOMEM;
if (rc) {
- pe_err(pe, "Failed to configure 32-bit TCE table, err %ld\n",
- rc);
+ pe_err(pe, "Failed to configure 32-bit TCE table, err %ld\n", rc);
iommu_tce_table_put(tbl);
- return rc;
+ tbl = NULL; /* This clears iommu_table_base below */
}
-
if (!pnv_iommu_bypass_disabled)
pnv_pci_ioda2_set_bypass(pe, true);
@@ -2489,7 +1804,6 @@ static long pnv_pci_ioda2_setup_default_config(struct pnv_ioda_pe *pe)
return 0;
}
-#if defined(CONFIG_IOMMU_API) || defined(CONFIG_PCI_IOV)
static long pnv_pci_ioda2_unset_window(struct iommu_table_group *table_group,
int num)
{
@@ -2513,7 +1827,6 @@ static long pnv_pci_ioda2_unset_window(struct iommu_table_group *table_group,
return ret;
}
-#endif
#ifdef CONFIG_IOMMU_API
unsigned long pnv_pci_ioda2_get_table_size(__u32 page_shift,
@@ -2537,7 +1850,7 @@ unsigned long pnv_pci_ioda2_get_table_size(__u32 page_shift,
direct_table_size = 1UL << table_shift;
for ( ; levels; --levels) {
- bytes += _ALIGN_UP(tce_table_size, direct_table_size);
+ bytes += ALIGN(tce_table_size, direct_table_size);
tce_table_size /= direct_table_size;
tce_table_size <<= 3;
@@ -2562,6 +1875,19 @@ static long pnv_pci_ioda2_create_table_userspace(
return ret;
}
+static void pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe, struct pci_bus *bus)
+{
+ struct pci_dev *dev;
+
+ list_for_each_entry(dev, &bus->devices, bus_list) {
+ set_iommu_table_base(&dev->dev, pe->table_group.tables[0]);
+ dev->dev.archdata.dma_offset = pe->tce_bypass_base;
+
+ if ((pe->flags & PNV_IODA_PE_BUS_ALL) && dev->subordinate)
+ pnv_ioda_setup_bus_dma(pe, dev->subordinate);
+ }
+}
+
static void pnv_ioda2_take_ownership(struct iommu_table_group *table_group)
{
struct pnv_ioda_pe *pe = container_of(table_group, struct pnv_ioda_pe,
@@ -2596,145 +1922,13 @@ static struct iommu_table_group_ops pnv_pci_ioda2_ops = {
.take_ownership = pnv_ioda2_take_ownership,
.release_ownership = pnv_ioda2_release_ownership,
};
-
-static void pnv_ioda_setup_bus_iommu_group_add_devices(struct pnv_ioda_pe *pe,
- struct iommu_table_group *table_group,
- struct pci_bus *bus)
-{
- struct pci_dev *dev;
-
- list_for_each_entry(dev, &bus->devices, bus_list) {
- iommu_add_device(table_group, &dev->dev);
-
- if ((pe->flags & PNV_IODA_PE_BUS_ALL) && dev->subordinate)
- pnv_ioda_setup_bus_iommu_group_add_devices(pe,
- table_group, dev->subordinate);
- }
-}
-
-static void pnv_ioda_setup_bus_iommu_group(struct pnv_ioda_pe *pe,
- struct iommu_table_group *table_group, struct pci_bus *bus)
-{
-
- if (pe->flags & PNV_IODA_PE_DEV)
- iommu_add_device(table_group, &pe->pdev->dev);
-
- if ((pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL)) || bus)
- pnv_ioda_setup_bus_iommu_group_add_devices(pe, table_group,
- bus);
-}
-
-static unsigned long pnv_ioda_parse_tce_sizes(struct pnv_phb *phb);
-
-static void pnv_pci_ioda_setup_iommu_api(void)
-{
- struct pci_controller *hose;
- struct pnv_phb *phb;
- struct pnv_ioda_pe *pe;
-
- /*
- * There are 4 types of PEs:
- * - PNV_IODA_PE_BUS: a downstream port with an adapter,
- * created from pnv_pci_setup_bridge();
- * - PNV_IODA_PE_BUS_ALL: a PCI-PCIX bridge with devices behind it,
- * created from pnv_pci_setup_bridge();
- * - PNV_IODA_PE_VF: a SRIOV virtual function,
- * created from pnv_pcibios_sriov_enable();
- * - PNV_IODA_PE_DEV: an NPU or OCAPI device,
- * created from pnv_pci_ioda_fixup().
- *
- * Normally a PE is represented by an IOMMU group, however for
- * devices with side channels the groups need to be more strict.
- */
- list_for_each_entry(hose, &hose_list, list_node) {
- phb = hose->private_data;
-
- if (phb->type == PNV_PHB_NPU_NVLINK ||
- phb->type == PNV_PHB_NPU_OCAPI)
- continue;
-
- list_for_each_entry(pe, &phb->ioda.pe_list, list) {
- struct iommu_table_group *table_group;
-
- table_group = pnv_try_setup_npu_table_group(pe);
- if (!table_group) {
- if (!pnv_pci_ioda_pe_dma_weight(pe))
- continue;
-
- table_group = &pe->table_group;
- iommu_register_group(&pe->table_group,
- pe->phb->hose->global_number,
- pe->pe_number);
- }
- pnv_ioda_setup_bus_iommu_group(pe, table_group,
- pe->pbus);
- }
- }
-
- /*
- * Now we have all PHBs discovered, time to add NPU devices to
- * the corresponding IOMMU groups.
- */
- list_for_each_entry(hose, &hose_list, list_node) {
- unsigned long pgsizes;
-
- phb = hose->private_data;
-
- if (phb->type != PNV_PHB_NPU_NVLINK)
- continue;
-
- pgsizes = pnv_ioda_parse_tce_sizes(phb);
- list_for_each_entry(pe, &phb->ioda.pe_list, list) {
- /*
- * IODA2 bridges get this set up from
- * pci_controller_ops::setup_bridge but NPU bridges
- * do not have this hook defined so we do it here.
- */
- pe->table_group.pgsizes = pgsizes;
- pnv_npu_compound_attach(pe);
- }
- }
-}
-#else /* !CONFIG_IOMMU_API */
-static void pnv_pci_ioda_setup_iommu_api(void) { };
#endif
-static unsigned long pnv_ioda_parse_tce_sizes(struct pnv_phb *phb)
-{
- struct pci_controller *hose = phb->hose;
- struct device_node *dn = hose->dn;
- unsigned long mask = 0;
- int i, rc, count;
- u32 val;
-
- count = of_property_count_u32_elems(dn, "ibm,supported-tce-sizes");
- if (count <= 0) {
- mask = SZ_4K | SZ_64K;
- /* Add 16M for POWER8 by default */
- if (cpu_has_feature(CPU_FTR_ARCH_207S) &&
- !cpu_has_feature(CPU_FTR_ARCH_300))
- mask |= SZ_16M | SZ_256M;
- return mask;
- }
-
- for (i = 0; i < count; i++) {
- rc = of_property_read_u32_index(dn, "ibm,supported-tce-sizes",
- i, &val);
- if (rc == 0)
- mask |= 1ULL << val;
- }
-
- return mask;
-}
-
-static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,
- struct pnv_ioda_pe *pe)
+void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,
+ struct pnv_ioda_pe *pe)
{
int64_t rc;
- if (!pnv_pci_ioda_pe_dma_weight(pe))
- return;
-
/* TVE #1 is selected by PCI address bit 59 */
pe->tce_bypass_base = 1ull << 59;
@@ -2749,39 +1943,53 @@ static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,
IOMMU_TABLE_GROUP_MAX_TABLES;
pe->table_group.max_levels = POWERNV_IOMMU_MAX_LEVELS;
pe->table_group.pgsizes = pnv_ioda_parse_tce_sizes(phb);
-#ifdef CONFIG_IOMMU_API
- pe->table_group.ops = &pnv_pci_ioda2_ops;
-#endif
rc = pnv_pci_ioda2_setup_default_config(pe);
if (rc)
return;
- if (pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL))
- pnv_ioda_setup_bus_dma(pe, pe->pbus);
+#ifdef CONFIG_IOMMU_API
+ pe->table_group.ops = &pnv_pci_ioda2_ops;
+ iommu_register_group(&pe->table_group, phb->hose->global_number,
+ pe->pe_number);
+#endif
+ pe->dma_setup_done = true;
}
-int64_t pnv_opal_pci_msi_eoi(struct irq_chip *chip, unsigned int hw_irq)
+/*
+ * Called from KVM in real mode to EOI passthru interrupts. The ICP
+ * EOI is handled directly in KVM in kvmppc_deliver_irq_passthru().
+ *
+ * The IRQ data is mapped in the PCI-MSI domain and the EOI OPAL call
+ * needs an HW IRQ number mapped in the XICS IRQ domain. The HW IRQ
+ * numbers of the in-the-middle MSI domain are vector numbers and it's
+ * good enough for OPAL. Use that.
+ */
+int64_t pnv_opal_pci_msi_eoi(struct irq_data *d)
{
- struct pnv_phb *phb = container_of(chip, struct pnv_phb,
- ioda.irq_chip);
+ struct pci_controller *hose = irq_data_get_irq_chip_data(d->parent_data);
+ struct pnv_phb *phb = hose->private_data;
- return opal_pci_msi_eoi(phb->opal_id, hw_irq);
+ return opal_pci_msi_eoi(phb->opal_id, d->parent_data->hwirq);
}
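+
+/*
+ * Editor's note (not part of the original patch): a sketch of the IRQ
+ * domain stack that pnv_msi_allocate_domains() below creates, inferred
+ * from the code in this file:
+ *
+ *   PCI-MSI domain    - the irq_data handed to pnv_opal_pci_msi_eoi()
+ *   "PNV-MSI" domain  - hwirq is the vector number from phb->msi_bmp
+ *   XICS/XIVE parent  - hwirq is phb->msi_base + vector (OPAL HW IRQ)
+ *
+ * So d->parent_data->hwirq above resolves to the vector number, which,
+ * as the comment notes, OPAL also accepts.
+ */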
+/*
+ * The IRQ data is mapped in the XICS domain, with OPAL HW IRQ numbers
+ */
static void pnv_ioda2_msi_eoi(struct irq_data *d)
{
int64_t rc;
unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
- struct irq_chip *chip = irq_data_get_irq_chip(d);
+ struct pci_controller *hose = irq_data_get_irq_chip_data(d);
+ struct pnv_phb *phb = hose->private_data;
- rc = pnv_opal_pci_msi_eoi(chip, hw_irq);
+ rc = opal_pci_msi_eoi(phb->opal_id, hw_irq);
WARN_ON_ONCE(rc);
icp_native_eoi(d);
}
-
+/* P8/CXL only */
void pnv_set_msi_irq_chip(struct pnv_phb *phb, unsigned int virq)
{
struct irq_data *idata;
@@ -2803,27 +2011,32 @@ void pnv_set_msi_irq_chip(struct pnv_phb *phb, unsigned int virq)
phb->ioda.irq_chip.irq_eoi = pnv_ioda2_msi_eoi;
}
irq_set_chip(virq, &phb->ioda.irq_chip);
+ irq_set_chip_data(virq, phb->hose);
}
+static struct irq_chip pnv_pci_msi_irq_chip;
+
/*
* Returns true iff chip is something that we could call
* pnv_opal_pci_msi_eoi for.
*/
bool is_pnv_opal_msi(struct irq_chip *chip)
{
- return chip->irq_eoi == pnv_ioda2_msi_eoi;
+ return chip == &pnv_pci_msi_irq_chip;
}
EXPORT_SYMBOL_GPL(is_pnv_opal_msi);
-static int pnv_pci_ioda_msi_setup(struct pnv_phb *phb, struct pci_dev *dev,
- unsigned int hwirq, unsigned int virq,
- unsigned int is_64, struct msi_msg *msg)
+static int __pnv_pci_ioda_msi_setup(struct pnv_phb *phb, struct pci_dev *dev,
+ unsigned int xive_num,
+ unsigned int is_64, struct msi_msg *msg)
{
struct pnv_ioda_pe *pe = pnv_ioda_get_pe(dev);
- unsigned int xive_num = hwirq - phb->msi_base;
__be32 data;
int rc;
+ dev_dbg(&dev->dev, "%s: setup %s-bit MSI for vector #%d\n", __func__,
+ is_64 ? "64" : "32", xive_num);
+
/* No PE assigned ? bail out ... no MSI for you ! */
if (pe == NULL)
return -ENXIO;
@@ -2871,153 +2084,238 @@ static int pnv_pci_ioda_msi_setup(struct pnv_phb *phb, struct pci_dev *dev,
}
msg->data = be32_to_cpu(data);
- pnv_set_msi_irq_chip(phb, virq);
-
- pr_devel("%s: %s-bit MSI on hwirq %x (xive #%d),"
- " address=%x_%08x data=%x PE# %x\n",
- pci_name(dev), is_64 ? "64" : "32", hwirq, xive_num,
- msg->address_hi, msg->address_lo, data, pe->pe_number);
-
return 0;
}
-static void pnv_pci_init_ioda_msis(struct pnv_phb *phb)
+/*
+ * The msi_free() op is called before irq_domain_free_irqs_top() when
+ * the handler data is still available. Use that to clear the XIVE
+ * controller.
+ */
+static void pnv_msi_ops_msi_free(struct irq_domain *domain,
+ struct msi_domain_info *info,
+ unsigned int irq)
{
- unsigned int count;
- const __be32 *prop = of_get_property(phb->hose->dn,
- "ibm,opal-msi-ranges", NULL);
- if (!prop) {
- /* BML Fallback */
- prop = of_get_property(phb->hose->dn, "msi-ranges", NULL);
- }
- if (!prop)
- return;
+ if (xive_enabled())
+ xive_irq_free_data(irq);
+}
- phb->msi_base = be32_to_cpup(prop);
- count = be32_to_cpup(prop + 1);
- if (msi_bitmap_alloc(&phb->msi_bmp, count, phb->hose->dn)) {
- pr_err("PCI %d: Failed to allocate MSI bitmap !\n",
- phb->hose->global_number);
- return;
- }
+static struct msi_domain_ops pnv_pci_msi_domain_ops = {
+ .msi_free = pnv_msi_ops_msi_free,
+};
- phb->msi_setup = pnv_pci_ioda_msi_setup;
- phb->msi32_support = 1;
- pr_info(" Allocated bitmap for %d MSIs (base IRQ 0x%x)\n",
- count, phb->msi_base);
+static void pnv_msi_shutdown(struct irq_data *d)
+{
+ d = d->parent_data;
+ if (d->chip->irq_shutdown)
+ d->chip->irq_shutdown(d);
}
-#ifdef CONFIG_PCI_IOV
-static void pnv_pci_ioda_fixup_iov_resources(struct pci_dev *pdev)
+static void pnv_msi_mask(struct irq_data *d)
{
- struct pci_controller *hose = pci_bus_to_host(pdev->bus);
- struct pnv_phb *phb = hose->private_data;
- const resource_size_t gate = phb->ioda.m64_segsize >> 2;
- struct resource *res;
- int i;
- resource_size_t size, total_vf_bar_sz;
- struct pci_dn *pdn;
- int mul, total_vfs;
+ pci_msi_mask_irq(d);
+ irq_chip_mask_parent(d);
+}
- pdn = pci_get_pdn(pdev);
- pdn->vfs_expanded = 0;
- pdn->m64_single_mode = false;
+static void pnv_msi_unmask(struct irq_data *d)
+{
+ pci_msi_unmask_irq(d);
+ irq_chip_unmask_parent(d);
+}
- total_vfs = pci_sriov_get_totalvfs(pdev);
- mul = phb->ioda.total_pe_num;
- total_vf_bar_sz = 0;
+static struct irq_chip pnv_pci_msi_irq_chip = {
+ .name = "PNV-PCI-MSI",
+ .irq_shutdown = pnv_msi_shutdown,
+ .irq_mask = pnv_msi_mask,
+ .irq_unmask = pnv_msi_unmask,
+ .irq_eoi = irq_chip_eoi_parent,
+};
- for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
- res = &pdev->resource[i + PCI_IOV_RESOURCES];
- if (!res->flags || res->parent)
- continue;
- if (!pnv_pci_is_m64_flags(res->flags)) {
- dev_warn(&pdev->dev, "Don't support SR-IOV with"
- " non M64 VF BAR%d: %pR. \n",
- i, res);
- goto truncate_iov;
- }
+static struct msi_domain_info pnv_msi_domain_info = {
+ .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX),
+ .ops = &pnv_pci_msi_domain_ops,
+ .chip = &pnv_pci_msi_irq_chip,
+};
- total_vf_bar_sz += pci_iov_resource_size(pdev,
- i + PCI_IOV_RESOURCES);
+static void pnv_msi_compose_msg(struct irq_data *d, struct msi_msg *msg)
+{
+ struct msi_desc *entry = irq_data_get_msi_desc(d);
+ struct pci_dev *pdev = msi_desc_to_pci_dev(entry);
+ struct pci_controller *hose = irq_data_get_irq_chip_data(d);
+ struct pnv_phb *phb = hose->private_data;
+ int rc;
+
+ rc = __pnv_pci_ioda_msi_setup(phb, pdev, d->hwirq,
+ entry->pci.msi_attrib.is_64, msg);
+ if (rc)
+ dev_err(&pdev->dev, "Failed to setup %s-bit MSI #%ld : %d\n",
+ entry->pci.msi_attrib.is_64 ? "64" : "32", d->hwirq, rc);
+}
+/*
+ * The IRQ data is mapped in the MSI domain in which HW IRQ numbers
+ * correspond to vector numbers.
+ */
+static void pnv_msi_eoi(struct irq_data *d)
+{
+ struct pci_controller *hose = irq_data_get_irq_chip_data(d);
+ struct pnv_phb *phb = hose->private_data;
+
+ if (phb->model == PNV_PHB_MODEL_PHB3) {
/*
- * If bigger than quarter of M64 segment size, just round up
- * power of two.
- *
- * Generally, one M64 BAR maps one IOV BAR. To avoid conflict
- * with other devices, IOV BAR size is expanded to be
- * (total_pe * VF_BAR_size). When VF_BAR_size is half of M64
- * segment size , the expanded size would equal to half of the
- * whole M64 space size, which will exhaust the M64 Space and
- * limit the system flexibility. This is a design decision to
- * set the boundary to quarter of the M64 segment size.
+ * The EOI OPAL call takes an OPAL HW IRQ number but
+ * since it is translated into a vector number in
+ * OPAL, use that directly.
*/
- if (total_vf_bar_sz > gate) {
- mul = roundup_pow_of_two(total_vfs);
- dev_info(&pdev->dev,
- "VF BAR Total IOV size %llx > %llx, roundup to %d VFs\n",
- total_vf_bar_sz, gate, mul);
- pdn->m64_single_mode = true;
- break;
- }
+ WARN_ON_ONCE(opal_pci_msi_eoi(phb->opal_id, d->hwirq));
}
- for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
- res = &pdev->resource[i + PCI_IOV_RESOURCES];
- if (!res->flags || res->parent)
- continue;
+ irq_chip_eoi_parent(d);
+}
- size = pci_iov_resource_size(pdev, i + PCI_IOV_RESOURCES);
- /*
- * On PHB3, the minimum size alignment of M64 BAR in single
- * mode is 32MB.
- */
- if (pdn->m64_single_mode && (size < SZ_32M))
- goto truncate_iov;
- dev_dbg(&pdev->dev, " Fixing VF BAR%d: %pR to\n", i, res);
- res->end = res->start + size * mul - 1;
- dev_dbg(&pdev->dev, " %pR\n", res);
- dev_info(&pdev->dev, "VF BAR%d: %pR (expanded to %d VFs for PE alignment)",
- i, res, mul);
+static struct irq_chip pnv_msi_irq_chip = {
+ .name = "PNV-MSI",
+ .irq_shutdown = pnv_msi_shutdown,
+ .irq_mask = irq_chip_mask_parent,
+ .irq_unmask = irq_chip_unmask_parent,
+ .irq_eoi = pnv_msi_eoi,
+ .irq_set_affinity = irq_chip_set_affinity_parent,
+ .irq_compose_msi_msg = pnv_msi_compose_msg,
+};
+
+static int pnv_irq_parent_domain_alloc(struct irq_domain *domain,
+ unsigned int virq, int hwirq)
+{
+ struct irq_fwspec parent_fwspec;
+ int ret;
+
+ parent_fwspec.fwnode = domain->parent->fwnode;
+ parent_fwspec.param_count = 2;
+ parent_fwspec.param[0] = hwirq;
+ parent_fwspec.param[1] = IRQ_TYPE_EDGE_RISING;
+
+ ret = irq_domain_alloc_irqs_parent(domain, virq, 1, &parent_fwspec);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int pnv_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *arg)
+{
+ struct pci_controller *hose = domain->host_data;
+ struct pnv_phb *phb = hose->private_data;
+ msi_alloc_info_t *info = arg;
+ struct pci_dev *pdev = msi_desc_to_pci_dev(info->desc);
+ int hwirq;
+ int i, ret;
+
+ hwirq = msi_bitmap_alloc_hwirqs(&phb->msi_bmp, nr_irqs);
+ if (hwirq < 0) {
+ dev_warn(&pdev->dev, "failed to find a free MSI\n");
+ return -ENOSPC;
}
- pdn->vfs_expanded = mul;
- return;
+ dev_dbg(&pdev->dev, "%s bridge %pOF %d/%x #%d\n", __func__,
+ hose->dn, virq, hwirq, nr_irqs);
+
+ for (i = 0; i < nr_irqs; i++) {
+ ret = pnv_irq_parent_domain_alloc(domain, virq + i,
+ phb->msi_base + hwirq + i);
+ if (ret)
+ goto out;
-truncate_iov:
- /* To save MMIO space, IOV BAR is truncated. */
- for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
- res = &pdev->resource[i + PCI_IOV_RESOURCES];
- res->flags = 0;
- res->end = res->start - 1;
+ irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
+ &pnv_msi_irq_chip, hose);
}
+
+ return 0;
+
+out:
+ irq_domain_free_irqs_parent(domain, virq, i - 1);
+ msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq, nr_irqs);
+ return ret;
}
-static void pnv_pci_ioda_fixup_iov(struct pci_dev *pdev)
+static void pnv_irq_domain_free(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs)
{
- if (WARN_ON(pci_dev_is_added(pdev)))
- return;
+ struct irq_data *d = irq_domain_get_irq_data(domain, virq);
+ struct pci_controller *hose = irq_data_get_irq_chip_data(d);
+ struct pnv_phb *phb = hose->private_data;
- if (pdev->is_virtfn) {
- struct pnv_ioda_pe *pe = pnv_ioda_get_pe(pdev);
+ pr_debug("%s bridge %pOF %d/%lx #%d\n", __func__, hose->dn,
+ virq, d->hwirq, nr_irqs);
- /*
- * VF PEs are single-device PEs so their pdev pointer needs to
- * be set. The pdev doesn't exist when the PE is allocated (in
- * (pcibios_sriov_enable()) so we fix it up here.
- */
- pe->pdev = pdev;
- WARN_ON(!(pe->flags & PNV_IODA_PE_VF));
- } else if (pdev->is_physfn) {
- /*
- * For PFs adjust their allocated IOV resources to match what
- * the PHB can support using it's M64 BAR table.
- */
- pnv_pci_ioda_fixup_iov_resources(pdev);
+ msi_bitmap_free_hwirqs(&phb->msi_bmp, d->hwirq, nr_irqs);
+ /* XIVE domain is cleared through ->msi_free() */
+}
+
+static const struct irq_domain_ops pnv_irq_domain_ops = {
+ .alloc = pnv_irq_domain_alloc,
+ .free = pnv_irq_domain_free,
+};
+
+static int __init pnv_msi_allocate_domains(struct pci_controller *hose, unsigned int count)
+{
+ struct pnv_phb *phb = hose->private_data;
+ struct irq_domain *parent = irq_get_default_host();
+
+ hose->fwnode = irq_domain_alloc_named_id_fwnode("PNV-MSI", phb->opal_id);
+ if (!hose->fwnode)
+ return -ENOMEM;
+
+ hose->dev_domain = irq_domain_create_hierarchy(parent, 0, count,
+ hose->fwnode,
+ &pnv_irq_domain_ops, hose);
+ if (!hose->dev_domain) {
+ pr_err("PCI: failed to create IRQ domain bridge %pOF (domain %d)\n",
+ hose->dn, hose->global_number);
+ irq_domain_free_fwnode(hose->fwnode);
+ return -ENOMEM;
+ }
+
+ hose->msi_domain = pci_msi_create_irq_domain(of_node_to_fwnode(hose->dn),
+ &pnv_msi_domain_info,
+ hose->dev_domain);
+ if (!hose->msi_domain) {
+ pr_err("PCI: failed to create MSI IRQ domain bridge %pOF (domain %d)\n",
+ hose->dn, hose->global_number);
+ irq_domain_free_fwnode(hose->fwnode);
+ irq_domain_remove(hose->dev_domain);
+ return -ENOMEM;
}
+
+ return 0;
+}
+
+static void __init pnv_pci_init_ioda_msis(struct pnv_phb *phb)
+{
+ unsigned int count;
+ const __be32 *prop = of_get_property(phb->hose->dn,
+ "ibm,opal-msi-ranges", NULL);
+ if (!prop) {
+ /* BML Fallback */
+ prop = of_get_property(phb->hose->dn, "msi-ranges", NULL);
+ }
+ if (!prop)
+ return;
+
+ phb->msi_base = be32_to_cpup(prop);
+ count = be32_to_cpup(prop + 1);
+ if (msi_bitmap_alloc(&phb->msi_bmp, count, phb->hose->dn)) {
+ pr_err("PCI %d: Failed to allocate MSI bitmap !\n",
+ phb->hose->global_number);
+ return;
+ }
+
+ pr_info(" Allocated bitmap for %d MSIs (base IRQ 0x%x)\n",
+ count, phb->msi_base);
+
+ pnv_msi_allocate_domains(phb->hose, count);
}
-#endif /* CONFIG_PCI_IOV */
static void pnv_ioda_setup_pe_res(struct pnv_ioda_pe *pe,
struct resource *res)
@@ -3078,7 +2376,7 @@ static void pnv_ioda_setup_pe_res(struct pnv_ioda_pe *pe,
/*
* This function is supposed to be called on basis of PE from top
- * to bottom style. So the the I/O or MMIO segment assigned to
+ * to bottom style. So the I/O or MMIO segment assigned to
* parent PE could be overridden by its child PEs if necessary.
*/
static void pnv_ioda_setup_pe_seg(struct pnv_ioda_pe *pe)
@@ -3169,16 +2467,8 @@ static void pnv_pci_ioda_create_dbgfs(void)
list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
phb = hose->private_data;
- /* Notify initialization of PHB done */
- phb->initialized = 1;
-
sprintf(name, "PCI%04x", hose->global_number);
- phb->dbgfs = debugfs_create_dir(name, powerpc_debugfs_root);
- if (!phb->dbgfs) {
- pr_warn("%s: Error on creating debugfs on PHB#%x\n",
- __func__, hose->global_number);
- continue;
- }
+ phb->dbgfs = debugfs_create_dir(name, arch_debugfs_dir);
debugfs_create_file_unsafe("dump_diag_regs", 0200, phb->dbgfs,
phb, &pnv_pci_diag_data_fops);
@@ -3225,8 +2515,6 @@ static void pnv_pci_enable_bridges(void)
static void pnv_pci_ioda_fixup(void)
{
- pnv_pci_ioda_setup_PEs();
- pnv_pci_ioda_setup_iommu_api();
pnv_pci_ioda_create_dbgfs();
pnv_pci_enable_bridges();
@@ -3251,10 +2539,9 @@ static void pnv_pci_ioda_fixup(void)
static resource_size_t pnv_pci_window_alignment(struct pci_bus *bus,
unsigned long type)
{
- struct pci_dev *bridge;
- struct pci_controller *hose = pci_bus_to_host(bus);
- struct pnv_phb *phb = hose->private_data;
+ struct pnv_phb *phb = pci_bus_to_pnvhb(bus);
int num_pci_bridges = 0;
+ struct pci_dev *bridge;
bridge = bus->self;
while (bridge) {
@@ -3338,28 +2625,16 @@ static void pnv_pci_fixup_bridge_resources(struct pci_bus *bus,
}
}
-static void pnv_pci_setup_bridge(struct pci_bus *bus, unsigned long type)
+static void pnv_pci_configure_bus(struct pci_bus *bus)
{
- struct pci_controller *hose = pci_bus_to_host(bus);
- struct pnv_phb *phb = hose->private_data;
struct pci_dev *bridge = bus->self;
struct pnv_ioda_pe *pe;
- bool all = (pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE);
+ bool all = (bridge && pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE);
- /* Extend bridge's windows if necessary */
- pnv_pci_fixup_bridge_resources(bus, type);
-
- /* The PE for root bus should be realized before any one else */
- if (!phb->ioda.root_pe_populated) {
- pe = pnv_ioda_setup_bus_PE(phb->hose->bus, false);
- if (pe) {
- phb->ioda.root_pe_idx = pe->pe_number;
- phb->ioda.root_pe_populated = true;
- }
- }
+ dev_info(&bus->dev, "Configuring PE for bus\n");
/* Don't assign PE to PCI bus, which doesn't have subordinate devices */
- if (list_empty(&bus->devices))
+ if (WARN_ON(list_empty(&bus->devices)))
return;
/* Reserve PEs according to used M64 resources */
@@ -3375,17 +2650,6 @@ static void pnv_pci_setup_bridge(struct pci_bus *bus, unsigned long type)
return;
pnv_ioda_setup_pe_seg(pe);
- switch (phb->type) {
- case PNV_PHB_IODA1:
- pnv_pci_ioda1_setup_dma_pe(phb, pe);
- break;
- case PNV_PHB_IODA2:
- pnv_pci_ioda2_setup_dma_pe(phb, pe);
- break;
- default:
- pr_warn("%s: No DMA for PHB#%x (type %d)\n",
- __func__, phb->hose->global_number, phb->type);
- }
}
static resource_size_t pnv_pci_default_alignment(void)
@@ -3393,76 +2657,27 @@ static resource_size_t pnv_pci_default_alignment(void)
return PAGE_SIZE;
}
-#ifdef CONFIG_PCI_IOV
-static resource_size_t pnv_pci_iov_resource_alignment(struct pci_dev *pdev,
- int resno)
-{
- struct pci_controller *hose = pci_bus_to_host(pdev->bus);
- struct pnv_phb *phb = hose->private_data;
- struct pci_dn *pdn = pci_get_pdn(pdev);
- resource_size_t align;
-
- /*
- * On PowerNV platform, IOV BAR is mapped by M64 BAR to enable the
- * SR-IOV. While from hardware perspective, the range mapped by M64
- * BAR should be size aligned.
- *
- * When IOV BAR is mapped with M64 BAR in Single PE mode, the extra
- * powernv-specific hardware restriction is gone. But if just use the
- * VF BAR size as the alignment, PF BAR / VF BAR may be allocated with
- * in one segment of M64 #15, which introduces the PE conflict between
- * PF and VF. Based on this, the minimum alignment of an IOV BAR is
- * m64_segsize.
- *
- * This function returns the total IOV BAR size if M64 BAR is in
- * Shared PE mode or just VF BAR size if not.
- * If the M64 BAR is in Single PE mode, return the VF BAR size or
- * M64 segment size if IOV BAR size is less.
- */
- align = pci_iov_resource_size(pdev, resno);
- if (!pdn->vfs_expanded)
- return align;
- if (pdn->m64_single_mode)
- return max(align, (resource_size_t)phb->ioda.m64_segsize);
-
- return pdn->vfs_expanded * align;
-}
-#endif /* CONFIG_PCI_IOV */
-
/* Prevent enabling devices for which we couldn't properly
* assign a PE
*/
static bool pnv_pci_enable_device_hook(struct pci_dev *dev)
{
- struct pci_controller *hose = pci_bus_to_host(dev->bus);
- struct pnv_phb *phb = hose->private_data;
struct pci_dn *pdn;
- /* The function is probably called while the PEs have
- * not be created yet. For example, resource reassignment
- * during PCI probe period. We just skip the check if
- * PEs isn't ready.
- */
- if (!phb->initialized)
- return true;
-
pdn = pci_get_pdn(dev);
- if (!pdn || pdn->pe_number == IODA_INVALID_PE)
+ if (!pdn || pdn->pe_number == IODA_INVALID_PE) {
+ pci_err(dev, "pci_enable_device() blocked, no PE assigned.\n");
return false;
+ }
return true;
}
static bool pnv_ocapi_enable_device_hook(struct pci_dev *dev)
{
- struct pci_controller *hose = pci_bus_to_host(dev->bus);
- struct pnv_phb *phb = hose->private_data;
struct pci_dn *pdn;
struct pnv_ioda_pe *pe;
- if (!phb->initialized)
- return true;
-
pdn = pci_get_pdn(dev);
if (!pdn)
return false;
@@ -3506,18 +2721,17 @@ static long pnv_pci_ioda1_unset_window(struct iommu_table_group *table_group,
static void pnv_pci_ioda1_release_pe_dma(struct pnv_ioda_pe *pe)
{
- unsigned int weight = pnv_pci_ioda_pe_dma_weight(pe);
struct iommu_table *tbl = pe->table_group.tables[0];
int64_t rc;
- if (!weight)
+ if (!pe->dma_setup_done)
return;
rc = pnv_pci_ioda1_unset_window(&pe->table_group, 0);
if (rc != OPAL_SUCCESS)
return;
- pnv_pci_p7ioc_tce_invalidate(tbl, tbl->it_offset, tbl->it_size, false);
+ pnv_pci_p7ioc_tce_invalidate(tbl, tbl->it_offset, tbl->it_size);
if (pe->table_group.group) {
iommu_group_put(pe->table_group.group);
WARN_ON(pe->table_group.group);
@@ -3527,22 +2741,17 @@ static void pnv_pci_ioda1_release_pe_dma(struct pnv_ioda_pe *pe)
iommu_tce_table_put(tbl);
}
-static void pnv_pci_ioda2_release_pe_dma(struct pnv_ioda_pe *pe)
+void pnv_pci_ioda2_release_pe_dma(struct pnv_ioda_pe *pe)
{
struct iommu_table *tbl = pe->table_group.tables[0];
- unsigned int weight = pnv_pci_ioda_pe_dma_weight(pe);
-#ifdef CONFIG_IOMMU_API
int64_t rc;
-#endif
- if (!weight)
+ if (!pe->dma_setup_done)
return;
-#ifdef CONFIG_IOMMU_API
rc = pnv_pci_ioda2_unset_window(&pe->table_group, 0);
if (rc)
pe_warn(pe, "OPAL error %lld release DMA window\n", rc);
-#endif
pnv_pci_ioda2_set_bypass(pe, false);
if (pe->table_group.group) {
@@ -3565,14 +2774,8 @@ static void pnv_ioda_free_pe_seg(struct pnv_ioda_pe *pe,
if (map[idx] != pe->pe_number)
continue;
- if (win == OPAL_M64_WINDOW_TYPE)
- rc = opal_pci_map_pe_mmio_window(phb->opal_id,
- phb->ioda.reserved_pe_idx, win,
- idx / PNV_IODA1_M64_SEGS,
- idx % PNV_IODA1_M64_SEGS);
- else
- rc = opal_pci_map_pe_mmio_window(phb->opal_id,
- phb->ioda.reserved_pe_idx, win, 0, idx);
+ rc = opal_pci_map_pe_mmio_window(phb->opal_id,
+ phb->ioda.reserved_pe_idx, win, 0, idx);
if (rc != OPAL_SUCCESS)
pe_warn(pe, "Error %lld unmapping (%d) segment#%d\n",
@@ -3591,8 +2794,7 @@ static void pnv_ioda_release_pe_seg(struct pnv_ioda_pe *pe)
phb->ioda.io_segmap);
pnv_ioda_free_pe_seg(pe, OPAL_M32_WINDOW_TYPE,
phb->ioda.m32_segmap);
- pnv_ioda_free_pe_seg(pe, OPAL_M64_WINDOW_TYPE,
- phb->ioda.m64_segmap);
+ /* M64 is pre-configured by pnv_ioda1_init_m64() */
} else if (phb->type == PNV_PHB_IODA2) {
pnv_ioda_free_pe_seg(pe, OPAL_M32_WINDOW_TYPE,
phb->ioda.m32_segmap);
@@ -3604,6 +2806,8 @@ static void pnv_ioda_release_pe(struct pnv_ioda_pe *pe)
struct pnv_phb *phb = pe->phb;
struct pnv_ioda_pe *slave, *tmp;
+ pe_info(pe, "Releasing PE\n");
+
mutex_lock(&phb->ioda.pe_list_mutex);
list_del(&pe->list);
mutex_unlock(&phb->ioda.pe_list_mutex);
@@ -3638,26 +2842,35 @@ static void pnv_ioda_release_pe(struct pnv_ioda_pe *pe)
* that it can be populated again in PCI hot add path. The PE
* shouldn't be destroyed as it's the global reserved resource.
*/
- if (phb->ioda.root_pe_populated &&
- phb->ioda.root_pe_idx == pe->pe_number)
- phb->ioda.root_pe_populated = false;
- else
- pnv_ioda_free_pe(pe);
+ if (phb->ioda.root_pe_idx == pe->pe_number)
+ return;
+
+ pnv_ioda_free_pe(pe);
}
static void pnv_pci_release_device(struct pci_dev *pdev)
{
- struct pci_controller *hose = pci_bus_to_host(pdev->bus);
- struct pnv_phb *phb = hose->private_data;
+ struct pnv_phb *phb = pci_bus_to_pnvhb(pdev->bus);
struct pci_dn *pdn = pci_get_pdn(pdev);
struct pnv_ioda_pe *pe;
+ /* The VF PE state is torn down when sriov_disable() is called */
if (pdev->is_virtfn)
return;
if (!pdn || pdn->pe_number == IODA_INVALID_PE)
return;
+#ifdef CONFIG_PCI_IOV
+ /*
+ * FIXME: Try move this to sriov_disable(). It's here since we allocate
+ * the iov state at probe time since we need to fiddle with the IOV
+ * resources.
+ */
+ if (pdev->is_physfn)
+ kfree(pdev->dev.archdata.iov_data);
+#endif
+
/*
* PCI hotplug can happen as part of EEH error recovery. The @pdn
* isn't removed and added afterwards in this scenario. We should
@@ -3674,15 +2887,6 @@ static void pnv_pci_release_device(struct pci_dev *pdev)
pnv_ioda_release_pe(pe);
}
-static void pnv_npu_disable_device(struct pci_dev *pdev)
-{
- struct eeh_dev *edev = pci_dev_to_eeh_dev(pdev);
- struct eeh_pe *eehpe = edev ? edev->pe : NULL;
-
- if (eehpe && eeh_ops && eeh_ops->reset)
- eeh_ops->reset(eehpe, EEH_RESET_HOT);
-}
-
static void pnv_pci_ioda_shutdown(struct pci_controller *hose)
{
struct pnv_phb *phb = hose->private_data;
@@ -3693,8 +2897,7 @@ static void pnv_pci_ioda_shutdown(struct pci_controller *hose)
static void pnv_pci_ioda_dma_bus_setup(struct pci_bus *bus)
{
- struct pci_controller *hose = bus->sysdata;
- struct pnv_phb *phb = hose->private_data;
+ struct pnv_phb *phb = pci_bus_to_pnvhb(bus);
struct pnv_ioda_pe *pe;
list_for_each_entry(pe, &phb->ioda.pe_list, list) {
@@ -3715,24 +2918,12 @@ static const struct pci_controller_ops pnv_pci_ioda_controller_ops = {
.dma_dev_setup = pnv_pci_ioda_dma_dev_setup,
.dma_bus_setup = pnv_pci_ioda_dma_bus_setup,
.iommu_bypass_supported = pnv_pci_ioda_iommu_bypass_supported,
- .setup_msi_irqs = pnv_setup_msi_irqs,
- .teardown_msi_irqs = pnv_teardown_msi_irqs,
.enable_device_hook = pnv_pci_enable_device_hook,
.release_device = pnv_pci_release_device,
.window_alignment = pnv_pci_window_alignment,
- .setup_bridge = pnv_pci_setup_bridge,
- .reset_secondary_bus = pnv_pci_reset_secondary_bus,
- .shutdown = pnv_pci_ioda_shutdown,
-};
-
-static const struct pci_controller_ops pnv_npu_ioda_controller_ops = {
- .setup_msi_irqs = pnv_setup_msi_irqs,
- .teardown_msi_irqs = pnv_teardown_msi_irqs,
- .enable_device_hook = pnv_pci_enable_device_hook,
- .window_alignment = pnv_pci_window_alignment,
+ .setup_bridge = pnv_pci_fixup_bridge_resources,
.reset_secondary_bus = pnv_pci_reset_secondary_bus,
.shutdown = pnv_pci_ioda_shutdown,
- .disable_device = pnv_npu_disable_device,
};
static const struct pci_controller_ops pnv_npu_ocapi_ioda_controller_ops = {
@@ -3750,6 +2941,7 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np,
struct pnv_phb *phb;
unsigned long size, m64map_off, m32map_off, pemap_off;
unsigned long iomap_off = 0, dma32map_off = 0;
+ struct pnv_ioda_pe *root_pe;
struct resource r;
const __be64 *prop64;
const __be32 *prop32;
@@ -3772,7 +2964,7 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np,
phb_id = be64_to_cpup(prop64);
pr_debug(" PHB-ID : 0x%016llx\n", phb_id);
- phb = memblock_alloc(sizeof(*phb), SMP_CACHE_BYTES);
+ phb = kzalloc(sizeof(*phb), GFP_KERNEL);
if (!phb)
panic("%s: Failed to allocate %zu bytes\n", __func__,
sizeof(*phb));
@@ -3782,7 +2974,7 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np,
if (!phb->hose) {
pr_err(" Can't allocate PCI controller for %pOF\n",
np);
- memblock_free(__pa(phb), sizeof(struct pnv_phb));
+ memblock_free(phb, sizeof(struct pnv_phb));
return;
}
@@ -3807,10 +2999,6 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np,
phb->model = PNV_PHB_MODEL_P7IOC;
else if (of_device_is_compatible(np, "ibm,power8-pciex"))
phb->model = PNV_PHB_MODEL_PHB3;
- else if (of_device_is_compatible(np, "ibm,power8-npu-pciex"))
- phb->model = PNV_PHB_MODEL_NPU;
- else if (of_device_is_compatible(np, "ibm,power9-npu-pciex"))
- phb->model = PNV_PHB_MODEL_NPU2;
else
phb->model = PNV_PHB_MODEL_UNKNOWN;
@@ -3821,7 +3009,7 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np,
else
phb->diag_data_size = PNV_PCI_DIAG_BUF_SIZE;
- phb->diag_data = memblock_alloc(phb->diag_data_size, SMP_CACHE_BYTES);
+ phb->diag_data = kzalloc(phb->diag_data_size, GFP_KERNEL);
if (!phb->diag_data)
panic("%s: Failed to allocate %u bytes\n", __func__,
phb->diag_data_size);
@@ -3868,7 +3056,7 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np,
PNV_IODA1_DMA32_SEGSIZE;
/* Allocate aux data & arrays. We don't have IO ports on PHB3 */
- size = _ALIGN_UP(max_t(unsigned, phb->ioda.total_pe_num, 8) / 8,
+ size = ALIGN(max_t(unsigned, phb->ioda.total_pe_num, 8) / 8,
sizeof(unsigned long));
m64map_off = size;
size += phb->ioda.total_pe_num * sizeof(phb->ioda.m64_segmap[0]);
@@ -3883,9 +3071,10 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np,
}
pemap_off = size;
size += phb->ioda.total_pe_num * sizeof(struct pnv_ioda_pe);
- aux = memblock_alloc(size, SMP_CACHE_BYTES);
+ aux = kzalloc(size, GFP_KERNEL);
if (!aux)
panic("%s: Failed to allocate %lu bytes\n", __func__, size);
+
phb->ioda.pe_alloc = aux;
phb->ioda.m64_segmap = aux + m64map_off;
phb->ioda.m32_segmap = aux + m32map_off;
@@ -3917,7 +3106,9 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np,
phb->ioda.root_pe_idx = phb->ioda.reserved_pe_idx - 1;
pnv_ioda_reserve_pe(phb, phb->ioda.root_pe_idx);
} else {
- phb->ioda.root_pe_idx = IODA_INVALID_PE;
+ /* otherwise just allocate one */
+ root_pe = pnv_ioda_alloc_pe(phb, 1);
+ phb->ioda.root_pe_idx = root_pe->pe_number;
}
INIT_LIST_HEAD(&phb->ioda.pe_list);
@@ -3965,9 +3156,6 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np,
ppc_md.pcibios_fixup = pnv_pci_ioda_fixup;
switch (phb->type) {
- case PNV_PHB_NPU_NVLINK:
- hose->controller_ops = pnv_npu_ioda_controller_ops;
- break;
case PNV_PHB_NPU_OCAPI:
hose->controller_ops = pnv_npu_ocapi_ioda_controller_ops;
break;
@@ -4010,6 +3198,9 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np,
/* Remove M64 resource if we can't configure it successfully */
if (!phb->init_m64 || phb->init_m64(phb))
hose->mem_resources[1].flags = 0;
+
+ /* create pci_dn's for DT nodes under this PHB */
+ pci_devs_phb_init_dynamic(hose);
}
void __init pnv_pci_init_ioda2_phb(struct device_node *np)
@@ -4017,11 +3208,6 @@ void __init pnv_pci_init_ioda2_phb(struct device_node *np)
pnv_pci_init_ioda_phb(np, 0, PNV_PHB_IODA2);
}
-void __init pnv_pci_init_npu_phb(struct device_node *np)
-{
- pnv_pci_init_ioda_phb(np, 0, PNV_PHB_NPU_NVLINK);
-}
-
void __init pnv_pci_init_npu2_opencapi_phb(struct device_node *np)
{
pnv_pci_init_ioda_phb(np, 0, PNV_PHB_NPU_OCAPI);
@@ -4029,8 +3215,7 @@ void __init pnv_pci_init_npu2_opencapi_phb(struct device_node *np)
static void pnv_npu2_opencapi_cfg_size_fixup(struct pci_dev *dev)
{
- struct pci_controller *hose = pci_bus_to_host(dev->bus);
- struct pnv_phb *phb = hose->private_data;
+ struct pnv_phb *phb = pci_bus_to_pnvhb(dev->bus);
if (!machine_is(powernv))
return;
diff --git a/arch/powerpc/platforms/powernv/pci-sriov.c b/arch/powerpc/platforms/powernv/pci-sriov.c
new file mode 100644
index 000000000000..7195133b26bb
--- /dev/null
+++ b/arch/powerpc/platforms/powernv/pci-sriov.c
@@ -0,0 +1,760 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/kernel.h>
+#include <linux/ioport.h>
+#include <linux/bitmap.h>
+#include <linux/pci.h>
+
+#include <asm/opal.h>
+
+#include "pci.h"
+
+/*
+ * The majority of the complexity in supporting SR-IOV on PowerNV comes from
+ * the need to put the MMIO space for each VF into a separate PE. Internally
+ * the PHB maps MMIO addresses to a specific PE using the "Memory BAR Table".
+ * The MBT historically only applied to the 64bit MMIO window of the PHB
+ * so it's common to see it referred to as the "M64BT".
+ *
+ * An MBT entry stores the mapped range as a <base>,<mask> pair. This forces
+ * the address range that we want to map to be power-of-two sized and aligned.
+ * For conventional PCI devices this isn't really an issue since PCI device BARs
+ * have the same requirement.
+ *
+ * For an SR-IOV BAR things are a little more awkward since size and alignment
+ * are not coupled. The alignment is set based on the per-VF BAR size, but
+ * the total BAR area is: number-of-vfs * per-vf-size. The number of VFs
+ * isn't necessarily a power of two, so neither is the total size. To fix that
+ * we need to finesse (read: hack) the Linux BAR allocator so that it will
+ * allocate the SR-IOV BARs in a way that lets us map them using the MBT.
+ *
+ * The changes to size and alignment that we need to do depend on the "mode"
+ * of MBT entry that we use. We only support SR-IOV on PHB3 (IODA2) and above,
+ * so as a baseline we can assume that we have the following BAR modes
+ * available:
+ *
+ * NB: $PE_COUNT is the number of PEs that the PHB supports.
+ *
+ * a) A segmented BAR that splits the mapped range into $PE_COUNT equally sized
+ * segments. The n'th segment is mapped to the n'th PE.
+ * b) An un-segmented BAR that maps the whole address range to a specific PE.
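+ *
+ * (As an illustrative example with assumed numbers: a 512MB window on a PHB
+ * with 256 PEs gives, in mode a), 256 segments of 2MB each, with segment n
+ * mapped to PE#n.)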
+ *
+ *
+ * We prefer to use mode a) since it only requires one MBT entry per SR-IOV BAR.
+ * For comparison b) requires one entry per-VF per-BAR, or:
+ * (num-vfs * num-sriov-bars) in total. To use a) we need the size of each segment
+ * to equal the size of the per-VF BAR area. So:
+ *
+ * new_size = per-vf-size * number-of-PEs
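+ *
+ * (Illustrative arithmetic only, with assumed numbers: a 1MB per-VF BAR on a
+ * PHB supporting 256 PEs gives new_size = 1MB * 256 = 256MB, which is also
+ * used as the BAR's alignment.)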
+ *
+ * The alignment for the SR-IOV BAR also needs to be changed from per-vf-size
+ * to "new_size", calculated above. Implementing this is a convoluted process
+ * which requires several hooks in the PCI core:
+ *
+ * 1. In pcibios_device_add() we call pnv_pci_ioda_fixup_iov().
+ *
+ * At this point the device has been probed and the device's BARs are sized,
+ * but no resource allocations have been done. The SR-IOV BARs are sized
+ * based on the maximum number of VFs supported by the device and we need
+ * to increase that to new_size.
+ *
+ * 2. Later, when Linux actually assigns resources it tries to make the resource
+ * allocations for each PCI bus as compact as possible. As a part of that it
+ * sorts the BARs on a bus by their required alignment, which is calculated
+ * using pci_resource_alignment().
+ *
+ * For IOV resources this goes:
+ * pci_resource_alignment()
+ * pci_sriov_resource_alignment()
+ * pcibios_sriov_resource_alignment()
+ * pnv_pci_iov_resource_alignment()
+ *
+ * Our hook overrides the default alignment, equal to the per-vf-size, with
+ * new_size computed above.
+ *
+ * 3. When userspace enables VFs for a device:
+ *
+ * sriov_enable()
+ * pcibios_sriov_enable()
+ * pnv_pcibios_sriov_enable()
+ *
+ * This is where we actually allocate PE numbers for each VF and setup the
+ * MBT mapping for each SR-IOV BAR. In steps 1) and 2) we setup an "arena"
+ * where each MBT segment is equal in size to the VF BAR so we can shift
+ * around the actual SR-IOV BAR location within this arena. We need this
+ * ability because the PE space is shared by all devices on the same PHB.
+ * When using mode a) described above, segment 0 maps to PE#0, which might
+ * already be in use by another device on the PHB.
+ *
+ * As a result we need to allocate a contiguous range of PE numbers, then shift
+ * the address programmed into the SR-IOV BAR of the PF so that the address
+ * of VF0 matches up with the segment corresponding to the first allocated
+ * PE number. This is handled in pnv_pci_vf_resource_shift().
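+ *
+ * As an illustrative example (PE numbers assumed): if the first PE allocated
+ * for the VFs is PE#5, the PF's SR-IOV BAR is shifted up by 5 * per-vf-size
+ * so that VF0's BAR lands in segment 5 of the window, i.e. the segment that
+ * maps to PE#5.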
+ *
+ * Once all that is done we return to the PCI core which then enables VFs,
+ * scans them and creates pci_devs for each. The init process for a VF is
+ * largely the same as a normal device, but the VF is inserted into the IODA
+ * PE that we allocated for it rather than the PE associated with the bus.
+ *
+ * 4. When userspace disables VFs we unwind the above in
+ * pnv_pcibios_sriov_disable(). Fortunately this is relatively simple since
+ * we don't need to validate anything, just tear down the mappings and
+ * move the SR-IOV resources back to their "proper" location.
+ *
+ * That's how mode a) works. In theory mode b) (single PE mapping) is less work
+ * since we can map each individual VF with a separate BAR. However, there's a
+ * few limitations:
+ *
+ * 1) For IODA2 mode b) has a minimum alignment requirement of 32MB. This makes
+ * it only usable for devices with very large per-VF BARs. Such devices are
+ * similar to Big Foot. They definitely exist, but I've never seen one.
+ *
+ * 2) The number of MBT entries that we have is limited. PHB3 and PHB4 only
+ *    have 16 in total and some are needed for other purposes. Most SR-IOV
+ *    capable network cards can support more than 16 VFs on each port.
+ *
+ * We use b) when using a) would use more than 1/4 of the entire 64 bit MMIO
+ * window of the PHB.
+ *
+ *
+ *
+ * PHB4 (IODA3) added a few new features that would be useful for SR-IOV. It
+ * allowed the MBT to map 32bit MMIO space in addition to 64bit which allows
+ * us to support SR-IOV BARs in the 32bit MMIO window. This is useful since
+ * the Linux BAR allocation will place any BAR marked as non-prefetchable into
+ * the non-prefetchable bridge window, which is 32bit only. It also added two
+ * new modes:
+ *
+ * c) A segmented BAR similar to a), but each segment can be individually
+ * mapped to any PE. This matches how the 32bit MMIO window worked on
+ * IODA1&2.
+ *
+ * d) A segmented BAR with 8, 64, or 128 segments. This works similarly to a),
+ * but with fewer segments and configurable base PE.
+ *
+ * i.e. The n'th segment maps to the (n + base)'th PE.
+ *
+ * The base PE is also required to be a multiple of the window size.
+ *
+ * Unfortunately, the OPAL API doesn't currently (as of skiboot v6.6) allow us
+ * to exploit any of the IODA3 features.
+ */
+
+static void pnv_pci_ioda_fixup_iov_resources(struct pci_dev *pdev)
+{
+ struct pnv_phb *phb = pci_bus_to_pnvhb(pdev->bus);
+ struct resource *res;
+ int i;
+ resource_size_t vf_bar_sz;
+ struct pnv_iov_data *iov;
+ int mul;
+
+ iov = kzalloc(sizeof(*iov), GFP_KERNEL);
+ if (!iov)
+ goto disable_iov;
+ pdev->dev.archdata.iov_data = iov;
+ mul = phb->ioda.total_pe_num;
+
+ for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
+ res = &pdev->resource[i + PCI_IOV_RESOURCES];
+ if (!res->flags || res->parent)
+ continue;
+ if (!pnv_pci_is_m64_flags(res->flags)) {
+ dev_warn(&pdev->dev, "Don't support SR-IOV with non M64 VF BAR%d: %pR. \n",
+ i, res);
+ goto disable_iov;
+ }
+
+ vf_bar_sz = pci_iov_resource_size(pdev, i + PCI_IOV_RESOURCES);
+
+ /*
+ * Generally, one segmented M64 BAR maps one IOV BAR. However,
+ * if a VF BAR is too large we end up wasting a lot of space.
+ * If each VF needs more than 1/4 of the default m64 segment
+ * then each VF BAR should be mapped in single-PE mode to reduce
+ * the amount of space required. This does however limit the
+ * number of VFs we can support.
+ *
+ * The 1/4 limit is arbitrary and can be tweaked.
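+ *
+ * As an illustrative example (segment size assumed, not read from
+ * the hardware): with a 256MB m64 segment size the threshold is
+ * 64MB, so a 128MB per-VF BAR would be mapped in single PE mode
+ * while a 16MB per-VF BAR would use a segmented window.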
+ */
+ if (vf_bar_sz > (phb->ioda.m64_segsize >> 2)) {
+ /*
+ * On PHB3, the minimum size alignment of M64 BAR in
+ * single mode is 32MB. If this VF BAR is smaller than
+ * 32MB, but still too large for a segmented window
+ * then we can't map it and need to disable SR-IOV for
+ * this device.
+ */
+ if (vf_bar_sz < SZ_32M) {
+ pci_err(pdev, "VF BAR%d: %pR can't be mapped in single PE mode\n",
+ i, res);
+ goto disable_iov;
+ }
+
+ iov->m64_single_mode[i] = true;
+ continue;
+ }
+
+ /*
+ * This BAR can be mapped with one segmented window, so adjust
+ * the resource size to accommodate.
+ */
+ pci_dbg(pdev, " Fixing VF BAR%d: %pR to\n", i, res);
+ res->end = res->start + vf_bar_sz * mul - 1;
+ pci_dbg(pdev, " %pR\n", res);
+
+ pci_info(pdev, "VF BAR%d: %pR (expanded to %d VFs for PE alignment)",
+ i, res, mul);
+
+ iov->need_shift = true;
+ }
+
+ return;
+
+disable_iov:
+ /* Save ourselves some MMIO space by disabling the unusable BARs */
+ for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
+ res = &pdev->resource[i + PCI_IOV_RESOURCES];
+ res->flags = 0;
+ res->end = res->start - 1;
+ }
+
+ pdev->dev.archdata.iov_data = NULL;
+ kfree(iov);
+}
+
+void pnv_pci_ioda_fixup_iov(struct pci_dev *pdev)
+{
+ if (pdev->is_virtfn) {
+ struct pnv_ioda_pe *pe = pnv_ioda_get_pe(pdev);
+
+ /*
+ * VF PEs are single-device PEs so their pdev pointer needs to
+ * be set. The pdev doesn't exist when the PE is allocated (in
+ * pcibios_sriov_enable()) so we fix it up here.
+ */
+ pe->pdev = pdev;
+ WARN_ON(!(pe->flags & PNV_IODA_PE_VF));
+ } else if (pdev->is_physfn) {
+ /*
+ * For PFs adjust their allocated IOV resources to match what
+ * the PHB can support using its M64 BAR table.
+ */
+ pnv_pci_ioda_fixup_iov_resources(pdev);
+ }
+}
+
+resource_size_t pnv_pci_iov_resource_alignment(struct pci_dev *pdev,
+ int resno)
+{
+ resource_size_t align = pci_iov_resource_size(pdev, resno);
+ struct pnv_phb *phb = pci_bus_to_pnvhb(pdev->bus);
+ struct pnv_iov_data *iov = pnv_iov_get(pdev);
+
+ /*
+ * iov can be null if we have an SR-IOV device with an IOV BAR that can't
+ * be placed in the m64 space (i.e. the BAR is 32bit or non-prefetchable).
+ * In that case we don't allow VFs to be enabled since one of their
+ * BARs would not be placed in the correct PE.
+ */
+ if (!iov)
+ return align;
+
+ /*
+ * If we're using single mode then we can just use the native VF BAR
+ * alignment. We validated that it's possible to use a single PE
+ * window above when we did the fixup.
+ */
+ if (iov->m64_single_mode[resno - PCI_IOV_RESOURCES])
+ return align;
+
+ /*
+ * On the PowerNV platform an IOV BAR is mapped by an M64 BAR to enable
+ * SR-IOV, and from the hardware's perspective the range mapped by an
+ * M64 BAR must be size aligned.
+ *
+ * When the IOV BAR is mapped with a shared (segmented) M64 window each
+ * PE owns one segment, so the alignment must cover one segment per PE:
+ * return total_pe_num * per-VF BAR size. BARs mapped in single PE mode
+ * keep their native alignment and were handled above.
+ */
+ return phb->ioda.total_pe_num * align;
+}
+
+static int pnv_pci_vf_release_m64(struct pci_dev *pdev, u16 num_vfs)
+{
+ struct pnv_iov_data *iov;
+ struct pnv_phb *phb;
+ int window_id;
+
+ phb = pci_bus_to_pnvhb(pdev->bus);
+ iov = pnv_iov_get(pdev);
+
+ for_each_set_bit(window_id, iov->used_m64_bar_mask, MAX_M64_BARS) {
+ opal_pci_phb_mmio_enable(phb->opal_id,
+ OPAL_M64_WINDOW_TYPE,
+ window_id,
+ 0);
+
+ clear_bit(window_id, &phb->ioda.m64_bar_alloc);
+ }
+
+ return 0;
+}
+
+
+/*
+ * PHB3 and beyond support segmented windows. The window's address range
+ * is subdivided into phb->ioda.total_pe_num segments and there's a 1-1
+ * mapping between PEs and segments.
+ */
+static int64_t pnv_ioda_map_m64_segmented(struct pnv_phb *phb,
+ int window_id,
+ resource_size_t start,
+ resource_size_t size)
+{
+ int64_t rc;
+
+ rc = opal_pci_set_phb_mem_window(phb->opal_id,
+ OPAL_M64_WINDOW_TYPE,
+ window_id,
+ start,
+ 0, /* unused */
+ size);
+ if (rc)
+ goto out;
+
+ rc = opal_pci_phb_mmio_enable(phb->opal_id,
+ OPAL_M64_WINDOW_TYPE,
+ window_id,
+ OPAL_ENABLE_M64_SPLIT);
+out:
+ if (rc)
+ pr_err("Failed to map M64 window #%d: %lld\n", window_id, rc);
+
+ return rc;
+}
+
+static int64_t pnv_ioda_map_m64_single(struct pnv_phb *phb,
+ int pe_num,
+ int window_id,
+ resource_size_t start,
+ resource_size_t size)
+{
+ int64_t rc;
+
+ /*
+ * The API for setting up m64 mmio windows seems to have been designed
+ * with P7-IOC in mind. For that chip each M64 BAR (window) had a fixed
+ * split of 8 equally sized segments each of which could be individually
+ * assigned to a PE.
+ *
+ * The problem with this is that the API doesn't have any way to
+ * communicate the number of segments we want on a BAR. This wasn't
+ * a problem for p7-ioc since you didn't have a choice, but the
+ * single PE windows added in PHB3 don't map cleanly to this API.
+ *
+ * As a result we've got this slightly awkward process where we
+ * call opal_pci_map_pe_mmio_window() to put the window into single
+ * PE mode and set the PE for the window before setting the address
+ * bounds. We need to do it this way because single PE windows on
+ * PHB3 have different alignment requirements.
+ */
+ rc = opal_pci_map_pe_mmio_window(phb->opal_id,
+ pe_num,
+ OPAL_M64_WINDOW_TYPE,
+ window_id,
+ 0);
+ if (rc)
+ goto out;
+
+ /*
+ * NB: In single PE mode the window needs to be aligned to 32MB
+ */
+ rc = opal_pci_set_phb_mem_window(phb->opal_id,
+ OPAL_M64_WINDOW_TYPE,
+ window_id,
+ start,
+ 0, /* ignored by FW, m64 is 1-1 */
+ size);
+ if (rc)
+ goto out;
+
+ /*
+ * Now actually enable it. We specified the BAR should be in "non-split"
+ * mode so FW will validate that the BAR is in single PE mode.
+ */
+ rc = opal_pci_phb_mmio_enable(phb->opal_id,
+ OPAL_M64_WINDOW_TYPE,
+ window_id,
+ OPAL_ENABLE_M64_NON_SPLIT);
+out:
+ if (rc)
+ pr_err("Error mapping single PE BAR\n");
+
+ return rc;
+}
+
+static int pnv_pci_alloc_m64_bar(struct pnv_phb *phb, struct pnv_iov_data *iov)
+{
+ int win;
+
+ do {
+ win = find_next_zero_bit(&phb->ioda.m64_bar_alloc,
+ phb->ioda.m64_bar_idx + 1, 0);
+
+ if (win >= phb->ioda.m64_bar_idx + 1)
+ return -1;
+ } while (test_and_set_bit(win, &phb->ioda.m64_bar_alloc));
+
+ set_bit(win, iov->used_m64_bar_mask);
+
+ return win;
+}
+
+static int pnv_pci_vf_assign_m64(struct pci_dev *pdev, u16 num_vfs)
+{
+ struct pnv_iov_data *iov;
+ struct pnv_phb *phb;
+ int win;
+ struct resource *res;
+ int i, j;
+ int64_t rc;
+ resource_size_t size, start;
+ int base_pe_num;
+
+ phb = pci_bus_to_pnvhb(pdev->bus);
+ iov = pnv_iov_get(pdev);
+
+ for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
+ res = &pdev->resource[i + PCI_IOV_RESOURCES];
+ if (!res->flags || !res->parent)
+ continue;
+
+ /* don't need single mode? map everything in one go! */
+ if (!iov->m64_single_mode[i]) {
+ win = pnv_pci_alloc_m64_bar(phb, iov);
+ if (win < 0)
+ goto m64_failed;
+
+ size = resource_size(res);
+ start = res->start;
+
+ rc = pnv_ioda_map_m64_segmented(phb, win, start, size);
+ if (rc)
+ goto m64_failed;
+
+ continue;
+ }
+
+ /* otherwise map each VF with single PE BARs */
+ size = pci_iov_resource_size(pdev, PCI_IOV_RESOURCES + i);
+ base_pe_num = iov->vf_pe_arr[0].pe_number;
+
+ for (j = 0; j < num_vfs; j++) {
+ win = pnv_pci_alloc_m64_bar(phb, iov);
+ if (win < 0)
+ goto m64_failed;
+
+ start = res->start + size * j;
+ rc = pnv_ioda_map_m64_single(phb,
+ base_pe_num + j,
+ win,
+ start,
+ size);
+ if (rc)
+ goto m64_failed;
+ }
+ }
+ return 0;
+
+m64_failed:
+ pnv_pci_vf_release_m64(pdev, num_vfs);
+ return -EBUSY;
+}
+
+static void pnv_ioda_release_vf_PE(struct pci_dev *pdev)
+{
+ struct pnv_phb *phb;
+ struct pnv_ioda_pe *pe, *pe_n;
+
+ phb = pci_bus_to_pnvhb(pdev->bus);
+
+ if (!pdev->is_physfn)
+ return;
+
+ /* FIXME: Use pnv_ioda_release_pe()? */
+ list_for_each_entry_safe(pe, pe_n, &phb->ioda.pe_list, list) {
+ if (pe->parent_dev != pdev)
+ continue;
+
+ pnv_pci_ioda2_release_pe_dma(pe);
+
+ /* Remove from list */
+ mutex_lock(&phb->ioda.pe_list_mutex);
+ list_del(&pe->list);
+ mutex_unlock(&phb->ioda.pe_list_mutex);
+
+ pnv_ioda_deconfigure_pe(phb, pe);
+
+ pnv_ioda_free_pe(pe);
+ }
+}
+
+static int pnv_pci_vf_resource_shift(struct pci_dev *dev, int offset)
+{
+ struct resource *res, res2;
+ struct pnv_iov_data *iov;
+ resource_size_t size;
+ u16 num_vfs;
+ int i;
+
+ if (!dev->is_physfn)
+ return -EINVAL;
+ iov = pnv_iov_get(dev);
+
+ /*
+ * "offset" is in VFs. The M64 windows are sized so that when they
+ * are segmented, each segment is the same size as the IOV BAR.
+ * Each segment is in a separate PE, and the high order bits of the
+ * address are the PE number. Therefore, each VF's BAR is in a
+ * separate PE, and changing the IOV BAR start address changes the
+ * range of PEs the VFs are in.
+ */
+ num_vfs = iov->num_vfs;
+ for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
+ res = &dev->resource[i + PCI_IOV_RESOURCES];
+ if (!res->flags || !res->parent)
+ continue;
+ if (iov->m64_single_mode[i])
+ continue;
+
+ /*
+ * The actual IOV BAR range is determined by the start address
+ * and the size needed for num_vfs VF BARs. This check is to
+ * make sure that after shifting, the range will not overlap
+ * with another device.
+ */
+ size = pci_iov_resource_size(dev, i + PCI_IOV_RESOURCES);
+ res2.flags = res->flags;
+ res2.start = res->start + (size * offset);
+ res2.end = res2.start + (size * num_vfs) - 1;
+
+ if (res2.end > res->end) {
+ dev_err(&dev->dev, "VF BAR%d: %pR would extend past %pR (trying to enable %d VFs shifted by %d)\n",
+ i, &res2, res, num_vfs, offset);
+ return -EBUSY;
+ }
+ }
+
+ /*
+ * Since M64 BAR shares segments among all possible 256 PEs,
+ * we have to shift the beginning of PF IOV BAR to make it start from
+ * the segment which belongs to the PE number assigned to the first VF.
+ * This creates a "hole" in /proc/iomem which could otherwise be used
+ * for allocating other resources, so we reserve this area below and
+ * release it when IOV is disabled.
+ */
+ for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
+ res = &dev->resource[i + PCI_IOV_RESOURCES];
+ if (!res->flags || !res->parent)
+ continue;
+ if (iov->m64_single_mode[i])
+ continue;
+
+ size = pci_iov_resource_size(dev, i + PCI_IOV_RESOURCES);
+ res2 = *res;
+ res->start += size * offset;
+
+ dev_info(&dev->dev, "VF BAR%d: %pR shifted to %pR (%sabling %d VFs shifted by %d)\n",
+ i, &res2, res, (offset > 0) ? "En" : "Dis",
+ num_vfs, offset);
+
+ if (offset < 0) {
+ devm_release_resource(&dev->dev, &iov->holes[i]);
+ memset(&iov->holes[i], 0, sizeof(iov->holes[i]));
+ }
+
+ pci_update_resource(dev, i + PCI_IOV_RESOURCES);
+
+ if (offset > 0) {
+ iov->holes[i].start = res2.start;
+ iov->holes[i].end = res2.start + size * offset - 1;
+ iov->holes[i].flags = IORESOURCE_BUS;
+ iov->holes[i].name = "pnv_iov_reserved";
+ devm_request_resource(&dev->dev, res->parent,
+ &iov->holes[i]);
+ }
+ }
+ return 0;
+}
+
+static void pnv_pci_sriov_disable(struct pci_dev *pdev)
+{
+ u16 num_vfs, base_pe;
+ struct pnv_iov_data *iov;
+
+ iov = pnv_iov_get(pdev);
+ if (WARN_ON(!iov))
+ return;
+
+ num_vfs = iov->num_vfs;
+ base_pe = iov->vf_pe_arr[0].pe_number;
+
+ /* Release VF PEs */
+ pnv_ioda_release_vf_PE(pdev);
+
+ /* Un-shift the IOV BARs if we need to */
+ if (iov->need_shift)
+ pnv_pci_vf_resource_shift(pdev, -base_pe);
+
+ /* Release M64 windows */
+ pnv_pci_vf_release_m64(pdev, num_vfs);
+}
+
+static void pnv_ioda_setup_vf_PE(struct pci_dev *pdev, u16 num_vfs)
+{
+ struct pnv_phb *phb;
+ struct pnv_ioda_pe *pe;
+ int pe_num;
+ u16 vf_index;
+ struct pnv_iov_data *iov;
+ struct pci_dn *pdn;
+
+ if (!pdev->is_physfn)
+ return;
+
+ phb = pci_bus_to_pnvhb(pdev->bus);
+ pdn = pci_get_pdn(pdev);
+ iov = pnv_iov_get(pdev);
+
+ /* Reserve PE for each VF */
+ for (vf_index = 0; vf_index < num_vfs; vf_index++) {
+ int vf_devfn = pci_iov_virtfn_devfn(pdev, vf_index);
+ int vf_bus = pci_iov_virtfn_bus(pdev, vf_index);
+ struct pci_dn *vf_pdn;
+
+ pe = &iov->vf_pe_arr[vf_index];
+ pe->phb = phb;
+ pe->flags = PNV_IODA_PE_VF;
+ pe->pbus = NULL;
+ pe->parent_dev = pdev;
+ pe->mve_number = -1;
+ pe->rid = (vf_bus << 8) | vf_devfn;
+
+ pe_num = pe->pe_number;
+ pe_info(pe, "VF %04d:%02d:%02d.%d associated with PE#%x\n",
+ pci_domain_nr(pdev->bus), pdev->bus->number,
+ PCI_SLOT(vf_devfn), PCI_FUNC(vf_devfn), pe_num);
+
+ if (pnv_ioda_configure_pe(phb, pe)) {
+ /* XXX What do we do here ? */
+ pnv_ioda_free_pe(pe);
+ pe->pdev = NULL;
+ continue;
+ }
+
+ /* Put PE to the list */
+ mutex_lock(&phb->ioda.pe_list_mutex);
+ list_add_tail(&pe->list, &phb->ioda.pe_list);
+ mutex_unlock(&phb->ioda.pe_list_mutex);
+
+ /* associate this PE with its pdn */
+ list_for_each_entry(vf_pdn, &pdn->parent->child_list, list) {
+ if (vf_pdn->busno == vf_bus &&
+ vf_pdn->devfn == vf_devfn) {
+ vf_pdn->pe_number = pe_num;
+ break;
+ }
+ }
+
+ pnv_pci_ioda2_setup_dma_pe(phb, pe);
+ }
+}
+
+static int pnv_pci_sriov_enable(struct pci_dev *pdev, u16 num_vfs)
+{
+ struct pnv_ioda_pe *base_pe;
+ struct pnv_iov_data *iov;
+ struct pnv_phb *phb;
+ int ret;
+ u16 i;
+
+ phb = pci_bus_to_pnvhb(pdev->bus);
+ iov = pnv_iov_get(pdev);
+
+ /*
+ * There are calls to IODA2 PE setup code littered throughout. We could
+ * probably fix that, but we'd still have problems due to the
+ * restrictions inherent in IODA1 PHBs.
+ *
+ * NB: We class IODA3 as IODA2 since they're very similar.
+ */
+ if (phb->type != PNV_PHB_IODA2) {
+ pci_err(pdev, "SR-IOV is not supported on this PHB\n");
+ return -ENXIO;
+ }
+
+ if (!iov) {
+ dev_info(&pdev->dev, "don't support this SRIOV device with non 64bit-prefetchable IOV BAR\n");
+ return -ENOSPC;
+ }
+
+ /* allocate a contiguous block of PEs for our VFs */
+ base_pe = pnv_ioda_alloc_pe(phb, num_vfs);
+ if (!base_pe) {
+ pci_err(pdev, "Unable to allocate PEs for %d VFs\n", num_vfs);
+ return -EBUSY;
+ }
+
+ iov->vf_pe_arr = base_pe;
+ iov->num_vfs = num_vfs;
+
+ /* Assign M64 window accordingly */
+ ret = pnv_pci_vf_assign_m64(pdev, num_vfs);
+ if (ret) {
+ dev_info(&pdev->dev, "Not enough M64 window resources\n");
+ goto m64_failed;
+ }
+
+ /*
+ * When using one M64 BAR to map one IOV BAR, we need to shift
+ * the IOV BAR according to the PE# allocated to the VFs.
+ * Otherwise, the PE# for the VF will conflict with others.
+ */
+ if (iov->need_shift) {
+ ret = pnv_pci_vf_resource_shift(pdev, base_pe->pe_number);
+ if (ret)
+ goto shift_failed;
+ }
+
+ /* Setup VF PEs */
+ pnv_ioda_setup_vf_PE(pdev, num_vfs);
+
+ return 0;
+
+shift_failed:
+ pnv_pci_vf_release_m64(pdev, num_vfs);
+
+m64_failed:
+ for (i = 0; i < num_vfs; i++)
+ pnv_ioda_free_pe(&iov->vf_pe_arr[i]);
+
+ return ret;
+}
+
+int pnv_pcibios_sriov_disable(struct pci_dev *pdev)
+{
+ pnv_pci_sriov_disable(pdev);
+
+ /* Release PCI data */
+ remove_sriov_vf_pdns(pdev);
+ return 0;
+}
+
+int pnv_pcibios_sriov_enable(struct pci_dev *pdev, u16 num_vfs)
+{
+ /* Allocate PCI data */
+ add_sriov_vf_pdns(pdev);
+
+ return pnv_pci_sriov_enable(pdev, num_vfs);
+}
diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c
index 5bf818246339..233a50e65fce 100644
--- a/arch/powerpc/platforms/powernv/pci.c
+++ b/arch/powerpc/platforms/powernv/pci.c
@@ -18,7 +18,6 @@
#include <asm/sections.h>
#include <asm/io.h>
-#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/msi_bitmap.h>
@@ -160,75 +159,6 @@ exit:
}
EXPORT_SYMBOL_GPL(pnv_pci_set_power_state);
-int pnv_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
-{
- struct pci_controller *hose = pci_bus_to_host(pdev->bus);
- struct pnv_phb *phb = hose->private_data;
- struct msi_desc *entry;
- struct msi_msg msg;
- int hwirq;
- unsigned int virq;
- int rc;
-
- if (WARN_ON(!phb) || !phb->msi_bmp.bitmap)
- return -ENODEV;
-
- if (pdev->no_64bit_msi && !phb->msi32_support)
- return -ENODEV;
-
- for_each_pci_msi_entry(entry, pdev) {
- if (!entry->msi_attrib.is_64 && !phb->msi32_support) {
- pr_warn("%s: Supports only 64-bit MSIs\n",
- pci_name(pdev));
- return -ENXIO;
- }
- hwirq = msi_bitmap_alloc_hwirqs(&phb->msi_bmp, 1);
- if (hwirq < 0) {
- pr_warn("%s: Failed to find a free MSI\n",
- pci_name(pdev));
- return -ENOSPC;
- }
- virq = irq_create_mapping(NULL, phb->msi_base + hwirq);
- if (!virq) {
- pr_warn("%s: Failed to map MSI to linux irq\n",
- pci_name(pdev));
- msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq, 1);
- return -ENOMEM;
- }
- rc = phb->msi_setup(phb, pdev, phb->msi_base + hwirq,
- virq, entry->msi_attrib.is_64, &msg);
- if (rc) {
- pr_warn("%s: Failed to setup MSI\n", pci_name(pdev));
- irq_dispose_mapping(virq);
- msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq, 1);
- return rc;
- }
- irq_set_msi_desc(virq, entry);
- pci_write_msi_msg(virq, &msg);
- }
- return 0;
-}
-
-void pnv_teardown_msi_irqs(struct pci_dev *pdev)
-{
- struct pci_controller *hose = pci_bus_to_host(pdev->bus);
- struct pnv_phb *phb = hose->private_data;
- struct msi_desc *entry;
- irq_hw_number_t hwirq;
-
- if (WARN_ON(!phb))
- return;
-
- for_each_pci_msi_entry(entry, pdev) {
- if (!entry->irq)
- continue;
- hwirq = virq_to_hw(entry->irq);
- irq_set_msi_desc(entry->irq, NULL);
- irq_dispose_mapping(entry->irq);
- msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq - phb->msi_base, 1);
- }
-}
-
/* Nicely print the contents of the PE State Tables (PEST). */
static void pnv_pci_dump_pest(__be64 pestA[], __be64 pestB[], int pest_size)
{
@@ -713,7 +643,7 @@ int pnv_pci_cfg_write(struct pci_dn *pdn,
return PCIBIOS_SUCCESSFUL;
}
-#if CONFIG_EEH
+#ifdef CONFIG_EEH
static bool pnv_pci_cfg_check(struct pci_dn *pdn)
{
struct eeh_dev *edev = NULL;
@@ -824,10 +754,9 @@ EXPORT_SYMBOL(pnv_pci_get_phb_node);
int pnv_pci_set_tunnel_bar(struct pci_dev *dev, u64 addr, int enable)
{
- __be64 val;
- struct pci_controller *hose;
- struct pnv_phb *phb;
+ struct pnv_phb *phb = pci_bus_to_pnvhb(dev->bus);
u64 tunnel_bar;
+ __be64 val;
int rc;
if (!opal_check_token(OPAL_PCI_GET_PBCQ_TUNNEL_BAR))
@@ -835,9 +764,6 @@ int pnv_pci_set_tunnel_bar(struct pci_dev *dev, u64 addr, int enable)
if (!opal_check_token(OPAL_PCI_SET_PBCQ_TUNNEL_BAR))
return -ENXIO;
- hose = pci_bus_to_host(dev->bus);
- phb = hose->private_data;
-
mutex_lock(&tunnel_mutex);
rc = opal_pci_get_pbcq_tunnel_bar(phb->opal_id, &val);
if (rc != OPAL_SUCCESS) {
@@ -888,7 +814,7 @@ void pnv_pci_shutdown(void)
/* Fixup wrong class code in p7ioc and p8 root complex */
static void pnv_p7ioc_rc_quirk(struct pci_dev *dev)
{
- dev->class = PCI_CLASS_BRIDGE_PCI << 8;
+ dev->class = PCI_CLASS_BRIDGE_PCI_NORMAL;
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_IBM, 0x3b9, pnv_p7ioc_rc_quirk);
@@ -932,17 +858,6 @@ void __init pnv_pci_init(void)
for_each_compatible_node(np, NULL, "ibm,ioda3-phb")
pnv_pci_init_ioda2_phb(np);
- /* Look for NPU PHBs */
- for_each_compatible_node(np, NULL, "ibm,ioda2-npu-phb")
- pnv_pci_init_npu_phb(np);
-
- /*
- * Look for NPU2 PHBs which we treat mostly as NPU PHBs with
- * the exception of TCE kill which requires an OPAL call.
- */
- for_each_compatible_node(np, NULL, "ibm,ioda2-npu2-phb")
- pnv_pci_init_npu_phb(np);
-
/* Look for NPU2 OpenCAPI PHBs */
for_each_compatible_node(np, NULL, "ibm,ioda2-npu2-opencapi-phb")
pnv_pci_init_npu2_opencapi_phb(np);
@@ -955,28 +870,8 @@ static int pnv_tce_iommu_bus_notifier(struct notifier_block *nb,
unsigned long action, void *data)
{
struct device *dev = data;
- struct pci_dev *pdev;
- struct pci_dn *pdn;
- struct pnv_ioda_pe *pe;
- struct pci_controller *hose;
- struct pnv_phb *phb;
switch (action) {
- case BUS_NOTIFY_ADD_DEVICE:
- pdev = to_pci_dev(dev);
- pdn = pci_get_pdn(pdev);
- hose = pci_bus_to_host(pdev->bus);
- phb = hose->private_data;
-
- WARN_ON_ONCE(!phb);
- if (!pdn || pdn->pe_number == IODA_INVALID_PE || !phb)
- return 0;
-
- pe = &phb->ioda.pe_array[pdn->pe_number];
- if (!pe->table_group.group)
- return 0;
- iommu_add_device(&pe->table_group, dev);
- return 0;
case BUS_NOTIFY_DEL_DEVICE:
iommu_del_device(dev);
return 0;
diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h
index d3bbdeab3a32..f12643958b8d 100644
--- a/arch/powerpc/platforms/powernv/pci.h
+++ b/arch/powerpc/platforms/powernv/pci.h
@@ -10,10 +10,9 @@
struct pci_dn;
enum pnv_phb_type {
- PNV_PHB_IODA1 = 0,
- PNV_PHB_IODA2 = 1,
- PNV_PHB_NPU_NVLINK = 2,
- PNV_PHB_NPU_OCAPI = 3,
+ PNV_PHB_IODA1,
+ PNV_PHB_IODA2,
+ PNV_PHB_NPU_OCAPI,
};
/* Precise PHB model for error management */
@@ -21,8 +20,6 @@ enum pnv_phb_model {
PNV_PHB_MODEL_UNKNOWN,
PNV_PHB_MODEL_P7IOC,
PNV_PHB_MODEL_PHB3,
- PNV_PHB_MODEL_NPU,
- PNV_PHB_MODEL_NPU2,
};
#define PNV_PCI_DIAG_BUF_SIZE 8192
@@ -33,6 +30,24 @@ enum pnv_phb_model {
#define PNV_IODA_PE_SLAVE (1 << 4) /* Slave PE in compound case */
#define PNV_IODA_PE_VF (1 << 5) /* PE for one VF */
+/*
+ * A brief note on PNV_IODA_PE_BUS_ALL
+ *
+ * This is needed because of the behaviour of PCIe-to-PCI bridges. The PHB uses
+ * the Requester ID field of the PCIe request header to determine the device
+ * (and PE) that initiated a DMA. In legacy PCI individual memory read/write
+ * requests aren't tagged with the RID. To work around this the PCIe-to-PCI
+ * bridge will use (secondary_bus_no << 8) | 0x00 as the RID on the PCIe side.
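+ *
+ * For example, a conventional PCI device behind a bridge whose secondary bus
+ * is 0x05 has its DMAs tagged with RID 0x0500, regardless of the device's own
+ * devfn.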
+ *
+ * PCIe-to-X bridges have a similar issue even though PCI-X requests also have
+ * a RID in the transaction header. The PCIe-to-X bridge is permitted to "take
+ * ownership" of a transaction by a PCI-X device when forwarding it to the PCIe
+ * side of the bridge.
+ *
+ * To work around these problems we use the BUS_ALL flag since every subordinate
+ * bus of the bridge should go into the same PE.
+ */
+
/* Indicates operations are frozen for a PE: MMIO in PESTA & DMA in PESTB. */
#define PNV_IODA_STOPPED_STATE 0x8000000000000000
@@ -63,13 +78,19 @@ struct pnv_ioda_pe {
/* "Base" iommu table, ie, 4K TCEs, 32-bit DMA */
struct iommu_table_group table_group;
- struct npu_comp *npucomp;
/* 64-bit TCE bypass region */
bool tce_bypass_enabled;
uint64_t tce_bypass_base;
- /* MSIs. MVE index is identical for for 32 and 64 bit MSI
+ /*
+ * Used to track whether we've done DMA setup for this PE or not. We
+ * want to defer allocating TCE tables, etc until we've added a
+ * non-bridge device to the PE.
+ */
+ bool dma_setup_done;
+
+ /* MSIs. MVE index is identical for 32 and 64 bit MSI
* and -1 if not supported. (It's actually identical to the
* PE number)
*/
@@ -94,7 +115,6 @@ struct pnv_phb {
int flags;
void __iomem *regs;
u64 regs_phys;
- int initialized;
spinlock_t lock;
#ifdef CONFIG_DEBUG_FS
@@ -103,11 +123,7 @@ struct pnv_phb {
#endif
unsigned int msi_base;
- unsigned int msi32_support;
struct msi_bitmap msi_bmp;
- int (*msi_setup)(struct pnv_phb *phb, struct pci_dev *dev,
- unsigned int hwirq, unsigned int virq,
- unsigned int is_64, struct msi_msg *msg);
int (*init_m64)(struct pnv_phb *phb);
int (*get_pe_state)(struct pnv_phb *phb, int pe_no);
void (*freeze_pe)(struct pnv_phb *phb, int pe_no);
@@ -118,7 +134,6 @@ struct pnv_phb {
unsigned int total_pe_num;
unsigned int reserved_pe_idx;
unsigned int root_pe_idx;
- bool root_pe_populated;
/* 32-bit MMIO window */
unsigned int m32_size;
@@ -130,6 +145,7 @@ struct pnv_phb {
unsigned long m64_size;
unsigned long m64_segsize;
unsigned long m64_base;
+#define MAX_M64_BARS 64
unsigned long m64_bar_alloc;
/* IO ports */
@@ -170,6 +186,89 @@ struct pnv_phb {
u8 *diag_data;
};
+
+/* IODA PE management */
+
+static inline bool pnv_pci_is_m64(struct pnv_phb *phb, struct resource *r)
+{
+ /*
+ * WARNING: We cannot rely on the resource flags. The Linux PCI
+ * allocation code sometimes decides to put a 64-bit prefetchable
+ * BAR in the 32-bit window, so we have to compare the addresses.
+ *
+ * For simplicity we only test resource start.
+ */
+ return (r->start >= phb->ioda.m64_base &&
+ r->start < (phb->ioda.m64_base + phb->ioda.m64_size));
+}
+
+static inline bool pnv_pci_is_m64_flags(unsigned long resource_flags)
+{
+ unsigned long flags = (IORESOURCE_MEM_64 | IORESOURCE_PREFETCH);
+
+ return (resource_flags & flags) == flags;
+}
+
+int pnv_ioda_configure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe);
+int pnv_ioda_deconfigure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe);
+
+void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe);
+void pnv_pci_ioda2_release_pe_dma(struct pnv_ioda_pe *pe);
+
+struct pnv_ioda_pe *pnv_ioda_alloc_pe(struct pnv_phb *phb, int count);
+void pnv_ioda_free_pe(struct pnv_ioda_pe *pe);
+
+#ifdef CONFIG_PCI_IOV
+/*
+ * For SR-IOV we want to put each VF's MMIO resource into a separate PE.
+ * This requires a bit of acrobatics with the MMIO -> PE configuration
+ * and this structure is used to keep track of it all.
+ */
+struct pnv_iov_data {
+ /* number of VFs enabled */
+ u16 num_vfs;
+
+ /* pointer to the array of VF PEs. num_vfs long */
+ struct pnv_ioda_pe *vf_pe_arr;
+
+ /* Did we map the VF BAR with single-PE IODA BARs? */
+ bool m64_single_mode[PCI_SRIOV_NUM_BARS];
+
+ /*
+ * True if we're using any segmented windows. In that case we need
+ * to shift the start of the IOV resource to the segment corresponding
+ * to the allocated PE.
+ */
+ bool need_shift;
+
+ /*
+ * Bit mask used to track which m64 windows are used to map the
+ * SR-IOV BARs for this device.
+ */
+ DECLARE_BITMAP(used_m64_bar_mask, MAX_M64_BARS);
+
+ /*
+ * If we map the SR-IOV BARs with a segmented window then
+ * parts of that window will be "claimed" by other PEs.
+ *
+ * "holes" here is used to reserve the leading portion
+ * of the window that is used by other (non VF) PEs.
+ */
+ struct resource holes[PCI_SRIOV_NUM_BARS];
+};
+
+static inline struct pnv_iov_data *pnv_iov_get(struct pci_dev *pdev)
+{
+ return pdev->dev.archdata.iov_data;
+}
+
+void pnv_pci_ioda_fixup_iov(struct pci_dev *pdev);
+resource_size_t pnv_pci_iov_resource_alignment(struct pci_dev *pdev, int resno);
+
+int pnv_pcibios_sriov_enable(struct pci_dev *pdev, u16 num_vfs);
+int pnv_pcibios_sriov_disable(struct pci_dev *pdev);
+#endif /* CONFIG_PCI_IOV */
+
extern struct pci_ops pnv_pci_ops;
void pnv_pci_dump_phb_diag_data(struct pci_controller *hose,
@@ -182,14 +281,11 @@ extern struct iommu_table *pnv_pci_table_alloc(int nid);
extern void pnv_pci_init_ioda_hub(struct device_node *np);
extern void pnv_pci_init_ioda2_phb(struct device_node *np);
-extern void pnv_pci_init_npu_phb(struct device_node *np);
extern void pnv_pci_init_npu2_opencapi_phb(struct device_node *np);
-extern void pnv_npu2_map_lpar(struct pnv_ioda_pe *gpe, unsigned long msr);
extern void pnv_pci_reset_secondary_bus(struct pci_dev *dev);
extern int pnv_eeh_phb_reset(struct pci_controller *hose, int option);
-extern int pnv_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type);
-extern void pnv_teardown_msi_irqs(struct pci_dev *pdev);
+extern struct pnv_ioda_pe *pnv_pci_bdfn_to_pe(struct pnv_phb *phb, u16 bdfn);
extern struct pnv_ioda_pe *pnv_ioda_get_pe(struct pci_dev *dev);
extern void pnv_set_msi_irq_chip(struct pnv_phb *phb, unsigned int virq);
extern unsigned long pnv_pci_ioda2_get_table_size(__u32 page_shift,
@@ -206,15 +302,6 @@ extern void pe_level_printk(const struct pnv_ioda_pe *pe, const char *level,
#define pe_info(pe, fmt, ...) \
pe_level_printk(pe, KERN_INFO, fmt, ##__VA_ARGS__)
-/* Nvlink functions */
-extern void pnv_npu_try_dma_set_bypass(struct pci_dev *gpdev, bool bypass);
-extern void pnv_pci_ioda2_tce_invalidate_entire(struct pnv_phb *phb, bool rm);
-extern struct pnv_ioda_pe *pnv_pci_npu_setup_iommu(struct pnv_ioda_pe *npe);
-extern struct iommu_table_group *pnv_try_setup_npu_table_group(
- struct pnv_ioda_pe *pe);
-extern struct iommu_table_group *pnv_npu_compound_attach(
- struct pnv_ioda_pe *pe);
-
/* pci-ioda-tce.c */
#define POWERNV_IOMMU_DEFAULT_LEVELS 2
#define POWERNV_IOMMU_MAX_LEVELS 5
@@ -224,8 +311,7 @@ extern int pnv_tce_build(struct iommu_table *tbl, long index, long npages,
unsigned long attrs);
extern void pnv_tce_free(struct iommu_table *tbl, long index, long npages);
extern int pnv_tce_xchg(struct iommu_table *tbl, long index,
- unsigned long *hpa, enum dma_data_direction *direction,
- bool alloc);
+ unsigned long *hpa, enum dma_data_direction *direction);
extern __be64 *pnv_tce_useraddrptr(struct iommu_table *tbl, long index,
bool alloc);
extern unsigned long pnv_tce_get(struct iommu_table *tbl, long index);
@@ -244,4 +330,16 @@ extern void pnv_pci_setup_iommu_table(struct iommu_table *tbl,
void *tce_mem, u64 tce_size,
u64 dma_offset, unsigned int page_shift);
+extern unsigned long pnv_ioda_parse_tce_sizes(struct pnv_phb *phb);
+
+static inline struct pnv_phb *pci_bus_to_pnvhb(struct pci_bus *bus)
+{
+ struct pci_controller *hose = bus->sysdata;
+
+ if (hose)
+ return hose->private_data;
+
+ return NULL;
+}
+
#endif /* __POWERNV_PCI_H */
diff --git a/arch/powerpc/platforms/powernv/powernv.h b/arch/powerpc/platforms/powernv/powernv.h
index 1aa51c4fa904..866efdc103fd 100644
--- a/arch/powerpc/platforms/powernv/powernv.h
+++ b/arch/powerpc/platforms/powernv/powernv.h
@@ -2,6 +2,13 @@
#ifndef _POWERNV_H
#define _POWERNV_H
+/*
+ * There are various hacks scattered throughout the generic powerpc arch code
+ * that need to call into powernv platform code. The prototypes for those
+ * functions are in asm/powernv.h
+ */
+#include <asm/powernv.h>
+
#ifdef CONFIG_SMP
extern void pnv_smp_init(void);
#else
@@ -32,7 +39,9 @@ bool cpu_core_split_required(void);
struct memcons;
ssize_t memcons_copy(struct memcons *mc, char *to, loff_t pos, size_t count);
-u32 memcons_get_size(struct memcons *mc);
-struct memcons *memcons_init(struct device_node *node, const char *mc_prop_name);
+u32 __init memcons_get_size(struct memcons *mc);
+struct memcons *__init memcons_init(struct device_node *node, const char *mc_prop_name);
+
+void pnv_rng_init(void);
#endif /* _POWERNV_H */
diff --git a/arch/powerpc/platforms/powernv/rng.c b/arch/powerpc/platforms/powernv/rng.c
index 8035caf6e297..196aa70fe043 100644
--- a/arch/powerpc/platforms/powernv/rng.c
+++ b/arch/powerpc/platforms/powernv/rng.c
@@ -17,33 +17,28 @@
#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/smp.h>
+#include "powernv.h"
#define DARN_ERR 0xFFFFFFFFFFFFFFFFul
-struct powernv_rng {
+struct pnv_rng {
void __iomem *regs;
void __iomem *regs_real;
unsigned long mask;
};
-static DEFINE_PER_CPU(struct powernv_rng *, powernv_rng);
+static DEFINE_PER_CPU(struct pnv_rng *, pnv_rng);
-
-int powernv_hwrng_present(void)
-{
- struct powernv_rng *rng;
-
- rng = get_cpu_var(powernv_rng);
- put_cpu_var(rng);
- return rng != NULL;
-}
-
-static unsigned long rng_whiten(struct powernv_rng *rng, unsigned long val)
+static unsigned long rng_whiten(struct pnv_rng *rng, unsigned long val)
{
unsigned long parity;
/* Calculate the parity of the value */
- asm ("popcntd %0,%1" : "=r" (parity) : "r" (val));
+ asm (".machine push; \
+ .machine power7; \
+ popcntd %0,%1; \
+ .machine pop;"
+ : "=r" (parity) : "r" (val));
/* xor our value with the previous mask */
val ^= rng->mask;
@@ -54,18 +49,7 @@ static unsigned long rng_whiten(struct powernv_rng *rng, unsigned long val)
return val;
}
-int powernv_get_random_real_mode(unsigned long *v)
-{
- struct powernv_rng *rng;
-
- rng = raw_cpu_read(powernv_rng);
-
- *v = rng_whiten(rng, __raw_rm_readq(rng->regs_real));
-
- return 1;
-}
-
-int powernv_get_random_darn(unsigned long *v)
+static int pnv_get_random_darn(unsigned long *v)
{
unsigned long val;
@@ -80,7 +64,7 @@ int powernv_get_random_darn(unsigned long *v)
return 1;
}
-static int initialise_darn(void)
+static int __init initialise_darn(void)
{
unsigned long val;
int i;
@@ -89,32 +73,31 @@ static int initialise_darn(void)
return -ENODEV;
for (i = 0; i < 10; i++) {
- if (powernv_get_random_darn(&val)) {
- ppc_md.get_random_seed = powernv_get_random_darn;
+ if (pnv_get_random_darn(&val)) {
+ ppc_md.get_random_seed = pnv_get_random_darn;
return 0;
}
}
-
- pr_warn("Unable to use DARN for get_random_seed()\n");
-
return -EIO;
}
-int powernv_get_random_long(unsigned long *v)
+int pnv_get_random_long(unsigned long *v)
{
- struct powernv_rng *rng;
-
- rng = get_cpu_var(powernv_rng);
-
- *v = rng_whiten(rng, in_be64(rng->regs));
-
- put_cpu_var(rng);
-
+ struct pnv_rng *rng;
+
+ if (mfmsr() & MSR_DR) {
+ rng = get_cpu_var(pnv_rng);
+ *v = rng_whiten(rng, in_be64(rng->regs));
+ put_cpu_var(rng);
+ } else {
+ rng = raw_cpu_read(pnv_rng);
+ *v = rng_whiten(rng, __raw_rm_readq(rng->regs_real));
+ }
return 1;
}
-EXPORT_SYMBOL_GPL(powernv_get_random_long);
+EXPORT_SYMBOL_GPL(pnv_get_random_long);
-static __init void rng_init_per_cpu(struct powernv_rng *rng,
+static __init void rng_init_per_cpu(struct pnv_rng *rng,
struct device_node *dn)
{
int chip_id, cpu;
@@ -124,16 +107,16 @@ static __init void rng_init_per_cpu(struct powernv_rng *rng,
pr_warn("No ibm,chip-id found for %pOF.\n", dn);
for_each_possible_cpu(cpu) {
- if (per_cpu(powernv_rng, cpu) == NULL ||
+ if (per_cpu(pnv_rng, cpu) == NULL ||
cpu_to_chip_id(cpu) == chip_id) {
- per_cpu(powernv_rng, cpu) = rng;
+ per_cpu(pnv_rng, cpu) = rng;
}
}
}
static __init int rng_create(struct device_node *dn)
{
- struct powernv_rng *rng;
+ struct pnv_rng *rng;
struct resource res;
unsigned long val;
@@ -159,32 +142,59 @@ static __init int rng_create(struct device_node *dn)
rng_init_per_cpu(rng, dn);
- pr_info_once("Registering arch random hook.\n");
-
- ppc_md.get_random_seed = powernv_get_random_long;
+ ppc_md.get_random_seed = pnv_get_random_long;
return 0;
}
-static __init int rng_init(void)
+static int __init pnv_get_random_long_early(unsigned long *v)
{
struct device_node *dn;
- int rc;
-
- for_each_compatible_node(dn, NULL, "ibm,power-rng") {
- rc = rng_create(dn);
- if (rc) {
- pr_err("Failed creating rng for %pOF (%d).\n",
- dn, rc);
- continue;
- }
- /* Create devices for hwrng driver */
- of_platform_device_create(dn, NULL, NULL);
- }
+ if (!slab_is_available())
+ return 0;
+
+ if (cmpxchg(&ppc_md.get_random_seed, pnv_get_random_long_early,
+ NULL) != pnv_get_random_long_early)
+ return 0;
+
+ for_each_compatible_node(dn, NULL, "ibm,power-rng")
+ rng_create(dn);
+
+ if (!ppc_md.get_random_seed)
+ return 0;
+ return ppc_md.get_random_seed(v);
+}
+
+void __init pnv_rng_init(void)
+{
+ struct device_node *dn;
+
+ /* Prefer darn over the rest. */
+ if (!initialise_darn())
+ return;
+
+ dn = of_find_compatible_node(NULL, NULL, "ibm,power-rng");
+ if (dn)
+ ppc_md.get_random_seed = pnv_get_random_long_early;
+
+ of_node_put(dn);
+}
- initialise_darn();
+static int __init pnv_rng_late_init(void)
+{
+ struct device_node *dn;
+ unsigned long v;
+
+ /* In case it wasn't called during init for some other reason. */
+ if (ppc_md.get_random_seed == pnv_get_random_long_early)
+ pnv_get_random_long_early(&v);
+
+ if (ppc_md.get_random_seed == pnv_get_random_long) {
+ for_each_compatible_node(dn, NULL, "ibm,power-rng")
+ of_platform_device_create(dn, NULL, NULL);
+ }
return 0;
}
-machine_subsys_initcall(powernv, rng_init);
+machine_subsys_initcall(powernv, pnv_rng_late_init);
diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c
index 11fdae81b5dd..61ab2d38ff4b 100644
--- a/arch/powerpc/platforms/powernv/setup.c
+++ b/arch/powerpc/platforms/powernv/setup.c
@@ -17,6 +17,7 @@
#include <linux/console.h>
#include <linux/delay.h>
#include <linux/irq.h>
+#include <linux/seq_buf.h>
#include <linux/seq_file.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
@@ -40,7 +41,7 @@
#include "powernv.h"
-static bool fw_feature_is(const char *state, const char *name,
+static bool __init fw_feature_is(const char *state, const char *name,
struct device_node *fw_features)
{
struct device_node *np;
@@ -55,7 +56,7 @@ static bool fw_feature_is(const char *state, const char *name,
return rc;
}
-static void init_fw_feat_flags(struct device_node *np)
+static void __init init_fw_feat_flags(struct device_node *np)
{
if (fw_feature_is("enabled", "inst-spec-barrier-ori31,31,0", np))
security_ftr_set(SEC_FTR_SPEC_BAR_ORI31);
@@ -96,9 +97,18 @@ static void init_fw_feat_flags(struct device_node *np)
if (fw_feature_is("disabled", "needs-spec-barrier-for-bound-checks", np))
security_ftr_clear(SEC_FTR_BNDS_CHK_SPEC_BAR);
+
+ if (fw_feature_is("enabled", "no-need-l1d-flush-msr-pr-1-to-0", np))
+ security_ftr_clear(SEC_FTR_L1D_FLUSH_ENTRY);
+
+ if (fw_feature_is("enabled", "no-need-l1d-flush-kernel-on-user-access", np))
+ security_ftr_clear(SEC_FTR_L1D_FLUSH_UACCESS);
+
+ if (fw_feature_is("enabled", "no-need-store-drain-on-priv-state-switch", np))
+ security_ftr_clear(SEC_FTR_STF_BARRIER);
}
-static void pnv_setup_rfi_flush(void)
+static void __init pnv_setup_security_mitigations(void)
{
struct device_node *np, *fw_features;
enum l1d_flush_type type;
@@ -122,27 +132,68 @@ static void pnv_setup_rfi_flush(void)
type = L1D_FLUSH_ORI;
}
+ /*
+ * The issues addressed by the entry and uaccess flush don't affect P7
+ * or P8, so on bare metal disable them explicitly in case firmware does
+ * not include the features to disable them. POWER9 and newer processors
+ * should have the appropriate firmware flags.
+ */
+ if (pvr_version_is(PVR_POWER7) || pvr_version_is(PVR_POWER7p) ||
+ pvr_version_is(PVR_POWER8E) || pvr_version_is(PVR_POWER8NVL) ||
+ pvr_version_is(PVR_POWER8)) {
+ security_ftr_clear(SEC_FTR_L1D_FLUSH_ENTRY);
+ security_ftr_clear(SEC_FTR_L1D_FLUSH_UACCESS);
+ }
+
enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) && \
(security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR) || \
security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV));
setup_rfi_flush(type, enable);
setup_count_cache_flush();
+
+ enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
+ security_ftr_enabled(SEC_FTR_L1D_FLUSH_ENTRY);
+ setup_entry_flush(enable);
+
+ enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
+ security_ftr_enabled(SEC_FTR_L1D_FLUSH_UACCESS);
+ setup_uaccess_flush(enable);
+
+ setup_stf_barrier();
+}
+
+static void __init pnv_check_guarded_cores(void)
+{
+ struct device_node *dn;
+ int bad_count = 0;
+
+ for_each_node_by_type(dn, "cpu") {
+ if (of_property_match_string(dn, "status", "bad") >= 0)
+ bad_count++;
+ }
+
+ if (bad_count) {
+ printk(" _ _______________\n");
+ pr_cont(" | | / \\\n");
+ pr_cont(" | | | WARNING! |\n");
+ pr_cont(" | | | |\n");
+ pr_cont(" | | | It looks like |\n");
+ pr_cont(" |_| | you have %*d |\n", 3, bad_count);
+ pr_cont(" _ | guarded cores |\n");
+ pr_cont(" (_) \\_______________/\n");
+ }
}
static void __init pnv_setup_arch(void)
{
set_arch_panic_timeout(10, ARCH_PANIC_TIMEOUT);
- pnv_setup_rfi_flush();
- setup_stf_barrier();
+ pnv_setup_security_mitigations();
/* Initialize SMP */
pnv_smp_init();
- /* Setup PCI */
- pnv_pci_init();
-
/* Setup RTC and NVRAM callbacks */
if (firmware_has_feature(FW_FEATURE_OPAL))
opal_nvram_init();
@@ -150,11 +201,36 @@ static void __init pnv_setup_arch(void)
/* Enable NAP mode */
powersave_nap = 1;
+ pnv_check_guarded_cores();
+
/* XXX PMCS */
+
+ pnv_rng_init();
+}
+
+static void __init pnv_add_hw_description(void)
+{
+ struct device_node *dn;
+ const char *s;
+
+ dn = of_find_node_by_path("/ibm,opal/firmware");
+ if (!dn)
+ return;
+
+ if (of_property_read_string(dn, "version", &s) == 0 ||
+ of_property_read_string(dn, "git-id", &s) == 0)
+ seq_buf_printf(&ppc_hw_desc, "opal:%s ", s);
+
+ if (of_property_read_string(dn, "mi-version", &s) == 0)
+ seq_buf_printf(&ppc_hw_desc, "mi:%s ", s);
+
+ of_node_put(dn);
}
static void __init pnv_init(void)
{
+ pnv_add_hw_description();
+
/*
* Initialize the LPC bus now so that legacy serial
* ports can be found on it
@@ -168,13 +244,20 @@ static void __init pnv_init(void)
#endif
add_preferred_console("hvc", 0, NULL);
+#ifdef CONFIG_PPC_64S_HASH_MMU
if (!radix_enabled()) {
+ size_t size = sizeof(struct slb_entry) * mmu_slb_size;
int i;
/* Allocate per cpu area to save old slb contents during MCE */
- for_each_possible_cpu(i)
- paca_ptrs[i]->mce_faulty_slbs = memblock_alloc_node(mmu_slb_size, __alignof__(*paca_ptrs[i]->mce_faulty_slbs), cpu_to_node(i));
+ for_each_possible_cpu(i) {
+ paca_ptrs[i]->mce_faulty_slbs =
+ memblock_alloc_node(size,
+ __alignof__(struct slb_entry),
+ cpu_to_node(i));
+ }
}
+#endif
}
static void __init pnv_init_IRQ(void)
@@ -229,7 +312,7 @@ static void __noreturn pnv_restart(char *cmd)
pnv_prepare_going_down();
do {
- if (!cmd)
+ if (!cmd || !strlen(cmd))
rc = opal_cec_reboot();
else if (strcmp(cmd, "full") == 0)
rc = opal_cec_reboot2(OPAL_REBOOT_FULL_IPL, NULL);
@@ -237,6 +320,8 @@ static void __noreturn pnv_restart(char *cmd)
rc = opal_cec_reboot2(OPAL_REBOOT_MPIPL, NULL);
else if (strcmp(cmd, "error") == 0)
rc = opal_cec_reboot2(OPAL_REBOOT_PLATFORM_ERROR, NULL);
+ else if (strcmp(cmd, "fast") == 0)
+ rc = opal_cec_reboot2(OPAL_REBOOT_FAST, NULL);
else
rc = OPAL_UNSUPPORTED;
@@ -394,10 +479,18 @@ static void pnv_kexec_cpu_down(int crash_shutdown, int secondary)
}
#endif /* CONFIG_KEXEC_CORE */
-#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
+#ifdef CONFIG_MEMORY_HOTPLUG
static unsigned long pnv_memory_block_size(void)
{
- return 256UL * 1024 * 1024;
+ /*
+ * We map the kernel linear region with 1GB large pages on radix. For
+ * memory hot unplug to work our memory block size must be at least
+ * this size.
+ */
+ if (radix_enabled())
+ return radix_mem_block_size;
+ else
+ return 256UL * 1024 * 1024;
}
#endif
@@ -490,6 +583,7 @@ define_machine(powernv) {
.init_IRQ = pnv_init_IRQ,
.show_cpuinfo = pnv_show_cpuinfo,
.get_proc_freq = pnv_get_proc_freq,
+ .discover_phbs = pnv_pci_init,
.progress = pnv_progress,
.machine_shutdown = pnv_shutdown,
.power_save = NULL,
@@ -498,7 +592,7 @@ define_machine(powernv) {
#ifdef CONFIG_KEXEC_CORE
.kexec_cpu_down = pnv_kexec_cpu_down,
#endif
-#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
+#ifdef CONFIG_MEMORY_HOTPLUG
.memory_block_size = pnv_memory_block_size,
#endif
};
diff --git a/arch/powerpc/platforms/powernv/smp.c b/arch/powerpc/platforms/powernv/smp.c
index 13e251699346..9e1a25398f98 100644
--- a/arch/powerpc/platforms/powernv/smp.c
+++ b/arch/powerpc/platforms/powernv/smp.c
@@ -43,7 +43,7 @@
#include <asm/udbg.h>
#define DBG(fmt...) udbg_printf(fmt)
#else
-#define DBG(fmt...)
+#define DBG(fmt...) do { } while (0)
#endif
static void pnv_smp_setup_cpu(int cpu)
@@ -143,6 +143,9 @@ static int pnv_smp_cpu_disable(void)
xive_smp_disable_cpu();
else
xics_migrate_irqs_away();
+
+ cleanup_cpu_mmu_context();
+
return 0;
}
@@ -158,7 +161,7 @@ static void pnv_flush_interrupts(void)
}
}
-static void pnv_smp_cpu_kill_self(void)
+static void pnv_cpu_offline_self(void)
{
unsigned long srr1, unexpected_mask, wmask;
unsigned int cpu;
@@ -167,7 +170,6 @@ static void pnv_smp_cpu_kill_self(void)
/* Standard hot unplug procedure */
idle_task_exit();
- current->active_mm = NULL; /* for sanity */
cpu = smp_processor_id();
DBG("CPU%d offline\n", cpu);
generic_set_cpu_dead(cpu);
@@ -343,7 +345,7 @@ static void __init pnv_smp_probe(void)
}
}
-static int pnv_system_reset_exception(struct pt_regs *regs)
+noinstr static int pnv_system_reset_exception(struct pt_regs *regs)
{
if (smp_handle_nmi_ipi(regs))
return 1;
@@ -418,6 +420,7 @@ static struct smp_ops_t pnv_smp_ops = {
#ifdef CONFIG_HOTPLUG_CPU
.cpu_disable = pnv_smp_cpu_disable,
.cpu_die = generic_cpu_die,
+ .cpu_offline_self = pnv_cpu_offline_self,
#endif /* CONFIG_HOTPLUG_CPU */
};
@@ -431,7 +434,6 @@ void __init pnv_smp_init(void)
smp_ops = &pnv_smp_ops;
#ifdef CONFIG_HOTPLUG_CPU
- ppc_md.cpu_die = pnv_smp_cpu_kill_self;
#ifdef CONFIG_KEXEC_CORE
crash_wake_offline = 1;
#endif
diff --git a/arch/powerpc/platforms/powernv/subcore.c b/arch/powerpc/platforms/powernv/subcore.c
index 73207b53dc2b..7e98b00ea2e8 100644
--- a/arch/powerpc/platforms/powernv/subcore.c
+++ b/arch/powerpc/platforms/powernv/subcore.c
@@ -169,6 +169,16 @@ static void update_hid_in_slw(u64 hid0)
}
}
+static inline void update_power8_hid0(unsigned long hid0)
+{
+ /*
+ * The HID0 update on Power8 should at the very least be
+ * preceded by a SYNC instruction followed by an ISYNC
+ * instruction
+ */
+ asm volatile("sync; mtspr %0,%1; isync":: "i"(SPRN_HID0), "r"(hid0));
+}
+
static void unsplit_core(void)
{
u64 hid0, mask;
diff --git a/arch/powerpc/platforms/powernv/subcore.h b/arch/powerpc/platforms/powernv/subcore.h
index c8f574d1c04a..77feee8436d4 100644
--- a/arch/powerpc/platforms/powernv/subcore.h
+++ b/arch/powerpc/platforms/powernv/subcore.h
@@ -15,7 +15,7 @@
void split_core_secondary_loop(u8 *state);
extern void update_subcore_sibling_mask(void);
#else
-static inline void update_subcore_sibling_mask(void) { };
+static inline void update_subcore_sibling_mask(void) { }
#endif /* CONFIG_SMP */
#endif /* __ASSEMBLY__ */
diff --git a/arch/powerpc/platforms/powernv/ultravisor.c b/arch/powerpc/platforms/powernv/ultravisor.c
index e4a00ad06f9d..67c8c4b2d8b1 100644
--- a/arch/powerpc/platforms/powernv/ultravisor.c
+++ b/arch/powerpc/platforms/powernv/ultravisor.c
@@ -55,6 +55,7 @@ static int __init uv_init(void)
return -ENODEV;
uv_memcons = memcons_init(node, "memcons");
+ of_node_put(node);
if (!uv_memcons)
return -ENOENT;
diff --git a/arch/powerpc/platforms/powernv/vas-debug.c b/arch/powerpc/platforms/powernv/vas-debug.c
index 09e63df53c30..3ce89a4b54be 100644
--- a/arch/powerpc/platforms/powernv/vas-debug.c
+++ b/arch/powerpc/platforms/powernv/vas-debug.c
@@ -9,6 +9,7 @@
#include <linux/slab.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
+#include <asm/vas.h>
#include "vas.h"
static struct dentry *vas_debugfs;
@@ -28,7 +29,7 @@ static char *cop_to_str(int cop)
static int info_show(struct seq_file *s, void *private)
{
- struct vas_window *window = s->private;
+ struct pnv_vas_window *window = s->private;
mutex_lock(&vas_mutex);
@@ -36,9 +37,9 @@ static int info_show(struct seq_file *s, void *private)
if (!window->hvwc_map)
goto unlock;
- seq_printf(s, "Type: %s, %s\n", cop_to_str(window->cop),
+ seq_printf(s, "Type: %s, %s\n", cop_to_str(window->vas_win.cop),
window->tx_win ? "Send" : "Receive");
- seq_printf(s, "Pid : %d\n", window->pid);
+ seq_printf(s, "Pid : %d\n", vas_window_pid(&window->vas_win));
unlock:
mutex_unlock(&vas_mutex);
@@ -47,7 +48,7 @@ unlock:
DEFINE_SHOW_ATTRIBUTE(info);
-static inline void print_reg(struct seq_file *s, struct vas_window *win,
+static inline void print_reg(struct seq_file *s, struct pnv_vas_window *win,
char *name, u32 reg)
{
seq_printf(s, "0x%016llx %s\n", read_hvwc_reg(win, name, reg), name);
@@ -55,7 +56,7 @@ static inline void print_reg(struct seq_file *s, struct vas_window *win,
static int hvwc_show(struct seq_file *s, void *private)
{
- struct vas_window *window = s->private;
+ struct pnv_vas_window *window = s->private;
mutex_lock(&vas_mutex);
@@ -103,8 +104,10 @@ unlock:
DEFINE_SHOW_ATTRIBUTE(hvwc);
-void vas_window_free_dbgdir(struct vas_window *window)
+void vas_window_free_dbgdir(struct pnv_vas_window *pnv_win)
{
+ struct vas_window *window = &pnv_win->vas_win;
+
if (window->dbgdir) {
debugfs_remove_recursive(window->dbgdir);
kfree(window->dbgname);
@@ -113,42 +116,24 @@ void vas_window_free_dbgdir(struct vas_window *window)
}
}
-void vas_window_init_dbgdir(struct vas_window *window)
+void vas_window_init_dbgdir(struct pnv_vas_window *window)
{
- struct dentry *f, *d;
+ struct dentry *d;
if (!window->vinst->dbgdir)
return;
- window->dbgname = kzalloc(16, GFP_KERNEL);
- if (!window->dbgname)
+ window->vas_win.dbgname = kzalloc(16, GFP_KERNEL);
+ if (!window->vas_win.dbgname)
return;
- snprintf(window->dbgname, 16, "w%d", window->winid);
-
- d = debugfs_create_dir(window->dbgname, window->vinst->dbgdir);
- if (IS_ERR(d))
- goto free_name;
-
- window->dbgdir = d;
-
- f = debugfs_create_file("info", 0444, d, window, &info_fops);
- if (IS_ERR(f))
- goto remove_dir;
+ snprintf(window->vas_win.dbgname, 16, "w%d", window->vas_win.winid);
- f = debugfs_create_file("hvwc", 0444, d, window, &hvwc_fops);
- if (IS_ERR(f))
- goto remove_dir;
+ d = debugfs_create_dir(window->vas_win.dbgname, window->vinst->dbgdir);
+ window->vas_win.dbgdir = d;
- return;
-
-remove_dir:
- debugfs_remove_recursive(window->dbgdir);
- window->dbgdir = NULL;
-
-free_name:
- kfree(window->dbgname);
- window->dbgname = NULL;
+ debugfs_create_file("info", 0444, d, window, &info_fops);
+ debugfs_create_file("hvwc", 0444, d, window, &hvwc_fops);
}
void vas_instance_init_dbgdir(struct vas_instance *vinst)
@@ -156,8 +141,6 @@ void vas_instance_init_dbgdir(struct vas_instance *vinst)
struct dentry *d;
vas_init_dbgdir();
- if (!vas_debugfs)
- return;
vinst->dbgname = kzalloc(16, GFP_KERNEL);
if (!vinst->dbgname)
@@ -166,16 +149,7 @@ void vas_instance_init_dbgdir(struct vas_instance *vinst)
snprintf(vinst->dbgname, 16, "v%d", vinst->vas_id);
d = debugfs_create_dir(vinst->dbgname, vas_debugfs);
- if (IS_ERR(d))
- goto free_name;
-
vinst->dbgdir = d;
- return;
-
-free_name:
- kfree(vinst->dbgname);
- vinst->dbgname = NULL;
- vinst->dbgdir = NULL;
}
/*
@@ -191,6 +165,4 @@ void vas_init_dbgdir(void)
first_time = false;
vas_debugfs = debugfs_create_dir("vas", NULL);
- if (IS_ERR(vas_debugfs))
- vas_debugfs = NULL;
}
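
The error-handling removals above rely on debugfs being tolerant of failures: debugfs_create_dir() and debugfs_create_file() accept an ERR_PTR parent and simply propagate it, so callers can ignore their return values. A minimal sketch of the resulting idiom (hypothetical example_* names, not part of the patch):

	static struct dentry *example_dir;

	static void example_init_dbgdir(void *data, const struct file_operations *fops)
	{
		/* No IS_ERR()/NULL check needed on the result... */
		example_dir = debugfs_create_dir("example", NULL);

		/* ...and a bad parent is handled gracefully here. */
		debugfs_create_file("info", 0444, example_dir, data, fops);
	}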
diff --git a/arch/powerpc/platforms/powernv/vas-fault.c b/arch/powerpc/platforms/powernv/vas-fault.c
new file mode 100644
index 000000000000..2b47d5a86328
--- /dev/null
+++ b/arch/powerpc/platforms/powernv/vas-fault.c
@@ -0,0 +1,245 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * VAS Fault handling.
+ * Copyright 2019, IBM Corporation
+ */
+
+#define pr_fmt(fmt) "vas: " fmt
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/kthread.h>
+#include <linux/sched/signal.h>
+#include <linux/mmu_context.h>
+#include <asm/icswx.h>
+
+#include "vas.h"
+
+/*
+ * The maximum FIFO size for a fault window is 8MB
+ * (VAS_RX_FIFO_SIZE_MAX). Use a 4MB FIFO since each VAS
+ * instance has its own fault window.
+ * An 8MB FIFO can be used if more faults are expected per VAS
+ * instance.
+ */
+#define VAS_FAULT_WIN_FIFO_SIZE (4 << 20)
+
+static void dump_fifo(struct vas_instance *vinst, void *entry)
+{
+ unsigned long *end = vinst->fault_fifo + vinst->fault_fifo_size;
+ unsigned long *fifo = entry;
+ int i;
+
+ pr_err("Fault fifo size %d, Max crbs %d\n", vinst->fault_fifo_size,
+ vinst->fault_fifo_size / CRB_SIZE);
+
+ /* Dump 10 CRB entries or until end of FIFO */
+ pr_err("Fault FIFO Dump:\n");
+ for (i = 0; i < 10*(CRB_SIZE/8) && fifo < end; i += 4, fifo += 4) {
+ pr_err("[%.3d, %p]: 0x%.16lx 0x%.16lx 0x%.16lx 0x%.16lx\n",
+ i, fifo, *fifo, *(fifo+1), *(fifo+2), *(fifo+3));
+ }
+}
+
+/*
+ * Process valid CRBs in fault FIFO.
+ * NX processes user space requests, returns credit and updates the status
+ * in the CRB. If it encounters a translation error when accessing the CRB or
+ * request buffers, it raises an interrupt on the CPU to handle the fault.
+ * It takes a credit on the fault window, updates nx_fault_stamp in the CRB
+ * with the following information and pastes the CRB in the fault FIFO.
+
+ * pswid - window ID of the window on which the request is sent.
+ * fault_storage_addr - fault address
+
+ * It can raise a single interrupt for multiple faults. The OS is expected to
+ * process all valid faults and return a credit for each fault on the user
+ * space and fault windows. This fault FIFO flow is controlled with the
+ * credit mechanism: NX can continue pasting CRBs as long as credits are
+ * available on the fault window; otherwise it returns RMA_Reject.
+ *
+ * Total credits available on fault window: FIFO_SIZE(4MB)/CRBS_SIZE(128)
+ *
+ */
+irqreturn_t vas_fault_thread_fn(int irq, void *data)
+{
+ struct vas_instance *vinst = data;
+ struct coprocessor_request_block *crb, *entry;
+ struct coprocessor_request_block buf;
+ struct pnv_vas_window *window;
+ unsigned long flags;
+ void *fifo;
+
+ crb = &buf;
+
+ /*
+ * VAS can interrupt with multiple page faults, so process all
+ * valid CRBs in the fault FIFO until an invalid CRB is reached.
+ * We use CCW[0] and pswid to validate CRBs:
+ *
+ * CCW[0] Reserved bit. When NX pastes CRB, CCW[0]=0
+ * OS sets this bit to 1 after reading CRB.
+ * pswid NX assigns window ID. Set pswid to -1 after
+ * reading CRB from fault FIFO.
+ *
+ * We exit this function when no valid CRBs are left to process,
+ * so acquire fault_lock and reset fifo_in_progress to 0 before
+ * exiting.
+ * If the kernel receives another interrupt for a different page
+ * fault, the interrupt handler returns IRQ_HANDLED when
+ * fifo_in_progress is set, meaning the new faults will be
+ * handled by the current thread. Otherwise it sets fifo_in_progress
+ * and returns IRQ_WAKE_THREAD to wake up the thread.
+ */
+ while (true) {
+ spin_lock_irqsave(&vinst->fault_lock, flags);
+ /*
+ * Advance the fault fifo pointer to next CRB.
+ * Use CRB_SIZE rather than sizeof(*crb) since the latter is
+ * aligned to CRB_ALIGN (256) but the CRB written to by VAS is
+ * only CRB_SIZE in length.
+ */
+ fifo = vinst->fault_fifo + (vinst->fault_crbs * CRB_SIZE);
+ entry = fifo;
+
+ if ((entry->stamp.nx.pswid == cpu_to_be32(FIFO_INVALID_ENTRY))
+ || (entry->ccw & cpu_to_be32(CCW0_INVALID))) {
+ vinst->fifo_in_progress = 0;
+ spin_unlock_irqrestore(&vinst->fault_lock, flags);
+ return IRQ_HANDLED;
+ }
+
+ spin_unlock_irqrestore(&vinst->fault_lock, flags);
+ vinst->fault_crbs++;
+ if (vinst->fault_crbs == (vinst->fault_fifo_size / CRB_SIZE))
+ vinst->fault_crbs = 0;
+
+ memcpy(crb, fifo, CRB_SIZE);
+ entry->stamp.nx.pswid = cpu_to_be32(FIFO_INVALID_ENTRY);
+ entry->ccw |= cpu_to_be32(CCW0_INVALID);
+ /*
+ * Return credit for the fault window.
+ */
+ vas_return_credit(vinst->fault_win, false);
+
+ pr_devel("VAS[%d] fault_fifo %p, fifo %p, fault_crbs %d\n",
+ vinst->vas_id, vinst->fault_fifo, fifo,
+ vinst->fault_crbs);
+
+ vas_dump_crb(crb);
+ window = vas_pswid_to_window(vinst,
+ be32_to_cpu(crb->stamp.nx.pswid));
+
+ if (IS_ERR(window)) {
+ /*
+ * We got an interrupt about a specific send
+ * window but we can't find that window and we can't
+ * even clean it up (return credit on user space
+ * window).
+ * But we should not get here.
+ * TODO: Disable IRQ.
+ */
+ dump_fifo(vinst, (void *)entry);
+ pr_err("VAS[%d] fault_fifo %p, fifo %p, pswid 0x%x, fault_crbs %d bad CRB?\n",
+ vinst->vas_id, vinst->fault_fifo, fifo,
+ be32_to_cpu(crb->stamp.nx.pswid),
+ vinst->fault_crbs);
+
+ WARN_ON_ONCE(1);
+ } else {
+ /*
+ * NX sees faults only with user space windows.
+ */
+ if (window->user_win)
+ vas_update_csb(crb, &window->vas_win.task_ref);
+ else
+ WARN_ON_ONCE(!window->user_win);
+
+ /*
+ * Return credit for send window after processing
+ * fault CRB.
+ */
+ vas_return_credit(window, true);
+ }
+ }
+}
+
+irqreturn_t vas_fault_handler(int irq, void *dev_id)
+{
+ struct vas_instance *vinst = dev_id;
+ irqreturn_t ret = IRQ_WAKE_THREAD;
+ unsigned long flags;
+
+ /*
+ * NX can generate an interrupt for multiple faults, so the
+ * fault handler thread processes all CRBs until it finds an invalid
+ * entry. If NX sees a continuous stream of faults, it is possible
+ * that the thread function entered on the first interrupt
+ * keeps executing and processes all valid CRBs.
+ * So wake up the thread only if the fault thread is not already running.
+ */
+ spin_lock_irqsave(&vinst->fault_lock, flags);
+
+ if (vinst->fifo_in_progress)
+ ret = IRQ_HANDLED;
+ else
+ vinst->fifo_in_progress = 1;
+
+ spin_unlock_irqrestore(&vinst->fault_lock, flags);
+
+ return ret;
+}
+
+/*
+ * Fault window is opened per VAS instance. NX pastes fault CRB in fault
+ * FIFO upon page faults.
+ */
+int vas_setup_fault_window(struct vas_instance *vinst)
+{
+ struct vas_rx_win_attr attr;
+ struct vas_window *win;
+
+ vinst->fault_fifo_size = VAS_FAULT_WIN_FIFO_SIZE;
+ vinst->fault_fifo = kzalloc(vinst->fault_fifo_size, GFP_KERNEL);
+ if (!vinst->fault_fifo) {
+ pr_err("Unable to alloc %d bytes for fault_fifo\n",
+ vinst->fault_fifo_size);
+ return -ENOMEM;
+ }
+
+ /*
+ * Invalidate all CRB entries. NX pastes valid entry for each fault.
+ */
+ memset(vinst->fault_fifo, FIFO_INVALID_ENTRY, vinst->fault_fifo_size);
+ vas_init_rx_win_attr(&attr, VAS_COP_TYPE_FAULT);
+
+ attr.rx_fifo_size = vinst->fault_fifo_size;
+ attr.rx_fifo = __pa(vinst->fault_fifo);
+
+ /*
+ * Max creds is based on the number of CRBs that can fit in the FIFO
+ * (fault_fifo_size/CRB_SIZE). If an 8MB FIFO is used, max creds
+ * is limited to 0xffff since the receive creds field is 16 bits wide.
+ */
+ attr.wcreds_max = vinst->fault_fifo_size / CRB_SIZE;
+ attr.lnotify_lpid = 0;
+ attr.lnotify_pid = mfspr(SPRN_PID);
+ attr.lnotify_tid = mfspr(SPRN_PID);
+
+ win = vas_rx_win_open(vinst->vas_id, VAS_COP_TYPE_FAULT, &attr);
+ if (IS_ERR(win)) {
+ pr_err("VAS: Error %ld opening FaultWin\n", PTR_ERR(win));
+ kfree(vinst->fault_fifo);
+ return PTR_ERR(win);
+ }
+
+ vinst->fault_win = container_of(win, struct pnv_vas_window, vas_win);
+
+ pr_devel("VAS: Created FaultWin %d, LPID/PID/TID [%d/%d/%d]\n",
+ vinst->fault_win->vas_win.winid, attr.lnotify_lpid,
+ attr.lnotify_pid, attr.lnotify_tid);
+
+ return 0;
+}
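
As a worked example of the credit budget set up above, using the constants named in this patch: a 4 MB FIFO with 128-byte CRBs gives (4 << 20) / 128 = 32768 fault-window credits; an 8 MB FIFO would compute to 65536, which is limited to 0xffff because the receive credit field is only 16 bits wide. A small illustrative sketch of that calculation (hypothetical helper, not part of the patch):

	static inline unsigned int example_fault_wcreds(size_t fifo_size)
	{
		unsigned int creds = fifo_size / 128;	/* CRB_SIZE */

		/* The receive credit field is only 16 bits wide. */
		return creds > 0xffff ? 0xffff : creds;
	}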
diff --git a/arch/powerpc/platforms/powernv/vas-trace.h b/arch/powerpc/platforms/powernv/vas-trace.h
index a449b9f0c12e..ca2e08f2ddc0 100644
--- a/arch/powerpc/platforms/powernv/vas-trace.h
+++ b/arch/powerpc/platforms/powernv/vas-trace.h
@@ -80,7 +80,7 @@ TRACE_EVENT( vas_tx_win_open,
TRACE_EVENT( vas_paste_crb,
TP_PROTO(struct task_struct *tsk,
- struct vas_window *win),
+ struct pnv_vas_window *win),
TP_ARGS(tsk, win),
@@ -96,7 +96,7 @@ TRACE_EVENT( vas_paste_crb,
TP_fast_assign(
__entry->pid = tsk->pid;
__entry->vasid = win->vinst->vas_id;
- __entry->winid = win->winid;
+ __entry->winid = win->vas_win.winid;
__entry->paste_kaddr = (unsigned long)win->paste_kaddr
),
diff --git a/arch/powerpc/platforms/powernv/vas-window.c b/arch/powerpc/platforms/powernv/vas-window.c
index 0c0d27d17976..0072682531d8 100644
--- a/arch/powerpc/platforms/powernv/vas-window.c
+++ b/arch/powerpc/platforms/powernv/vas-window.c
@@ -12,8 +12,11 @@
#include <linux/log2.h>
#include <linux/rcupdate.h>
#include <linux/cred.h>
+#include <linux/sched/mm.h>
+#include <linux/mmu_context.h>
#include <asm/switch_to.h>
#include <asm/ppc-opcode.h>
+#include <asm/vas.h>
#include "vas.h"
#include "copy-paste.h"
@@ -24,14 +27,14 @@
* Compute the paste address region for the window @window using the
* ->paste_base_addr and ->paste_win_id_shift we got from device tree.
*/
-static void compute_paste_address(struct vas_window *window, u64 *addr, int *len)
+void vas_win_paste_addr(struct pnv_vas_window *window, u64 *addr, int *len)
{
int winid;
u64 base, shift;
base = window->vinst->paste_base_addr;
shift = window->vinst->paste_win_id_shift;
- winid = window->winid;
+ winid = window->vas_win.winid;
*addr = base + (winid << shift);
if (len)
@@ -40,23 +43,23 @@ static void compute_paste_address(struct vas_window *window, u64 *addr, int *len
pr_debug("Txwin #%d: Paste addr 0x%llx\n", winid, *addr);
}
-static inline void get_hvwc_mmio_bar(struct vas_window *window,
+static inline void get_hvwc_mmio_bar(struct pnv_vas_window *window,
u64 *start, int *len)
{
u64 pbaddr;
pbaddr = window->vinst->hvwc_bar_start;
- *start = pbaddr + window->winid * VAS_HVWC_SIZE;
+ *start = pbaddr + window->vas_win.winid * VAS_HVWC_SIZE;
*len = VAS_HVWC_SIZE;
}
-static inline void get_uwc_mmio_bar(struct vas_window *window,
+static inline void get_uwc_mmio_bar(struct pnv_vas_window *window,
u64 *start, int *len)
{
u64 pbaddr;
pbaddr = window->vinst->uwc_bar_start;
- *start = pbaddr + window->winid * VAS_UWC_SIZE;
+ *start = pbaddr + window->vas_win.winid * VAS_UWC_SIZE;
*len = VAS_UWC_SIZE;
}
@@ -65,7 +68,7 @@ static inline void get_uwc_mmio_bar(struct vas_window *window,
* space. Unlike MMIO regions (map_mmio_region() below), paste region must
* be mapped cache-able and is only applicable to send windows.
*/
-static void *map_paste_region(struct vas_window *txwin)
+static void *map_paste_region(struct pnv_vas_window *txwin)
{
int len;
void *map;
@@ -73,12 +76,12 @@ static void *map_paste_region(struct vas_window *txwin)
u64 start;
name = kasprintf(GFP_KERNEL, "window-v%d-w%d", txwin->vinst->vas_id,
- txwin->winid);
+ txwin->vas_win.winid);
if (!name)
goto free_name;
txwin->paste_addr_name = name;
- compute_paste_address(txwin, &start, &len);
+ vas_win_paste_addr(txwin, &start, &len);
if (!request_mem_region(start, len, name)) {
pr_devel("%s(): request_mem_region(0x%llx, %d) failed\n",
@@ -130,13 +133,13 @@ static void unmap_region(void *addr, u64 start, int len)
/*
* Unmap the paste address region for a window.
*/
-static void unmap_paste_region(struct vas_window *window)
+static void unmap_paste_region(struct pnv_vas_window *window)
{
int len;
u64 busaddr_start;
if (window->paste_kaddr) {
- compute_paste_address(window, &busaddr_start, &len);
+ vas_win_paste_addr(window, &busaddr_start, &len);
unmap_region(window->paste_kaddr, busaddr_start, len);
window->paste_kaddr = NULL;
kfree(window->paste_addr_name);
@@ -151,7 +154,7 @@ static void unmap_paste_region(struct vas_window *window)
* path, just minimize the time we hold the mutex for now. We can add
* a per-instance mutex later if necessary.
*/
-static void unmap_winctx_mmio_bars(struct vas_window *window)
+static void unmap_winctx_mmio_bars(struct pnv_vas_window *window)
{
int len;
void *uwc_map;
@@ -184,7 +187,7 @@ static void unmap_winctx_mmio_bars(struct vas_window *window)
* OS/User Window Context (UWC) MMIO Base Address Region for the given window.
* Map these bus addresses and save the mapped kernel addresses in @window.
*/
-int map_winctx_mmio_bars(struct vas_window *window)
+static int map_winctx_mmio_bars(struct pnv_vas_window *window)
{
int len;
u64 start;
@@ -212,7 +215,7 @@ int map_winctx_mmio_bars(struct vas_window *window)
* registers are not sequential. And, we can only write to offsets
* with valid registers.
*/
-void reset_window_regs(struct vas_window *window)
+static void reset_window_regs(struct pnv_vas_window *window)
{
write_hvwc_reg(window, VREG(LPID), 0ULL);
write_hvwc_reg(window, VREG(PID), 0ULL);
@@ -268,7 +271,7 @@ void reset_window_regs(struct vas_window *window)
* want to add fields to vas_winctx and move the initialization to
* init_vas_winctx_regs().
*/
-static void init_xlate_regs(struct vas_window *window, bool user_win)
+static void init_xlate_regs(struct pnv_vas_window *window, bool user_win)
{
u64 lpcr, val;
@@ -333,7 +336,7 @@ static void init_xlate_regs(struct vas_window *window, bool user_win)
*
* TODO: Reserved (aka dedicated) send buffers are not supported yet.
*/
-static void init_rsvd_tx_buf_count(struct vas_window *txwin,
+static void init_rsvd_tx_buf_count(struct pnv_vas_window *txwin,
struct vas_winctx *winctx)
{
write_hvwc_reg(txwin, VREG(TX_RSVD_BUF_COUNT), 0ULL);
@@ -355,7 +358,8 @@ static void init_rsvd_tx_buf_count(struct vas_window *txwin,
* as a one-time task? That could work for NX but what about other
* receivers? Let the receivers tell us the rx-fifo buffers for now.
*/
-int init_winctx_regs(struct vas_window *window, struct vas_winctx *winctx)
+static void init_winctx_regs(struct pnv_vas_window *window,
+ struct vas_winctx *winctx)
{
u64 val;
int fifo_size;
@@ -373,7 +377,7 @@ int init_winctx_regs(struct vas_window *window, struct vas_winctx *winctx)
init_xlate_regs(window, winctx->user_win);
val = 0ULL;
- val = SET_FIELD(VAS_FAULT_TX_WIN, val, 0);
+ val = SET_FIELD(VAS_FAULT_TX_WIN, val, winctx->fault_win_id);
write_hvwc_reg(window, VREG(FAULT_TX_WIN), val);
/* In PowerNV, interrupts go to HV. */
@@ -400,7 +404,7 @@ int init_winctx_regs(struct vas_window *window, struct vas_winctx *winctx)
*
* See also: Design note in function header.
*/
- val = __pa(winctx->rx_fifo);
+ val = winctx->rx_fifo;
val = SET_FIELD(VAS_PAGE_MIGRATION_SELECT, val, 0);
write_hvwc_reg(window, VREG(LFIFO_BAR), val);
@@ -497,8 +501,6 @@ int init_winctx_regs(struct vas_window *window, struct vas_winctx *winctx)
val = SET_FIELD(VAS_WINCTL_NX_WIN, val, winctx->nx_win);
val = SET_FIELD(VAS_WINCTL_OPEN, val, 1);
write_hvwc_reg(window, VREG(WINCTL), val);
-
- return 0;
}
static void vas_release_window_id(struct ida *ida, int winid)
@@ -518,10 +520,10 @@ static int vas_assign_window_id(struct ida *ida)
return winid;
}
-static void vas_window_free(struct vas_window *window)
+static void vas_window_free(struct pnv_vas_window *window)
{
- int winid = window->winid;
struct vas_instance *vinst = window->vinst;
+ int winid = window->vas_win.winid;
unmap_winctx_mmio_bars(window);
@@ -532,10 +534,10 @@ static void vas_window_free(struct vas_window *window)
vas_release_window_id(&vinst->ida, winid);
}
-static struct vas_window *vas_window_alloc(struct vas_instance *vinst)
+static struct pnv_vas_window *vas_window_alloc(struct vas_instance *vinst)
{
int winid;
- struct vas_window *window;
+ struct pnv_vas_window *window;
winid = vas_assign_window_id(&vinst->ida);
if (winid < 0)
@@ -546,7 +548,7 @@ static struct vas_window *vas_window_alloc(struct vas_instance *vinst)
goto out_free;
window->vinst = vinst;
- window->winid = winid;
+ window->vas_win.winid = winid;
if (map_winctx_mmio_bars(window))
goto out_free;
@@ -561,7 +563,7 @@ out_free:
return ERR_PTR(-ENOMEM);
}
-static void put_rx_win(struct vas_window *rxwin)
+static void put_rx_win(struct pnv_vas_window *rxwin)
{
/* Better not be a send window! */
WARN_ON_ONCE(rxwin->tx_win);
@@ -577,10 +579,11 @@ static void put_rx_win(struct vas_window *rxwin)
*
* NOTE: We access ->windows[] table and assume that vinst->mutex is held.
*/
-static struct vas_window *get_user_rxwin(struct vas_instance *vinst, u32 pswid)
+static struct pnv_vas_window *get_user_rxwin(struct vas_instance *vinst,
+ u32 pswid)
{
int vasid, winid;
- struct vas_window *rxwin;
+ struct pnv_vas_window *rxwin;
decode_pswid(pswid, &vasid, &winid);
@@ -589,7 +592,7 @@ static struct vas_window *get_user_rxwin(struct vas_instance *vinst, u32 pswid)
rxwin = vinst->windows[winid];
- if (!rxwin || rxwin->tx_win || rxwin->cop != VAS_COP_TYPE_FTW)
+ if (!rxwin || rxwin->tx_win || rxwin->vas_win.cop != VAS_COP_TYPE_FTW)
return ERR_PTR(-EINVAL);
return rxwin;
@@ -601,10 +604,10 @@ static struct vas_window *get_user_rxwin(struct vas_instance *vinst, u32 pswid)
*
* See also function header of set_vinst_win().
*/
-static struct vas_window *get_vinst_rxwin(struct vas_instance *vinst,
+static struct pnv_vas_window *get_vinst_rxwin(struct vas_instance *vinst,
enum vas_cop_type cop, u32 pswid)
{
- struct vas_window *rxwin;
+ struct pnv_vas_window *rxwin;
mutex_lock(&vinst->mutex);
@@ -637,9 +640,9 @@ static struct vas_window *get_vinst_rxwin(struct vas_instance *vinst,
* window, we also save the window in the ->rxwin[] table.
*/
static void set_vinst_win(struct vas_instance *vinst,
- struct vas_window *window)
+ struct pnv_vas_window *window)
{
- int id = window->winid;
+ int id = window->vas_win.winid;
mutex_lock(&vinst->mutex);
@@ -648,8 +651,8 @@ static void set_vinst_win(struct vas_instance *vinst,
* unless its a user (FTW) window.
*/
if (!window->user_win && !window->tx_win) {
- WARN_ON_ONCE(vinst->rxwin[window->cop]);
- vinst->rxwin[window->cop] = window;
+ WARN_ON_ONCE(vinst->rxwin[window->vas_win.cop]);
+ vinst->rxwin[window->vas_win.cop] = window;
}
WARN_ON_ONCE(vinst->windows[id] != NULL);
@@ -662,16 +665,16 @@ static void set_vinst_win(struct vas_instance *vinst,
* Clear this window from the table(s) of windows for this VAS instance.
* See also function header of set_vinst_win().
*/
-static void clear_vinst_win(struct vas_window *window)
+static void clear_vinst_win(struct pnv_vas_window *window)
{
- int id = window->winid;
+ int id = window->vas_win.winid;
struct vas_instance *vinst = window->vinst;
mutex_lock(&vinst->mutex);
if (!window->user_win && !window->tx_win) {
- WARN_ON_ONCE(!vinst->rxwin[window->cop]);
- vinst->rxwin[window->cop] = NULL;
+ WARN_ON_ONCE(!vinst->rxwin[window->vas_win.cop]);
+ vinst->rxwin[window->vas_win.cop] = NULL;
}
WARN_ON_ONCE(vinst->windows[id] != window);
@@ -680,7 +683,7 @@ static void clear_vinst_win(struct vas_window *window)
mutex_unlock(&vinst->mutex);
}
-static void init_winctx_for_rxwin(struct vas_window *rxwin,
+static void init_winctx_for_rxwin(struct pnv_vas_window *rxwin,
struct vas_rx_win_attr *rxattr,
struct vas_winctx *winctx)
{
@@ -701,7 +704,7 @@ static void init_winctx_for_rxwin(struct vas_window *rxwin,
winctx->rx_fifo = rxattr->rx_fifo;
winctx->rx_fifo_size = rxattr->rx_fifo_size;
- winctx->wcreds_max = rxwin->wcreds_max;
+ winctx->wcreds_max = rxwin->vas_win.wcreds_max;
winctx->pin_win = rxattr->pin_win;
winctx->nx_win = rxattr->nx_win;
@@ -736,7 +739,7 @@ static void init_winctx_for_rxwin(struct vas_window *rxwin,
*/
winctx->fifo_disable = true;
winctx->intr_disable = true;
- winctx->rx_fifo = NULL;
+ winctx->rx_fifo = 0;
}
winctx->lnotify_lpid = rxattr->lnotify_lpid;
@@ -748,6 +751,8 @@ static void init_winctx_for_rxwin(struct vas_window *rxwin,
winctx->min_scope = VAS_SCOPE_LOCAL;
winctx->max_scope = VAS_SCOPE_VECTORED_GROUP;
+ if (rxwin->vinst->virq)
+ winctx->irq_port = rxwin->vinst->irq_port;
}
static bool rx_win_args_valid(enum vas_cop_type cop,
@@ -768,7 +773,7 @@ static bool rx_win_args_valid(enum vas_cop_type cop,
if (attr->rx_fifo_size > VAS_RX_FIFO_SIZE_MAX)
return false;
- if (attr->wcreds_max > VAS_RX_WCREDS_MAX)
+ if (!attr->wcreds_max)
return false;
if (attr->nx_win) {
@@ -813,7 +818,8 @@ void vas_init_rx_win_attr(struct vas_rx_win_attr *rxattr, enum vas_cop_type cop)
{
memset(rxattr, 0, sizeof(*rxattr));
- if (cop == VAS_COP_TYPE_842 || cop == VAS_COP_TYPE_842_HIPRI) {
+ if (cop == VAS_COP_TYPE_842 || cop == VAS_COP_TYPE_842_HIPRI ||
+ cop == VAS_COP_TYPE_GZIP || cop == VAS_COP_TYPE_GZIP_HIPRI) {
rxattr->pin_win = true;
rxattr->nx_win = true;
rxattr->fault_win = false;
@@ -827,9 +833,9 @@ void vas_init_rx_win_attr(struct vas_rx_win_attr *rxattr, enum vas_cop_type cop)
rxattr->fault_win = true;
rxattr->notify_disable = true;
rxattr->rx_wcred_mode = true;
- rxattr->tx_wcred_mode = true;
rxattr->rx_win_ord_mode = true;
- rxattr->tx_win_ord_mode = true;
+ rxattr->rej_no_credit = true;
+ rxattr->tc_mode = VAS_THRESH_DISABLED;
} else if (cop == VAS_COP_TYPE_FTW) {
rxattr->user_win = true;
rxattr->intr_disable = true;
@@ -847,7 +853,7 @@ EXPORT_SYMBOL_GPL(vas_init_rx_win_attr);
struct vas_window *vas_rx_win_open(int vasid, enum vas_cop_type cop,
struct vas_rx_win_attr *rxattr)
{
- struct vas_window *rxwin;
+ struct pnv_vas_window *rxwin;
struct vas_winctx winctx;
struct vas_instance *vinst;
@@ -866,23 +872,21 @@ struct vas_window *vas_rx_win_open(int vasid, enum vas_cop_type cop,
rxwin = vas_window_alloc(vinst);
if (IS_ERR(rxwin)) {
pr_devel("Unable to allocate memory for Rx window\n");
- return rxwin;
+ return (struct vas_window *)rxwin;
}
rxwin->tx_win = false;
rxwin->nx_win = rxattr->nx_win;
rxwin->user_win = rxattr->user_win;
- rxwin->cop = cop;
- rxwin->wcreds_max = rxattr->wcreds_max ?: VAS_WCREDS_DEFAULT;
- if (rxattr->user_win)
- rxwin->pid = task_pid_vnr(current);
+ rxwin->vas_win.cop = cop;
+ rxwin->vas_win.wcreds_max = rxattr->wcreds_max;
init_winctx_for_rxwin(rxwin, rxattr, &winctx);
init_winctx_regs(rxwin, &winctx);
set_vinst_win(vinst, rxwin);
- return rxwin;
+ return &rxwin->vas_win;
}
EXPORT_SYMBOL_GPL(vas_rx_win_open);
@@ -890,7 +894,8 @@ void vas_init_tx_win_attr(struct vas_tx_win_attr *txattr, enum vas_cop_type cop)
{
memset(txattr, 0, sizeof(*txattr));
- if (cop == VAS_COP_TYPE_842 || cop == VAS_COP_TYPE_842_HIPRI) {
+ if (cop == VAS_COP_TYPE_842 || cop == VAS_COP_TYPE_842_HIPRI ||
+ cop == VAS_COP_TYPE_GZIP || cop == VAS_COP_TYPE_GZIP_HIPRI) {
txattr->rej_no_credit = false;
txattr->rx_wcred_mode = true;
txattr->tx_wcred_mode = true;
@@ -902,7 +907,7 @@ void vas_init_tx_win_attr(struct vas_tx_win_attr *txattr, enum vas_cop_type cop)
}
EXPORT_SYMBOL_GPL(vas_init_tx_win_attr);
-static void init_winctx_for_txwin(struct vas_window *txwin,
+static void init_winctx_for_txwin(struct pnv_vas_window *txwin,
struct vas_tx_win_attr *txattr,
struct vas_winctx *winctx)
{
@@ -923,7 +928,7 @@ static void init_winctx_for_txwin(struct vas_window *txwin,
*/
memset(winctx, 0, sizeof(struct vas_winctx));
- winctx->wcreds_max = txwin->wcreds_max;
+ winctx->wcreds_max = txwin->vas_win.wcreds_max;
winctx->user_win = txattr->user_win;
winctx->nx_win = txwin->rxwin->nx_win;
@@ -943,14 +948,24 @@ static void init_winctx_for_txwin(struct vas_window *txwin,
winctx->lpid = txattr->lpid;
winctx->pidr = txattr->pidr;
- winctx->rx_win_id = txwin->rxwin->winid;
+ winctx->rx_win_id = txwin->rxwin->vas_win.winid;
+ /*
+ * IRQ and fault window setup succeeded. Set the fault window
+ * for the send window so that it is ready to handle faults.
+ */
+ if (txwin->vinst->virq)
+ winctx->fault_win_id = txwin->vinst->fault_win->vas_win.winid;
winctx->dma_type = VAS_DMA_TYPE_INJECT;
winctx->tc_mode = txattr->tc_mode;
winctx->min_scope = VAS_SCOPE_LOCAL;
winctx->max_scope = VAS_SCOPE_VECTORED_GROUP;
+ if (txwin->vinst->virq)
+ winctx->irq_port = txwin->vinst->irq_port;
- winctx->pswid = 0;
+ winctx->pswid = txattr->pswid ? txattr->pswid :
+ encode_pswid(txwin->vinst->vas_id,
+ txwin->vas_win.winid);
}
static bool tx_win_args_valid(enum vas_cop_type cop,
@@ -965,9 +980,14 @@ static bool tx_win_args_valid(enum vas_cop_type cop,
if (attr->wcreds_max > VAS_TX_WCREDS_MAX)
return false;
- if (attr->user_win &&
- (cop != VAS_COP_TYPE_FTW || attr->rsvd_txbuf_count))
- return false;
+ if (attr->user_win) {
+ if (attr->rsvd_txbuf_count)
+ return false;
+
+ if (cop != VAS_COP_TYPE_FTW && cop != VAS_COP_TYPE_GZIP &&
+ cop != VAS_COP_TYPE_GZIP_HIPRI)
+ return false;
+ }
return true;
}
@@ -976,8 +996,8 @@ struct vas_window *vas_tx_win_open(int vasid, enum vas_cop_type cop,
struct vas_tx_win_attr *attr)
{
int rc;
- struct vas_window *txwin;
- struct vas_window *rxwin;
+ struct pnv_vas_window *txwin;
+ struct pnv_vas_window *rxwin;
struct vas_winctx winctx;
struct vas_instance *vinst;
@@ -1003,7 +1023,7 @@ struct vas_window *vas_tx_win_open(int vasid, enum vas_cop_type cop,
rxwin = get_vinst_rxwin(vinst, cop, attr->pswid);
if (IS_ERR(rxwin)) {
pr_devel("No RxWin for vasid %d, cop %d\n", vasid, cop);
- return rxwin;
+ return (struct vas_window *)rxwin;
}
txwin = vas_window_alloc(vinst);
@@ -1012,13 +1032,12 @@ struct vas_window *vas_tx_win_open(int vasid, enum vas_cop_type cop,
goto put_rxwin;
}
- txwin->cop = cop;
+ txwin->vas_win.cop = cop;
txwin->tx_win = 1;
txwin->rxwin = rxwin;
txwin->nx_win = txwin->rxwin->nx_win;
- txwin->pid = attr->pid;
txwin->user_win = attr->user_win;
- txwin->wcreds_max = attr->wcreds_max ?: VAS_WCREDS_DEFAULT;
+ txwin->vas_win.wcreds_max = attr->wcreds_max ?: VAS_WCREDS_DEFAULT;
init_winctx_for_txwin(txwin, attr, &winctx);
@@ -1040,17 +1059,24 @@ struct vas_window *vas_tx_win_open(int vasid, enum vas_cop_type cop,
}
} else {
/*
- * A user mapping must ensure that context switch issues
- * CP_ABORT for this thread.
+ * Interrupt handler or fault window setup failed. This means
+ * NX can not generate faults for page faults, so do not
+ * open a user space tx window.
*/
- rc = set_thread_uses_vas();
+ if (!vinst->virq) {
+ rc = -ENODEV;
+ goto free_window;
+ }
+ rc = get_vas_user_win_ref(&txwin->vas_win.task_ref);
if (rc)
goto free_window;
+
+ vas_user_win_add_mm_context(&txwin->vas_win.task_ref);
}
set_vinst_win(vinst, txwin);
- return txwin;
+ return &txwin->vas_win;
free_window:
vas_window_free(txwin);
@@ -1069,12 +1095,14 @@ int vas_copy_crb(void *crb, int offset)
EXPORT_SYMBOL_GPL(vas_copy_crb);
#define RMA_LSMP_REPORT_ENABLE PPC_BIT(53)
-int vas_paste_crb(struct vas_window *txwin, int offset, bool re)
+int vas_paste_crb(struct vas_window *vwin, int offset, bool re)
{
+ struct pnv_vas_window *txwin;
int rc;
void *addr;
uint64_t val;
+ txwin = container_of(vwin, struct pnv_vas_window, vas_win);
trace_vas_paste_crb(current, txwin);
/*
@@ -1104,7 +1132,7 @@ int vas_paste_crb(struct vas_window *txwin, int offset, bool re)
else
rc = -EINVAL;
- pr_debug("Txwin #%d: Msg count %llu\n", txwin->winid,
+ pr_debug("Txwin #%d: Msg count %llu\n", txwin->vas_win.winid,
read_hvwc_reg(txwin, VREG(LRFIFO_PUSH)));
return rc;
@@ -1124,10 +1152,11 @@ EXPORT_SYMBOL_GPL(vas_paste_crb);
* user space. (NX-842 driver waits for CSB and Fast thread-wakeup
* doesn't use credit checking).
*/
-static void poll_window_credits(struct vas_window *window)
+static void poll_window_credits(struct pnv_vas_window *window)
{
u64 val;
int creds, mode;
+ int count = 0;
val = read_hvwc_reg(window, VREG(WINCTL));
if (window->tx_win)
@@ -1146,10 +1175,28 @@ retry:
creds = GET_FIELD(VAS_LRX_WCRED, val);
}
- if (creds < window->wcreds_max) {
+ /*
+ * It takes a few milliseconds to complete all pending requests
+ * and return the credits.
+ * TODO: Scan the fault FIFO, invalidate CRBs that point to this window
+ * and issue CRB Kill to stop all pending requests. Needed only
+ * if there is a bug in NX or in the kernel's fault handling.
+ */
+ if (creds < window->vas_win.wcreds_max) {
val = 0;
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout(msecs_to_jiffies(10));
+ count++;
+ /*
+ * The process can not close the send window until all credits are
+ * returned.
+ */
+ if (!(count % 1000))
+ pr_warn_ratelimited("VAS: pid %d stuck. Waiting for credits returned for Window(%d). creds %d, Retries %d\n",
+ vas_window_pid(&window->vas_win),
+ window->vas_win.winid,
+ creds, count);
+
goto retry;
}
}
@@ -1159,10 +1206,11 @@ retry:
* short time to queue a CRB, so window should not be busy for too long.
* Trying 5ms intervals.
*/
-static void poll_window_busy_state(struct vas_window *window)
+static void poll_window_busy_state(struct pnv_vas_window *window)
{
int busy;
u64 val;
+ int count = 0;
retry:
val = read_hvwc_reg(window, VREG(WIN_STATUS));
@@ -1170,7 +1218,17 @@ retry:
if (busy) {
val = 0;
set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(msecs_to_jiffies(5));
+ schedule_timeout(msecs_to_jiffies(10));
+ count++;
+ /*
+ * It takes a few milliseconds to process all pending
+ * requests.
+ */
+ if (!(count % 1000))
+ pr_warn_ratelimited("VAS: pid %d stuck. Window (ID=%d) is in busy state. Retries %d\n",
+ vas_window_pid(&window->vas_win),
+ window->vas_win.winid, count);
+
goto retry;
}
}
@@ -1191,7 +1249,7 @@ retry:
* casting out becomes necessary we should consider offloading the
* job to a worker thread, so the window close can proceed quickly.
*/
-static void poll_window_castout(struct vas_window *window)
+static void poll_window_castout(struct pnv_vas_window *window)
{
/* stub for now */
}
@@ -1200,7 +1258,7 @@ static void poll_window_castout(struct vas_window *window)
* Unpin and close a window so no new requests are accepted and the
* hardware can evict this window from cache if necessary.
*/
-static void unpin_close_window(struct vas_window *window)
+static void unpin_close_window(struct pnv_vas_window *window)
{
u64 val;
@@ -1222,11 +1280,15 @@ static void unpin_close_window(struct vas_window *window)
*
* Besides the hardware, kernel has some bookkeeping of course.
*/
-int vas_win_close(struct vas_window *window)
+int vas_win_close(struct vas_window *vwin)
{
- if (!window)
+ struct pnv_vas_window *window;
+
+ if (!vwin)
return 0;
+ window = container_of(vwin, struct pnv_vas_window, vas_win);
+
if (!window->tx_win && atomic_read(&window->num_txwins) != 0) {
pr_devel("Attempting to close an active Rx window!\n");
WARN_ON_ONCE(1);
@@ -1235,22 +1297,175 @@ int vas_win_close(struct vas_window *window)
unmap_paste_region(window);
- clear_vinst_win(window);
-
poll_window_busy_state(window);
unpin_close_window(window);
poll_window_credits(window);
+ clear_vinst_win(window);
+
poll_window_castout(window);
/* if send window, drop reference to matching receive window */
- if (window->tx_win)
+ if (window->tx_win) {
+ if (window->user_win) {
+ put_vas_user_win_ref(&vwin->task_ref);
+ mm_context_remove_vas_window(vwin->task_ref.mm);
+ }
put_rx_win(window->rxwin);
+ }
vas_window_free(window);
return 0;
}
EXPORT_SYMBOL_GPL(vas_win_close);
+
+/*
+ * Return credit for the given window.
+ * Send windows and the fault window use the credit mechanism as follows:
+ *
+ * Send windows:
+ * - The default number of credits available for each send window is
+ * 1024, which means 1024 requests can be issued asynchronously at the
+ * same time. If no credit is available, the request is returned
+ * with RMA_Busy.
+ * - One credit is taken when an NX request is issued.
+ * - This credit is returned after NX processes that request.
+ * - If NX encounters a translation error, the kernel returns the
+ * credit on the specific send window after processing the fault CRB.
+ *
+ * Fault window:
+ * - The total number of credits available is FIFO_SIZE/CRB_SIZE,
+ * i.e. 4MB/128 in the current implementation. If no credit is
+ * available, RMA_Reject is returned.
+ * - A credit is taken when NX pastes a CRB in the fault FIFO.
+ * - The kernel will return the credit on the fault window after reading
+ * an entry from the fault FIFO.
+ */
+void vas_return_credit(struct pnv_vas_window *window, bool tx)
+{
+ uint64_t val;
+
+ val = 0ULL;
+ if (tx) { /* send window */
+ val = SET_FIELD(VAS_TX_WCRED, val, 1);
+ write_hvwc_reg(window, VREG(TX_WCRED_ADDER), val);
+ } else {
+ val = SET_FIELD(VAS_LRX_WCRED, val, 1);
+ write_hvwc_reg(window, VREG(LRX_WCRED_ADDER), val);
+ }
+}
+
+struct pnv_vas_window *vas_pswid_to_window(struct vas_instance *vinst,
+ uint32_t pswid)
+{
+ struct pnv_vas_window *window;
+ int winid;
+
+ if (!pswid) {
+ pr_devel("%s: called for pswid 0!\n", __func__);
+ return ERR_PTR(-ESRCH);
+ }
+
+ decode_pswid(pswid, NULL, &winid);
+
+ if (winid >= VAS_WINDOWS_PER_CHIP)
+ return ERR_PTR(-ESRCH);
+
+ /*
+ * If the application closes the window before the hardware
+ * returns the fault CRB, we should wait in vas_win_close()
+ * for the pending requests, so the window must be active
+ * and the process alive.
+ *
+ * If it is a kernel process, we should not get any faults and
+ * should not get here.
+ */
+ window = vinst->windows[winid];
+
+ if (!window) {
+ pr_err("PSWID decode: Could not find window for winid %d pswid %d vinst 0x%p\n",
+ winid, pswid, vinst);
+ return NULL;
+ }
+
+ /*
+ * Do some sanity checks on the decoded window. Window should be
+ * NX GZIP user send window. FTW windows should not incur faults
+ * since their CRBs are ignored (not queued on FIFO or processed
+ * by NX).
+ */
+ if (!window->tx_win || !window->user_win || !window->nx_win ||
+ window->vas_win.cop == VAS_COP_TYPE_FAULT ||
+ window->vas_win.cop == VAS_COP_TYPE_FTW) {
+ pr_err("PSWID decode: id %d, tx %d, user %d, nx %d, cop %d\n",
+ winid, window->tx_win, window->user_win,
+ window->nx_win, window->vas_win.cop);
+ WARN_ON(1);
+ }
+
+ return window;
+}
+
+static struct vas_window *vas_user_win_open(int vas_id, u64 flags,
+ enum vas_cop_type cop_type)
+{
+ struct vas_tx_win_attr txattr = {};
+
+ vas_init_tx_win_attr(&txattr, cop_type);
+
+ txattr.lpid = mfspr(SPRN_LPID);
+ txattr.pidr = mfspr(SPRN_PID);
+ txattr.user_win = true;
+ txattr.rsvd_txbuf_count = false;
+ txattr.pswid = false;
+
+ pr_devel("Pid %d: Opening txwin, PIDR %ld\n", txattr.pidr,
+ mfspr(SPRN_PID));
+
+ return vas_tx_win_open(vas_id, cop_type, &txattr);
+}
+
+static u64 vas_user_win_paste_addr(struct vas_window *txwin)
+{
+ struct pnv_vas_window *win;
+ u64 paste_addr;
+
+ win = container_of(txwin, struct pnv_vas_window, vas_win);
+ vas_win_paste_addr(win, &paste_addr, NULL);
+
+ return paste_addr;
+}
+
+static int vas_user_win_close(struct vas_window *txwin)
+{
+ vas_win_close(txwin);
+
+ return 0;
+}
+
+static const struct vas_user_win_ops vops = {
+ .open_win = vas_user_win_open,
+ .paste_addr = vas_user_win_paste_addr,
+ .close_win = vas_user_win_close,
+};
+
+/*
+ * Only the nx-gzip coprocessor type is supported now, but this API code
+ * can be extended to other coprocessor types later.
+ */
+int vas_register_api_powernv(struct module *mod, enum vas_cop_type cop_type,
+ const char *name)
+{
+
+ return vas_register_coproc_api(mod, cop_type, name, &vops);
+}
+EXPORT_SYMBOL_GPL(vas_register_api_powernv);
+
+void vas_unregister_api_powernv(void)
+{
+ vas_unregister_coproc_api();
+}
+EXPORT_SYMBOL_GPL(vas_unregister_api_powernv);
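
Most conversions in this file follow one pattern: the platform-specific struct pnv_vas_window embeds the common struct vas_window, the exported API trades in pointers to the embedded member, and the PowerNV code recovers its own object with container_of(). A standalone sketch of that embedding idiom (generic names; container_of() spelled out so it compiles outside the kernel):

	#include <stddef.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct common_window {
		int winid;			/* state shared with callers */
	};

	struct platform_window {
		struct common_window win;	/* embedded, handed out via the API */
		int backend_state;		/* platform-private state */
	};

	static int platform_op(struct common_window *cwin)
	{
		/* Recover the containing platform object from the common handle. */
		struct platform_window *pwin =
			container_of(cwin, struct platform_window, win);

		return pwin->backend_state;
	}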
diff --git a/arch/powerpc/platforms/powernv/vas.c b/arch/powerpc/platforms/powernv/vas.c
index ed9cc6df329a..b65256a63e87 100644
--- a/arch/powerpc/platforms/powernv/vas.c
+++ b/arch/powerpc/platforms/powernv/vas.c
@@ -14,7 +14,10 @@
#include <linux/of_platform.h>
#include <linux/of_address.h>
#include <linux/of.h>
+#include <linux/irqdomain.h>
+#include <linux/interrupt.h>
#include <asm/prom.h>
+#include <asm/xive.h>
#include "vas.h"
@@ -23,12 +26,35 @@ static LIST_HEAD(vas_instances);
static DEFINE_PER_CPU(int, cpu_vas_id);
+static int vas_irq_fault_window_setup(struct vas_instance *vinst)
+{
+ int rc = 0;
+
+ rc = request_threaded_irq(vinst->virq, vas_fault_handler,
+ vas_fault_thread_fn, 0, vinst->name, vinst);
+
+ if (rc) {
+ pr_err("VAS[%d]: Request IRQ(%d) failed with %d\n",
+ vinst->vas_id, vinst->virq, rc);
+ goto out;
+ }
+
+ rc = vas_setup_fault_window(vinst);
+ if (rc)
+ free_irq(vinst->virq, vinst);
+
+out:
+ return rc;
+}
+
static int init_vas_instance(struct platform_device *pdev)
{
- int rc, cpu, vasid;
- struct resource *res;
- struct vas_instance *vinst;
struct device_node *dn = pdev->dev.of_node;
+ struct vas_instance *vinst;
+ struct xive_irq_data *xd;
+ uint32_t chipid, hwirq;
+ struct resource *res;
+ int rc, cpu, vasid;
rc = of_property_read_u32(dn, "ibm,vas-id", &vasid);
if (rc) {
@@ -36,6 +62,12 @@ static int init_vas_instance(struct platform_device *pdev)
return -ENODEV;
}
+ rc = of_property_read_u32(dn, "ibm,chip-id", &chipid);
+ if (rc) {
+ pr_err("No ibm,chip-id property for %s?\n", pdev->name);
+ return -ENODEV;
+ }
+
if (pdev->num_resources != 4) {
pr_err("Unexpected DT configuration for [%s, %d]\n",
pdev->name, vasid);
@@ -46,6 +78,12 @@ static int init_vas_instance(struct platform_device *pdev)
if (!vinst)
return -ENOMEM;
+ vinst->name = kasprintf(GFP_KERNEL, "vas-%d", vasid);
+ if (!vinst->name) {
+ kfree(vinst);
+ return -ENOMEM;
+ }
+
INIT_LIST_HEAD(&vinst->node);
ida_init(&vinst->ida);
mutex_init(&vinst->mutex);
@@ -69,9 +107,32 @@ static int init_vas_instance(struct platform_device *pdev)
vinst->paste_win_id_shift = 63 - res->end;
- pr_devel("Initialized instance [%s, %d], paste_base 0x%llx, "
- "paste_win_id_shift 0x%llx\n", pdev->name, vasid,
- vinst->paste_base_addr, vinst->paste_win_id_shift);
+ hwirq = xive_native_alloc_irq_on_chip(chipid);
+ if (!hwirq) {
+ pr_err("Inst%d: Unable to allocate global irq for chip %d\n",
+ vinst->vas_id, chipid);
+ return -ENOENT;
+ }
+
+ vinst->virq = irq_create_mapping(NULL, hwirq);
+ if (!vinst->virq) {
+ pr_err("Inst%d: Unable to map global irq %d\n",
+ vinst->vas_id, hwirq);
+ return -EINVAL;
+ }
+
+ xd = irq_get_handler_data(vinst->virq);
+ if (!xd) {
+ pr_err("Inst%d: Invalid virq %d\n",
+ vinst->vas_id, vinst->virq);
+ return -EINVAL;
+ }
+
+ vinst->irq_port = xd->trig_page;
+ pr_devel("Initialized instance [%s, %d] paste_base 0x%llx paste_win_id_shift 0x%llx IRQ %d Port 0x%llx\n",
+ pdev->name, vasid, vinst->paste_base_addr,
+ vinst->paste_win_id_shift, vinst->virq,
+ vinst->irq_port);
for_each_possible_cpu(cpu) {
if (cpu_to_chip_id(cpu) == of_get_ibm_chip_id(dn))
@@ -82,6 +143,22 @@ static int init_vas_instance(struct platform_device *pdev)
list_add(&vinst->node, &vas_instances);
mutex_unlock(&vas_mutex);
+ spin_lock_init(&vinst->fault_lock);
+ /*
+ * IRQ and fault handling setup is needed only for user space
+ * send windows.
+ */
+ if (vinst->virq) {
+ rc = vas_irq_fault_window_setup(vinst);
+ /*
+ * Fault window is used only for user space send windows.
+ * So if vinst->virq is NULL, tx_win_open returns -ENODEV
+ * for user space.
+ */
+ if (rc)
+ vinst->virq = 0;
+ }
+
vas_instance_init_dbgdir(vinst);
dev_set_drvdata(&pdev->dev, vinst);
@@ -89,6 +166,7 @@ static int init_vas_instance(struct platform_device *pdev)
return 0;
free_vinst:
+ kfree(vinst->name);
kfree(vinst);
return -ENODEV;
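
The fault IRQ is wired up with the usual split-handler pattern: request_threaded_irq() registers a hard handler that only decides whether to wake the thread, plus a threaded handler that may sleep while draining work. A minimal sketch of that pairing (hypothetical example_* names; the real code protects the flag with a spinlock):

	#include <linux/interrupt.h>

	struct example_state {
		bool thread_running;
	};

	static irqreturn_t example_hard_handler(int irq, void *data)
	{
		struct example_state *st = data;

		/* Hard IRQ context: only decide whether to wake the thread. */
		return st->thread_running ? IRQ_HANDLED : IRQ_WAKE_THREAD;
	}

	static irqreturn_t example_thread_fn(int irq, void *data)
	{
		/* Process context: may sleep while draining pending work. */
		return IRQ_HANDLED;
	}

	static int example_setup(struct example_state *st, int virq)
	{
		return request_threaded_irq(virq, example_hard_handler,
					    example_thread_fn, 0, "example", st);
	}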
diff --git a/arch/powerpc/platforms/powernv/vas.h b/arch/powerpc/platforms/powernv/vas.h
index 5574aec9ee88..08d9d3d5a22b 100644
--- a/arch/powerpc/platforms/powernv/vas.h
+++ b/arch/powerpc/platforms/powernv/vas.h
@@ -101,11 +101,9 @@
/*
* Initial per-process credits.
* Max send window credits: 4K-1 (12-bits in VAS_TX_WCRED)
- * Max receive window credits: 64K-1 (16 bits in VAS_LRX_WCRED)
*
* TODO: Needs tuning for per-process credits
*/
-#define VAS_RX_WCREDS_MAX ((64 << 10) - 1)
#define VAS_TX_WCREDS_MAX ((4 << 10) - 1)
#define VAS_WCREDS_DEFAULT (1 << 10)
@@ -296,6 +294,22 @@ enum vas_notify_after_count {
};
/*
+ * NX can generate an interrupt for multiple faults and expects the kernel
+ * to process all of them. So read all valid CRB entries until the
+ * invalid one is found. Use pswid, which is pasted by NX, and ccw[0]
+ * (a reserved bit in BE) to check for a valid CRB. CCW[0] will not be
+ * touched by user space; the application gets a CRB format error if it
+ * updates this bit.
+ *
+ * Invalidate the FIFO during allocation and process all entries from the
+ * last successful read until invalid pswid and ccw[0] values are found.
+ * After reading each CRB entry from the fault FIFO, the kernel invalidates
+ * it by updating pswid with FIFO_INVALID_ENTRY and CCW[0] with
+ * CCW0_INVALID.
+ */
+#define FIFO_INVALID_ENTRY 0xffffffff
+#define CCW0_INVALID 1
+
+/*
* One per instance of VAS. Each instance will have a separate set of
* receive windows, one per coprocessor type.
*
@@ -313,39 +327,43 @@ struct vas_instance {
u64 paste_base_addr;
u64 paste_win_id_shift;
+ u64 irq_port;
+ int virq;
+ int fault_crbs;
+ int fault_fifo_size;
+ int fifo_in_progress; /* To wake up thread or return IRQ_HANDLED */
+ spinlock_t fault_lock; /* Protects fifo_in_progress update */
+ void *fault_fifo;
+ struct pnv_vas_window *fault_win; /* Fault window */
+
struct mutex mutex;
- struct vas_window *rxwin[VAS_COP_TYPE_MAX];
- struct vas_window *windows[VAS_WINDOWS_PER_CHIP];
+ struct pnv_vas_window *rxwin[VAS_COP_TYPE_MAX];
+ struct pnv_vas_window *windows[VAS_WINDOWS_PER_CHIP];
+ char *name;
char *dbgname;
struct dentry *dbgdir;
};
/*
- * In-kernel state a VAS window. One per window.
+ * In-kernel state of a VAS window on PowerNV. One per window.
*/
-struct vas_window {
+struct pnv_vas_window {
+ struct vas_window vas_win;
/* Fields common to send and receive windows */
struct vas_instance *vinst;
- int winid;
bool tx_win; /* True if send window */
bool nx_win; /* True if NX window */
bool user_win; /* True if user space window */
void *hvwc_map; /* HV window context */
void *uwc_map; /* OS/User window context */
- pid_t pid; /* Linux process id of owner */
- int wcreds_max; /* Window credits */
-
- char *dbgname;
- struct dentry *dbgdir;
/* Fields applicable only to send windows */
void *paste_kaddr;
char *paste_addr_name;
- struct vas_window *rxwin;
+ struct pnv_vas_window *rxwin;
- /* Feilds applicable only to receive windows */
- enum vas_cop_type cop;
+ /* Fields applicable only to receive windows */
atomic_t num_txwins;
};
@@ -358,7 +376,7 @@ struct vas_window {
* is a container for the register fields in the window context.
*/
struct vas_winctx {
- void *rx_fifo;
+ u64 rx_fifo;
int rx_fifo_size;
int wcreds_max;
int rsvd_txbuf_count;
@@ -404,19 +422,32 @@ extern struct mutex vas_mutex;
extern struct vas_instance *find_vas_instance(int vasid);
extern void vas_init_dbgdir(void);
extern void vas_instance_init_dbgdir(struct vas_instance *vinst);
-extern void vas_window_init_dbgdir(struct vas_window *win);
-extern void vas_window_free_dbgdir(struct vas_window *win);
+extern void vas_window_init_dbgdir(struct pnv_vas_window *win);
+extern void vas_window_free_dbgdir(struct pnv_vas_window *win);
+extern int vas_setup_fault_window(struct vas_instance *vinst);
+extern irqreturn_t vas_fault_thread_fn(int irq, void *data);
+extern irqreturn_t vas_fault_handler(int irq, void *dev_id);
+extern void vas_return_credit(struct pnv_vas_window *window, bool tx);
+extern struct pnv_vas_window *vas_pswid_to_window(struct vas_instance *vinst,
+ uint32_t pswid);
+extern void vas_win_paste_addr(struct pnv_vas_window *window, u64 *addr,
+ int *len);
+
+static inline int vas_window_pid(struct vas_window *window)
+{
+ return pid_vnr(window->task_ref.pid);
+}
-static inline void vas_log_write(struct vas_window *win, char *name,
+static inline void vas_log_write(struct pnv_vas_window *win, char *name,
void *regptr, u64 val)
{
if (val)
pr_debug("%swin #%d: %s reg %p, val 0x%016llx\n",
- win->tx_win ? "Tx" : "Rx", win->winid, name,
- regptr, val);
+ win->tx_win ? "Tx" : "Rx", win->vas_win.winid,
+ name, regptr, val);
}
-static inline void write_uwc_reg(struct vas_window *win, char *name,
+static inline void write_uwc_reg(struct pnv_vas_window *win, char *name,
s32 reg, u64 val)
{
void *regptr;
@@ -427,7 +458,7 @@ static inline void write_uwc_reg(struct vas_window *win, char *name,
out_be64(regptr, val);
}
-static inline void write_hvwc_reg(struct vas_window *win, char *name,
+static inline void write_hvwc_reg(struct pnv_vas_window *win, char *name,
s32 reg, u64 val)
{
void *regptr;
@@ -438,12 +469,27 @@ static inline void write_hvwc_reg(struct vas_window *win, char *name,
out_be64(regptr, val);
}
-static inline u64 read_hvwc_reg(struct vas_window *win,
+static inline u64 read_hvwc_reg(struct pnv_vas_window *win,
char *name __maybe_unused, s32 reg)
{
return in_be64(win->hvwc_map+reg);
}
+/*
+ * Encode/decode the Partition Send Window ID (PSWID) for a window in
+ * a way that we can uniquely identify any window in the system. i.e.
+ * we should be able to locate the 'struct vas_window' given the PSWID.
+ *
+ * Bits Usage
+ * 0:7 VAS id (8 bits)
+ * 8:15 Unused, 0 (8 bits)
+ * 16:31 Window id (16 bits)
+ */
+static inline u32 encode_pswid(int vasid, int winid)
+{
+ return ((u32)winid | (vasid << (31 - 7)));
+}
+
static inline void decode_pswid(u32 pswid, int *vasid, int *winid)
{
if (vasid)
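
Given the bit layout documented above (vasid in the top byte, winid in the low 16 bits), encode_pswid() and decode_pswid() are simple inverses. A self-contained sketch of the round trip (example_* names; bit positions assumed from the comment above, not taken from the patch):

	#include <stdint.h>
	#include <assert.h>

	static inline uint32_t example_encode_pswid(int vasid, int winid)
	{
		return (uint32_t)winid | ((uint32_t)vasid << (31 - 7));
	}

	static inline void example_decode_pswid(uint32_t pswid, int *vasid, int *winid)
	{
		if (vasid)
			*vasid = (pswid >> (31 - 7)) & 0xff;	/* top 8 bits */
		if (winid)
			*winid = pswid & 0xffff;		/* low 16 bits */
	}

	int main(void)
	{
		int vasid, winid;

		example_decode_pswid(example_encode_pswid(3, 42), &vasid, &winid);
		assert(vasid == 3 && winid == 42);
		return 0;
	}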
diff --git a/arch/powerpc/platforms/ps3/Kconfig b/arch/powerpc/platforms/ps3/Kconfig
index e32406e918d0..a44869e5ea70 100644
--- a/arch/powerpc/platforms/ps3/Kconfig
+++ b/arch/powerpc/platforms/ps3/Kconfig
@@ -7,6 +7,7 @@ config PPC_PS3
select USB_OHCI_BIG_ENDIAN_MMIO
select USB_EHCI_BIG_ENDIAN_MMIO
select HAVE_PCI
+ select IRQ_DOMAIN_NOMAP
help
This option enables support for the Sony PS3 game console
and other platforms using the PS3 hypervisor. Enabling this
@@ -85,6 +86,15 @@ config PS3_SYS_MANAGER
This support is required for PS3 system control. In
general, all users will say Y or M.
+config PS3_VERBOSE_RESULT
+ bool "PS3 Verbose LV1 hypercall results" if PS3_ADVANCED
+ depends on PPC_PS3
+ help
+ Enables more verbose log messages for LV1 hypercall results.
+
+ If in doubt, say N here and reduce the size of the kernel by a
+ small amount.
+
config PS3_REPOSITORY_WRITE
bool "PS3 Repository write support" if PS3_ADVANCED
depends on PPC_PS3
@@ -155,7 +165,7 @@ config PS3_LPM
If you intend to use the advanced performance monitoring and
profiling support of the Cell processor with programs like
- oprofile and perfmon2, then say Y or M, otherwise say N.
+ perfmon2, then say Y or M, otherwise say N.
config PS3GELIC_UDBG
bool "PS3 udbg output via UDP broadcasts on Ethernet"
diff --git a/arch/powerpc/platforms/ps3/device-init.c b/arch/powerpc/platforms/ps3/device-init.c
index 2735ec90414d..e87360a0fb40 100644
--- a/arch/powerpc/platforms/ps3/device-init.c
+++ b/arch/powerpc/platforms/ps3/device-init.c
@@ -13,6 +13,7 @@
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/reboot.h>
+#include <linux/rcuwait.h>
#include <asm/firmware.h>
#include <asm/lv1call.h>
@@ -670,7 +671,8 @@ struct ps3_notification_device {
spinlock_t lock;
u64 tag;
u64 lv1_status;
- struct completion done;
+ struct rcuwait wait;
+ bool done;
};
enum ps3_notify_type {
@@ -712,7 +714,8 @@ static irqreturn_t ps3_notification_interrupt(int irq, void *data)
pr_debug("%s:%u: completed, status 0x%llx\n", __func__,
__LINE__, status);
dev->lv1_status = status;
- complete(&dev->done);
+ dev->done = true;
+ rcuwait_wake_up(&dev->wait);
}
spin_unlock(&dev->lock);
return IRQ_HANDLED;
@@ -725,12 +728,12 @@ static int ps3_notification_read_write(struct ps3_notification_device *dev,
unsigned long flags;
int res;
- init_completion(&dev->done);
spin_lock_irqsave(&dev->lock, flags);
res = write ? lv1_storage_write(dev->sbd.dev_id, 0, 0, 1, 0, lpar,
&dev->tag)
: lv1_storage_read(dev->sbd.dev_id, 0, 0, 1, 0, lpar,
&dev->tag);
+ dev->done = false;
spin_unlock_irqrestore(&dev->lock, flags);
if (res) {
pr_err("%s:%u: %s failed %d\n", __func__, __LINE__, op, res);
@@ -738,14 +741,10 @@ static int ps3_notification_read_write(struct ps3_notification_device *dev,
}
pr_debug("%s:%u: notification %s issued\n", __func__, __LINE__, op);
- res = wait_event_interruptible(dev->done.wait,
- dev->done.done || kthread_should_stop());
+ rcuwait_wait_event(&dev->wait, dev->done || kthread_should_stop(), TASK_IDLE);
+
if (kthread_should_stop())
res = -EINTR;
- if (res) {
- pr_debug("%s:%u: interrupted %s\n", __func__, __LINE__, op);
- return res;
- }
if (dev->lv1_status) {
pr_err("%s:%u: %s not completed, status 0x%llx\n", __func__,
@@ -810,6 +809,7 @@ static int ps3_probe_thread(void *data)
}
spin_lock_init(&dev.lock);
+ rcuwait_init(&dev.wait);
res = request_irq(irq, ps3_notification_interrupt, 0,
"ps3_notification", &dev);
diff --git a/arch/powerpc/platforms/ps3/gelic_udbg.c b/arch/powerpc/platforms/ps3/gelic_udbg.c
index cba4f8f5b8d7..6b298010fd84 100644
--- a/arch/powerpc/platforms/ps3/gelic_udbg.c
+++ b/arch/powerpc/platforms/ps3/gelic_udbg.c
@@ -113,7 +113,7 @@ static int unmap_dma_mem(int bus_id, int dev_id, u64 bus_addr, size_t len)
return lv1_free_device_dma_region(bus_id, dev_id, real_bus_addr);
}
-static void gelic_debug_init(void)
+static void __init gelic_debug_init(void)
{
s64 result;
u64 v2;
diff --git a/arch/powerpc/platforms/ps3/htab.c b/arch/powerpc/platforms/ps3/htab.c
index 7ddc7ec6a7c0..c27e6cf85272 100644
--- a/arch/powerpc/platforms/ps3/htab.c
+++ b/arch/powerpc/platforms/ps3/htab.c
@@ -10,7 +10,6 @@
#include <linux/memblock.h>
#include <asm/machdep.h>
-#include <asm/prom.h>
#include <asm/udbg.h>
#include <asm/lv1call.h>
#include <asm/ps3fb.h>
@@ -169,7 +168,8 @@ static void ps3_hpte_invalidate(unsigned long slot, unsigned long vpn,
spin_unlock_irqrestore(&ps3_htab_lock, flags);
}
-static void ps3_hpte_clear(void)
+/* Called during kexec sequence with MMU off */
+static notrace void ps3_hpte_clear(void)
{
unsigned long hpte_count = (1UL << ppc64_pft_size) >> 4;
u64 i;
diff --git a/arch/powerpc/platforms/ps3/interrupt.c b/arch/powerpc/platforms/ps3/interrupt.c
index 78f2339ed5cb..49871427f599 100644
--- a/arch/powerpc/platforms/ps3/interrupt.c
+++ b/arch/powerpc/platforms/ps3/interrupt.c
@@ -9,6 +9,7 @@
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/irq.h>
+#include <linux/irqdomain.h>
#include <asm/machdep.h>
#include <asm/udbg.h>
@@ -45,7 +46,7 @@
* implementation equates HV plug value to Linux virq value, constrains each
* interrupt to have a system wide unique plug number, and limits the range
* of the plug values to map into the first dword of the bitmaps. This
- * gives a usable range of plug values of {NUM_ISA_INTERRUPTS..63}. Note
+ * gives a usable range of plug values of {NR_IRQS_LEGACY..63}. Note
* that there is no constraint on how many in this set an individual thread
* can acquire.
*
@@ -721,7 +722,7 @@ static unsigned int ps3_get_irq(void)
}
#if defined(DEBUG)
- if (unlikely(plug < NUM_ISA_INTERRUPTS || plug > PS3_PLUG_MAX)) {
+ if (unlikely(plug < NR_IRQS_LEGACY || plug > PS3_PLUG_MAX)) {
dump_bmp(&per_cpu(ps3_private, 0));
dump_bmp(&per_cpu(ps3_private, 1));
BUG();
diff --git a/arch/powerpc/platforms/ps3/mm.c b/arch/powerpc/platforms/ps3/mm.c
index 423be34f0f5f..1326de55fda6 100644
--- a/arch/powerpc/platforms/ps3/mm.c
+++ b/arch/powerpc/platforms/ps3/mm.c
@@ -6,6 +6,7 @@
* Copyright 2006 Sony Corp.
*/
+#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/memblock.h>
@@ -13,7 +14,6 @@
#include <asm/cell-regs.h>
#include <asm/firmware.h>
-#include <asm/prom.h>
#include <asm/udbg.h>
#include <asm/lv1call.h>
#include <asm/setup.h>
@@ -40,7 +40,7 @@ enum {
PAGE_SHIFT_16M = 24U,
};
-static unsigned long make_page_sizes(unsigned long a, unsigned long b)
+static unsigned long __init make_page_sizes(unsigned long a, unsigned long b)
{
return (a << 56) | (b << 48);
}
@@ -194,24 +194,27 @@ fail:
/**
* ps3_mm_vas_destroy -
+ *
+ * called during kexec sequence with MMU off.
*/
-void ps3_mm_vas_destroy(void)
+notrace void ps3_mm_vas_destroy(void)
{
int result;
- DBG("%s:%d: map.vas_id = %llu\n", __func__, __LINE__, map.vas_id);
-
if (map.vas_id) {
result = lv1_select_virtual_address_space(0);
- BUG_ON(result);
- result = lv1_destruct_virtual_address_space(map.vas_id);
- BUG_ON(result);
+ result += lv1_destruct_virtual_address_space(map.vas_id);
+
+ if (result) {
+ lv1_panic(0);
+ }
+
map.vas_id = 0;
}
}
-static int ps3_mm_get_repository_highmem(struct mem_region *r)
+static int __init ps3_mm_get_repository_highmem(struct mem_region *r)
{
int result;
@@ -263,7 +266,7 @@ static int ps3_mm_region_create(struct mem_region *r, unsigned long size)
int result;
u64 muid;
- r->size = _ALIGN_DOWN(size, 1 << PAGE_SHIFT_16M);
+ r->size = ALIGN_DOWN(size, 1 << PAGE_SHIFT_16M);
DBG("%s:%d requested %lxh\n", __func__, __LINE__, size);
DBG("%s:%d actual %llxh\n", __func__, __LINE__, r->size);
@@ -304,19 +307,20 @@ static void ps3_mm_region_destroy(struct mem_region *r)
int result;
if (!r->destroy) {
- pr_info("%s:%d: Not destroying high region: %llxh %llxh\n",
- __func__, __LINE__, r->base, r->size);
return;
}
- DBG("%s:%d: r->base = %llxh\n", __func__, __LINE__, r->base);
-
if (r->base) {
result = lv1_release_memory(r->base);
- BUG_ON(result);
+
+ if (result) {
+ lv1_panic(0);
+ }
+
r->size = r->base = r->offset = 0;
map.total = map.rm.size;
}
+
ps3_mm_set_repository_highmem(NULL);
}
@@ -359,7 +363,7 @@ static void __maybe_unused _dma_dump_region(const struct ps3_dma_region *r,
* @bus_addr: Starting ioc bus address of the area to map.
* @len: Length in bytes of the area to map.
* @link: A struct list_head used with struct ps3_dma_region.chunk_list, the
- * list of all chuncks owned by the region.
+ * list of all chunks owned by the region.
*
* This implementation uses a very simple dma page manager
* based on the dma_chunk structure. This scheme assumes
@@ -394,8 +398,8 @@ static struct dma_chunk * dma_find_chunk(struct ps3_dma_region *r,
unsigned long bus_addr, unsigned long len)
{
struct dma_chunk *c;
- unsigned long aligned_bus = _ALIGN_DOWN(bus_addr, 1 << r->page_size);
- unsigned long aligned_len = _ALIGN_UP(len+bus_addr-aligned_bus,
+ unsigned long aligned_bus = ALIGN_DOWN(bus_addr, 1 << r->page_size);
+ unsigned long aligned_len = ALIGN(len+bus_addr-aligned_bus,
1 << r->page_size);
list_for_each_entry(c, &r->chunk_list.head, link) {
@@ -423,8 +427,8 @@ static struct dma_chunk *dma_find_chunk_lpar(struct ps3_dma_region *r,
unsigned long lpar_addr, unsigned long len)
{
struct dma_chunk *c;
- unsigned long aligned_lpar = _ALIGN_DOWN(lpar_addr, 1 << r->page_size);
- unsigned long aligned_len = _ALIGN_UP(len + lpar_addr - aligned_lpar,
+ unsigned long aligned_lpar = ALIGN_DOWN(lpar_addr, 1 << r->page_size);
+ unsigned long aligned_len = ALIGN(len + lpar_addr - aligned_lpar,
1 << r->page_size);
list_for_each_entry(c, &r->chunk_list.head, link) {
@@ -775,8 +779,8 @@ static int dma_sb_map_area(struct ps3_dma_region *r, unsigned long virt_addr,
struct dma_chunk *c;
unsigned long phys_addr = is_kernel_addr(virt_addr) ? __pa(virt_addr)
: virt_addr;
- unsigned long aligned_phys = _ALIGN_DOWN(phys_addr, 1 << r->page_size);
- unsigned long aligned_len = _ALIGN_UP(len + phys_addr - aligned_phys,
+ unsigned long aligned_phys = ALIGN_DOWN(phys_addr, 1 << r->page_size);
+ unsigned long aligned_len = ALIGN(len + phys_addr - aligned_phys,
1 << r->page_size);
*bus_addr = dma_sb_lpar_to_bus(r, ps3_mm_phys_to_lpar(phys_addr));
@@ -830,8 +834,8 @@ static int dma_ioc0_map_area(struct ps3_dma_region *r, unsigned long virt_addr,
struct dma_chunk *c;
unsigned long phys_addr = is_kernel_addr(virt_addr) ? __pa(virt_addr)
: virt_addr;
- unsigned long aligned_phys = _ALIGN_DOWN(phys_addr, 1 << r->page_size);
- unsigned long aligned_len = _ALIGN_UP(len + phys_addr - aligned_phys,
+ unsigned long aligned_phys = ALIGN_DOWN(phys_addr, 1 << r->page_size);
+ unsigned long aligned_len = ALIGN(len + phys_addr - aligned_phys,
1 << r->page_size);
DBG(KERN_ERR "%s: vaddr=%#lx, len=%#lx\n", __func__,
@@ -889,9 +893,9 @@ static int dma_sb_unmap_area(struct ps3_dma_region *r, dma_addr_t bus_addr,
c = dma_find_chunk(r, bus_addr, len);
if (!c) {
- unsigned long aligned_bus = _ALIGN_DOWN(bus_addr,
+ unsigned long aligned_bus = ALIGN_DOWN(bus_addr,
1 << r->page_size);
- unsigned long aligned_len = _ALIGN_UP(len + bus_addr
+ unsigned long aligned_len = ALIGN(len + bus_addr
- aligned_bus, 1 << r->page_size);
DBG("%s:%d: not found: bus_addr %llxh\n",
__func__, __LINE__, bus_addr);
@@ -926,9 +930,9 @@ static int dma_ioc0_unmap_area(struct ps3_dma_region *r,
c = dma_find_chunk(r, bus_addr, len);
if (!c) {
- unsigned long aligned_bus = _ALIGN_DOWN(bus_addr,
+ unsigned long aligned_bus = ALIGN_DOWN(bus_addr,
1 << r->page_size);
- unsigned long aligned_len = _ALIGN_UP(len + bus_addr
+ unsigned long aligned_len = ALIGN(len + bus_addr
- aligned_bus,
1 << r->page_size);
DBG("%s:%d: not found: bus_addr %llxh\n",
@@ -974,7 +978,7 @@ static int dma_sb_region_create_linear(struct ps3_dma_region *r)
pr_info("%s:%d: forcing 16M pages for linear map\n",
__func__, __LINE__);
r->page_size = PS3_DMA_16M;
- r->len = _ALIGN_UP(r->len, 1 << r->page_size);
+ r->len = ALIGN(r->len, 1 << r->page_size);
}
}
@@ -1116,6 +1120,7 @@ int ps3_dma_region_init(struct ps3_system_bus_device *dev,
enum ps3_dma_region_type region_type, void *addr, unsigned long len)
{
unsigned long lpar_addr;
+ int result;
lpar_addr = addr ? ps3_mm_phys_to_lpar(__pa(addr)) : 0;
@@ -1125,7 +1130,17 @@ int ps3_dma_region_init(struct ps3_system_bus_device *dev,
r->offset = lpar_addr;
if (r->offset >= map.rm.size)
r->offset -= map.r1.offset;
- r->len = len ? len : _ALIGN_UP(map.total, 1 << r->page_size);
+ r->len = len ? len : ALIGN(map.total, 1 << r->page_size);
+
+ dev->core.dma_mask = &r->dma_mask;
+
+ result = dma_set_mask_and_coherent(&dev->core, DMA_BIT_MASK(32));
+
+ if (result < 0) {
+ dev_err(&dev->core, "%s:%d: dma_set_mask_and_coherent failed: %d\n",
+ __func__, __LINE__, result);
+ return result;
+ }
switch (dev->dev_type) {
case PS3_DEVICE_TYPE_SB:
@@ -1229,9 +1244,11 @@ void __init ps3_mm_init(void)
/**
* ps3_mm_shutdown - final cleanup of address space
+ *
+ * Called during the kexec sequence with the MMU off.
*/
-void ps3_mm_shutdown(void)
+notrace void ps3_mm_shutdown(void)
{
ps3_mm_region_destroy(&map.r1);
}
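The mm.c hunks above replace the powerpc-private _ALIGN_UP()/_ALIGN_DOWN() helpers with the generic kernel ALIGN()/ALIGN_DOWN() macros. As an illustrative sketch (not part of the patch; the real macros use typeof() so they work for any integer type), their behaviour for a power-of-two alignment — which always holds here, since the alignment is 1 << r->page_size — is:

	/* Simplified models of the generic helpers used in the hunks above. */
	#define ALIGN_DOWN_SKETCH(x, a)  ((x) & ~((a) - 1))
	#define ALIGN_UP_SKETCH(x, a)    (((x) + (a) - 1) & ~((a) - 1))

	/* Example with 4 KiB pages (r->page_size == 12):
	 *   ALIGN_DOWN_SKETCH(0x1234, 1 << 12) == 0x1000
	 *   ALIGN_UP_SKETCH(0x1234, 1 << 12)   == 0x2000
	 */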
diff --git a/arch/powerpc/platforms/ps3/os-area.c b/arch/powerpc/platforms/ps3/os-area.c
index cbddd63caf2d..b384cd2d6b99 100644
--- a/arch/powerpc/platforms/ps3/os-area.c
+++ b/arch/powerpc/platforms/ps3/os-area.c
@@ -17,8 +17,6 @@
#include <linux/of.h>
#include <linux/slab.h>
-#include <asm/prom.h>
-
#include "platform.h"
enum {
@@ -501,7 +499,7 @@ static int db_set_64(struct os_area_db *db, const struct os_area_db_id *id,
return -1;
}
-static int db_get_64(const struct os_area_db *db,
+static int __init db_get_64(const struct os_area_db *db,
const struct os_area_db_id *id, uint64_t *value)
{
struct db_iterator i;
@@ -517,7 +515,7 @@ static int db_get_64(const struct os_area_db *db,
return -1;
}
-static int db_get_rtc_diff(const struct os_area_db *db, int64_t *rtc_diff)
+static int __init db_get_rtc_diff(const struct os_area_db *db, int64_t *rtc_diff)
{
return db_get_64(db, &os_area_db_id_rtc_diff, (uint64_t*)rtc_diff);
}
@@ -613,10 +611,8 @@ static int update_flash_db(void)
/* Read in header and db from flash. */
header = kmalloc(buf_len, GFP_KERNEL);
- if (!header) {
- pr_debug("%s: kmalloc failed\n", __func__);
+ if (!header)
return -ENOMEM;
- }
count = os_area_flash_read(header, buf_len, 0);
if (count < 0) {
diff --git a/arch/powerpc/platforms/ps3/platform.h b/arch/powerpc/platforms/ps3/platform.h
index 07bd39ef71ff..6beecdb0d51f 100644
--- a/arch/powerpc/platforms/ps3/platform.h
+++ b/arch/powerpc/platforms/ps3/platform.h
@@ -35,7 +35,7 @@ void __init ps3_register_ipi_irq(unsigned int cpu, unsigned int virq);
/* smp */
-void smp_init_ps3(void);
+void __init smp_init_ps3(void);
#ifdef CONFIG_SMP
void ps3_smp_cleanup_cpu(int cpu);
#else
@@ -134,9 +134,9 @@ struct ps3_repository_device {
int ps3_repository_find_device(struct ps3_repository_device *repo);
int ps3_repository_find_device_by_id(struct ps3_repository_device *repo,
u64 bus_id, u64 dev_id);
-int ps3_repository_find_devices(enum ps3_bus_type bus_type,
+int __init ps3_repository_find_devices(enum ps3_bus_type bus_type,
int (*callback)(const struct ps3_repository_device *repo));
-int ps3_repository_find_bus(enum ps3_bus_type bus_type, unsigned int from,
+int __init ps3_repository_find_bus(enum ps3_bus_type bus_type, unsigned int from,
unsigned int *bus_index);
int ps3_repository_find_interrupt(const struct ps3_repository_device *repo,
enum ps3_interrupt_type intr_type, unsigned int *interrupt_id);
@@ -211,8 +211,8 @@ static inline int ps3_repository_delete_highmem_info(unsigned int region_index)
int ps3_repository_read_num_be(unsigned int *num_be);
int ps3_repository_read_be_node_id(unsigned int be_index, u64 *node_id);
int ps3_repository_read_be_id(u64 node_id, u64 *be_id);
-int ps3_repository_read_tb_freq(u64 node_id, u64 *tb_freq);
-int ps3_repository_read_be_tb_freq(unsigned int be_index, u64 *tb_freq);
+int __init ps3_repository_read_tb_freq(u64 node_id, u64 *tb_freq);
+int __init ps3_repository_read_be_tb_freq(unsigned int be_index, u64 *tb_freq);
/* repository performance monitor info */
@@ -247,7 +247,7 @@ int ps3_repository_read_spu_resource_id(unsigned int res_index,
/* repository vuart info */
-int ps3_repository_read_vuart_av_port(unsigned int *port);
-int ps3_repository_read_vuart_sysmgr_port(unsigned int *port);
+int __init ps3_repository_read_vuart_av_port(unsigned int *port);
+int __init ps3_repository_read_vuart_sysmgr_port(unsigned int *port);
#endif
diff --git a/arch/powerpc/platforms/ps3/repository.c b/arch/powerpc/platforms/ps3/repository.c
index 21712964e76f..205763061a2d 100644
--- a/arch/powerpc/platforms/ps3/repository.c
+++ b/arch/powerpc/platforms/ps3/repository.c
@@ -413,7 +413,7 @@ found_dev:
return 0;
}
-int ps3_repository_find_devices(enum ps3_bus_type bus_type,
+int __init ps3_repository_find_devices(enum ps3_bus_type bus_type,
int (*callback)(const struct ps3_repository_device *repo))
{
int result = 0;
@@ -455,7 +455,7 @@ int ps3_repository_find_devices(enum ps3_bus_type bus_type,
return result;
}
-int ps3_repository_find_bus(enum ps3_bus_type bus_type, unsigned int from,
+int __init ps3_repository_find_bus(enum ps3_bus_type bus_type, unsigned int from,
unsigned int *bus_index)
{
unsigned int i;
@@ -908,7 +908,7 @@ int ps3_repository_read_boot_dat_size(unsigned int *size)
return result;
}
-int ps3_repository_read_vuart_av_port(unsigned int *port)
+int __init ps3_repository_read_vuart_av_port(unsigned int *port)
{
int result;
u64 v1 = 0;
@@ -923,7 +923,7 @@ int ps3_repository_read_vuart_av_port(unsigned int *port)
return result;
}
-int ps3_repository_read_vuart_sysmgr_port(unsigned int *port)
+int __init ps3_repository_read_vuart_sysmgr_port(unsigned int *port)
{
int result;
u64 v1 = 0;
@@ -1005,7 +1005,7 @@ int ps3_repository_read_be_id(u64 node_id, u64 *be_id)
be_id, NULL);
}
-int ps3_repository_read_tb_freq(u64 node_id, u64 *tb_freq)
+int __init ps3_repository_read_tb_freq(u64 node_id, u64 *tb_freq)
{
return read_node(PS3_LPAR_ID_PME,
make_first_field("be", 0),
@@ -1015,7 +1015,7 @@ int ps3_repository_read_tb_freq(u64 node_id, u64 *tb_freq)
tb_freq, NULL);
}
-int ps3_repository_read_be_tb_freq(unsigned int be_index, u64 *tb_freq)
+int __init ps3_repository_read_be_tb_freq(unsigned int be_index, u64 *tb_freq)
{
int result;
u64 node_id;
@@ -1178,7 +1178,7 @@ int ps3_repository_delete_highmem_info(unsigned int region_index)
#if defined(DEBUG)
-int ps3_repository_dump_resource_info(const struct ps3_repository_device *repo)
+int __init ps3_repository_dump_resource_info(const struct ps3_repository_device *repo)
{
int result = 0;
unsigned int res_index;
@@ -1231,7 +1231,7 @@ int ps3_repository_dump_resource_info(const struct ps3_repository_device *repo)
return result;
}
-static int dump_stor_dev_info(struct ps3_repository_device *repo)
+static int __init dump_stor_dev_info(struct ps3_repository_device *repo)
{
int result = 0;
unsigned int num_regions, region_index;
@@ -1279,7 +1279,7 @@ out:
return result;
}
-static int dump_device_info(struct ps3_repository_device *repo,
+static int __init dump_device_info(struct ps3_repository_device *repo,
unsigned int num_dev)
{
int result = 0;
@@ -1323,7 +1323,7 @@ static int dump_device_info(struct ps3_repository_device *repo,
return result;
}
-int ps3_repository_dump_bus_info(void)
+int __init ps3_repository_dump_bus_info(void)
{
int result = 0;
struct ps3_repository_device repo;
diff --git a/arch/powerpc/platforms/ps3/setup.c b/arch/powerpc/platforms/ps3/setup.c
index b29368931c56..d7495785fe47 100644
--- a/arch/powerpc/platforms/ps3/setup.c
+++ b/arch/powerpc/platforms/ps3/setup.c
@@ -13,13 +13,13 @@
#include <linux/console.h>
#include <linux/export.h>
#include <linux/memblock.h>
+#include <linux/of.h>
#include <asm/machdep.h>
#include <asm/firmware.h>
#include <asm/time.h>
#include <asm/iommu.h>
#include <asm/udbg.h>
-#include <asm/prom.h>
#include <asm/lv1call.h>
#include <asm/ps3gpu.h>
@@ -36,6 +36,7 @@ DEFINE_MUTEX(ps3_gpu_mutex);
EXPORT_SYMBOL_GPL(ps3_gpu_mutex);
static union ps3_firmware_version ps3_firmware_version;
+static char ps3_firmware_version_str[16];
void ps3_get_firmware_version(union ps3_firmware_version *v)
{
@@ -138,7 +139,7 @@ static int __init early_parse_ps3fb(char *p)
if (!p)
return 1;
- ps3fb_videomemory.size = _ALIGN_UP(memparse(p, &p),
+ ps3fb_videomemory.size = ALIGN(memparse(p, &p),
ps3fb_videomemory.align);
return 0;
}
@@ -182,6 +183,40 @@ static int ps3_set_dabr(unsigned long dabr, unsigned long dabrx)
return lv1_set_dabr(dabr, dabrx) ? -1 : 0;
}
+static ssize_t ps3_fw_version_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%s", ps3_firmware_version_str);
+}
+
+static int __init ps3_setup_sysfs(void)
+{
+ static struct kobj_attribute attr = __ATTR(fw-version, S_IRUGO,
+ ps3_fw_version_show, NULL);
+ static struct kobject *kobj;
+ int result;
+
+ kobj = kobject_create_and_add("ps3", firmware_kobj);
+
+ if (!kobj) {
+ pr_warn("%s:%d: kobject_create_and_add failed.\n", __func__,
+ __LINE__);
+ return -ENOMEM;
+ }
+
+ result = sysfs_create_file(kobj, &attr.attr);
+
+ if (result) {
+ pr_warn("%s:%d: sysfs_create_file failed.\n", __func__,
+ __LINE__);
+ kobject_put(kobj);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+core_initcall(ps3_setup_sysfs);
+
static void __init ps3_setup_arch(void)
{
u64 tmp;
@@ -190,9 +225,11 @@ static void __init ps3_setup_arch(void)
lv1_get_version_info(&ps3_firmware_version.raw, &tmp);
- printk(KERN_INFO "PS3 firmware version %u.%u.%u\n",
- ps3_firmware_version.major, ps3_firmware_version.minor,
- ps3_firmware_version.rev);
+ snprintf(ps3_firmware_version_str, sizeof(ps3_firmware_version_str),
+ "%u.%u.%u", ps3_firmware_version.major,
+ ps3_firmware_version.minor, ps3_firmware_version.rev);
+
+ printk(KERN_INFO "PS3 firmware version %s\n", ps3_firmware_version_str);
ps3_spu_set_platform();
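The setup.c changes above export the firmware version through sysfs: a "ps3" kobject is created under firmware_kobj with a read-only "fw-version" attribute, so the string built in ps3_setup_arch() appears as /sys/firmware/ps3/fw-version. A minimal illustrative userspace reader (an assumption for demonstration, not part of the patch) could be:

	#include <stdio.h>

	int main(void)
	{
		char buf[16] = "";
		FILE *f = fopen("/sys/firmware/ps3/fw-version", "r");

		if (!f)
			return 1;
		if (fgets(buf, sizeof(buf), f))
			printf("PS3 firmware: %s\n", buf);
		fclose(f);
		return 0;
	}

Note that ps3_fw_version_show() formats the buffer without a trailing newline, so the attribute contents are exactly the "major.minor.rev" string.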
diff --git a/arch/powerpc/platforms/ps3/smp.c b/arch/powerpc/platforms/ps3/smp.c
index 93b1e73b3529..85295756005a 100644
--- a/arch/powerpc/platforms/ps3/smp.c
+++ b/arch/powerpc/platforms/ps3/smp.c
@@ -112,7 +112,7 @@ static struct smp_ops_t ps3_smp_ops = {
.kick_cpu = smp_generic_kick_cpu,
};
-void smp_init_ps3(void)
+void __init smp_init_ps3(void)
{
DBG(" -> %s\n", __func__);
smp_ops = &ps3_smp_ops;
diff --git a/arch/powerpc/platforms/ps3/spu.c b/arch/powerpc/platforms/ps3/spu.c
index 1193c294b8d0..4a2520ec6d7f 100644
--- a/arch/powerpc/platforms/ps3/spu.c
+++ b/arch/powerpc/platforms/ps3/spu.c
@@ -137,7 +137,7 @@ u64 ps3_get_spe_id(void *arg)
}
EXPORT_SYMBOL_GPL(ps3_get_spe_id);
-static unsigned long get_vas_id(void)
+static unsigned long __init get_vas_id(void)
{
u64 id;
@@ -448,7 +448,7 @@ static void ps3_disable_spu(struct spu_context *ctx)
ctx->ops->runcntl_stop(ctx);
}
-const struct spu_management_ops spu_management_ps3_ops = {
+static const struct spu_management_ops spu_management_ps3_ops = {
.enumerate_spus = ps3_enumerate_spus,
.create_spu = ps3_create_spu,
.destroy_spu = ps3_destroy_spu,
@@ -589,7 +589,7 @@ static u64 resource_allocation_enable_get(struct spu *spu)
return 0; /* No support. */
}
-const struct spu_priv1_ops spu_priv1_ps3_ops = {
+static const struct spu_priv1_ops spu_priv1_ps3_ops = {
.int_mask_and = int_mask_and,
.int_mask_or = int_mask_or,
.int_mask_set = int_mask_set,
diff --git a/arch/powerpc/platforms/ps3/system-bus.c b/arch/powerpc/platforms/ps3/system-bus.c
index 3542b7bd6a46..2502e9b17df4 100644
--- a/arch/powerpc/platforms/ps3/system-bus.c
+++ b/arch/powerpc/platforms/ps3/system-bus.c
@@ -9,7 +9,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/export.h>
-#include <linux/dma-mapping.h>
+#include <linux/dma-map-ops.h>
#include <linux/err.h>
#include <linux/slab.h>
@@ -64,9 +64,10 @@ static int ps3_open_hv_device_sb(struct ps3_system_bus_device *dev)
result = lv1_open_device(dev->bus_id, dev->dev_id, 0);
if (result) {
- pr_debug("%s:%d: lv1_open_device failed: %s\n", __func__,
- __LINE__, ps3_result(result));
- result = -EPERM;
+ pr_warn("%s:%d: lv1_open_device dev=%u.%u(%s) failed: %s\n",
+ __func__, __LINE__, dev->match_id, dev->match_sub_id,
+ dev_name(&dev->core), ps3_result(result));
+ result = -EPERM;
}
done:
@@ -120,7 +121,7 @@ static int ps3_open_hv_device_gpu(struct ps3_system_bus_device *dev)
result = lv1_gpu_open(0);
if (result) {
- pr_debug("%s:%d: lv1_gpu_open failed: %s\n", __func__,
+ pr_warn("%s:%d: lv1_gpu_open failed: %s\n", __func__,
__LINE__, ps3_result(result));
result = -EPERM;
}
@@ -380,9 +381,8 @@ static int ps3_system_bus_probe(struct device *_dev)
return result;
}
-static int ps3_system_bus_remove(struct device *_dev)
+static void ps3_system_bus_remove(struct device *_dev)
{
- int result = 0;
struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev);
struct ps3_system_bus_driver *drv;
@@ -393,13 +393,12 @@ static int ps3_system_bus_remove(struct device *_dev)
BUG_ON(!drv);
if (drv->remove)
- result = drv->remove(dev);
+ drv->remove(dev);
else
dev_dbg(&dev->core, "%s:%d %s: no remove method\n",
__func__, __LINE__, drv->core.name);
pr_debug(" <- %s:%d: %s\n", __func__, __LINE__, dev_name(&dev->core));
- return result;
}
static void ps3_system_bus_shutdown(struct device *_dev)
@@ -602,9 +601,9 @@ static dma_addr_t ps3_ioc0_map_page(struct device *_dev, struct page *page,
iopte_flag |= CBE_IOPTE_PP_W | CBE_IOPTE_SO_RW;
break;
default:
- /* not happned */
+ /* not happened */
BUG();
- };
+ }
result = ps3_dma_map(dev->d_region, (unsigned long)ptr, size,
&bus_addr, iopte_flag);
@@ -663,7 +662,7 @@ static int ps3_ioc0_map_sg(struct device *_dev, struct scatterlist *sg,
unsigned long attrs)
{
BUG();
- return 0;
+ return -EINVAL;
}
static void ps3_sb_unmap_sg(struct device *_dev, struct scatterlist *sg,
@@ -696,6 +695,8 @@ static const struct dma_map_ops ps3_sb_dma_ops = {
.unmap_page = ps3_unmap_page,
.mmap = dma_common_mmap,
.get_sgtable = dma_common_get_sgtable,
+ .alloc_pages = dma_common_alloc_pages,
+ .free_pages = dma_common_free_pages,
};
static const struct dma_map_ops ps3_ioc0_dma_ops = {
@@ -708,6 +709,8 @@ static const struct dma_map_ops ps3_ioc0_dma_ops = {
.unmap_page = ps3_unmap_page,
.mmap = dma_common_mmap,
.get_sgtable = dma_common_get_sgtable,
+ .alloc_pages = dma_common_alloc_pages,
+ .free_pages = dma_common_free_pages,
};
/**
@@ -759,7 +762,7 @@ int ps3_system_bus_device_register(struct ps3_system_bus_device *dev)
break;
default:
BUG();
- };
+ }
dev->core.of_node = NULL;
set_dev_node(&dev->core, 0);
diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig
index 24c18362e5ea..a3b4d99567cb 100644
--- a/arch/powerpc/platforms/pseries/Kconfig
+++ b/arch/powerpc/platforms/pseries/Kconfig
@@ -17,23 +17,36 @@ config PPC_PSERIES
select PPC_RTAS_DAEMON
select RTAS_ERROR_LOGGING
select PPC_UDBG_16550
- select PPC_NATIVE
select PPC_DOORBELL
select HOTPLUG_CPU
- select ARCH_RANDOM
select FORCE_SMP
select SWIOTLB
default y
+config PARAVIRT
+ bool
+
+config PARAVIRT_SPINLOCKS
+ bool
+
+config PARAVIRT_TIME_ACCOUNTING
+ select PARAVIRT
+ bool
+
config PPC_SPLPAR
- depends on PPC_PSERIES
bool "Support for shared-processor logical partitions"
+ depends on PPC_PSERIES
+ select PARAVIRT_SPINLOCKS if PPC_QUEUED_SPINLOCKS
+ select PARAVIRT_TIME_ACCOUNTING if VIRT_CPU_ACCOUNTING_GEN
+ default y
help
Enabling this option will make the kernel run more efficiently
on logically-partitioned pSeries systems which use shared
processors, that is, which share physical processors between
two or more partitions.
+ Say Y if you are unsure.
+
config DTL
bool "Dispatch Trace Log"
depends on PPC_SPLPAR && DEBUG_FS
@@ -54,10 +67,6 @@ config PSERIES_ENERGY
Provides: /sys/devices/system/cpu/pseries_(de)activation_hint_list
and /sys/devices/system/cpu/cpuN/pseries_(de)activation_hint
-config SCANLOG
- tristate "Scanlog dump interface"
- depends on RTAS_PROC && PPC_PSERIES
-
config IO_EVENT_IRQ
bool "IO Event Interrupt support"
depends on PPC_PSERIES
@@ -140,6 +149,19 @@ config IBMEBUS
help
Bus device driver for GX bus based adapters.
+config PSERIES_PLPKS
+ depends on PPC_PSERIES
+ bool "Support for the Platform Key Storage"
+ help
+ PowerVM provides an isolated Platform Keystore (PKS) storage
+ allocation for each LPAR, with individually managed access
+ controls, to store sensitive information securely. It can be
+ used to store asymmetric public keys or secrets as required
+ by different use cases. Select this config to enable the
+ operating system interface to the hypervisor for accessing this space.
+
+ If unsure, select N.
+
config PAPR_SCM
depends on PPC_PSERIES && MEMORY_HOTPLUG && LIBNVDIMM
tristate "Support for the PAPR Storage Class Memory interface"
@@ -152,6 +174,7 @@ config PPC_SVM
select SWIOTLB
select ARCH_HAS_MEM_ENCRYPT
select ARCH_HAS_FORCE_DMA_UNENCRYPTED
+ select ARCH_HAS_CC_PLATFORM
help
There are certain POWER platforms which support secure guests using
the Protected Execution Facility, with the help of an Ultravisor
diff --git a/arch/powerpc/platforms/pseries/Makefile b/arch/powerpc/platforms/pseries/Makefile
index a3c74a5cf20d..92310202bdd7 100644
--- a/arch/powerpc/platforms/pseries/Makefile
+++ b/arch/powerpc/platforms/pseries/Makefile
@@ -6,9 +6,9 @@ obj-y := lpar.o hvCall.o nvram.o reconfig.o \
of_helpers.o \
setup.o iommu.o event_sources.o ras.o \
firmware.o power.o dlpar.o mobility.o rng.o \
- pci.o pci_dlpar.o eeh_pseries.o msi.o
+ pci.o pci_dlpar.o eeh_pseries.o msi.o \
+ papr_platform_attributes.o dtl.o
obj-$(CONFIG_SMP) += smp.o
-obj-$(CONFIG_SCANLOG) += scanlog.o
obj-$(CONFIG_KEXEC_CORE) += kexec.o
obj-$(CONFIG_PSERIES_ENERGY) += pseries_energy.o
@@ -19,7 +19,6 @@ obj-$(CONFIG_HVC_CONSOLE) += hvconsole.o
obj-$(CONFIG_HVCS) += hvcserver.o
obj-$(CONFIG_HCALL_STATS) += hvCall_inst.o
obj-$(CONFIG_CMM) += cmm.o
-obj-$(CONFIG_DTL) += dtl.o
obj-$(CONFIG_IO_EVENT_IRQ) += io_event_irq.o
obj-$(CONFIG_LPARCFG) += lparcfg.o
obj-$(CONFIG_IBMVIO) += vio.o
@@ -28,7 +27,13 @@ obj-$(CONFIG_PAPR_SCM) += papr_scm.o
obj-$(CONFIG_PPC_SPLPAR) += vphn.o
obj-$(CONFIG_PPC_SVM) += svm.o
obj-$(CONFIG_FA_DUMP) += rtas-fadump.o
+obj-$(CONFIG_PSERIES_PLPKS) += plpks.o
-ifdef CONFIG_PPC_PSERIES
obj-$(CONFIG_SUSPEND) += suspend.o
-endif
+obj-$(CONFIG_PPC_VAS) += vas.o vas-sysfs.o
+
+obj-$(CONFIG_ARCH_HAS_CC_PLATFORM) += cc_platform.o
+
+# nothing that operates in real mode is safe for KASAN
+KASAN_SANITIZE_ras.o := n
+KASAN_SANITIZE_kexec.o := n
diff --git a/arch/powerpc/platforms/pseries/cc_platform.c b/arch/powerpc/platforms/pseries/cc_platform.c
new file mode 100644
index 000000000000..e8021af83a19
--- /dev/null
+++ b/arch/powerpc/platforms/pseries/cc_platform.c
@@ -0,0 +1,26 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Confidential Computing Platform Capability checks
+ *
+ * Copyright (C) 2021 Advanced Micro Devices, Inc.
+ *
+ * Author: Tom Lendacky <thomas.lendacky@amd.com>
+ */
+
+#include <linux/export.h>
+#include <linux/cc_platform.h>
+
+#include <asm/machdep.h>
+#include <asm/svm.h>
+
+bool cc_platform_has(enum cc_attr attr)
+{
+ switch (attr) {
+ case CC_ATTR_MEM_ENCRYPT:
+ return is_secure_guest();
+
+ default:
+ return false;
+ }
+}
+EXPORT_SYMBOL_GPL(cc_platform_has);
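The new cc_platform.c above wires the generic cc_platform_has() query into the pseries secure-guest check. A hedged sketch of how a caller can use it (illustrative only, not part of the patch):

	#include <linux/cc_platform.h>

	static bool example_guest_memory_is_encrypted(void)
	{
		/* On pseries this resolves to is_secure_guest();
		 * all other attributes simply return false. */
		return cc_platform_has(CC_ATTR_MEM_ENCRYPT);
	}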
diff --git a/arch/powerpc/platforms/pseries/cmm.c b/arch/powerpc/platforms/pseries/cmm.c
index 9dba7e880885..5f4037c1d7fe 100644
--- a/arch/powerpc/platforms/pseries/cmm.c
+++ b/arch/powerpc/platforms/pseries/cmm.c
@@ -19,14 +19,10 @@
#include <linux/stringify.h>
#include <linux/swap.h>
#include <linux/device.h>
-#include <linux/mount.h>
-#include <linux/pseudo_fs.h>
-#include <linux/magic.h>
#include <linux/balloon_compaction.h>
#include <asm/firmware.h>
#include <asm/hvcall.h>
#include <asm/mmu.h>
-#include <asm/pgalloc.h>
#include <linux/uaccess.h>
#include <linux/memory.h>
#include <asm/plpar_wrappers.h>
@@ -476,8 +472,6 @@ static struct notifier_block cmm_reboot_nb = {
static int cmm_memory_cb(struct notifier_block *self,
unsigned long action, void *arg)
{
- int ret = 0;
-
switch (action) {
case MEM_GOING_OFFLINE:
mutex_lock(&hotplug_mutex);
@@ -494,7 +488,7 @@ static int cmm_memory_cb(struct notifier_block *self,
break;
}
- return notifier_from_errno(ret);
+ return NOTIFY_OK;
}
static struct notifier_block cmm_mem_nb = {
@@ -503,19 +497,6 @@ static struct notifier_block cmm_mem_nb = {
};
#ifdef CONFIG_BALLOON_COMPACTION
-static struct vfsmount *balloon_mnt;
-
-static int cmm_init_fs_context(struct fs_context *fc)
-{
- return init_pseudo(fc, PPC_CMM_MAGIC) ? 0 : -ENOMEM;
-}
-
-static struct file_system_type balloon_fs = {
- .name = "ppc-cmm",
- .init_fs_context = cmm_init_fs_context,
- .kill_sb = kill_anon_super,
-};
-
static int cmm_migratepage(struct balloon_dev_info *b_dev_info,
struct page *newpage, struct page *page,
enum migrate_mode mode)
@@ -567,47 +548,13 @@ static int cmm_migratepage(struct balloon_dev_info *b_dev_info,
return MIGRATEPAGE_SUCCESS;
}
-static int cmm_balloon_compaction_init(void)
+static void cmm_balloon_compaction_init(void)
{
- int rc;
-
balloon_devinfo_init(&b_dev_info);
b_dev_info.migratepage = cmm_migratepage;
-
- balloon_mnt = kern_mount(&balloon_fs);
- if (IS_ERR(balloon_mnt)) {
- rc = PTR_ERR(balloon_mnt);
- balloon_mnt = NULL;
- return rc;
- }
-
- b_dev_info.inode = alloc_anon_inode(balloon_mnt->mnt_sb);
- if (IS_ERR(b_dev_info.inode)) {
- rc = PTR_ERR(b_dev_info.inode);
- b_dev_info.inode = NULL;
- kern_unmount(balloon_mnt);
- balloon_mnt = NULL;
- return rc;
- }
-
- b_dev_info.inode->i_mapping->a_ops = &balloon_aops;
- return 0;
-}
-static void cmm_balloon_compaction_deinit(void)
-{
- if (b_dev_info.inode)
- iput(b_dev_info.inode);
- b_dev_info.inode = NULL;
- kern_unmount(balloon_mnt);
- balloon_mnt = NULL;
}
#else /* CONFIG_BALLOON_COMPACTION */
-static int cmm_balloon_compaction_init(void)
-{
- return 0;
-}
-
-static void cmm_balloon_compaction_deinit(void)
+static void cmm_balloon_compaction_init(void)
{
}
#endif /* CONFIG_BALLOON_COMPACTION */
@@ -625,9 +572,7 @@ static int cmm_init(void)
if (!firmware_has_feature(FW_FEATURE_CMO) && !simulate)
return -EOPNOTSUPP;
- rc = cmm_balloon_compaction_init();
- if (rc)
- return rc;
+ cmm_balloon_compaction_init();
rc = register_oom_notifier(&cmm_oom_nb);
if (rc < 0)
@@ -661,7 +606,6 @@ out_reboot_notifier:
out_oom_notifier:
unregister_oom_notifier(&cmm_oom_nb);
out_balloon_compaction:
- cmm_balloon_compaction_deinit();
return rc;
}
@@ -680,7 +624,6 @@ static void cmm_exit(void)
unregister_memory_notifier(&cmm_mem_nb);
cmm_free_pages(atomic_long_read(&loaned_pages));
cmm_unregister_sysfs(&cmm_dev);
- cmm_balloon_compaction_deinit();
}
/**
diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c
index 16e86ba8aa20..498d6efcb5ae 100644
--- a/arch/powerpc/platforms/pseries/dlpar.c
+++ b/arch/powerpc/platforms/pseries/dlpar.c
@@ -19,7 +19,6 @@
#include "of_helpers.h"
#include "pseries.h"
-#include <asm/prom.h>
#include <asm/machdep.h>
#include <linux/uaccess.h>
#include <asm/rtas.h>
@@ -127,7 +126,6 @@ void dlpar_free_cc_nodes(struct device_node *dn)
#define NEXT_PROPERTY 3
#define PREV_PARENT 4
#define MORE_MEMORY 5
-#define CALL_AGAIN -2
#define ERR_CFG_USE -9003
struct device_node *dlpar_configure_connector(__be32 drc_index,
@@ -168,6 +166,9 @@ struct device_node *dlpar_configure_connector(__be32 drc_index,
spin_unlock(&rtas_data_buf_lock);
+ if (rtas_busy_delay(rc))
+ continue;
+
switch (rc) {
case COMPLETE:
break;
@@ -216,9 +217,6 @@ struct device_node *dlpar_configure_connector(__be32 drc_index,
last_dn = last_dn->parent;
break;
- case CALL_AGAIN:
- break;
-
case MORE_MEMORY:
case ERR_CFG_USE:
default:
@@ -290,8 +288,7 @@ int dlpar_acquire_drc(u32 drc_index)
{
int dr_status, rc;
- rc = rtas_call(rtas_token("get-sensor-state"), 2, 2, &dr_status,
- DR_ENTITY_SENSE, drc_index);
+ rc = rtas_get_sensor(DR_ENTITY_SENSE, drc_index, &dr_status);
if (rc || dr_status != DR_ENTITY_UNUSABLE)
return -1;
@@ -312,8 +309,7 @@ int dlpar_release_drc(u32 drc_index)
{
int dr_status, rc;
- rc = rtas_call(rtas_token("get-sensor-state"), 2, 2, &dr_status,
- DR_ENTITY_SENSE, drc_index);
+ rc = rtas_get_sensor(DR_ENTITY_SENSE, drc_index, &dr_status);
if (rc || dr_status != DR_ENTITY_PRESENT)
return -1;
@@ -330,6 +326,19 @@ int dlpar_release_drc(u32 drc_index)
return 0;
}
+int dlpar_unisolate_drc(u32 drc_index)
+{
+ int dr_status, rc;
+
+ rc = rtas_get_sensor(DR_ENTITY_SENSE, drc_index, &dr_status);
+ if (rc || dr_status != DR_ENTITY_PRESENT)
+ return -1;
+
+ rtas_set_indicator(ISOLATION_STATE, drc_index, UNISOLATE);
+
+ return 0;
+}
+
int handle_dlpar_errorlog(struct pseries_hp_errorlog *hp_elog)
{
int rc;
@@ -379,7 +388,7 @@ static void pseries_hp_work_fn(struct work_struct *work)
handle_dlpar_errorlog(hp_work->errlog);
kfree(hp_work->errlog);
- kfree((void *)work);
+ kfree(work);
}
void queue_hotplug_event(struct pseries_hp_errorlog *hp_errlog)
@@ -521,11 +530,8 @@ static ssize_t dlpar_store(struct class *class, struct class_attribute *attr,
int rc;
args = argbuf = kstrdup(buf, GFP_KERNEL);
- if (!argbuf) {
- pr_info("Could not allocate resources for DLPAR operation\n");
- kfree(argbuf);
+ if (!argbuf)
return -ENOMEM;
- }
/*
* Parse out the request from the user, this will be in the form:
diff --git a/arch/powerpc/platforms/pseries/dtl.c b/arch/powerpc/platforms/pseries/dtl.c
index eab8aa293743..3f1cdccebc9c 100644
--- a/arch/powerpc/platforms/pseries/dtl.c
+++ b/arch/powerpc/platforms/pseries/dtl.c
@@ -11,12 +11,14 @@
#include <linux/spinlock.h>
#include <asm/smp.h>
#include <linux/uaccess.h>
+#include <linux/debugfs.h>
#include <asm/firmware.h>
+#include <asm/dtl.h>
#include <asm/lppaca.h>
-#include <asm/debugfs.h>
#include <asm/plpar_wrappers.h>
#include <asm/machdep.h>
+#ifdef CONFIG_DTL
struct dtl {
struct dtl_entry *buf;
int cpu;
@@ -36,6 +38,15 @@ static u8 dtl_event_mask = DTL_LOG_ALL;
static int dtl_buf_entries = N_DISPATCH_LOG;
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
+
+/*
+ * When CONFIG_VIRT_CPU_ACCOUNTING_NATIVE = y, the cpu accounting code controls
+ * reading from the dispatch trace log. If other code wants to consume
+ * DTL entries, it can set this pointer to a function that will get
+ * called once for each DTL entry that gets processed.
+ */
+static void (*dtl_consumer)(struct dtl_entry *entry, u64 index);
+
struct dtl_ring {
u64 write_index;
struct dtl_entry *write_ptr;
@@ -337,7 +348,7 @@ static int dtl_init(void)
/* set up common debugfs structure */
- dtl_dir = debugfs_create_dir("dtl", powerpc_debugfs_root);
+ dtl_dir = debugfs_create_dir("dtl", arch_debugfs_dir);
debugfs_create_x8("dtl_event_mask", 0600, dtl_dir, &dtl_event_mask);
debugfs_create_u32("dtl_buf_entries", 0400, dtl_dir, &dtl_buf_entries);
@@ -354,3 +365,81 @@ static int dtl_init(void)
return 0;
}
machine_arch_initcall(pseries, dtl_init);
+#endif /* CONFIG_DTL */
+
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
+/*
+ * Scan the dispatch trace log and count up the stolen time.
+ * Should be called with interrupts disabled.
+ */
+static notrace u64 scan_dispatch_log(u64 stop_tb)
+{
+ u64 i = local_paca->dtl_ridx;
+ struct dtl_entry *dtl = local_paca->dtl_curr;
+ struct dtl_entry *dtl_end = local_paca->dispatch_log_end;
+ struct lppaca *vpa = local_paca->lppaca_ptr;
+ u64 tb_delta;
+ u64 stolen = 0;
+ u64 dtb;
+
+ if (!dtl)
+ return 0;
+
+ if (i == be64_to_cpu(vpa->dtl_idx))
+ return 0;
+ while (i < be64_to_cpu(vpa->dtl_idx)) {
+ dtb = be64_to_cpu(dtl->timebase);
+ tb_delta = be32_to_cpu(dtl->enqueue_to_dispatch_time) +
+ be32_to_cpu(dtl->ready_to_enqueue_time);
+ barrier();
+ if (i + N_DISPATCH_LOG < be64_to_cpu(vpa->dtl_idx)) {
+ /* buffer has overflowed */
+ i = be64_to_cpu(vpa->dtl_idx) - N_DISPATCH_LOG;
+ dtl = local_paca->dispatch_log + (i % N_DISPATCH_LOG);
+ continue;
+ }
+ if (dtb > stop_tb)
+ break;
+#ifdef CONFIG_DTL
+ if (dtl_consumer)
+ dtl_consumer(dtl, i);
+#endif
+ stolen += tb_delta;
+ ++i;
+ ++dtl;
+ if (dtl == dtl_end)
+ dtl = local_paca->dispatch_log;
+ }
+ local_paca->dtl_ridx = i;
+ local_paca->dtl_curr = dtl;
+ return stolen;
+}
+
+/*
+ * Accumulate stolen time by scanning the dispatch trace log.
+ * Called on entry from user mode.
+ */
+void notrace pseries_accumulate_stolen_time(void)
+{
+ u64 sst, ust;
+ struct cpu_accounting_data *acct = &local_paca->accounting;
+
+ sst = scan_dispatch_log(acct->starttime_user);
+ ust = scan_dispatch_log(acct->starttime);
+ acct->stime -= sst;
+ acct->utime -= ust;
+ acct->steal_time += ust + sst;
+}
+
+u64 pseries_calculate_stolen_time(u64 stop_tb)
+{
+ if (!firmware_has_feature(FW_FEATURE_SPLPAR))
+ return 0;
+
+ if (get_paca()->dtl_ridx != be64_to_cpu(get_lppaca()->dtl_idx))
+ return scan_dispatch_log(stop_tb);
+
+ return 0;
+}
+
+#endif
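The dtl.c comment above notes that code wanting a copy of each dispatch trace entry can set dtl_consumer, which scan_dispatch_log() then calls once per entry as it walks the ring. An illustrative consumer (an assumption for demonstration, not part of the patch; the actual hook-up happens inside dtl.c, where dtl_consumer is a file-local pointer) could look like:

	#include <linux/printk.h>
	#include <asm/dtl.h>

	/* Called once per processed DTL entry; 'index' is the entry's
	 * position in the dispatch trace log ring. */
	static void example_dtl_consumer(struct dtl_entry *entry, u64 index)
	{
		pr_debug("dtl[%llu]: dispatch reason %u, enqueue->dispatch %u tb ticks\n",
			 index, entry->dispatch_reason,
			 be32_to_cpu(entry->enqueue_to_dispatch_time));
	}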
diff --git a/arch/powerpc/platforms/pseries/eeh_pseries.c b/arch/powerpc/platforms/pseries/eeh_pseries.c
index 893ba3f562c4..8e40ccac0f44 100644
--- a/arch/powerpc/platforms/pseries/eeh_pseries.c
+++ b/arch/powerpc/platforms/pseries/eeh_pseries.c
@@ -24,6 +24,7 @@
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
+#include <linux/crash_dump.h>
#include <asm/eeh.h>
#include <asm/eeh_event.h>
@@ -42,7 +43,9 @@ static int ibm_get_config_addr_info;
static int ibm_get_config_addr_info2;
static int ibm_configure_pe;
-void pseries_pcibios_bus_add_device(struct pci_dev *pdev)
+static void pseries_eeh_init_edev(struct pci_dn *pdn);
+
+static void pseries_pcibios_bus_add_device(struct pci_dev *pdev)
{
struct pci_dn *pdn = pci_get_pdn(pdev);
@@ -52,8 +55,6 @@ void pseries_pcibios_bus_add_device(struct pci_dev *pdev)
dev_dbg(&pdev->dev, "EEH: Setting up device\n");
#ifdef CONFIG_PCI_IOV
if (pdev->is_virtfn) {
- struct pci_dn *physfn_pdn;
-
pdn->device_id = pdev->device;
pdn->vendor_id = pdev->vendor;
pdn->class_code = pdev->class;
@@ -63,95 +64,187 @@ void pseries_pcibios_bus_add_device(struct pci_dev *pdev)
* completion from platform.
*/
pdn->last_allow_rc = 0;
- physfn_pdn = pci_get_pdn(pdev->physfn);
- pdn->pe_number = physfn_pdn->pe_num_map[pdn->vf_index];
}
#endif
- eeh_add_device_early(pdn);
- eeh_add_device_late(pdev);
+ pseries_eeh_init_edev(pdn);
#ifdef CONFIG_PCI_IOV
if (pdev->is_virtfn) {
+ /*
+ * FIXME: This really should be handled by choosing the right
+ * parent PE in pseries_eeh_init_edev().
+ */
+ struct eeh_pe *physfn_pe = pci_dev_to_eeh_dev(pdev->physfn)->pe;
struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
edev->pe_config_addr = (pdn->busno << 16) | (pdn->devfn << 8);
- eeh_rmv_from_parent_pe(edev); /* Remove as it is adding to bus pe */
- eeh_add_to_parent_pe(edev); /* Add as VF PE type */
+ eeh_pe_tree_remove(edev); /* Remove as it is adding to bus pe */
+ eeh_pe_tree_insert(edev, physfn_pe); /* Add as VF PE type */
}
#endif
- eeh_sysfs_add_device(pdev);
+ eeh_probe_device(pdev);
}
-/*
- * Buffer for reporting slot-error-detail rtas calls. Its here
- * in BSS, and not dynamically alloced, so that it ends up in
- * RMO where RTAS can access it.
- */
-static unsigned char slot_errbuf[RTAS_ERROR_LOG_MAX];
-static DEFINE_SPINLOCK(slot_errbuf_lock);
-static int eeh_error_buf_size;
/**
- * pseries_eeh_init - EEH platform dependent initialization
+ * pseries_eeh_get_pe_config_addr - Find the pe_config_addr for a device
+ * @pdn: pci_dn of the input device
*
- * EEH platform dependent initialization on pseries.
+ * The EEH RTAS calls use a tuple consisting of: (buid_hi, buid_lo,
+ * pe_config_addr) as a handle to a given PE. This function finds the
+ * pe_config_addr based on the device's config addr.
+ *
+ * Keep in mind that the pe_config_addr *might* be numerically identical to the
+ * device's config addr, but the two are conceptually distinct.
+ *
+ * Returns the pe_config_addr, or a negative error code.
*/
-static int pseries_eeh_init(void)
+static int pseries_eeh_get_pe_config_addr(struct pci_dn *pdn)
{
- /* figure out EEH RTAS function call tokens */
- ibm_set_eeh_option = rtas_token("ibm,set-eeh-option");
- ibm_set_slot_reset = rtas_token("ibm,set-slot-reset");
- ibm_read_slot_reset_state2 = rtas_token("ibm,read-slot-reset-state2");
- ibm_read_slot_reset_state = rtas_token("ibm,read-slot-reset-state");
- ibm_slot_error_detail = rtas_token("ibm,slot-error-detail");
- ibm_get_config_addr_info2 = rtas_token("ibm,get-config-addr-info2");
- ibm_get_config_addr_info = rtas_token("ibm,get-config-addr-info");
- ibm_configure_pe = rtas_token("ibm,configure-pe");
+ int config_addr = rtas_config_addr(pdn->busno, pdn->devfn, 0);
+ struct pci_controller *phb = pdn->phb;
+ int ret, rets[3];
- /*
- * ibm,configure-pe and ibm,configure-bridge have the same semantics,
- * however ibm,configure-pe can be faster. If we can't find
- * ibm,configure-pe then fall back to using ibm,configure-bridge.
- */
- if (ibm_configure_pe == RTAS_UNKNOWN_SERVICE)
- ibm_configure_pe = rtas_token("ibm,configure-bridge");
+ if (ibm_get_config_addr_info2 != RTAS_UNKNOWN_SERVICE) {
+ /*
+ * First of all, use function 1 to determine if this device is
+ * part of a PE or not. ret[0] being zero indicates it's not.
+ */
+ ret = rtas_call(ibm_get_config_addr_info2, 4, 2, rets,
+ config_addr, BUID_HI(phb->buid),
+ BUID_LO(phb->buid), 1);
+ if (ret || (rets[0] == 0))
+ return -ENOENT;
+
+ /* Retrieve the associated PE config address with function 0 */
+ ret = rtas_call(ibm_get_config_addr_info2, 4, 2, rets,
+ config_addr, BUID_HI(phb->buid),
+ BUID_LO(phb->buid), 0);
+ if (ret) {
+ pr_warn("%s: Failed to get address for PHB#%x-PE#%x\n",
+ __func__, phb->global_number, config_addr);
+ return -ENXIO;
+ }
+
+ return rets[0];
+ }
+
+ if (ibm_get_config_addr_info != RTAS_UNKNOWN_SERVICE) {
+ ret = rtas_call(ibm_get_config_addr_info, 4, 2, rets,
+ config_addr, BUID_HI(phb->buid),
+ BUID_LO(phb->buid), 0);
+ if (ret) {
+ pr_warn("%s: Failed to get address for PHB#%x-PE#%x\n",
+ __func__, phb->global_number, config_addr);
+ return -ENXIO;
+ }
+
+ return rets[0];
+ }
/*
- * Necessary sanity check. We needn't check "get-config-addr-info"
- * and its variant since the old firmware probably support address
- * of domain/bus/slot/function for EEH RTAS operations.
+ * PAPR does describe a process for finding the pe_config_addr that was
+ * used before the ibm,get-config-addr-info calls were added. However,
+ * I haven't found *any* systems that don't have that RTAS call
+ * implemented. If you happen to find one that needs the old DT based
+ * process, patches are welcome!
*/
- if (ibm_set_eeh_option == RTAS_UNKNOWN_SERVICE ||
- ibm_set_slot_reset == RTAS_UNKNOWN_SERVICE ||
- (ibm_read_slot_reset_state2 == RTAS_UNKNOWN_SERVICE &&
- ibm_read_slot_reset_state == RTAS_UNKNOWN_SERVICE) ||
- ibm_slot_error_detail == RTAS_UNKNOWN_SERVICE ||
- ibm_configure_pe == RTAS_UNKNOWN_SERVICE) {
- pr_info("EEH functionality not supported\n");
- return -EINVAL;
- }
+ return -ENOENT;
+}
- /* Initialize error log lock and size */
- spin_lock_init(&slot_errbuf_lock);
- eeh_error_buf_size = rtas_token("rtas-error-log-max");
- if (eeh_error_buf_size == RTAS_UNKNOWN_SERVICE) {
- pr_info("%s: unknown EEH error log size\n",
- __func__);
- eeh_error_buf_size = 1024;
- } else if (eeh_error_buf_size > RTAS_ERROR_LOG_MAX) {
- pr_info("%s: EEH error log size %d exceeds the maximal %d\n",
- __func__, eeh_error_buf_size, RTAS_ERROR_LOG_MAX);
- eeh_error_buf_size = RTAS_ERROR_LOG_MAX;
+/**
+ * pseries_eeh_phb_reset - Reset the specified PHB
+ * @phb: PCI controller
+ * @config_addr: the associated config address
+ * @option: reset option
+ *
+ * Reset the specified PHB/PE
+ */
+static int pseries_eeh_phb_reset(struct pci_controller *phb, int config_addr, int option)
+{
+ int ret;
+
+ /* Reset PE through RTAS call */
+ ret = rtas_call(ibm_set_slot_reset, 4, 1, NULL,
+ config_addr, BUID_HI(phb->buid),
+ BUID_LO(phb->buid), option);
+
+ /* If fundamental-reset not supported, try hot-reset */
+ if (option == EEH_RESET_FUNDAMENTAL && ret == -8) {
+ option = EEH_RESET_HOT;
+ ret = rtas_call(ibm_set_slot_reset, 4, 1, NULL,
+ config_addr, BUID_HI(phb->buid),
+ BUID_LO(phb->buid), option);
}
- /* Set EEH probe mode */
- eeh_add_flag(EEH_PROBE_MODE_DEVTREE | EEH_ENABLE_IO_FOR_LOG);
+ /* We need reset hold or settlement delay */
+ if (option == EEH_RESET_FUNDAMENTAL || option == EEH_RESET_HOT)
+ msleep(EEH_PE_RST_HOLD_TIME);
+ else
+ msleep(EEH_PE_RST_SETTLE_TIME);
- /* Set EEH machine dependent code */
- ppc_md.pcibios_bus_add_device = pseries_pcibios_bus_add_device;
+ return ret;
+}
- return 0;
+/**
+ * pseries_eeh_phb_configure_bridge - Configure PCI bridges in the indicated PE
+ * @phb: PCI controller
+ * @config_addr: the associated config address
+ *
+ * The function will be called to reconfigure the bridges included
+ * in the specified PE so that the malfunctioning PE can be
+ * recovered.
+ */
+static int pseries_eeh_phb_configure_bridge(struct pci_controller *phb, int config_addr)
+{
+ int ret;
+ /* Waiting 0.2s maximum before skipping configuration */
+ int max_wait = 200;
+
+ while (max_wait > 0) {
+ ret = rtas_call(ibm_configure_pe, 3, 1, NULL,
+ config_addr, BUID_HI(phb->buid),
+ BUID_LO(phb->buid));
+
+ if (!ret)
+ return ret;
+ if (ret < 0)
+ break;
+
+ /*
+ * If RTAS returns a delay value that's above 100ms, cut it
+ * down to 100ms in case firmware made a mistake. For more
+ * on how these delay values work see rtas_busy_delay_time
+ */
+ if (ret > RTAS_EXTENDED_DELAY_MIN+2 &&
+ ret <= RTAS_EXTENDED_DELAY_MAX)
+ ret = RTAS_EXTENDED_DELAY_MIN+2;
+
+ max_wait -= rtas_busy_delay_time(ret);
+
+ if (max_wait < 0)
+ break;
+
+ rtas_busy_delay(ret);
+ }
+
+ pr_warn("%s: Unable to configure bridge PHB#%x-PE#%x (%d)\n",
+ __func__, phb->global_number, config_addr, ret);
+ /* PAPR defines -3 as "Parameter Error" for this function: */
+ if (ret == -3)
+ return -EINVAL;
+ else
+ return -EIO;
}
+/*
+ * Buffer for reporting slot-error-detail rtas calls. Its here
+ * in BSS, and not dynamically alloced, so that it ends up in
+ * RMO where RTAS can access it.
+ */
+static unsigned char slot_errbuf[RTAS_ERROR_LOG_MAX];
+static DEFINE_SPINLOCK(slot_errbuf_lock);
+static int eeh_error_buf_size;
+
static int pseries_eeh_cap_start(struct pci_dn *pdn)
{
u32 status;
@@ -222,34 +315,88 @@ static int pseries_eeh_find_ecap(struct pci_dn *pdn, int cap)
}
/**
- * pseries_eeh_probe - EEH probe on the given device
+ * pseries_eeh_pe_get_parent - Retrieve the parent PE
+ * @edev: EEH device
+ *
+ * The whole PEs existing in the system are organized as hierarchy
+ * tree. The function is used to retrieve the parent PE according
+ * to the parent EEH device.
+ */
+static struct eeh_pe *pseries_eeh_pe_get_parent(struct eeh_dev *edev)
+{
+ struct eeh_dev *parent;
+ struct pci_dn *pdn = eeh_dev_to_pdn(edev);
+
+ /*
+ * The indirect parent EEH device might already have an
+ * associated PE even when the direct parent EEH device
+ * does not have one yet.
+ */
+ if (edev->physfn)
+ pdn = pci_get_pdn(edev->physfn);
+ else
+ pdn = pdn ? pdn->parent : NULL;
+ while (pdn) {
+ /* We're poking out of PCI territory */
+ parent = pdn_to_eeh_dev(pdn);
+ if (!parent)
+ return NULL;
+
+ if (parent->pe)
+ return parent->pe;
+
+ pdn = pdn->parent;
+ }
+
+ return NULL;
+}
+
+/**
+ * pseries_eeh_init_edev - initialise the eeh_dev and eeh_pe for a pci_dn
+ *
* @pdn: PCI device node
- * @data: Unused
*
- * When EEH module is installed during system boot, all PCI devices
- * are checked one by one to see if it supports EEH. The function
- * is introduced for the purpose.
+ * When we discover a new PCI device via the device-tree we create a
+ * corresponding pci_dn and we allocate, but don't initialise, an eeh_dev.
+ * This function takes care of the initialisation and inserts the eeh_dev
+ * into the correct eeh_pe. If no eeh_pe exists we'll allocate one.
*/
-static void *pseries_eeh_probe(struct pci_dn *pdn, void *data)
+static void pseries_eeh_init_edev(struct pci_dn *pdn)
{
+ struct eeh_pe pe, *parent;
struct eeh_dev *edev;
- struct eeh_pe pe;
u32 pcie_flags;
- int enable = 0;
int ret;
- /* Retrieve OF node and eeh device */
+ if (WARN_ON_ONCE(!eeh_has_flag(EEH_PROBE_MODE_DEVTREE)))
+ return;
+
+ /*
+ * Find the eeh_dev for this pdn. The storage for the eeh_dev was
+ * allocated at the same time as the pci_dn.
+ *
+ * XXX: We should probably re-visit that.
+ */
edev = pdn_to_eeh_dev(pdn);
- if (!edev || edev->pe)
- return NULL;
+ if (!edev)
+ return;
+
+ /*
+ * If ->pe is set then we've already probed this device. We hit
+ * this path when a pci_dev is removed and rescanned while recovering
+ * a PE (i.e. for devices where the driver doesn't support error
+ * recovery).
+ */
+ if (edev->pe)
+ return;
/* Check class/vendor/device IDs */
if (!pdn->vendor_id || !pdn->device_id || !pdn->class_code)
- return NULL;
+ return;
/* Skip for PCI-ISA bridge */
if ((pdn->class_code >> 8) == PCI_CLASS_BRIDGE_ISA)
- return NULL;
+ return;
eeh_edev_dbg(edev, "Probing device\n");
@@ -258,12 +405,11 @@ static void *pseries_eeh_probe(struct pci_dn *pdn, void *data)
* correctly reflects that current device is root port
* or PCIe switch downstream port.
*/
- edev->class_code = pdn->class_code;
edev->pcix_cap = pseries_eeh_find_cap(pdn, PCI_CAP_ID_PCIX);
edev->pcie_cap = pseries_eeh_find_cap(pdn, PCI_CAP_ID_EXP);
edev->aer_cap = pseries_eeh_find_ecap(pdn, PCI_EXT_CAP_ID_ERR);
edev->mode &= 0xFFFFFF00;
- if ((edev->class_code >> 8) == PCI_CLASS_BRIDGE_PCI) {
+ if ((pdn->class_code >> 8) == PCI_CLASS_BRIDGE_PCI) {
edev->mode |= EEH_DEV_BRIDGE;
if (edev->pcie_cap) {
rtas_read_config(pdn, edev->pcie_cap + PCI_EXP_FLAGS,
@@ -276,50 +422,82 @@ static void *pseries_eeh_probe(struct pci_dn *pdn, void *data)
}
}
- /* Initialize the fake PE */
+ /* first up, find the pe_config_addr for the PE containing the device */
+ ret = pseries_eeh_get_pe_config_addr(pdn);
+ if (ret < 0) {
+ eeh_edev_dbg(edev, "Unable to find pe_config_addr\n");
+ goto err;
+ }
+
+ /* Try enable EEH on the fake PE */
memset(&pe, 0, sizeof(struct eeh_pe));
pe.phb = pdn->phb;
- pe.config_addr = (pdn->busno << 16) | (pdn->devfn << 8);
+ pe.addr = ret;
- /* Enable EEH on the device */
eeh_edev_dbg(edev, "Enabling EEH on device\n");
ret = eeh_ops->set_option(&pe, EEH_OPT_ENABLE);
if (ret) {
eeh_edev_dbg(edev, "EEH failed to enable on device (code %d)\n", ret);
- } else {
- /* Retrieve PE address */
- edev->pe_config_addr = eeh_ops->get_pe_addr(&pe);
- pe.addr = edev->pe_config_addr;
-
- /* Some older systems (Power4) allow the ibm,set-eeh-option
- * call to succeed even on nodes where EEH is not supported.
- * Verify support explicitly.
- */
- ret = eeh_ops->get_state(&pe, NULL);
- if (ret > 0 && ret != EEH_STATE_NOT_SUPPORT)
- enable = 1;
-
- if (enable) {
- eeh_add_flag(EEH_ENABLED);
- eeh_add_to_parent_pe(edev);
- } else if (pdn->parent && pdn_to_eeh_dev(pdn->parent) &&
- (pdn_to_eeh_dev(pdn->parent))->pe) {
- /* This device doesn't support EEH, but it may have an
- * EEH parent, in which case we mark it as supported.
- */
- edev->pe_config_addr = pdn_to_eeh_dev(pdn->parent)->pe_config_addr;
- eeh_add_to_parent_pe(edev);
- }
- eeh_edev_dbg(edev, "EEH is %s on device (code %d)\n",
- (enable ? "enabled" : "unsupported"), ret);
+ goto err;
}
- /* Save memory bars */
+ edev->pe_config_addr = pe.addr;
+
+ eeh_add_flag(EEH_ENABLED);
+
+ parent = pseries_eeh_pe_get_parent(edev);
+ eeh_pe_tree_insert(edev, parent);
eeh_save_bars(edev);
+ eeh_edev_dbg(edev, "EEH enabled for device");
- return NULL;
+ return;
+
+err:
+ eeh_edev_dbg(edev, "EEH is unsupported on device (code = %d)\n", ret);
}
+static struct eeh_dev *pseries_eeh_probe(struct pci_dev *pdev)
+{
+ struct eeh_dev *edev;
+ struct pci_dn *pdn;
+
+ pdn = pci_get_pdn_by_devfn(pdev->bus, pdev->devfn);
+ if (!pdn)
+ return NULL;
+
+ /*
+ * If the system supports EEH on this device then the eeh_dev was
+ * configured and inserted into a PE in pseries_eeh_init_edev()
+ */
+ edev = pdn_to_eeh_dev(pdn);
+ if (!edev || !edev->pe)
+ return NULL;
+
+ return edev;
+}
+
+/**
+ * pseries_eeh_init_edev_recursive - Enable EEH for the indicated device
+ * @pdn: PCI device node
+ *
+ * This routine must be used to perform EEH initialization for the
+ * indicated PCI device that was added after system boot (e.g.
+ * hotplug, dlpar).
+ */
+void pseries_eeh_init_edev_recursive(struct pci_dn *pdn)
+{
+ struct pci_dn *n;
+
+ if (!pdn)
+ return;
+
+ list_for_each_entry(n, &pdn->child_list, list)
+ pseries_eeh_init_edev_recursive(n);
+
+ pseries_eeh_init_edev(pdn);
+}
+EXPORT_SYMBOL_GPL(pseries_eeh_init_edev_recursive);
+
/**
* pseries_eeh_set_option - Initialize EEH or MMIO/DMA reenable
* @pe: EEH PE
@@ -332,10 +510,9 @@ static void *pseries_eeh_probe(struct pci_dn *pdn, void *data)
static int pseries_eeh_set_option(struct eeh_pe *pe, int option)
{
int ret = 0;
- int config_addr;
/*
- * When we're enabling or disabling EEH functioality on
+ * When we're enabling or disabling EEH functionality on
* the particular PE, the PE config address is possibly
* unavailable. Therefore, we have to figure it out from
* the FDT node.
@@ -345,86 +522,23 @@ static int pseries_eeh_set_option(struct eeh_pe *pe, int option)
case EEH_OPT_ENABLE:
case EEH_OPT_THAW_MMIO:
case EEH_OPT_THAW_DMA:
- config_addr = pe->config_addr;
- if (pe->addr)
- config_addr = pe->addr;
break;
case EEH_OPT_FREEZE_PE:
/* Not support */
return 0;
default:
- pr_err("%s: Invalid option %d\n",
- __func__, option);
+ pr_err("%s: Invalid option %d\n", __func__, option);
return -EINVAL;
}
ret = rtas_call(ibm_set_eeh_option, 4, 1, NULL,
- config_addr, BUID_HI(pe->phb->buid),
+ pe->addr, BUID_HI(pe->phb->buid),
BUID_LO(pe->phb->buid), option);
return ret;
}
/**
- * pseries_eeh_get_pe_addr - Retrieve PE address
- * @pe: EEH PE
- *
- * Retrieve the assocated PE address. Actually, there're 2 RTAS
- * function calls dedicated for the purpose. We need implement
- * it through the new function and then the old one. Besides,
- * you should make sure the config address is figured out from
- * FDT node before calling the function.
- *
- * It's notable that zero'ed return value means invalid PE config
- * address.
- */
-static int pseries_eeh_get_pe_addr(struct eeh_pe *pe)
-{
- int ret = 0;
- int rets[3];
-
- if (ibm_get_config_addr_info2 != RTAS_UNKNOWN_SERVICE) {
- /*
- * First of all, we need to make sure there has one PE
- * associated with the device. Otherwise, PE address is
- * meaningless.
- */
- ret = rtas_call(ibm_get_config_addr_info2, 4, 2, rets,
- pe->config_addr, BUID_HI(pe->phb->buid),
- BUID_LO(pe->phb->buid), 1);
- if (ret || (rets[0] == 0))
- return 0;
-
- /* Retrieve the associated PE config address */
- ret = rtas_call(ibm_get_config_addr_info2, 4, 2, rets,
- pe->config_addr, BUID_HI(pe->phb->buid),
- BUID_LO(pe->phb->buid), 0);
- if (ret) {
- pr_warn("%s: Failed to get address for PHB#%x-PE#%x\n",
- __func__, pe->phb->global_number, pe->config_addr);
- return 0;
- }
-
- return rets[0];
- }
-
- if (ibm_get_config_addr_info != RTAS_UNKNOWN_SERVICE) {
- ret = rtas_call(ibm_get_config_addr_info, 4, 2, rets,
- pe->config_addr, BUID_HI(pe->phb->buid),
- BUID_LO(pe->phb->buid), 0);
- if (ret) {
- pr_warn("%s: Failed to get address for PHB#%x-PE#%x\n",
- __func__, pe->phb->global_number, pe->config_addr);
- return 0;
- }
-
- return rets[0];
- }
-
- return ret;
-}
-
-/**
* pseries_eeh_get_state - Retrieve PE state
* @pe: EEH PE
* @delay: suggested time to wait if state is unavailable
@@ -439,25 +553,19 @@ static int pseries_eeh_get_pe_addr(struct eeh_pe *pe)
*/
static int pseries_eeh_get_state(struct eeh_pe *pe, int *delay)
{
- int config_addr;
int ret;
int rets[4];
int result;
- /* Figure out PE config address if possible */
- config_addr = pe->config_addr;
- if (pe->addr)
- config_addr = pe->addr;
-
if (ibm_read_slot_reset_state2 != RTAS_UNKNOWN_SERVICE) {
ret = rtas_call(ibm_read_slot_reset_state2, 3, 4, rets,
- config_addr, BUID_HI(pe->phb->buid),
+ pe->addr, BUID_HI(pe->phb->buid),
BUID_LO(pe->phb->buid));
} else if (ibm_read_slot_reset_state != RTAS_UNKNOWN_SERVICE) {
/* Fake PE unavailable info */
rets[2] = 0;
ret = rtas_call(ibm_read_slot_reset_state, 3, 3, rets,
- config_addr, BUID_HI(pe->phb->buid),
+ pe->addr, BUID_HI(pe->phb->buid),
BUID_LO(pe->phb->buid));
} else {
return EEH_STATE_NOT_SUPPORT;
@@ -511,36 +619,7 @@ static int pseries_eeh_get_state(struct eeh_pe *pe, int *delay)
*/
static int pseries_eeh_reset(struct eeh_pe *pe, int option)
{
- int config_addr;
- int ret;
-
- /* Figure out PE address */
- config_addr = pe->config_addr;
- if (pe->addr)
- config_addr = pe->addr;
-
- /* Reset PE through RTAS call */
- ret = rtas_call(ibm_set_slot_reset, 4, 1, NULL,
- config_addr, BUID_HI(pe->phb->buid),
- BUID_LO(pe->phb->buid), option);
-
- /* If fundamental-reset not supported, try hot-reset */
- if (option == EEH_RESET_FUNDAMENTAL &&
- ret == -8) {
- option = EEH_RESET_HOT;
- ret = rtas_call(ibm_set_slot_reset, 4, 1, NULL,
- config_addr, BUID_HI(pe->phb->buid),
- BUID_LO(pe->phb->buid), option);
- }
-
- /* We need reset hold or settlement delay */
- if (option == EEH_RESET_FUNDAMENTAL ||
- option == EEH_RESET_HOT)
- msleep(EEH_PE_RST_HOLD_TIME);
- else
- msleep(EEH_PE_RST_SETTLE_TIME);
-
- return ret;
+ return pseries_eeh_phb_reset(pe->phb, pe->addr, option);
}
/**
@@ -556,19 +635,13 @@ static int pseries_eeh_reset(struct eeh_pe *pe, int option)
*/
static int pseries_eeh_get_log(struct eeh_pe *pe, int severity, char *drv_log, unsigned long len)
{
- int config_addr;
unsigned long flags;
int ret;
spin_lock_irqsave(&slot_errbuf_lock, flags);
memset(slot_errbuf, 0, eeh_error_buf_size);
- /* Figure out the PE address */
- config_addr = pe->config_addr;
- if (pe->addr)
- config_addr = pe->addr;
-
- ret = rtas_call(ibm_slot_error_detail, 8, 1, NULL, config_addr,
+ ret = rtas_call(ibm_slot_error_detail, 8, 1, NULL, pe->addr,
BUID_HI(pe->phb->buid), BUID_LO(pe->phb->buid),
virt_to_phys(drv_log), len,
virt_to_phys(slot_errbuf), eeh_error_buf_size,
@@ -584,107 +657,46 @@ static int pseries_eeh_get_log(struct eeh_pe *pe, int severity, char *drv_log, u
* pseries_eeh_configure_bridge - Configure PCI bridges in the indicated PE
* @pe: EEH PE
*
- * The function will be called to reconfigure the bridges included
- * in the specified PE so that the mulfunctional PE would be recovered
- * again.
*/
static int pseries_eeh_configure_bridge(struct eeh_pe *pe)
{
- int config_addr;
- int ret;
- /* Waiting 0.2s maximum before skipping configuration */
- int max_wait = 200;
-
- /* Figure out the PE address */
- config_addr = pe->config_addr;
- if (pe->addr)
- config_addr = pe->addr;
-
- while (max_wait > 0) {
- ret = rtas_call(ibm_configure_pe, 3, 1, NULL,
- config_addr, BUID_HI(pe->phb->buid),
- BUID_LO(pe->phb->buid));
-
- if (!ret)
- return ret;
-
- /*
- * If RTAS returns a delay value that's above 100ms, cut it
- * down to 100ms in case firmware made a mistake. For more
- * on how these delay values work see rtas_busy_delay_time
- */
- if (ret > RTAS_EXTENDED_DELAY_MIN+2 &&
- ret <= RTAS_EXTENDED_DELAY_MAX)
- ret = RTAS_EXTENDED_DELAY_MIN+2;
-
- max_wait -= rtas_busy_delay_time(ret);
-
- if (max_wait < 0)
- break;
-
- rtas_busy_delay(ret);
- }
-
- pr_warn("%s: Unable to configure bridge PHB#%x-PE#%x (%d)\n",
- __func__, pe->phb->global_number, pe->addr, ret);
- return ret;
+ return pseries_eeh_phb_configure_bridge(pe->phb, pe->addr);
}
/**
* pseries_eeh_read_config - Read PCI config space
- * @pdn: PCI device node
- * @where: PCI address
+ * @edev: EEH device handle
+ * @where: PCI config space offset
* @size: size to read
* @val: return value
*
* Read config space from the specified device
*/
-static int pseries_eeh_read_config(struct pci_dn *pdn, int where, int size, u32 *val)
+static int pseries_eeh_read_config(struct eeh_dev *edev, int where, int size, u32 *val)
{
+ struct pci_dn *pdn = eeh_dev_to_pdn(edev);
+
return rtas_read_config(pdn, where, size, val);
}
/**
* pseries_eeh_write_config - Write PCI config space
- * @pdn: PCI device node
- * @where: PCI address
+ * @edev: EEH device handle
+ * @where: PCI config space offset
* @size: size to write
* @val: value to be written
*
* Write config space to the specified device
*/
-static int pseries_eeh_write_config(struct pci_dn *pdn, int where, int size, u32 val)
-{
- return rtas_write_config(pdn, where, size, val);
-}
-
-static int pseries_eeh_restore_config(struct pci_dn *pdn)
+static int pseries_eeh_write_config(struct eeh_dev *edev, int where, int size, u32 val)
{
- struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
- s64 ret = 0;
-
- if (!edev)
- return -EEXIST;
-
- /*
- * FIXME: The MPS, error routing rules, timeout setting are worthy
- * to be exported by firmware in extendible way.
- */
- if (edev->physfn)
- ret = eeh_restore_vf_config(pdn);
-
- if (ret) {
- pr_warn("%s: Can't reinit PCI dev 0x%x (%lld)\n",
- __func__, edev->pe_config_addr, ret);
- return -EIO;
- }
+ struct pci_dn *pdn = eeh_dev_to_pdn(edev);
- return ret;
+ return rtas_write_config(pdn, where, size, val);
}
#ifdef CONFIG_PCI_IOV
-int pseries_send_allow_unfreeze(struct pci_dn *pdn,
- u16 *vf_pe_array, int cur_vfs)
+static int pseries_send_allow_unfreeze(struct pci_dn *pdn, u16 *vf_pe_array, int cur_vfs)
{
int rc;
int ibm_allow_unfreeze = rtas_token("ibm,open-sriov-allow-unfreeze");
@@ -709,8 +721,8 @@ int pseries_send_allow_unfreeze(struct pci_dn *pdn,
static int pseries_call_allow_unfreeze(struct eeh_dev *edev)
{
+ int cur_vfs = 0, rc = 0, vf_index, bus, devfn, vf_pe_num;
struct pci_dn *pdn, *tmp, *parent, *physfn_pdn;
- int cur_vfs = 0, rc = 0, vf_index, bus, devfn;
u16 *vf_pe_array;
vf_pe_array = kzalloc(RTAS_DATA_BUF_SIZE, GFP_KERNEL);
@@ -743,8 +755,10 @@ static int pseries_call_allow_unfreeze(struct eeh_dev *edev)
}
} else {
pdn = pci_get_pdn(edev->pdev);
- vf_pe_array[0] = cpu_to_be16(pdn->pe_number);
physfn_pdn = pci_get_pdn(edev->physfn);
+
+ vf_pe_num = physfn_pdn->pe_num_map[edev->vf_index];
+ vf_pe_array[0] = cpu_to_be16(vf_pe_num);
rc = pseries_send_allow_unfreeze(physfn_pdn,
vf_pe_array, 1);
pdn->last_allow_rc = rc;
@@ -755,15 +769,12 @@ static int pseries_call_allow_unfreeze(struct eeh_dev *edev)
return rc;
}
-static int pseries_notify_resume(struct pci_dn *pdn)
+static int pseries_notify_resume(struct eeh_dev *edev)
{
- struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
-
if (!edev)
return -EEXIST;
- if (rtas_token("ibm,open-sriov-allow-unfreeze")
- == RTAS_UNKNOWN_SERVICE)
+ if (rtas_token("ibm,open-sriov-allow-unfreeze") == RTAS_UNKNOWN_SERVICE)
return -EINVAL;
if (edev->pdev->is_physfn || edev->pdev->is_virtfn)
@@ -775,10 +786,8 @@ static int pseries_notify_resume(struct pci_dn *pdn)
static struct eeh_ops pseries_eeh_ops = {
.name = "pseries",
- .init = pseries_eeh_init,
.probe = pseries_eeh_probe,
.set_option = pseries_eeh_set_option,
- .get_pe_addr = pseries_eeh_get_pe_addr,
.get_state = pseries_eeh_get_state,
.reset = pseries_eeh_reset,
.get_log = pseries_eeh_get_log,
@@ -787,7 +796,7 @@ static struct eeh_ops pseries_eeh_ops = {
.read_config = pseries_eeh_read_config,
.write_config = pseries_eeh_write_config,
.next_error = NULL,
- .restore_config = pseries_eeh_restore_config,
+ .restore_config = NULL, /* NB: configure_bridge() does this */
#ifdef CONFIG_PCI_IOV
.notify_resume = pseries_notify_resume
#endif
@@ -801,15 +810,87 @@ static struct eeh_ops pseries_eeh_ops = {
*/
static int __init eeh_pseries_init(void)
{
- int ret;
+ struct pci_controller *phb;
+ struct pci_dn *pdn;
+ int ret, config_addr;
- ret = eeh_ops_register(&pseries_eeh_ops);
+ /* figure out EEH RTAS function call tokens */
+ ibm_set_eeh_option = rtas_token("ibm,set-eeh-option");
+ ibm_set_slot_reset = rtas_token("ibm,set-slot-reset");
+ ibm_read_slot_reset_state2 = rtas_token("ibm,read-slot-reset-state2");
+ ibm_read_slot_reset_state = rtas_token("ibm,read-slot-reset-state");
+ ibm_slot_error_detail = rtas_token("ibm,slot-error-detail");
+ ibm_get_config_addr_info2 = rtas_token("ibm,get-config-addr-info2");
+ ibm_get_config_addr_info = rtas_token("ibm,get-config-addr-info");
+ ibm_configure_pe = rtas_token("ibm,configure-pe");
+
+ /*
+ * ibm,configure-pe and ibm,configure-bridge have the same semantics,
+ * however ibm,configure-pe can be faster. If we can't find
+ * ibm,configure-pe then fall back to using ibm,configure-bridge.
+ */
+ if (ibm_configure_pe == RTAS_UNKNOWN_SERVICE)
+ ibm_configure_pe = rtas_token("ibm,configure-bridge");
+
+ /*
+ * Necessary sanity check. We needn't check "get-config-addr-info"
+ * and its variant since the old firmware probably supports address
+ * of domain/bus/slot/function for EEH RTAS operations.
+ */
+ if (ibm_set_eeh_option == RTAS_UNKNOWN_SERVICE ||
+ ibm_set_slot_reset == RTAS_UNKNOWN_SERVICE ||
+ (ibm_read_slot_reset_state2 == RTAS_UNKNOWN_SERVICE &&
+ ibm_read_slot_reset_state == RTAS_UNKNOWN_SERVICE) ||
+ ibm_slot_error_detail == RTAS_UNKNOWN_SERVICE ||
+ ibm_configure_pe == RTAS_UNKNOWN_SERVICE) {
+ pr_info("EEH functionality not supported\n");
+ return -EINVAL;
+ }
+
+ /* Initialize error log size */
+ eeh_error_buf_size = rtas_token("rtas-error-log-max");
+ if (eeh_error_buf_size == RTAS_UNKNOWN_SERVICE) {
+ pr_info("%s: unknown EEH error log size\n",
+ __func__);
+ eeh_error_buf_size = 1024;
+ } else if (eeh_error_buf_size > RTAS_ERROR_LOG_MAX) {
+ pr_info("%s: EEH error log size %d exceeds the maximal %d\n",
+ __func__, eeh_error_buf_size, RTAS_ERROR_LOG_MAX);
+ eeh_error_buf_size = RTAS_ERROR_LOG_MAX;
+ }
+
+ /* Set EEH probe mode */
+ eeh_add_flag(EEH_PROBE_MODE_DEVTREE | EEH_ENABLE_IO_FOR_LOG);
+
+ /* Set EEH machine dependent code */
+ ppc_md.pcibios_bus_add_device = pseries_pcibios_bus_add_device;
+
+ if (is_kdump_kernel() || reset_devices) {
+ pr_info("Issue PHB reset ...\n");
+ list_for_each_entry(phb, &hose_list, list_node) {
+ // Skip if the slot is empty
+ if (list_empty(&PCI_DN(phb->dn)->child_list))
+ continue;
+
+ pdn = list_first_entry(&PCI_DN(phb->dn)->child_list, struct pci_dn, list);
+ config_addr = pseries_eeh_get_pe_config_addr(pdn);
+
+ /* invalid PE config addr */
+ if (config_addr < 0)
+ continue;
+
+ pseries_eeh_phb_reset(phb, config_addr, EEH_RESET_FUNDAMENTAL);
+ pseries_eeh_phb_reset(phb, config_addr, EEH_RESET_DEACTIVATE);
+ pseries_eeh_phb_configure_bridge(phb, config_addr);
+ }
+ }
+
+ ret = eeh_init(&pseries_eeh_ops);
if (!ret)
pr_info("EEH: pSeries platform initialized\n");
else
pr_info("EEH: pSeries platform initialization failure (%d)\n",
ret);
-
return ret;
}
-machine_early_initcall(pseries, eeh_pseries_init);
+machine_arch_initcall(pseries, eeh_pseries_init);
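Two small patterns in the eeh_pseries_init() rework above are worth calling out: falling back from ibm,configure-pe to ibm,configure-bridge when the preferred RTAS token is absent, and clamping the advertised error log size to a sane range. The following is a minimal userspace sketch of the same decision logic, with rtas_token() mocked and the constant values chosen for illustration only (they are not the kernel's):

#include <stdio.h>
#include <stddef.h>
#include <string.h>

#define RTAS_UNKNOWN_SERVICE (-1)
#define RTAS_ERROR_LOG_MAX   2048   /* illustrative cap, not the kernel's value */

/* Mock of rtas_token(): look the call name up in a fake device-tree table. */
static int rtas_token(const char *name)
{
	static const struct { const char *name; int token; } table[] = {
		{ "ibm,configure-bridge", 0x21 },
		{ "rtas-error-log-max",   4096 },
	};
	for (size_t i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		if (!strcmp(table[i].name, name))
			return table[i].token;
	return RTAS_UNKNOWN_SERVICE;
}

int main(void)
{
	/* Prefer ibm,configure-pe, fall back to ibm,configure-bridge. */
	int configure_pe = rtas_token("ibm,configure-pe");
	if (configure_pe == RTAS_UNKNOWN_SERVICE)
		configure_pe = rtas_token("ibm,configure-bridge");
	printf("configure token: %d\n", configure_pe);

	/* Clamp the advertised error log size to [default, RTAS_ERROR_LOG_MAX]. */
	int log_size = rtas_token("rtas-error-log-max");
	if (log_size == RTAS_UNKNOWN_SERVICE)
		log_size = 1024;                 /* fall back to a default */
	else if (log_size > RTAS_ERROR_LOG_MAX)
		log_size = RTAS_ERROR_LOG_MAX;   /* never trust an oversized value */
	printf("error log size: %d\n", log_size);
	return 0;
}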
diff --git a/arch/powerpc/platforms/pseries/event_sources.c b/arch/powerpc/platforms/pseries/event_sources.c
index be661e919c76..623dfe0d8e1c 100644
--- a/arch/powerpc/platforms/pseries/event_sources.c
+++ b/arch/powerpc/platforms/pseries/event_sources.c
@@ -8,7 +8,7 @@
#include "pseries.h"
-void request_event_sources_irqs(struct device_node *np,
+void __init request_event_sources_irqs(struct device_node *np,
irq_handler_t handler,
const char *name)
{
diff --git a/arch/powerpc/platforms/pseries/firmware.c b/arch/powerpc/platforms/pseries/firmware.c
index 3e49cc23a97a..080108d129ed 100644
--- a/arch/powerpc/platforms/pseries/firmware.c
+++ b/arch/powerpc/platforms/pseries/firmware.c
@@ -65,6 +65,9 @@ hypertas_fw_features_table[] = {
{FW_FEATURE_HPT_RESIZE, "hcall-hpt-resize"},
{FW_FEATURE_BLOCK_REMOVE, "hcall-block-remove"},
{FW_FEATURE_PAPR_SCM, "hcall-scm"},
+ {FW_FEATURE_RPT_INVALIDATE, "hcall-rpt-invalidate"},
+ {FW_FEATURE_ENERGY_SCALE_INFO, "hcall-energy-scale-info"},
+ {FW_FEATURE_WATCHDOG, "hcall-watchdog"},
};
/* Build up the firmware features bitmask using the contents of
@@ -118,10 +121,11 @@ struct vec5_fw_feature {
static __initdata struct vec5_fw_feature
vec5_fw_features_table[] = {
- {FW_FEATURE_TYPE1_AFFINITY, OV5_TYPE1_AFFINITY},
+ {FW_FEATURE_FORM1_AFFINITY, OV5_FORM1_AFFINITY},
{FW_FEATURE_PRRN, OV5_PRRN},
{FW_FEATURE_DRMEM_V2, OV5_DRMEM_V2},
{FW_FEATURE_DRC_INFO, OV5_DRC_INFO},
+ {FW_FEATURE_FORM2_AFFINITY, OV5_FORM2_AFFINITY},
};
static void __init fw_vec5_feature_init(const char *vec5, unsigned long len)
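The hypertas and vec5 tables above are name-to-flag maps that get OR-ed into a feature mask when the firmware advertises the corresponding string or option bit; the matching loop itself sits outside this hunk. A small, self-contained sketch of that table scan, with made-up bit positions and a made-up advertised-string list:

#include <stdio.h>
#include <stddef.h>
#include <string.h>

#define FW_FEATURE_BLOCK_REMOVE   (1UL << 0)   /* illustrative bit positions */
#define FW_FEATURE_PAPR_SCM       (1UL << 1)
#define FW_FEATURE_WATCHDOG       (1UL << 2)

static const struct { unsigned long val; const char *name; } hypertas_table[] = {
	{ FW_FEATURE_BLOCK_REMOVE, "hcall-block-remove" },
	{ FW_FEATURE_PAPR_SCM,     "hcall-scm" },
	{ FW_FEATURE_WATCHDOG,     "hcall-watchdog" },
};

int main(void)
{
	/* Strings as they might appear in ibm,hypertas-functions. */
	const char *advertised[] = { "hcall-scm", "hcall-watchdog" };
	unsigned long features = 0;

	for (size_t i = 0; i < sizeof(advertised) / sizeof(advertised[0]); i++)
		for (size_t j = 0; j < sizeof(hypertas_table) / sizeof(hypertas_table[0]); j++)
			if (!strcmp(advertised[i], hypertas_table[j].name))
				features |= hypertas_table[j].val;

	printf("features = 0x%lx\n", features);   /* 0x6 with the data above */
	return 0;
}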
diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c
index 3e8cbfe7a80f..e0a7ac5db15d 100644
--- a/arch/powerpc/platforms/pseries/hotplug-cpu.c
+++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c
@@ -35,53 +35,15 @@
#include <asm/topology.h>
#include "pseries.h"
-#include "offline_states.h"
/* This version can't take the spinlock, because it never returns */
static int rtas_stop_self_token = RTAS_UNKNOWN_SERVICE;
-static DEFINE_PER_CPU(enum cpu_state_vals, preferred_offline_state) =
- CPU_STATE_OFFLINE;
-static DEFINE_PER_CPU(enum cpu_state_vals, current_state) = CPU_STATE_OFFLINE;
-
-static enum cpu_state_vals default_offline_state = CPU_STATE_OFFLINE;
-
-static bool cede_offline_enabled __read_mostly = true;
-
/*
- * Enable/disable cede_offline when available.
+ * Record the CPU ids used on each node.
+ * Protected by cpu_add_remove_lock.
*/
-static int __init setup_cede_offline(char *str)
-{
- return (kstrtobool(str, &cede_offline_enabled) == 0);
-}
-
-__setup("cede_offline=", setup_cede_offline);
-
-enum cpu_state_vals get_cpu_current_state(int cpu)
-{
- return per_cpu(current_state, cpu);
-}
-
-void set_cpu_current_state(int cpu, enum cpu_state_vals state)
-{
- per_cpu(current_state, cpu) = state;
-}
-
-enum cpu_state_vals get_preferred_offline_state(int cpu)
-{
- return per_cpu(preferred_offline_state, cpu);
-}
-
-void set_preferred_offline_state(int cpu, enum cpu_state_vals state)
-{
- per_cpu(preferred_offline_state, cpu) = state;
-}
-
-void set_default_offline_state(int cpu)
-{
- per_cpu(preferred_offline_state, cpu) = default_offline_state;
-}
+static cpumask_var_t node_recorded_ids_map[MAX_NUMNODES];
static void rtas_stop_self(void)
{
@@ -91,19 +53,14 @@ static void rtas_stop_self(void)
BUG_ON(rtas_stop_self_token == RTAS_UNKNOWN_SERVICE);
- printk("cpu %u (hwid %u) Ready to die...\n",
- smp_processor_id(), hard_smp_processor_id());
-
rtas_call_unlocked(&args, rtas_stop_self_token, 0, 1, NULL);
panic("Alas, I survived.\n");
}
-static void pseries_mach_cpu_die(void)
+static void pseries_cpu_offline_self(void)
{
- unsigned int cpu = smp_processor_id();
unsigned int hwcpu = hard_smp_processor_id();
- u8 cede_latency_hint = 0;
local_irq_disable();
idle_task_exit();
@@ -112,49 +69,6 @@ static void pseries_mach_cpu_die(void)
else
xics_teardown_cpu();
- if (get_preferred_offline_state(cpu) == CPU_STATE_INACTIVE) {
- set_cpu_current_state(cpu, CPU_STATE_INACTIVE);
- if (ppc_md.suspend_disable_cpu)
- ppc_md.suspend_disable_cpu();
-
- cede_latency_hint = 2;
-
- get_lppaca()->idle = 1;
- if (!lppaca_shared_proc(get_lppaca()))
- get_lppaca()->donate_dedicated_cpu = 1;
-
- while (get_preferred_offline_state(cpu) == CPU_STATE_INACTIVE) {
- while (!prep_irq_for_idle()) {
- local_irq_enable();
- local_irq_disable();
- }
-
- extended_cede_processor(cede_latency_hint);
- }
-
- local_irq_disable();
-
- if (!lppaca_shared_proc(get_lppaca()))
- get_lppaca()->donate_dedicated_cpu = 0;
- get_lppaca()->idle = 0;
-
- if (get_preferred_offline_state(cpu) == CPU_STATE_ONLINE) {
- unregister_slb_shadow(hwcpu);
-
- hard_irq_disable();
- /*
- * Call to start_secondary_resume() will not return.
- * Kernel stack will be reset and start_secondary()
- * will be called to continue the online operation.
- */
- start_secondary_resume();
- }
- }
-
- /* Requested state is CPU_STATE_OFFLINE at this point */
- WARN_ON(get_preferred_offline_state(cpu) != CPU_STATE_OFFLINE);
-
- set_cpu_current_state(cpu, CPU_STATE_OFFLINE);
unregister_slb_shadow(hwcpu);
rtas_stop_self();
@@ -179,6 +93,9 @@ static int pseries_cpu_disable(void)
xive_smp_disable_cpu();
else
xics_migrate_irqs_away();
+
+ cleanup_cpu_mmu_context();
+
return 0;
}
@@ -191,114 +108,180 @@ static int pseries_cpu_disable(void)
* to self-destroy so that the cpu-offline thread can send the CPU_DEAD
* notifications.
*
- * OTOH, pseries_mach_cpu_die() is called by the @cpu when it wants to
+ * OTOH, pseries_cpu_offline_self() is called by the @cpu when it wants to
* self-destruct.
*/
static void pseries_cpu_die(unsigned int cpu)
{
- int tries;
int cpu_status = 1;
unsigned int pcpu = get_hard_smp_processor_id(cpu);
+ unsigned long timeout = jiffies + msecs_to_jiffies(120000);
- if (get_preferred_offline_state(cpu) == CPU_STATE_INACTIVE) {
- cpu_status = 1;
- for (tries = 0; tries < 5000; tries++) {
- if (get_cpu_current_state(cpu) == CPU_STATE_INACTIVE) {
- cpu_status = 0;
- break;
- }
- msleep(1);
- }
- } else if (get_preferred_offline_state(cpu) == CPU_STATE_OFFLINE) {
+ while (true) {
+ cpu_status = smp_query_cpu_stopped(pcpu);
+ if (cpu_status == QCSS_STOPPED ||
+ cpu_status == QCSS_HARDWARE_ERROR)
+ break;
- for (tries = 0; tries < 25; tries++) {
- cpu_status = smp_query_cpu_stopped(pcpu);
- if (cpu_status == QCSS_STOPPED ||
- cpu_status == QCSS_HARDWARE_ERROR)
- break;
- cpu_relax();
+ if (time_after(jiffies, timeout)) {
+ pr_warn("CPU %i (hwid %i) didn't die after 120 seconds\n",
+ cpu, pcpu);
+ timeout = jiffies + msecs_to_jiffies(120000);
}
+
+ cond_resched();
}
- if (cpu_status != 0) {
- printk("Querying DEAD? cpu %i (%i) shows %i\n",
- cpu, pcpu, cpu_status);
+ if (cpu_status == QCSS_HARDWARE_ERROR) {
+ pr_warn("CPU %i (hwid %i) reported error while dying\n",
+ cpu, pcpu);
}
- /* Isolation and deallocation are definitely done by
- * drslot_chrp_cpu. If they were not they would be
- * done here. Change isolate state to Isolate and
- * change allocation-state to Unusable.
- */
paca_ptrs[cpu]->cpu_start = 0;
}
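The reworked pseries_cpu_die() above replaces a fixed retry count with an open-ended poll: query the CPU's stop state, warn every 120 seconds if it still has not stopped, and yield between iterations. A userspace analogue of that loop shape, using wall-clock time instead of jiffies and a stand-in for smp_query_cpu_stopped() that pretends the CPU stops after about three seconds:

#include <stdio.h>
#include <time.h>
#include <unistd.h>

/* Stand-in for smp_query_cpu_stopped(): report "stopped" after ~3 seconds. */
static int query_stopped(time_t start)
{
	return (time(NULL) - start) >= 3;
}

int main(void)
{
	time_t start = time(NULL);
	time_t deadline = start + 120;       /* warn every 120 seconds */

	for (;;) {
		if (query_stopped(start))
			break;

		if (time(NULL) > deadline) {
			fprintf(stderr, "CPU didn't stop after 120 seconds, still waiting\n");
			deadline = time(NULL) + 120;   /* re-arm and keep waiting */
		}

		sleep(1);                        /* stand-in for cond_resched() */
	}

	printf("CPU stopped\n");
	return 0;
}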
+/**
+ * find_cpu_id_range - find a linear range of @nthreads free CPU ids.
+ * @nthreads: the number of threads (cpu ids)
+ * @assigned_node: the node the ids belong to, or NUMA_NO_NODE if free ids
+ * from any node can be picked.
+ * @cpu_mask: the returned CPU mask.
+ *
+ * Returns 0 on success.
+ */
+static int find_cpu_id_range(unsigned int nthreads, int assigned_node,
+ cpumask_var_t *cpu_mask)
+{
+ cpumask_var_t candidate_mask;
+ unsigned int cpu, node;
+ int rc = -ENOSPC;
+
+ if (!zalloc_cpumask_var(&candidate_mask, GFP_KERNEL))
+ return -ENOMEM;
+
+ cpumask_clear(*cpu_mask);
+ for (cpu = 0; cpu < nthreads; cpu++)
+ cpumask_set_cpu(cpu, *cpu_mask);
+
+ BUG_ON(!cpumask_subset(cpu_present_mask, cpu_possible_mask));
+
+ /* Get a bitmap of unoccupied slots. */
+ cpumask_xor(candidate_mask, cpu_possible_mask, cpu_present_mask);
+
+ if (assigned_node != NUMA_NO_NODE) {
+ /*
+ * Remove free ids previously assigned to the other nodes. We only
+ * need to walk online nodes because once a node comes online it is
+ * never taken offline again.
+ */
+ for_each_online_node(node) {
+ if (node == assigned_node)
+ continue;
+ cpumask_andnot(candidate_mask, candidate_mask,
+ node_recorded_ids_map[node]);
+ }
+ }
+
+ if (cpumask_empty(candidate_mask))
+ goto out;
+
+ while (!cpumask_empty(*cpu_mask)) {
+ if (cpumask_subset(*cpu_mask, candidate_mask))
+ /* Found a range where we can insert the new cpu(s) */
+ break;
+ cpumask_shift_left(*cpu_mask, *cpu_mask, nthreads);
+ }
+
+ if (!cpumask_empty(*cpu_mask))
+ rc = 0;
+
+out:
+ free_cpumask_var(candidate_mask);
+ return rc;
+}
+
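find_cpu_id_range() above looks for nthreads consecutive, nthreads-aligned CPU ids that are possible but not present (and, when a node is given, not already recorded against another node): it seeds a mask with the low nthreads bits and keeps shifting it left by nthreads until the whole window falls inside the candidate mask. The same search over a plain 64-bit word stands in for cpumask_t in this runnable sketch:

#include <stdio.h>
#include <stdint.h>

/*
 * Find an nthreads-wide, nthreads-aligned window of set bits in @candidates.
 * Returns the first bit of the window, or -1 if none fits in 64 bits.
 */
static int find_id_range(uint64_t candidates, unsigned int nthreads)
{
	uint64_t window = (nthreads >= 64) ? ~0ULL : ((1ULL << nthreads) - 1);
	unsigned int base = 0;

	while (window) {
		if ((window & candidates) == window)
			return base;            /* every id in the window is free */
		window <<= nthreads;            /* slide to the next aligned slot */
		base += nthreads;
	}
	return -1;
}

int main(void)
{
	/* ids 0-7 taken, 8-15 free; the possible ids are 0-15 */
	uint64_t possible = 0xffff, present = 0x00ff;
	uint64_t candidates = possible ^ present;   /* unoccupied ids */

	printf("free range starts at id %d\n", find_id_range(candidates, 8)); /* 8 */
	return 0;
}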
/*
* Update cpu_present_mask and paca(s) for a new cpu node. The wrinkle
- * here is that a cpu device node may represent up to two logical cpus
+ * here is that a cpu device node may represent multiple logical cpus
* in the SMT case. We must honor the assumption in other code that
* the logical ids for sibling SMT threads x and y are adjacent, such
* that x^1 == y and y^1 == x.
*/
static int pseries_add_processor(struct device_node *np)
{
- unsigned int cpu;
- cpumask_var_t candidate_mask, tmp;
- int err = -ENOSPC, len, nthreads, i;
+ int len, nthreads, node, cpu, assigned_node;
+ int rc = 0;
+ cpumask_var_t cpu_mask;
const __be32 *intserv;
intserv = of_get_property(np, "ibm,ppc-interrupt-server#s", &len);
if (!intserv)
return 0;
- zalloc_cpumask_var(&candidate_mask, GFP_KERNEL);
- zalloc_cpumask_var(&tmp, GFP_KERNEL);
-
nthreads = len / sizeof(u32);
- for (i = 0; i < nthreads; i++)
- cpumask_set_cpu(i, tmp);
- cpu_maps_update_begin();
+ if (!alloc_cpumask_var(&cpu_mask, GFP_KERNEL))
+ return -ENOMEM;
- BUG_ON(!cpumask_subset(cpu_present_mask, cpu_possible_mask));
+ /*
+ * Fetch the NUMA node id of the added CPU from the DT nodes read by
+ * dlpar_configure_connector().
+ */
+ node = of_node_to_nid(np);
+ if (node < 0 || !node_possible(node))
+ node = first_online_node;
- /* Get a bitmap of unoccupied slots. */
- cpumask_xor(candidate_mask, cpu_possible_mask, cpu_present_mask);
- if (cpumask_empty(candidate_mask)) {
- /* If we get here, it most likely means that NR_CPUS is
- * less than the partition's max processors setting.
+ BUG_ON(node == NUMA_NO_NODE);
+ assigned_node = node;
+
+ cpu_maps_update_begin();
+
+ rc = find_cpu_id_range(nthreads, node, &cpu_mask);
+ if (rc && nr_node_ids > 1) {
+ /*
+ * Try again, considering the free CPU ids from the other node.
*/
- printk(KERN_ERR "Cannot add cpu %pOF; this system configuration"
- " supports %d logical cpus.\n", np,
- num_possible_cpus());
- goto out_unlock;
+ node = NUMA_NO_NODE;
+ rc = find_cpu_id_range(nthreads, NUMA_NO_NODE, &cpu_mask);
}
- while (!cpumask_empty(tmp))
- if (cpumask_subset(tmp, candidate_mask))
- /* Found a range where we can insert the new cpu(s) */
- break;
- else
- cpumask_shift_left(tmp, tmp, nthreads);
-
- if (cpumask_empty(tmp)) {
- printk(KERN_ERR "Unable to find space in cpu_present_mask for"
- " processor %pOFn with %d thread(s)\n", np,
- nthreads);
- goto out_unlock;
+ if (rc) {
+ pr_err("Cannot add cpu %pOF; this system configuration"
+ " supports %d logical cpus.\n", np, num_possible_cpus());
+ goto out;
}
- for_each_cpu(cpu, tmp) {
+ for_each_cpu(cpu, cpu_mask) {
BUG_ON(cpu_present(cpu));
set_cpu_present(cpu, true);
set_hard_smp_processor_id(cpu, be32_to_cpu(*intserv++));
}
- err = 0;
-out_unlock:
+
+ /* Record the newly used CPU ids for the associated node. */
+ cpumask_or(node_recorded_ids_map[assigned_node],
+ node_recorded_ids_map[assigned_node], cpu_mask);
+
+ /*
+ * If node was set to NUMA_NO_NODE, the CPU ids have been reused from
+ * another node; remove them from that node's mask.
+ */
+ if (node == NUMA_NO_NODE) {
+ cpu = cpumask_first(cpu_mask);
+ pr_warn("Reusing free CPU ids %d-%d from another node\n",
+ cpu, cpu + nthreads - 1);
+ for_each_online_node(node) {
+ if (node == assigned_node)
+ continue;
+ cpumask_andnot(node_recorded_ids_map[node],
+ node_recorded_ids_map[node],
+ cpu_mask);
+ }
+ }
+
+out:
cpu_maps_update_done();
- free_cpumask_var(candidate_mask);
- free_cpumask_var(tmp);
- return err;
+ free_cpumask_var(cpu_mask);
+ return rc;
}
/*
@@ -359,28 +342,27 @@ static int dlpar_offline_cpu(struct device_node *dn)
if (get_hard_smp_processor_id(cpu) != thread)
continue;
- if (get_cpu_current_state(cpu) == CPU_STATE_OFFLINE)
+ if (!cpu_online(cpu))
break;
- if (get_cpu_current_state(cpu) == CPU_STATE_ONLINE) {
- set_preferred_offline_state(cpu,
- CPU_STATE_OFFLINE);
- cpu_maps_update_done();
- timed_topology_update(1);
- rc = device_offline(get_cpu_device(cpu));
- if (rc)
- goto out;
- cpu_maps_update_begin();
- break;
- }
-
/*
- * The cpu is in CPU_STATE_INACTIVE.
- * Upgrade it's state to CPU_STATE_OFFLINE.
+ * device_offline() will return -EBUSY (via cpu_down()) if there
+ * is only one CPU left. Check it here to fail earlier and with a
+ * more informative error message, while also retaining the
+ * cpu_add_remove_lock to be sure that no CPUs are being
+ * online/offlined during this check.
*/
- set_preferred_offline_state(cpu, CPU_STATE_OFFLINE);
- WARN_ON(plpar_hcall_norets(H_PROD, thread) != H_SUCCESS);
- __cpu_die(cpu);
+ if (num_online_cpus() == 1) {
+ pr_warn("Unable to remove last online CPU %pOFn\n", dn);
+ rc = -EBUSY;
+ goto out_unlock;
+ }
+
+ cpu_maps_update_done();
+ rc = device_offline(get_cpu_device(cpu));
+ if (rc)
+ goto out;
+ cpu_maps_update_begin();
break;
}
if (cpu == num_possible_cpus()) {
@@ -388,6 +370,7 @@ static int dlpar_offline_cpu(struct device_node *dn)
thread);
}
}
+out_unlock:
cpu_maps_update_done();
out:
@@ -414,11 +397,8 @@ static int dlpar_online_cpu(struct device_node *dn)
for_each_present_cpu(cpu) {
if (get_hard_smp_processor_id(cpu) != thread)
continue;
- BUG_ON(get_cpu_current_state(cpu)
- != CPU_STATE_OFFLINE);
cpu_maps_update_done();
- timed_topology_update(1);
- find_and_online_cpu_nid(cpu);
+ find_and_update_cpu_nid(cpu);
rc = device_online(get_cpu_device(cpu));
if (rc) {
dlpar_offline_cpu(dn);
@@ -536,6 +516,27 @@ static bool valid_cpu_drc_index(struct device_node *parent, u32 drc_index)
return found;
}
+static int pseries_cpuhp_attach_nodes(struct device_node *dn)
+{
+ struct of_changeset cs;
+ int ret;
+
+ /*
+ * This device node is unattached but may have siblings; open-code the
+ * traversal.
+ */
+ for (of_changeset_init(&cs); dn != NULL; dn = dn->sibling) {
+ ret = of_changeset_attach_node(&cs, dn);
+ if (ret)
+ goto out;
+ }
+
+ ret = of_changeset_apply(&cs);
+out:
+ of_changeset_destroy(&cs);
+ return ret;
+}
+
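pseries_cpuhp_attach_nodes() above stages the new node and all of its siblings into an of_changeset and only applies the set if every stage step succeeded, destroying the changeset on both paths. The shape is a build-then-commit transaction; the sketch below mocks it in userspace, and the changeset_* helpers are stand-ins, not the kernel's of_changeset API:

#include <stdio.h>

struct node { const char *name; struct node *sibling; };
struct changeset { int staged; };

/* Stand-ins for of_changeset_init/attach_node/apply/destroy. */
static void changeset_init(struct changeset *cs)    { cs->staged = 0; }
static int changeset_attach(struct changeset *cs, struct node *n)
{
	printf("staging %s\n", n->name);
	cs->staged++;
	return 0;                       /* a real attach can fail */
}
static int changeset_apply(struct changeset *cs)
{
	printf("applying %d nodes\n", cs->staged);
	return 0;
}
static void changeset_destroy(struct changeset *cs) { cs->staged = 0; }

static int attach_nodes(struct node *dn)
{
	struct changeset cs;
	int ret = 0;

	/* Stage the node and all of its siblings, then commit in one go. */
	for (changeset_init(&cs); dn != NULL; dn = dn->sibling) {
		ret = changeset_attach(&cs, dn);
		if (ret)
			goto out;               /* nothing applied, nothing to undo */
	}
	ret = changeset_apply(&cs);
out:
	changeset_destroy(&cs);
	return ret;
}

int main(void)
{
	struct node l2 = { "cache@1", NULL }, cpu = { "cpu@0", &l2 };
	return attach_nodes(&cpu);
}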
static ssize_t dlpar_cpu_add(u32 drc_index)
{
struct device_node *dn, *parent;
@@ -578,7 +579,7 @@ static ssize_t dlpar_cpu_add(u32 drc_index)
return -EINVAL;
}
- rc = dlpar_attach_node(dn, parent);
+ rc = pseries_cpuhp_attach_nodes(dn);
/* Regardless we are done with parent now */
of_node_put(parent);
@@ -595,6 +596,8 @@ static ssize_t dlpar_cpu_add(u32 drc_index)
return saved_rc;
}
+ update_numa_distance(dn);
+
rc = dlpar_online_cpu(dn);
if (rc) {
saved_rc = rc;
@@ -613,6 +616,60 @@ static ssize_t dlpar_cpu_add(u32 drc_index)
return rc;
}
+static unsigned int pseries_cpuhp_cache_use_count(const struct device_node *cachedn)
+{
+ unsigned int use_count = 0;
+ struct device_node *dn, *tn;
+
+ WARN_ON(!of_node_is_type(cachedn, "cache"));
+
+ for_each_of_cpu_node(dn) {
+ tn = of_find_next_cache_node(dn);
+ of_node_put(tn);
+ if (tn == cachedn)
+ use_count++;
+ }
+
+ for_each_node_by_type(dn, "cache") {
+ tn = of_find_next_cache_node(dn);
+ of_node_put(tn);
+ if (tn == cachedn)
+ use_count++;
+ }
+
+ return use_count;
+}
+
+static int pseries_cpuhp_detach_nodes(struct device_node *cpudn)
+{
+ struct device_node *dn;
+ struct of_changeset cs;
+ int ret = 0;
+
+ of_changeset_init(&cs);
+ ret = of_changeset_detach_node(&cs, cpudn);
+ if (ret)
+ goto out;
+
+ dn = cpudn;
+ while ((dn = of_find_next_cache_node(dn))) {
+ if (pseries_cpuhp_cache_use_count(dn) > 1) {
+ of_node_put(dn);
+ break;
+ }
+
+ ret = of_changeset_detach_node(&cs, dn);
+ of_node_put(dn);
+ if (ret)
+ goto out;
+ }
+
+ ret = of_changeset_apply(&cs);
+out:
+ of_changeset_destroy(&cs);
+ return ret;
+}
+
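pseries_cpuhp_detach_nodes() above walks the departing CPU's cache chain and stops detaching at the first cache level that anything else still references, which is the count that pseries_cpuhp_cache_use_count() computes. A toy model of that decision, with caches represented as nodes carrying an explicit user count (the data structure is invented purely for illustration):

#include <stdio.h>

struct cache { const char *name; int users; struct cache *next_level; };

/* Detach the CPU's private cache levels; stop at the first shared one. */
static void detach_cache_chain(struct cache *c)
{
	while (c) {
		if (c->users > 1) {
			printf("%s is shared (%d users), keeping it\n", c->name, c->users);
			break;
		}
		printf("detaching %s\n", c->name);
		c = c->next_level;
	}
}

int main(void)
{
	struct cache l3 = { "l3-cache", 8, NULL };      /* shared by the whole chip */
	struct cache l2 = { "l2-cache", 1, &l3 };       /* private to this CPU */

	detach_cache_chain(&l2);   /* detaches l2-cache, keeps l3-cache */
	return 0;
}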
static ssize_t dlpar_cpu_remove(struct device_node *dn, u32 drc_index)
{
int rc;
@@ -634,7 +691,7 @@ static ssize_t dlpar_cpu_remove(struct device_node *dn, u32 drc_index)
return rc;
}
- rc = dlpar_detach_node(dn);
+ rc = pseries_cpuhp_detach_nodes(dn);
if (rc) {
int saved_rc = rc;
@@ -686,258 +743,32 @@ static int dlpar_cpu_remove_by_index(u32 drc_index)
return rc;
}
-static int find_dlpar_cpus_to_remove(u32 *cpu_drcs, int cpus_to_remove)
-{
- struct device_node *dn;
- int cpus_found = 0;
- int rc;
-
- /* We want to find cpus_to_remove + 1 CPUs to ensure we do not
- * remove the last CPU.
- */
- for_each_node_by_type(dn, "cpu") {
- cpus_found++;
-
- if (cpus_found > cpus_to_remove) {
- of_node_put(dn);
- break;
- }
-
- /* Note that cpus_found is always 1 ahead of the index
- * into the cpu_drcs array, so we use cpus_found - 1
- */
- rc = of_property_read_u32(dn, "ibm,my-drc-index",
- &cpu_drcs[cpus_found - 1]);
- if (rc) {
- pr_warn("Error occurred getting drc-index for %pOFn\n",
- dn);
- of_node_put(dn);
- return -1;
- }
- }
-
- if (cpus_found < cpus_to_remove) {
- pr_warn("Failed to find enough CPUs (%d of %d) to remove\n",
- cpus_found, cpus_to_remove);
- } else if (cpus_found == cpus_to_remove) {
- pr_warn("Cannot remove all CPUs\n");
- }
-
- return cpus_found;
-}
-
-static int dlpar_cpu_remove_by_count(u32 cpus_to_remove)
-{
- u32 *cpu_drcs;
- int cpus_found;
- int cpus_removed = 0;
- int i, rc;
-
- pr_debug("Attempting to hot-remove %d CPUs\n", cpus_to_remove);
-
- cpu_drcs = kcalloc(cpus_to_remove, sizeof(*cpu_drcs), GFP_KERNEL);
- if (!cpu_drcs)
- return -EINVAL;
-
- cpus_found = find_dlpar_cpus_to_remove(cpu_drcs, cpus_to_remove);
- if (cpus_found <= cpus_to_remove) {
- kfree(cpu_drcs);
- return -EINVAL;
- }
-
- for (i = 0; i < cpus_to_remove; i++) {
- rc = dlpar_cpu_remove_by_index(cpu_drcs[i]);
- if (rc)
- break;
-
- cpus_removed++;
- }
-
- if (cpus_removed != cpus_to_remove) {
- pr_warn("CPU hot-remove failed, adding back removed CPUs\n");
-
- for (i = 0; i < cpus_removed; i++)
- dlpar_cpu_add(cpu_drcs[i]);
-
- rc = -EINVAL;
- } else {
- rc = 0;
- }
-
- kfree(cpu_drcs);
- return rc;
-}
-
-static int find_drc_info_cpus_to_add(struct device_node *cpus,
- struct property *info,
- u32 *cpu_drcs, u32 cpus_to_add)
-{
- struct of_drc_info drc;
- const __be32 *value;
- u32 count, drc_index;
- int cpus_found = 0;
- int i, j;
-
- if (!info)
- return -1;
-
- value = of_prop_next_u32(info, NULL, &count);
- if (value)
- value++;
-
- for (i = 0; i < count; i++) {
- of_read_drc_info_cell(&info, &value, &drc);
- if (strncmp(drc.drc_type, "CPU", 3))
- break;
-
- drc_index = drc.drc_index_start;
- for (j = 0; j < drc.num_sequential_elems; j++) {
- if (dlpar_cpu_exists(cpus, drc_index))
- continue;
-
- cpu_drcs[cpus_found++] = drc_index;
-
- if (cpus_found == cpus_to_add)
- return cpus_found;
-
- drc_index += drc.sequential_inc;
- }
- }
-
- return cpus_found;
-}
-
-static int find_drc_index_cpus_to_add(struct device_node *cpus,
- u32 *cpu_drcs, u32 cpus_to_add)
-{
- int cpus_found = 0;
- int index, rc;
- u32 drc_index;
-
- /* Search the ibm,drc-indexes array for possible CPU drcs to
- * add. Note that the format of the ibm,drc-indexes array is
- * the number of entries in the array followed by the array
- * of drc values so we start looking at index = 1.
- */
- index = 1;
- while (cpus_found < cpus_to_add) {
- rc = of_property_read_u32_index(cpus, "ibm,drc-indexes",
- index++, &drc_index);
-
- if (rc)
- break;
-
- if (dlpar_cpu_exists(cpus, drc_index))
- continue;
-
- cpu_drcs[cpus_found++] = drc_index;
- }
-
- return cpus_found;
-}
-
-static int dlpar_cpu_add_by_count(u32 cpus_to_add)
-{
- struct device_node *parent;
- struct property *info;
- u32 *cpu_drcs;
- int cpus_added = 0;
- int cpus_found;
- int i, rc;
-
- pr_debug("Attempting to hot-add %d CPUs\n", cpus_to_add);
-
- cpu_drcs = kcalloc(cpus_to_add, sizeof(*cpu_drcs), GFP_KERNEL);
- if (!cpu_drcs)
- return -EINVAL;
-
- parent = of_find_node_by_path("/cpus");
- if (!parent) {
- pr_warn("Could not find CPU root node in device tree\n");
- kfree(cpu_drcs);
- return -1;
- }
-
- info = of_find_property(parent, "ibm,drc-info", NULL);
- if (info)
- cpus_found = find_drc_info_cpus_to_add(parent, info, cpu_drcs, cpus_to_add);
- else
- cpus_found = find_drc_index_cpus_to_add(parent, cpu_drcs, cpus_to_add);
-
- of_node_put(parent);
-
- if (cpus_found < cpus_to_add) {
- pr_warn("Failed to find enough CPUs (%d of %d) to add\n",
- cpus_found, cpus_to_add);
- kfree(cpu_drcs);
- return -EINVAL;
- }
-
- for (i = 0; i < cpus_to_add; i++) {
- rc = dlpar_cpu_add(cpu_drcs[i]);
- if (rc)
- break;
-
- cpus_added++;
- }
-
- if (cpus_added < cpus_to_add) {
- pr_warn("CPU hot-add failed, removing any added CPUs\n");
-
- for (i = 0; i < cpus_added; i++)
- dlpar_cpu_remove_by_index(cpu_drcs[i]);
-
- rc = -EINVAL;
- } else {
- rc = 0;
- }
-
- kfree(cpu_drcs);
- return rc;
-}
-
-int dlpar_cpu_readd(int cpu)
-{
- struct device_node *dn;
- struct device *dev;
- u32 drc_index;
- int rc;
-
- dev = get_cpu_device(cpu);
- dn = dev->of_node;
-
- rc = of_property_read_u32(dn, "ibm,my-drc-index", &drc_index);
-
- rc = dlpar_cpu_remove_by_index(drc_index);
- if (!rc)
- rc = dlpar_cpu_add(drc_index);
-
- return rc;
-}
-
int dlpar_cpu(struct pseries_hp_errorlog *hp_elog)
{
- u32 count, drc_index;
+ u32 drc_index;
int rc;
- count = hp_elog->_drc_u.drc_count;
drc_index = hp_elog->_drc_u.drc_index;
lock_device_hotplug();
switch (hp_elog->action) {
case PSERIES_HP_ELOG_ACTION_REMOVE:
- if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_COUNT)
- rc = dlpar_cpu_remove_by_count(count);
- else if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_INDEX)
+ if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_INDEX) {
rc = dlpar_cpu_remove_by_index(drc_index);
+ /*
+ * Setting the isolation state of an UNISOLATED/CONFIGURED
+ * device to UNISOLATE is a no-op, but the hypervisor can
+ * use it as a hint that the CPU removal failed.
+ */
+ if (rc)
+ dlpar_unisolate_drc(drc_index);
+ }
else
rc = -EINVAL;
break;
case PSERIES_HP_ELOG_ACTION_ADD:
- if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_COUNT)
- rc = dlpar_cpu_add_by_count(count);
- else if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_INDEX)
+ if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_INDEX)
rc = dlpar_cpu_add(drc_index);
else
rc = -EINVAL;
@@ -1013,28 +844,10 @@ static struct notifier_block pseries_smp_nb = {
.notifier_call = pseries_smp_notifier,
};
-#define MAX_CEDE_LATENCY_LEVELS 4
-#define CEDE_LATENCY_PARAM_LENGTH 10
-#define CEDE_LATENCY_PARAM_MAX_LENGTH \
- (MAX_CEDE_LATENCY_LEVELS * CEDE_LATENCY_PARAM_LENGTH * sizeof(char))
-#define CEDE_LATENCY_TOKEN 45
-
-static char cede_parameters[CEDE_LATENCY_PARAM_MAX_LENGTH];
-
-static int parse_cede_parameters(void)
-{
- memset(cede_parameters, 0, CEDE_LATENCY_PARAM_MAX_LENGTH);
- return rtas_call(rtas_token("ibm,get-system-parameter"), 3, 1,
- NULL,
- CEDE_LATENCY_TOKEN,
- __pa(cede_parameters),
- CEDE_LATENCY_PARAM_MAX_LENGTH);
-}
-
static int __init pseries_cpu_hotplug_init(void)
{
- int cpu;
int qcss_tok;
+ unsigned int node;
#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
ppc_md.cpu_probe = dlpar_cpu_probe;
@@ -1051,20 +864,23 @@ static int __init pseries_cpu_hotplug_init(void)
return 0;
}
- ppc_md.cpu_die = pseries_mach_cpu_die;
+ smp_ops->cpu_offline_self = pseries_cpu_offline_self;
smp_ops->cpu_disable = pseries_cpu_disable;
smp_ops->cpu_die = pseries_cpu_die;
/* Processors can be added/removed only on LPAR */
if (firmware_has_feature(FW_FEATURE_LPAR)) {
- of_reconfig_notifier_register(&pseries_smp_nb);
- cpu_maps_update_begin();
- if (cede_offline_enabled && parse_cede_parameters() == 0) {
- default_offline_state = CPU_STATE_INACTIVE;
- for_each_online_cpu(cpu)
- set_default_offline_state(cpu);
+ for_each_node(node) {
+ if (!alloc_cpumask_var_node(&node_recorded_ids_map[node],
+ GFP_KERNEL, node))
+ return -ENOMEM;
+
+ /* Record ids of CPU added at boot time */
+ cpumask_copy(node_recorded_ids_map[node],
+ cpumask_of_node(node));
}
- cpu_maps_update_done();
+
+ of_reconfig_notifier_register(&pseries_smp_nb);
}
return 0;
diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
index a4d40a3ceea3..2e3a317722a8 100644
--- a/arch/powerpc/platforms/pseries/hotplug-memory.c
+++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
@@ -16,28 +16,30 @@
#include <asm/firmware.h>
#include <asm/machdep.h>
-#include <asm/prom.h>
#include <asm/sparsemem.h>
#include <asm/fadump.h>
#include <asm/drmem.h>
#include "pseries.h"
-static bool rtas_hp_event;
-
unsigned long pseries_memory_block_size(void)
{
struct device_node *np;
- unsigned int memblock_size = MIN_MEMORY_BLOCK_SIZE;
+ u64 memblock_size = MIN_MEMORY_BLOCK_SIZE;
struct resource r;
np = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
if (np) {
- const __be64 *size;
+ int len;
+ int size_cells;
+ const __be32 *prop;
+
+ size_cells = of_n_size_cells(np);
- size = of_get_property(np, "ibm,lmb-size", NULL);
- if (size)
- memblock_size = be64_to_cpup(size);
+ prop = of_get_property(np, "ibm,lmb-size", &len);
+ if (prop && len >= size_cells * sizeof(__be32))
+ memblock_size = of_read_number(prop, size_cells);
of_node_put(np);
+
} else if (machine_is(pseries)) {
/* This fallback really only applies to pseries */
unsigned int memzero_size = 0;
@@ -177,6 +179,8 @@ static int update_lmb_associativity_index(struct drmem_lmb *lmb)
return -ENODEV;
}
+ update_numa_distance(lmb_node);
+
dr_node = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
if (!dr_node) {
dlpar_free_cc_nodes(lmb_node);
@@ -208,13 +212,11 @@ static int update_lmb_associativity_index(struct drmem_lmb *lmb)
static struct memory_block *lmb_to_memblock(struct drmem_lmb *lmb)
{
unsigned long section_nr;
- struct mem_section *mem_sect;
struct memory_block *mem_block;
section_nr = pfn_to_section_nr(PFN_DOWN(lmb->base_addr));
- mem_sect = __nr_to_section(section_nr);
- mem_block = find_memory_block(mem_sect);
+ mem_block = find_memory_block(section_nr);
return mem_block;
}
@@ -223,7 +225,7 @@ static int get_lmb_range(u32 drc_index, int n_lmbs,
struct drmem_lmb **end_lmb)
{
struct drmem_lmb *lmb, *start, *end;
- struct drmem_lmb *last_lmb;
+ struct drmem_lmb *limit;
start = NULL;
for_each_drmem_lmb(lmb) {
@@ -236,10 +238,10 @@ static int get_lmb_range(u32 drc_index, int n_lmbs,
if (!start)
return -EINVAL;
- end = &start[n_lmbs - 1];
+ end = &start[n_lmbs];
- last_lmb = &drmem_info->lmbs[drmem_info->n_lmbs - 1];
- if (end > last_lmb)
+ limit = &drmem_info->lmbs[drmem_info->n_lmbs];
+ if (end > limit)
return -EINVAL;
*start_lmb = start;
@@ -279,11 +281,11 @@ static int dlpar_offline_lmb(struct drmem_lmb *lmb)
return dlpar_change_lmb_state(lmb, false);
}
-static int pseries_remove_memblock(unsigned long base, unsigned int memblock_size)
+static int pseries_remove_memblock(unsigned long base, unsigned long memblock_size)
{
unsigned long block_sz, start_pfn;
int sections_per_block;
- int i, nid;
+ int i;
start_pfn = base >> PAGE_SHIFT;
@@ -294,10 +296,9 @@ static int pseries_remove_memblock(unsigned long base, unsigned int memblock_siz
block_sz = pseries_memory_block_size();
sections_per_block = block_sz / MIN_MEMORY_BLOCK_SIZE;
- nid = memory_add_physaddr_to_nid(base);
for (i = 0; i < sections_per_block; i++) {
- __remove_memory(nid, base, MIN_MEMORY_BLOCK_SIZE);
+ __remove_memory(base, MIN_MEMORY_BLOCK_SIZE);
base += MIN_MEMORY_BLOCK_SIZE;
}
@@ -310,10 +311,11 @@ out:
static int pseries_remove_mem_node(struct device_node *np)
{
- const __be32 *regs;
+ const __be32 *prop;
unsigned long base;
- unsigned int lmb_size;
+ unsigned long lmb_size;
int ret = -EINVAL;
+ int addr_cells, size_cells;
/*
* Check to see if we are actually removing memory
@@ -324,12 +326,19 @@ static int pseries_remove_mem_node(struct device_node *np)
/*
* Find the base address and size of the memblock
*/
- regs = of_get_property(np, "reg", NULL);
- if (!regs)
+ prop = of_get_property(np, "reg", NULL);
+ if (!prop)
return ret;
- base = be64_to_cpu(*(unsigned long *)regs);
- lmb_size = be32_to_cpu(regs[3]);
+ addr_cells = of_n_addr_cells(np);
+ size_cells = of_n_size_cells(np);
+
+ /*
+ * "reg" property represents (addr,size) tuple.
+ */
+ base = of_read_number(prop, addr_cells);
+ prop += addr_cells;
+ lmb_size = of_read_number(prop, size_cells);
pseries_remove_memblock(base, lmb_size);
return 0;
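The rewritten pseries_remove_mem_node() and pseries_add_mem_node() stop assuming a fixed "reg" layout and instead read #address-cells worth of cells for the base and #size-cells for the LMB size, which is what of_read_number() does: fold N big-endian 32-bit cells into one 64-bit value. A standalone sketch of that parsing, with the property bytes built by hand:

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>   /* ntohl()/htonl(), to model be32_to_cpu() */

/* Fold @cells big-endian 32-bit words into one number, like of_read_number(). */
static uint64_t read_number(const uint32_t *prop, int cells)
{
	uint64_t r = 0;

	while (cells--)
		r = (r << 32) | ntohl(*prop++);
	return r;
}

int main(void)
{
	/* "reg" = <0x0 0x40000000  0x0 0x10000000>: base 1 GiB, size 256 MiB */
	const uint32_t reg[] = { htonl(0x0), htonl(0x40000000),
				 htonl(0x0), htonl(0x10000000) };
	int addr_cells = 2, size_cells = 2;

	uint64_t base = read_number(reg, addr_cells);
	uint64_t size = read_number(reg + addr_cells, size_cells);

	printf("base = 0x%llx, size = 0x%llx\n",
	       (unsigned long long)base, (unsigned long long)size);
	return 0;
}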
@@ -337,64 +346,52 @@ static int pseries_remove_mem_node(struct device_node *np)
static bool lmb_is_removable(struct drmem_lmb *lmb)
{
- int i, scns_per_block;
- bool rc = true;
- unsigned long pfn, block_sz;
- u64 phys_addr;
-
- if (!(lmb->flags & DRCONF_MEM_ASSIGNED))
+ if ((lmb->flags & DRCONF_MEM_RESERVED) ||
+ !(lmb->flags & DRCONF_MEM_ASSIGNED))
return false;
- block_sz = memory_block_size_bytes();
- scns_per_block = block_sz / MIN_MEMORY_BLOCK_SIZE;
- phys_addr = lmb->base_addr;
-
#ifdef CONFIG_FA_DUMP
/*
* Don't hot-remove memory that falls in fadump boot memory area
* and memory that is reserved for capturing old kernel memory.
*/
- if (is_fadump_memory_area(phys_addr, block_sz))
+ if (is_fadump_memory_area(lmb->base_addr, memory_block_size_bytes()))
return false;
#endif
-
- for (i = 0; i < scns_per_block; i++) {
- pfn = PFN_DOWN(phys_addr);
- if (!pfn_present(pfn)) {
- phys_addr += MIN_MEMORY_BLOCK_SIZE;
- continue;
- }
-
- rc = rc && is_mem_section_removable(pfn, PAGES_PER_SECTION);
- phys_addr += MIN_MEMORY_BLOCK_SIZE;
- }
-
- return rc;
+ /* device_offline() will determine if we can actually remove this lmb */
+ return true;
}
static int dlpar_add_lmb(struct drmem_lmb *);
static int dlpar_remove_lmb(struct drmem_lmb *lmb)
{
+ struct memory_block *mem_block;
unsigned long block_sz;
int rc;
if (!lmb_is_removable(lmb))
return -EINVAL;
+ mem_block = lmb_to_memblock(lmb);
+ if (mem_block == NULL)
+ return -EINVAL;
+
rc = dlpar_offline_lmb(lmb);
- if (rc)
+ if (rc) {
+ put_device(&mem_block->dev);
return rc;
+ }
block_sz = pseries_memory_block_size();
- __remove_memory(lmb->nid, lmb->base_addr, block_sz);
+ __remove_memory(lmb->base_addr, block_sz);
+ put_device(&mem_block->dev);
/* Update memory regions for memory remove */
memblock_remove(lmb->base_addr, block_sz);
invalidate_lmb_associativity_index(lmb);
- lmb_clear_nid(lmb);
lmb->flags &= ~DRCONF_MEM_ASSIGNED;
return 0;
@@ -403,7 +400,7 @@ static int dlpar_remove_lmb(struct drmem_lmb *lmb)
static int dlpar_memory_remove_by_count(u32 lmbs_to_remove)
{
struct drmem_lmb *lmb;
- int lmbs_removed = 0;
+ int lmbs_reserved = 0;
int lmbs_available = 0;
int rc;
@@ -437,12 +434,12 @@ static int dlpar_memory_remove_by_count(u32 lmbs_to_remove)
*/
drmem_mark_lmb_reserved(lmb);
- lmbs_removed++;
- if (lmbs_removed == lmbs_to_remove)
+ lmbs_reserved++;
+ if (lmbs_reserved == lmbs_to_remove)
break;
}
- if (lmbs_removed != lmbs_to_remove) {
+ if (lmbs_reserved != lmbs_to_remove) {
pr_err("Memory hot-remove failed, adding LMB's back\n");
for_each_drmem_lmb(lmb) {
@@ -455,6 +452,10 @@ static int dlpar_memory_remove_by_count(u32 lmbs_to_remove)
lmb->drc_index);
drmem_remove_lmb_reservation(lmb);
+
+ lmbs_reserved--;
+ if (lmbs_reserved == 0)
+ break;
}
rc = -EINVAL;
@@ -468,6 +469,10 @@ static int dlpar_memory_remove_by_count(u32 lmbs_to_remove)
lmb->base_addr);
drmem_remove_lmb_reservation(lmb);
+
+ lmbs_reserved--;
+ if (lmbs_reserved == 0)
+ break;
}
rc = 0;
}
@@ -481,7 +486,7 @@ static int dlpar_memory_remove_by_index(u32 drc_index)
int lmb_found;
int rc;
- pr_info("Attempting to hot-remove LMB, drc index %x\n", drc_index);
+ pr_debug("Attempting to hot-remove LMB, drc index %x\n", drc_index);
lmb_found = 0;
for_each_drmem_lmb(lmb) {
@@ -499,44 +504,10 @@ static int dlpar_memory_remove_by_index(u32 drc_index)
rc = -EINVAL;
if (rc)
- pr_info("Failed to hot-remove memory at %llx\n",
- lmb->base_addr);
+ pr_debug("Failed to hot-remove memory at %llx\n",
+ lmb->base_addr);
else
- pr_info("Memory at %llx was hot-removed\n", lmb->base_addr);
-
- return rc;
-}
-
-static int dlpar_memory_readd_by_index(u32 drc_index)
-{
- struct drmem_lmb *lmb;
- int lmb_found;
- int rc;
-
- pr_info("Attempting to update LMB, drc index %x\n", drc_index);
-
- lmb_found = 0;
- for_each_drmem_lmb(lmb) {
- if (lmb->drc_index == drc_index) {
- lmb_found = 1;
- rc = dlpar_remove_lmb(lmb);
- if (!rc) {
- rc = dlpar_add_lmb(lmb);
- if (rc)
- dlpar_release_drc(lmb->drc_index);
- }
- break;
- }
- }
-
- if (!lmb_found)
- rc = -EINVAL;
-
- if (rc)
- pr_info("Failed to update memory at %llx\n",
- lmb->base_addr);
- else
- pr_info("Memory at %llx was updated\n", lmb->base_addr);
+ pr_debug("Memory at %llx was hot-removed\n", lmb->base_addr);
return rc;
}
@@ -544,7 +515,6 @@ static int dlpar_memory_readd_by_index(u32 drc_index)
static int dlpar_memory_remove_by_ic(u32 lmbs_to_remove, u32 drc_index)
{
struct drmem_lmb *lmb, *start_lmb, *end_lmb;
- int lmbs_available = 0;
int rc;
pr_info("Attempting to hot-remove %u LMB(s) at %x\n",
@@ -557,18 +527,29 @@ static int dlpar_memory_remove_by_ic(u32 lmbs_to_remove, u32 drc_index)
if (rc)
return -EINVAL;
- /* Validate that there are enough LMBs to satisfy the request */
+ /*
+ * Validate that none of the LMBs in the range is reserved. Note that
+ * it is OK if they are !ASSIGNED, since our goal here is to remove the
+ * LMB range, regardless of whether some LMBs were already removed for
+ * any other reason.
+ *
+ * This is in contrast to remove_by_count(), where we check for both
+ * RESERVED and !ASSIGNED (via lmb_is_removable()) because there we
+ * want to remove a fixed number of LMBs.
+ */
for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
- if (lmb->flags & DRCONF_MEM_RESERVED)
- break;
-
- lmbs_available++;
+ if (lmb->flags & DRCONF_MEM_RESERVED) {
+ pr_err("Memory at %llx (drc index %x) is reserved\n",
+ lmb->base_addr, lmb->drc_index);
+ return -EINVAL;
+ }
}
- if (lmbs_available < lmbs_to_remove)
- return -EINVAL;
-
for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
+ /*
+ * dlpar_remove_lmb() will error out if the LMB is already
+ * !ASSIGNED, but this case is a no-op for us.
+ */
if (!(lmb->flags & DRCONF_MEM_ASSIGNED))
continue;
@@ -587,6 +568,13 @@ static int dlpar_memory_remove_by_ic(u32 lmbs_to_remove, u32 drc_index)
if (!drmem_lmb_reserved(lmb))
continue;
+ /*
+ * Setting the isolation state of an UNISOLATED/CONFIGURED
+ * device to UNISOLATE is a no-op, but the hypervisor can
+ * use it as a hint that the LMB removal failed.
+ */
+ dlpar_unisolate_drc(lmb->drc_index);
+
rc = dlpar_add_lmb(lmb);
if (rc)
pr_err("Failed to add LMB, drc index %x\n",
@@ -613,7 +601,7 @@ static int dlpar_memory_remove_by_ic(u32 lmbs_to_remove, u32 drc_index)
#else
static inline int pseries_remove_memblock(unsigned long base,
- unsigned int memblock_size)
+ unsigned long memblock_size)
{
return -EOPNOTSUPP;
}
@@ -621,10 +609,6 @@ static inline int pseries_remove_mem_node(struct device_node *np)
{
return 0;
}
-static inline int dlpar_memory_remove(struct pseries_hp_errorlog *hp_elog)
-{
- return -EOPNOTSUPP;
-}
static int dlpar_remove_lmb(struct drmem_lmb *lmb)
{
return -EOPNOTSUPP;
@@ -637,10 +621,6 @@ static int dlpar_memory_remove_by_index(u32 drc_index)
{
return -EOPNOTSUPP;
}
-static int dlpar_memory_readd_by_index(u32 drc_index)
-{
- return -EOPNOTSUPP;
-}
static int dlpar_memory_remove_by_ic(u32 lmbs_to_remove, u32 drc_index)
{
@@ -651,7 +631,7 @@ static int dlpar_memory_remove_by_ic(u32 lmbs_to_remove, u32 drc_index)
static int dlpar_add_lmb(struct drmem_lmb *lmb)
{
unsigned long block_sz;
- int rc;
+ int nid, rc;
if (lmb->flags & DRCONF_MEM_ASSIGNED)
return -EINVAL;
@@ -662,11 +642,15 @@ static int dlpar_add_lmb(struct drmem_lmb *lmb)
return rc;
}
- lmb_set_nid(lmb);
block_sz = memory_block_size_bytes();
+ /* Find the node id for this LMB. Fake one if necessary. */
+ nid = of_drconf_to_nid_single(lmb);
+ if (nid < 0 || !node_possible(nid))
+ nid = first_online_node;
+
/* Add the memory */
- rc = __add_memory(lmb->nid, lmb->base_addr, block_sz);
+ rc = __add_memory(nid, lmb->base_addr, block_sz, MHP_NONE);
if (rc) {
invalidate_lmb_associativity_index(lmb);
return rc;
@@ -674,9 +658,8 @@ static int dlpar_add_lmb(struct drmem_lmb *lmb)
rc = dlpar_online_lmb(lmb);
if (rc) {
- __remove_memory(lmb->nid, lmb->base_addr, block_sz);
+ __remove_memory(lmb->base_addr, block_sz);
invalidate_lmb_associativity_index(lmb);
- lmb_clear_nid(lmb);
} else {
lmb->flags |= DRCONF_MEM_ASSIGNED;
}
@@ -688,7 +671,7 @@ static int dlpar_memory_add_by_count(u32 lmbs_to_add)
{
struct drmem_lmb *lmb;
int lmbs_available = 0;
- int lmbs_added = 0;
+ int lmbs_reserved = 0;
int rc;
pr_info("Attempting to hot-add %d LMB(s)\n", lmbs_to_add);
@@ -698,6 +681,9 @@ static int dlpar_memory_add_by_count(u32 lmbs_to_add)
/* Validate that there are enough LMBs to satisfy the request */
for_each_drmem_lmb(lmb) {
+ if (lmb->flags & DRCONF_MEM_RESERVED)
+ continue;
+
if (!(lmb->flags & DRCONF_MEM_ASSIGNED))
lmbs_available++;
@@ -726,13 +712,12 @@ static int dlpar_memory_add_by_count(u32 lmbs_to_add)
* requested LMBs cannot be added.
*/
drmem_mark_lmb_reserved(lmb);
-
- lmbs_added++;
- if (lmbs_added == lmbs_to_add)
+ lmbs_reserved++;
+ if (lmbs_reserved == lmbs_to_add)
break;
}
- if (lmbs_added != lmbs_to_add) {
+ if (lmbs_reserved != lmbs_to_add) {
pr_err("Memory hot-add failed, removing any added LMBs\n");
for_each_drmem_lmb(lmb) {
@@ -747,6 +732,10 @@ static int dlpar_memory_add_by_count(u32 lmbs_to_add)
dlpar_release_drc(lmb->drc_index);
drmem_remove_lmb_reservation(lmb);
+ lmbs_reserved--;
+
+ if (lmbs_reserved == 0)
+ break;
}
rc = -EINVAL;
} else {
@@ -754,9 +743,13 @@ static int dlpar_memory_add_by_count(u32 lmbs_to_add)
if (!drmem_lmb_reserved(lmb))
continue;
- pr_info("Memory at %llx (drc index %x) was hot-added\n",
- lmb->base_addr, lmb->drc_index);
+ pr_debug("Memory at %llx (drc index %x) was hot-added\n",
+ lmb->base_addr, lmb->drc_index);
drmem_remove_lmb_reservation(lmb);
+ lmbs_reserved--;
+
+ if (lmbs_reserved == 0)
+ break;
}
rc = 0;
}
@@ -801,7 +794,6 @@ static int dlpar_memory_add_by_index(u32 drc_index)
static int dlpar_memory_add_by_ic(u32 lmbs_to_add, u32 drc_index)
{
struct drmem_lmb *lmb, *start_lmb, *end_lmb;
- int lmbs_available = 0;
int rc;
pr_info("Attempting to hot-add %u LMB(s) at index %x\n",
@@ -816,15 +808,14 @@ static int dlpar_memory_add_by_ic(u32 lmbs_to_add, u32 drc_index)
/* Validate that the LMBs in this range are not reserved */
for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
- if (lmb->flags & DRCONF_MEM_RESERVED)
- break;
-
- lmbs_available++;
+ /* Fail immediately if the whole range can't be hot-added */
+ if (lmb->flags & DRCONF_MEM_RESERVED) {
+ pr_err("Memory at %llx (drc index %x) is reserved\n",
+ lmb->base_addr, lmb->drc_index);
+ return -EINVAL;
+ }
}
- if (lmbs_available < lmbs_to_add)
- return -EINVAL;
-
for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
if (lmb->flags & DRCONF_MEM_ASSIGNED)
continue;
@@ -923,21 +914,14 @@ int dlpar_memory(struct pseries_hp_errorlog *hp_elog)
}
break;
- case PSERIES_HP_ELOG_ACTION_READD:
- drc_index = hp_elog->_drc_u.drc_index;
- rc = dlpar_memory_readd_by_index(drc_index);
- break;
default:
pr_err("Invalid action (%d) specified\n", hp_elog->action);
rc = -EINVAL;
break;
}
- if (!rc) {
- rtas_hp_event = true;
+ if (!rc)
rc = drmem_update_dt();
- rtas_hp_event = false;
- }
unlock_device_hotplug();
return rc;
@@ -945,10 +929,11 @@ int dlpar_memory(struct pseries_hp_errorlog *hp_elog)
static int pseries_add_mem_node(struct device_node *np)
{
- const __be32 *regs;
+ const __be32 *prop;
unsigned long base;
- unsigned int lmb_size;
+ unsigned long lmb_size;
int ret = -EINVAL;
+ int addr_cells, size_cells;
/*
* Check to see if we are actually adding memory
@@ -959,12 +944,18 @@ static int pseries_add_mem_node(struct device_node *np)
/*
* Find the base and size of the memblock
*/
- regs = of_get_property(np, "reg", NULL);
- if (!regs)
+ prop = of_get_property(np, "reg", NULL);
+ if (!prop)
return ret;
- base = be64_to_cpu(*(unsigned long *)regs);
- lmb_size = be32_to_cpu(regs[3]);
+ addr_cells = of_n_addr_cells(np);
+ size_cells = of_n_size_cells(np);
+ /*
+ * "reg" property represents (addr,size) tuple.
+ */
+ base = of_read_number(prop, addr_cells);
+ prop += addr_cells;
+ lmb_size = of_read_number(prop, size_cells);
/*
* Update memory region to represent the memory add
@@ -973,60 +964,6 @@ static int pseries_add_mem_node(struct device_node *np)
return (ret < 0) ? -EINVAL : 0;
}
-static int pseries_update_drconf_memory(struct of_reconfig_data *pr)
-{
- struct of_drconf_cell_v1 *new_drmem, *old_drmem;
- unsigned long memblock_size;
- u32 entries;
- __be32 *p;
- int i, rc = -EINVAL;
-
- if (rtas_hp_event)
- return 0;
-
- memblock_size = pseries_memory_block_size();
- if (!memblock_size)
- return -EINVAL;
-
- if (!pr->old_prop)
- return 0;
-
- p = (__be32 *) pr->old_prop->value;
- if (!p)
- return -EINVAL;
-
- /* The first int of the property is the number of lmb's described
- * by the property. This is followed by an array of of_drconf_cell
- * entries. Get the number of entries and skip to the array of
- * of_drconf_cell's.
- */
- entries = be32_to_cpu(*p++);
- old_drmem = (struct of_drconf_cell_v1 *)p;
-
- p = (__be32 *)pr->prop->value;
- p++;
- new_drmem = (struct of_drconf_cell_v1 *)p;
-
- for (i = 0; i < entries; i++) {
- if ((be32_to_cpu(old_drmem[i].flags) & DRCONF_MEM_ASSIGNED) &&
- (!(be32_to_cpu(new_drmem[i].flags) & DRCONF_MEM_ASSIGNED))) {
- rc = pseries_remove_memblock(
- be64_to_cpu(old_drmem[i].base_addr),
- memblock_size);
- break;
- } else if ((!(be32_to_cpu(old_drmem[i].flags) &
- DRCONF_MEM_ASSIGNED)) &&
- (be32_to_cpu(new_drmem[i].flags) &
- DRCONF_MEM_ASSIGNED)) {
- rc = memblock_add(be64_to_cpu(old_drmem[i].base_addr),
- memblock_size);
- rc = (rc < 0) ? -EINVAL : 0;
- break;
- }
- }
- return rc;
-}
-
static int pseries_memory_notifier(struct notifier_block *nb,
unsigned long action, void *data)
{
@@ -1041,9 +978,9 @@ static int pseries_memory_notifier(struct notifier_block *nb,
err = pseries_remove_mem_node(rd->dn);
break;
case OF_RECONFIG_UPDATE_PROPERTY:
- if (!strcmp(rd->prop->name, "ibm,dynamic-memory"))
- err = pseries_update_drconf_memory(rd);
- break;
+ if (!strcmp(rd->dn->name,
+ "ibm,dynamic-reconfiguration-memory"))
+ drmem_update_lmbs(rd->prop);
}
return notifier_from_errno(err);
}
diff --git a/arch/powerpc/platforms/pseries/hvCall.S b/arch/powerpc/platforms/pseries/hvCall.S
index 2136e42833af..762eb15d3bd4 100644
--- a/arch/powerpc/platforms/pseries/hvCall.S
+++ b/arch/powerpc/platforms/pseries/hvCall.S
@@ -16,7 +16,7 @@
#ifdef CONFIG_TRACEPOINTS
#ifndef CONFIG_JUMP_LABEL
- .section ".toc","aw"
+ .data
.globl hcall_tracepoint_refcount
hcall_tracepoint_refcount:
@@ -88,7 +88,7 @@ hcall_tracepoint_refcount:
BEGIN_FTR_SECTION; \
b 1f; \
END_FTR_SECTION(0, 1); \
- ld r12,hcall_tracepoint_refcount@toc(r2); \
+ LOAD_REG_ADDR(r12, hcall_tracepoint_refcount) ; \
std r12,32(r1); \
cmpdi r12,0; \
bne- LABEL; \
@@ -102,6 +102,20 @@ END_FTR_SECTION(0, 1); \
#define HCALL_BRANCH(LABEL)
#endif
+_GLOBAL_TOC(plpar_hcall_norets_notrace)
+ HMT_MEDIUM
+
+ mfcr r0
+ stw r0,8(r1)
+ HVSC /* invoke the hypervisor */
+
+ li r4,0
+ stb r4,PACASRR_VALID(r13)
+
+ lwz r0,8(r1)
+ mtcrf 0xff,r0
+ blr /* return r3 = status */
+
_GLOBAL_TOC(plpar_hcall_norets)
HMT_MEDIUM
@@ -110,6 +124,9 @@ _GLOBAL_TOC(plpar_hcall_norets)
HCALL_BRANCH(plpar_hcall_norets_trace)
HVSC /* invoke the hypervisor */
+ li r4,0
+ stb r4,PACASRR_VALID(r13)
+
lwz r0,8(r1)
mtcrf 0xff,r0
blr /* return r3 = status */
@@ -119,6 +136,10 @@ plpar_hcall_norets_trace:
HCALL_INST_PRECALL(R4)
HVSC
HCALL_INST_POSTCALL_NORETS
+
+ li r4,0
+ stb r4,PACASRR_VALID(r13)
+
lwz r0,8(r1)
mtcrf 0xff,r0
blr
@@ -149,6 +170,9 @@ _GLOBAL_TOC(plpar_hcall)
std r6, 16(r12)
std r7, 24(r12)
+ li r4,0
+ stb r4,PACASRR_VALID(r13)
+
lwz r0,8(r1)
mtcrf 0xff,r0
@@ -178,6 +202,9 @@ plpar_hcall_trace:
HCALL_INST_POSTCALL(r12)
+ li r4,0
+ stb r4,PACASRR_VALID(r13)
+
lwz r0,8(r1)
mtcrf 0xff,r0
@@ -213,6 +240,9 @@ _GLOBAL(plpar_hcall_raw)
std r6, 16(r12)
std r7, 24(r12)
+ li r4,0
+ stb r4,PACASRR_VALID(r13)
+
lwz r0,8(r1)
mtcrf 0xff,r0
@@ -252,6 +282,9 @@ _GLOBAL_TOC(plpar_hcall9)
std r11,56(r12)
std r0, 64(r12)
+ li r4,0
+ stb r4,PACASRR_VALID(r13)
+
lwz r0,8(r1)
mtcrf 0xff,r0
@@ -290,6 +323,9 @@ plpar_hcall9_trace:
HCALL_INST_POSTCALL(r12)
+ li r4,0
+ stb r4,PACASRR_VALID(r13)
+
lwz r0,8(r1)
mtcrf 0xff,r0
@@ -329,6 +365,9 @@ _GLOBAL(plpar_hcall9_raw)
std r11,56(r12)
std r0, 64(r12)
+ li r4,0
+ stb r4,PACASRR_VALID(r13)
+
lwz r0,8(r1)
mtcrf 0xff,r0
diff --git a/arch/powerpc/platforms/pseries/hvCall_inst.c b/arch/powerpc/platforms/pseries/hvCall_inst.c
index c40c62ec432e..3a50612a78db 100644
--- a/arch/powerpc/platforms/pseries/hvCall_inst.c
+++ b/arch/powerpc/platforms/pseries/hvCall_inst.c
@@ -26,7 +26,7 @@ struct hcall_stats {
};
#define HCALL_STAT_ARRAY_SIZE ((MAX_HCALL_OPCODE >> 2) + 1)
-DEFINE_PER_CPU(struct hcall_stats[HCALL_STAT_ARRAY_SIZE], hcall_stats);
+static DEFINE_PER_CPU(struct hcall_stats[HCALL_STAT_ARRAY_SIZE], hcall_stats);
/*
* Routines for displaying the statistics in debugfs
@@ -70,31 +70,14 @@ static int hc_show(struct seq_file *m, void *p)
return 0;
}
-static const struct seq_operations hcall_inst_seq_ops = {
+static const struct seq_operations hcall_inst_sops = {
.start = hc_start,
.next = hc_next,
.stop = hc_stop,
.show = hc_show
};
-static int hcall_inst_seq_open(struct inode *inode, struct file *file)
-{
- int rc;
- struct seq_file *seq;
-
- rc = seq_open(file, &hcall_inst_seq_ops);
- seq = file->private_data;
- seq->private = file_inode(file)->i_private;
-
- return rc;
-}
-
-static const struct file_operations hcall_inst_seq_fops = {
- .open = hcall_inst_seq_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release,
-};
+DEFINE_SEQ_ATTRIBUTE(hcall_inst);
#define HCALL_ROOT_DIR "hcall_inst"
#define CPU_NAME_BUF_SIZE 32
@@ -149,7 +132,7 @@ static int __init hcall_inst_init(void)
snprintf(cpu_name_buf, CPU_NAME_BUF_SIZE, "cpu%d", cpu);
debugfs_create_file(cpu_name_buf, 0444, hcall_root,
per_cpu(hcall_stats, cpu),
- &hcall_inst_seq_fops);
+ &hcall_inst_fops);
}
return 0;
diff --git a/arch/powerpc/platforms/pseries/hvcserver.c b/arch/powerpc/platforms/pseries/hvcserver.c
index 267139b13530..d48c9c7ce10f 100644
--- a/arch/powerpc/platforms/pseries/hvcserver.c
+++ b/arch/powerpc/platforms/pseries/hvcserver.c
@@ -45,7 +45,7 @@ static int hvcs_convert(long to_convert)
case H_LONG_BUSY_ORDER_10_SEC:
case H_LONG_BUSY_ORDER_100_SEC:
return -EBUSY;
- case H_FUNCTION: /* fall through */
+ case H_FUNCTION:
default:
return -EPERM;
}
@@ -176,7 +176,7 @@ int hvcs_get_partner_info(uint32_t unit_address, struct list_head *head,
= (unsigned int)last_p_partition_ID;
/* copy the Null-term char too */
- strlcpy(&next_partner_info->location_code[0],
+ strscpy(&next_partner_info->location_code[0],
(char *)&pi_buff[2],
sizeof(next_partner_info->location_code));
diff --git a/arch/powerpc/platforms/pseries/ibmebus.c b/arch/powerpc/platforms/pseries/ibmebus.c
index b91eb0929ed1..a870cada7acd 100644
--- a/arch/powerpc/platforms/pseries/ibmebus.c
+++ b/arch/powerpc/platforms/pseries/ibmebus.c
@@ -40,13 +40,15 @@
#include <linux/export.h>
#include <linux/console.h>
#include <linux/kobject.h>
-#include <linux/dma-mapping.h>
+#include <linux/dma-map-ops.h>
#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/of_platform.h>
#include <asm/ibmebus.h>
+#include <asm/machdep.h>
static struct device ibmebus_bus_device = { /* fake "parent" device */
.init_name = "ibmebus",
@@ -150,7 +152,11 @@ static const struct dma_map_ops ibmebus_dma_ops = {
static int ibmebus_match_path(struct device *dev, const void *data)
{
struct device_node *dn = to_platform_device(dev)->dev.of_node;
- return (of_find_node_by_path(data) == dn);
+ struct device_node *tn = of_find_node_by_path(data);
+
+ of_node_put(tn);
+
+ return (tn == dn);
}
static int ibmebus_match_node(struct device *dev, const void *data)
@@ -354,24 +360,23 @@ static int ibmebus_bus_device_probe(struct device *dev)
if (!drv->probe)
return error;
- of_dev_get(of_dev);
+ get_device(dev);
if (of_driver_match_device(dev, dev->driver))
error = drv->probe(of_dev);
if (error)
- of_dev_put(of_dev);
+ put_device(dev);
return error;
}
-static int ibmebus_bus_device_remove(struct device *dev)
+static void ibmebus_bus_device_remove(struct device *dev)
{
struct platform_device *of_dev = to_platform_device(dev);
struct platform_driver *drv = to_platform_driver(dev->driver);
if (dev->driver && drv->remove)
drv->remove(of_dev);
- return 0;
}
static void ibmebus_bus_device_shutdown(struct device *dev)
@@ -464,4 +469,4 @@ static int __init ibmebus_bus_init(void)
return 0;
}
-postcore_initcall(ibmebus_bus_init);
+machine_postcore_initcall(pseries, ibmebus_bus_init);
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index 2e0a8eab5588..561adac69022 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -39,28 +39,45 @@
#include "pseries.h"
-static struct iommu_table_group *iommu_pseries_alloc_group(int node)
+enum {
+ DDW_QUERY_PE_DMA_WIN = 0,
+ DDW_CREATE_PE_DMA_WIN = 1,
+ DDW_REMOVE_PE_DMA_WIN = 2,
+
+ DDW_APPLICABLE_SIZE
+};
+
+enum {
+ DDW_EXT_SIZE = 0,
+ DDW_EXT_RESET_DMA_WIN = 1,
+ DDW_EXT_QUERY_OUT_SIZE = 2
+};
+
+static struct iommu_table *iommu_pseries_alloc_table(int node)
{
- struct iommu_table_group *table_group;
struct iommu_table *tbl;
- table_group = kzalloc_node(sizeof(struct iommu_table_group), GFP_KERNEL,
- node);
- if (!table_group)
- return NULL;
-
tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL, node);
if (!tbl)
- goto free_group;
+ return NULL;
INIT_LIST_HEAD_RCU(&tbl->it_group_list);
kref_init(&tbl->it_kref);
+ return tbl;
+}
- table_group->tables[0] = tbl;
+static struct iommu_table_group *iommu_pseries_alloc_group(int node)
+{
+ struct iommu_table_group *table_group;
+
+ table_group = kzalloc_node(sizeof(*table_group), GFP_KERNEL, node);
+ if (!table_group)
+ return NULL;
- return table_group;
+ table_group->tables[0] = iommu_pseries_alloc_table(node);
+ if (table_group->tables[0])
+ return table_group;
-free_group:
kfree(table_group);
return NULL;
}
@@ -93,6 +110,8 @@ static int tce_build_pSeries(struct iommu_table *tbl, long index,
u64 proto_tce;
__be64 *tcep;
u64 rpn;
+ const unsigned long tceshift = tbl->it_page_shift;
+ const unsigned long pagesize = IOMMU_PAGE_SIZE(tbl);
proto_tce = TCE_PCI_READ; // Read allowed
@@ -103,10 +122,10 @@ static int tce_build_pSeries(struct iommu_table *tbl, long index,
while (npages--) {
/* can't move this out since we might cross MEMBLOCK boundary */
- rpn = __pa(uaddr) >> TCE_SHIFT;
- *tcep = cpu_to_be64(proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT);
+ rpn = __pa(uaddr) >> tceshift;
+ *tcep = cpu_to_be64(proto_tce | rpn << tceshift);
- uaddr += TCE_PAGE_SIZE;
+ uaddr += pagesize;
tcep++;
}
return 0;
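tce_build_pSeries() now derives the TCE from the table's own page shift instead of the hard-coded 4K constants: take the physical address, drop the in-page offset (shift right then left by the page shift), and OR in the read/write permission bits. A small sketch of that encoding, where the permission bit values are illustrative placeholders rather than the architected ones:

#include <stdio.h>
#include <stdint.h>

#define TCE_PCI_READ   0x1ULL   /* placeholder permission bits */
#define TCE_PCI_WRITE  0x2ULL

/* Build one TCE: page-aligned real address plus access permissions. */
static uint64_t build_tce(uint64_t pa, unsigned int page_shift, int writable)
{
	uint64_t proto = TCE_PCI_READ | (writable ? TCE_PCI_WRITE : 0);
	uint64_t rpn = pa >> page_shift;        /* real page number */

	return proto | (rpn << page_shift);     /* low bits are free for flags */
}

int main(void)
{
	/* 64K IOMMU pages: the low 16 bits of the address are dropped. */
	uint64_t tce = build_tce(0x123456789ULL, 16, 1);

	printf("tce = 0x%llx\n", (unsigned long long)tce);  /* 0x123450003 */
	return 0;
}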
@@ -132,7 +151,7 @@ static unsigned long tce_get_pseries(struct iommu_table *tbl, long index)
return be64_to_cpu(*tcep);
}
-static void tce_free_pSeriesLP(unsigned long liobn, long, long);
+static void tce_free_pSeriesLP(unsigned long liobn, long, long, long);
static void tce_freemulti_pSeriesLP(struct iommu_table*, long, long);
static int tce_build_pSeriesLP(unsigned long liobn, long tcenum, long tceshift,
@@ -152,12 +171,12 @@ static int tce_build_pSeriesLP(unsigned long liobn, long tcenum, long tceshift,
proto_tce |= TCE_PCI_WRITE;
while (npages--) {
- tce = proto_tce | (rpn & TCE_RPN_MASK) << tceshift;
+ tce = proto_tce | rpn << tceshift;
rc = plpar_tce_put((u64)liobn, (u64)tcenum << tceshift, tce);
if (unlikely(rc == H_NOT_ENOUGH_RESOURCES)) {
ret = (int)rc;
- tce_free_pSeriesLP(liobn, tcenum_start,
+ tce_free_pSeriesLP(liobn, tcenum_start, tceshift,
(npages_start - (npages + 1)));
break;
}
@@ -191,10 +210,11 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
long tcenum_start = tcenum, npages_start = npages;
int ret = 0;
unsigned long flags;
+ const unsigned long tceshift = tbl->it_page_shift;
if ((npages == 1) || !firmware_has_feature(FW_FEATURE_PUT_TCE_IND)) {
return tce_build_pSeriesLP(tbl->it_index, tcenum,
- tbl->it_page_shift, npages, uaddr,
+ tceshift, npages, uaddr,
direction, attrs);
}
@@ -211,13 +231,13 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
if (!tcep) {
local_irq_restore(flags);
return tce_build_pSeriesLP(tbl->it_index, tcenum,
- tbl->it_page_shift,
+ tceshift,
npages, uaddr, direction, attrs);
}
__this_cpu_write(tce_page, tcep);
}
- rpn = __pa(uaddr) >> TCE_SHIFT;
+ rpn = __pa(uaddr) >> tceshift;
proto_tce = TCE_PCI_READ;
if (direction != DMA_TO_DEVICE)
proto_tce |= TCE_PCI_WRITE;
@@ -231,12 +251,12 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
limit = min_t(long, npages, 4096/TCE_ENTRY_SIZE);
for (l = 0; l < limit; l++) {
- tcep[l] = cpu_to_be64(proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT);
+ tcep[l] = cpu_to_be64(proto_tce | rpn << tceshift);
rpn++;
}
rc = plpar_tce_put_indirect((u64)tbl->it_index,
- (u64)tcenum << 12,
+ (u64)tcenum << tceshift,
(u64)__pa(tcep),
limit);
@@ -263,12 +283,13 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
return ret;
}
-static void tce_free_pSeriesLP(unsigned long liobn, long tcenum, long npages)
+static void tce_free_pSeriesLP(unsigned long liobn, long tcenum, long tceshift,
+ long npages)
{
u64 rc;
while (npages--) {
- rc = plpar_tce_put((u64)liobn, (u64)tcenum << 12, 0);
+ rc = plpar_tce_put((u64)liobn, (u64)tcenum << tceshift, 0);
if (rc && printk_ratelimit()) {
printk("tce_free_pSeriesLP: plpar_tce_put failed. rc=%lld\n", rc);
@@ -287,9 +308,11 @@ static void tce_freemulti_pSeriesLP(struct iommu_table *tbl, long tcenum, long n
u64 rc;
if (!firmware_has_feature(FW_FEATURE_STUFF_TCE))
- return tce_free_pSeriesLP(tbl->it_index, tcenum, npages);
+ return tce_free_pSeriesLP(tbl->it_index, tcenum,
+ tbl->it_page_shift, npages);
- rc = plpar_tce_stuff((u64)tbl->it_index, (u64)tcenum << 12, 0, npages);
+ rc = plpar_tce_stuff((u64)tbl->it_index,
+ (u64)tcenum << tbl->it_page_shift, 0, npages);
if (rc && printk_ratelimit()) {
printk("tce_freemulti_pSeriesLP: plpar_tce_stuff failed\n");
@@ -305,7 +328,8 @@ static unsigned long tce_get_pSeriesLP(struct iommu_table *tbl, long tcenum)
u64 rc;
unsigned long tce_ret;
- rc = plpar_tce_get((u64)tbl->it_index, (u64)tcenum << 12, &tce_ret);
+ rc = plpar_tce_get((u64)tbl->it_index,
+ (u64)tcenum << tbl->it_page_shift, &tce_ret);
if (rc && printk_ratelimit()) {
printk("tce_get_pSeriesLP: plpar_tce_get failed. rc=%lld\n", rc);
@@ -325,7 +349,7 @@ struct dynamic_dma_window_prop {
__be32 window_shift; /* ilog2(tce_window_size) */
};
-struct direct_window {
+struct dma_win {
struct device_node *device;
const struct dynamic_dma_window_prop *prop;
struct list_head list;
@@ -334,7 +358,7 @@ struct direct_window {
/* Dynamic DMA Window support */
struct ddw_query_response {
u32 windows_available;
- u32 largest_available_block;
+ u64 largest_available_block;
u32 page_size;
u32 migration_capable;
};
@@ -345,12 +369,13 @@ struct ddw_create_response {
u32 addr_lo;
};
-static LIST_HEAD(direct_window_list);
+static LIST_HEAD(dma_win_list);
/* prevents races between memory on/offline and window creation */
-static DEFINE_SPINLOCK(direct_window_list_lock);
+static DEFINE_SPINLOCK(dma_win_list_lock);
/* protects initializing window twice for same device */
-static DEFINE_MUTEX(direct_window_init_mutex);
+static DEFINE_MUTEX(dma_win_init_mutex);
#define DIRECT64_PROPNAME "linux,direct64-ddr-window-info"
+#define DMA64_PROPNAME "linux,dma64-ddr-window-info"
static int tce_clearrange_multi_pSeriesLP(unsigned long start_pfn,
unsigned long num_pfn, const void *arg)
@@ -477,6 +502,24 @@ static int tce_setrange_multi_pSeriesLP_walk(unsigned long start_pfn,
return tce_setrange_multi_pSeriesLP(start_pfn, num_pfn, arg);
}
+static void iommu_table_setparms_common(struct iommu_table *tbl, unsigned long busno,
+ unsigned long liobn, unsigned long win_addr,
+ unsigned long window_size, unsigned long page_shift,
+ void *base, struct iommu_table_ops *table_ops)
+{
+ tbl->it_busno = busno;
+ tbl->it_index = liobn;
+ tbl->it_offset = win_addr >> page_shift;
+ tbl->it_size = window_size >> page_shift;
+ tbl->it_page_shift = page_shift;
+ tbl->it_base = (unsigned long)base;
+ tbl->it_blocksize = 16;
+ tbl->it_type = TCE_PCI;
+ tbl->it_ops = table_ops;
+}
+
+struct iommu_table_ops iommu_table_pseries_ops;
+
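
The new iommu_table_setparms_common() helper reduces the table geometry to shift arithmetic: the window start and size are converted to TCE-entry units by the IOMMU page shift. A minimal standalone sketch of just that arithmetic (plain C; the struct below is a toy stand-in for struct iommu_table, not the kernel type):

    #include <stdio.h>

    /* Toy stand-in for the geometry fields set by iommu_table_setparms_common(). */
    struct toy_iommu_table {
            unsigned long it_offset;     /* first TCE index covered by the window */
            unsigned long it_size;       /* number of TCE entries in the window */
            unsigned long it_page_shift; /* log2 of the IOMMU page size */
    };

    static void toy_setparms(struct toy_iommu_table *tbl, unsigned long win_addr,
                             unsigned long window_size, unsigned long page_shift)
    {
            tbl->it_page_shift = page_shift;
            tbl->it_offset = win_addr >> page_shift;   /* in units of TCE entries */
            tbl->it_size = window_size >> page_shift;  /* in units of TCE entries */
    }

    int main(void)
    {
            struct toy_iommu_table tbl;

            /* Example: a 2 GB window at bus address 0 with 4 KB IOMMU pages. */
            toy_setparms(&tbl, 0, 0x80000000UL, 12);
            printf("it_offset=%lu it_size=%lu\n", tbl.it_offset, tbl.it_size);
            return 0;
    }
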
static void iommu_table_setparms(struct pci_controller *phb,
struct device_node *dn,
struct iommu_table *tbl)
@@ -485,8 +528,13 @@ static void iommu_table_setparms(struct pci_controller *phb,
const unsigned long *basep;
const u32 *sizep;
- node = phb->dn;
+ /* Test if we are going over 2GB of DMA space */
+ if (phb->dma_window_base_cur + phb->dma_window_size > SZ_2G) {
+ udbg_printf("PCI_DMA: Unexpected number of IOAs under this PHB.\n");
+ panic("PCI_DMA: Unexpected number of IOAs under this PHB.\n");
+ }
+ node = phb->dn;
basep = of_get_property(node, "linux,tce-base", NULL);
sizep = of_get_property(node, "linux,tce-size", NULL);
if (basep == NULL || sizep == NULL) {
@@ -495,33 +543,18 @@ static void iommu_table_setparms(struct pci_controller *phb,
return;
}
- tbl->it_base = (unsigned long)__va(*basep);
+ iommu_table_setparms_common(tbl, phb->bus->number, 0, phb->dma_window_base_cur,
+ phb->dma_window_size, IOMMU_PAGE_SHIFT_4K,
+ __va(*basep), &iommu_table_pseries_ops);
if (!is_kdump_kernel())
memset((void *)tbl->it_base, 0, *sizep);
- tbl->it_busno = phb->bus->number;
- tbl->it_page_shift = IOMMU_PAGE_SHIFT_4K;
-
- /* Units of tce entries */
- tbl->it_offset = phb->dma_window_base_cur >> tbl->it_page_shift;
-
- /* Test if we are going over 2GB of DMA space */
- if (phb->dma_window_base_cur + phb->dma_window_size > 0x80000000ul) {
- udbg_printf("PCI_DMA: Unexpected number of IOAs under this PHB.\n");
- panic("PCI_DMA: Unexpected number of IOAs under this PHB.\n");
- }
-
phb->dma_window_base_cur += phb->dma_window_size;
-
- /* Set the tce table size - measured in entries */
- tbl->it_size = phb->dma_window_size >> tbl->it_page_shift;
-
- tbl->it_index = 0;
- tbl->it_blocksize = 16;
- tbl->it_type = TCE_PCI;
}
+struct iommu_table_ops iommu_table_lpar_multi_ops;
+
/*
* iommu_table_setparms_lpar
*
@@ -533,17 +566,13 @@ static void iommu_table_setparms_lpar(struct pci_controller *phb,
struct iommu_table_group *table_group,
const __be32 *dma_window)
{
- unsigned long offset, size;
+ unsigned long offset, size, liobn;
- of_parse_dma_window(dn, dma_window, &tbl->it_index, &offset, &size);
+ of_parse_dma_window(dn, dma_window, &liobn, &offset, &size);
+
+ iommu_table_setparms_common(tbl, phb->bus->number, liobn, offset, size, IOMMU_PAGE_SHIFT_4K, NULL,
+ &iommu_table_lpar_multi_ops);
- tbl->it_busno = phb->bus->number;
- tbl->it_page_shift = IOMMU_PAGE_SHIFT_4K;
- tbl->it_base = 0;
- tbl->it_blocksize = 16;
- tbl->it_type = TCE_PCI;
- tbl->it_offset = offset >> tbl->it_page_shift;
- tbl->it_size = size >> tbl->it_page_shift;
table_group->tce32_start = offset;
table_group->tce32_size = size;
@@ -623,8 +652,9 @@ static void pci_dma_bus_setup_pSeries(struct pci_bus *bus)
tbl = pci->table_group->tables[0];
iommu_table_setparms(pci->phb, dn, tbl);
- tbl->it_ops = &iommu_table_pseries_ops;
- iommu_init_table(tbl, pci->phb->node, 0, 0);
+
+ if (!iommu_init_table(tbl, pci->phb->node, 0, 0))
+ panic("Failed to initialize iommu table");
/* Divide the rest (1.75GB) among the children */
pci->phb->dma_window_size = 0x80000000ul;
@@ -636,8 +666,7 @@ static void pci_dma_bus_setup_pSeries(struct pci_bus *bus)
#ifdef CONFIG_IOMMU_API
static int tce_exchange_pseries(struct iommu_table *tbl, long index, unsigned
- long *tce, enum dma_data_direction *direction,
- bool realmode)
+ long *tce, enum dma_data_direction *direction)
{
long rc;
unsigned long ioba = (unsigned long) index << tbl->it_page_shift;
@@ -671,6 +700,33 @@ struct iommu_table_ops iommu_table_lpar_multi_ops = {
.get = tce_get_pSeriesLP
};
+/*
+ * Find nearest ibm,dma-window (default DMA window) or direct DMA window or
+ * dynamic 64bit DMA window, walking up the device tree.
+ */
+static struct device_node *pci_dma_find(struct device_node *dn,
+ const __be32 **dma_window)
+{
+ const __be32 *dw = NULL;
+
+ for ( ; dn && PCI_DN(dn); dn = dn->parent) {
+ dw = of_get_property(dn, "ibm,dma-window", NULL);
+ if (dw) {
+ if (dma_window)
+ *dma_window = dw;
+ return dn;
+ }
+ dw = of_get_property(dn, DIRECT64_PROPNAME, NULL);
+ if (dw)
+ return dn;
+ dw = of_get_property(dn, DMA64_PROPNAME, NULL);
+ if (dw)
+ return dn;
+ }
+
+ return NULL;
+}
+
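
pci_dma_find() stops at the first ancestor that carries any of the three window properties, checking the default "ibm,dma-window" before the two DDW property names at each level. A hedged user-space model of that walk (toy node type and property list; the real code additionally requires PCI_DN(dn) to be valid at every level):

    #include <stdio.h>
    #include <string.h>

    struct toy_node {
            const char *name;
            const char *props[4];        /* NULL-terminated property-name list */
            struct toy_node *parent;
    };

    static int has_prop(const struct toy_node *n, const char *prop)
    {
            for (int i = 0; n->props[i]; i++)
                    if (!strcmp(n->props[i], prop))
                            return 1;
            return 0;
    }

    /* Same lookup order as pci_dma_find(): default window first, then DDW names. */
    static struct toy_node *toy_dma_find(struct toy_node *dn)
    {
            for (; dn; dn = dn->parent)
                    if (has_prop(dn, "ibm,dma-window") ||
                        has_prop(dn, "linux,direct64-ddr-window-info") ||
                        has_prop(dn, "linux,dma64-ddr-window-info"))
                            return dn;
            return NULL;
    }

    int main(void)
    {
            struct toy_node phb = { "phb", { "ibm,dma-window", NULL }, NULL };
            struct toy_node dev = { "dev", { NULL }, &phb };

            printf("window property found on: %s\n", toy_dma_find(&dev)->name);
            return 0;
    }
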
static void pci_dma_bus_setup_pSeriesLP(struct pci_bus *bus)
{
struct iommu_table *tbl;
@@ -683,17 +739,10 @@ static void pci_dma_bus_setup_pSeriesLP(struct pci_bus *bus)
pr_debug("pci_dma_bus_setup_pSeriesLP: setting up bus %pOF\n",
dn);
- /* Find nearest ibm,dma-window, walking up the device tree */
- for (pdn = dn; pdn != NULL; pdn = pdn->parent) {
- dma_window = of_get_property(pdn, "ibm,dma-window", NULL);
- if (dma_window != NULL)
- break;
- }
+ pdn = pci_dma_find(dn, &dma_window);
- if (dma_window == NULL) {
+ if (dma_window == NULL)
pr_debug(" no ibm,dma-window property !\n");
- return;
- }
ppci = PCI_DN(pdn);
@@ -703,10 +752,13 @@ static void pci_dma_bus_setup_pSeriesLP(struct pci_bus *bus)
if (!ppci->table_group) {
ppci->table_group = iommu_pseries_alloc_group(ppci->phb->node);
tbl = ppci->table_group->tables[0];
- iommu_table_setparms_lpar(ppci->phb, pdn, tbl,
- ppci->table_group, dma_window);
- tbl->it_ops = &iommu_table_lpar_multi_ops;
- iommu_init_table(tbl, ppci->phb->node, 0, 0);
+ if (dma_window) {
+ iommu_table_setparms_lpar(ppci->phb, pdn, tbl,
+ ppci->table_group, dma_window);
+
+ if (!iommu_init_table(tbl, ppci->phb->node, 0, 0))
+ panic("Failed to initialize iommu table");
+ }
iommu_register_group(ppci->table_group,
pci_domain_nr(bus), 0);
pr_debug(" created table: %p\n", ppci->table_group);
@@ -734,8 +786,10 @@ static void pci_dma_dev_setup_pSeries(struct pci_dev *dev)
PCI_DN(dn)->table_group = iommu_pseries_alloc_group(phb->node);
tbl = PCI_DN(dn)->table_group->tables[0];
iommu_table_setparms(phb, dn, tbl);
- tbl->it_ops = &iommu_table_pseries_ops;
- iommu_init_table(tbl, phb->node, 0, 0);
+
+ if (!iommu_init_table(tbl, phb->node, 0, 0))
+ panic("Failed to initialize iommu table");
+
set_iommu_table_base(&dev->dev, tbl);
return;
}
@@ -767,28 +821,10 @@ static int __init disable_ddw_setup(char *str)
early_param("disable_ddw", disable_ddw_setup);
-static void remove_ddw(struct device_node *np, bool remove_prop)
+static void clean_dma_window(struct device_node *np, struct dynamic_dma_window_prop *dwp)
{
- struct dynamic_dma_window_prop *dwp;
- struct property *win64;
- u32 ddw_avail[3];
- u64 liobn;
- int ret = 0;
-
- ret = of_property_read_u32_array(np, "ibm,ddw-applicable",
- &ddw_avail[0], 3);
-
- win64 = of_find_property(np, DIRECT64_PROPNAME, NULL);
- if (!win64)
- return;
-
- if (ret || win64->length < sizeof(*dwp))
- goto delprop;
-
- dwp = win64->value;
- liobn = (u64)be32_to_cpu(dwp->liobn);
+ int ret;
- /* clear the whole window, note the arg is in kernel pages */
ret = tce_clearrange_multi_pSeriesLP(0,
1ULL << (be32_to_cpu(dwp->window_shift) - PAGE_SHIFT), dwp);
if (ret)
@@ -797,86 +833,199 @@ static void remove_ddw(struct device_node *np, bool remove_prop)
else
pr_debug("%pOF successfully cleared tces in window.\n",
np);
+}
+
+/*
+ * Call only if DMA window is clean.
+ */
+static void __remove_dma_window(struct device_node *np, u32 *ddw_avail, u64 liobn)
+{
+ int ret;
- ret = rtas_call(ddw_avail[2], 1, 1, NULL, liobn);
+ ret = rtas_call(ddw_avail[DDW_REMOVE_PE_DMA_WIN], 1, 1, NULL, liobn);
if (ret)
- pr_warn("%pOF: failed to remove direct window: rtas returned "
+ pr_warn("%pOF: failed to remove DMA window: rtas returned "
"%d to ibm,remove-pe-dma-window(%x) %llx\n",
- np, ret, ddw_avail[2], liobn);
+ np, ret, ddw_avail[DDW_REMOVE_PE_DMA_WIN], liobn);
else
- pr_debug("%pOF: successfully removed direct window: rtas returned "
+ pr_debug("%pOF: successfully removed DMA window: rtas returned "
"%d to ibm,remove-pe-dma-window(%x) %llx\n",
- np, ret, ddw_avail[2], liobn);
+ np, ret, ddw_avail[DDW_REMOVE_PE_DMA_WIN], liobn);
+}
+
+static void remove_dma_window(struct device_node *np, u32 *ddw_avail,
+ struct property *win)
+{
+ struct dynamic_dma_window_prop *dwp;
+ u64 liobn;
-delprop:
- if (remove_prop)
- ret = of_remove_property(np, win64);
+ dwp = win->value;
+ liobn = (u64)be32_to_cpu(dwp->liobn);
+
+ clean_dma_window(np, dwp);
+ __remove_dma_window(np, ddw_avail, liobn);
+}
+
+static int remove_ddw(struct device_node *np, bool remove_prop, const char *win_name)
+{
+ struct property *win;
+ u32 ddw_avail[DDW_APPLICABLE_SIZE];
+ int ret = 0;
+
+ win = of_find_property(np, win_name, NULL);
+ if (!win)
+ return -EINVAL;
+
+ ret = of_property_read_u32_array(np, "ibm,ddw-applicable",
+ &ddw_avail[0], DDW_APPLICABLE_SIZE);
if (ret)
- pr_warn("%pOF: failed to remove direct window property: %d\n",
+ return 0;
+
+
+ if (win->length >= sizeof(struct dynamic_dma_window_prop))
+ remove_dma_window(np, ddw_avail, win);
+
+ if (!remove_prop)
+ return 0;
+
+ ret = of_remove_property(np, win);
+ if (ret)
+ pr_warn("%pOF: failed to remove DMA window property: %d\n",
np, ret);
+ return 0;
}
-static u64 find_existing_ddw(struct device_node *pdn)
+static bool find_existing_ddw(struct device_node *pdn, u64 *dma_addr, int *window_shift)
{
- struct direct_window *window;
- const struct dynamic_dma_window_prop *direct64;
- u64 dma_addr = 0;
+ struct dma_win *window;
+ const struct dynamic_dma_window_prop *dma64;
+ bool found = false;
- spin_lock(&direct_window_list_lock);
+ spin_lock(&dma_win_list_lock);
/* check if we already created a window and dupe that config if so */
- list_for_each_entry(window, &direct_window_list, list) {
+ list_for_each_entry(window, &dma_win_list, list) {
if (window->device == pdn) {
- direct64 = window->prop;
- dma_addr = be64_to_cpu(direct64->dma_base);
+ dma64 = window->prop;
+ *dma_addr = be64_to_cpu(dma64->dma_base);
+ *window_shift = be32_to_cpu(dma64->window_shift);
+ found = true;
break;
}
}
- spin_unlock(&direct_window_list_lock);
+ spin_unlock(&dma_win_list_lock);
- return dma_addr;
+ return found;
}
-static int find_existing_ddw_windows(void)
+static struct dma_win *ddw_list_new_entry(struct device_node *pdn,
+ const struct dynamic_dma_window_prop *dma64)
+{
+ struct dma_win *window;
+
+ window = kzalloc(sizeof(*window), GFP_KERNEL);
+ if (!window)
+ return NULL;
+
+ window->device = pdn;
+ window->prop = dma64;
+
+ return window;
+}
+
+static void find_existing_ddw_windows_named(const char *name)
{
int len;
struct device_node *pdn;
- struct direct_window *window;
- const struct dynamic_dma_window_prop *direct64;
-
- if (!firmware_has_feature(FW_FEATURE_LPAR))
- return 0;
+ struct dma_win *window;
+ const struct dynamic_dma_window_prop *dma64;
- for_each_node_with_property(pdn, DIRECT64_PROPNAME) {
- direct64 = of_get_property(pdn, DIRECT64_PROPNAME, &len);
- if (!direct64)
+ for_each_node_with_property(pdn, name) {
+ dma64 = of_get_property(pdn, name, &len);
+ if (!dma64 || len < sizeof(*dma64)) {
+ remove_ddw(pdn, true, name);
continue;
+ }
- window = kzalloc(sizeof(*window), GFP_KERNEL);
- if (!window || len < sizeof(struct dynamic_dma_window_prop)) {
- kfree(window);
- remove_ddw(pdn, true);
- continue;
+ window = ddw_list_new_entry(pdn, dma64);
+ if (!window) {
+ of_node_put(pdn);
+ break;
}
- window->device = pdn;
- window->prop = direct64;
- spin_lock(&direct_window_list_lock);
- list_add(&window->list, &direct_window_list);
- spin_unlock(&direct_window_list_lock);
+ spin_lock(&dma_win_list_lock);
+ list_add(&window->list, &dma_win_list);
+ spin_unlock(&dma_win_list_lock);
}
+}
+
+static int find_existing_ddw_windows(void)
+{
+ if (!firmware_has_feature(FW_FEATURE_LPAR))
+ return 0;
+
+ find_existing_ddw_windows_named(DIRECT64_PROPNAME);
+ find_existing_ddw_windows_named(DMA64_PROPNAME);
return 0;
}
machine_arch_initcall(pseries, find_existing_ddw_windows);
+/**
+ * ddw_read_ext - Get the value of a DDW extension
+ * @np: device node from which the extension value is to be read.
+ * @extnum: index number of the extension.
+ * @value: pointer to return value, modified when extension is available.
+ *
+ * Checks if "ibm,ddw-extensions" exists for this node, and gets the value
+ * at index 'extnum'.
+ * It can also be used just to check that the extension exists, by passing
+ * value == NULL.
+ *
+ * Returns:
+ * 0 if extension successfully read
+ * -EINVAL if the "ibm,ddw-extensions" does not exist,
+ * -ENODATA if "ibm,ddw-extensions" does not have a value, and
+ * -EOVERFLOW if "ibm,ddw-extensions" does not contain this extension.
+ */
+static inline int ddw_read_ext(const struct device_node *np, int extnum,
+ u32 *value)
+{
+ static const char propname[] = "ibm,ddw-extensions";
+ u32 count;
+ int ret;
+
+ ret = of_property_read_u32_index(np, propname, DDW_EXT_SIZE, &count);
+ if (ret)
+ return ret;
+
+ if (count < extnum)
+ return -EOVERFLOW;
+
+ if (!value)
+ value = &count;
+
+ return of_property_read_u32_index(np, propname, extnum, value);
+}
+
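
As ddw_read_ext() reads it, "ibm,ddw-extensions" is a flat array of 32-bit cells whose first cell holds the number of extensions that follow. A standalone sketch of the same bounds check over a plain array; the DDW_EXT_* index values are assumed to mirror the ones used elsewhere in this file, and the real kernel code reads big-endian device-tree cells through of_property_read_u32_index():

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Assumed to match the DDW_EXT_* indexes used by this driver. */
    enum { DDW_EXT_SIZE = 0, DDW_EXT_RESET_DMA_WIN = 1, DDW_EXT_QUERY_OUT_SIZE = 2 };

    static int toy_read_ext(const uint32_t *ext, int extnum, uint32_t *value)
    {
            uint32_t count = ext[DDW_EXT_SIZE];   /* first cell: number of extensions */

            if (count < (uint32_t)extnum)
                    return -EOVERFLOW;            /* property too short for this index */
            if (value)
                    *value = ext[extnum];
            return 0;
    }

    int main(void)
    {
            /* Example property: two extensions follow the count cell. */
            const uint32_t ddw_ext[] = { 2, 0x2001 /* reset token */, 1 /* 6-output query */ };
            uint32_t v;

            if (!toy_read_ext(ddw_ext, DDW_EXT_QUERY_OUT_SIZE, &v))
                    printf("query-out-size extension = %u\n", v);
            return 0;
    }
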
static int query_ddw(struct pci_dev *dev, const u32 *ddw_avail,
- struct ddw_query_response *query)
+ struct ddw_query_response *query,
+ struct device_node *parent)
{
struct device_node *dn;
struct pci_dn *pdn;
- u32 cfg_addr;
+ u32 cfg_addr, ext_query, query_out[5];
u64 buid;
- int ret;
+ int ret, out_sz;
+
+ /*
+ * From LoPAR level 2.8, "ibm,ddw-extensions" index 3 determines how many
+ * output parameters ibm,query-pe-dma-windows will have: either 5 or 6.
+ */
+ ret = ddw_read_ext(parent, DDW_EXT_QUERY_OUT_SIZE, &ext_query);
+ if (!ret && ext_query == 1)
+ out_sz = 6;
+ else
+ out_sz = 5;
/*
* Get the config address and phb buid of the PE window.
@@ -889,11 +1038,30 @@ static int query_ddw(struct pci_dev *dev, const u32 *ddw_avail,
buid = pdn->phb->buid;
cfg_addr = ((pdn->busno << 16) | (pdn->devfn << 8));
- ret = rtas_call(ddw_avail[0], 3, 5, (u32 *)query,
- cfg_addr, BUID_HI(buid), BUID_LO(buid));
- dev_info(&dev->dev, "ibm,query-pe-dma-windows(%x) %x %x %x"
- " returned %d\n", ddw_avail[0], cfg_addr, BUID_HI(buid),
- BUID_LO(buid), ret);
+ ret = rtas_call(ddw_avail[DDW_QUERY_PE_DMA_WIN], 3, out_sz, query_out,
+ cfg_addr, BUID_HI(buid), BUID_LO(buid));
+
+ switch (out_sz) {
+ case 5:
+ query->windows_available = query_out[0];
+ query->largest_available_block = query_out[1];
+ query->page_size = query_out[2];
+ query->migration_capable = query_out[3];
+ break;
+ case 6:
+ query->windows_available = query_out[0];
+ query->largest_available_block = ((u64)query_out[1] << 32) |
+ query_out[2];
+ query->page_size = query_out[3];
+ query->migration_capable = query_out[4];
+ break;
+ }
+
+ dev_info(&dev->dev, "ibm,query-pe-dma-windows(%x) %x %x %x returned %d, lb=%llx ps=%x wn=%d\n",
+ ddw_avail[DDW_QUERY_PE_DMA_WIN], cfg_addr, BUID_HI(buid),
+ BUID_LO(buid), ret, query->largest_available_block,
+ query->page_size, query->windows_available);
+
return ret;
}
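
With the 6-output form of ibm,query-pe-dma-windows, the 64-bit largest_available_block is split across two consecutive 32-bit output cells, high word first. A minimal sketch of both parse paths over a plain output array, with the field layout taken from the switch above:

    #include <stdint.h>
    #include <stdio.h>

    struct toy_query {
            uint32_t windows_available;
            uint64_t largest_available_block;
            uint32_t page_size;
            uint32_t migration_capable;
    };

    static void toy_parse_query(const uint32_t *out, int out_sz, struct toy_query *q)
    {
            q->windows_available = out[0];
            if (out_sz == 6) {
                    /* high word first, then low word */
                    q->largest_available_block = ((uint64_t)out[1] << 32) | out[2];
                    q->page_size = out[3];
                    q->migration_capable = out[4];
            } else {        /* legacy 5-output form: block count fits in 32 bits */
                    q->largest_available_block = out[1];
                    q->page_size = out[2];
                    q->migration_capable = out[3];
            }
    }

    int main(void)
    {
            const uint32_t out6[6] = { 1, 0x2, 0x00000000, 0x40, 1, 0 };
            struct toy_query q;

            toy_parse_query(out6, 6, &q);
            printf("largest block = 0x%llx TCEs\n",
                   (unsigned long long)q.largest_available_block);
            return 0;
    }
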
@@ -920,15 +1088,16 @@ static int create_ddw(struct pci_dev *dev, const u32 *ddw_avail,
do {
/* extra outputs are LIOBN and dma-addr (hi, lo) */
- ret = rtas_call(ddw_avail[1], 5, 4, (u32 *)create,
- cfg_addr, BUID_HI(buid), BUID_LO(buid),
- page_shift, window_shift);
+ ret = rtas_call(ddw_avail[DDW_CREATE_PE_DMA_WIN], 5, 4,
+ (u32 *)create, cfg_addr, BUID_HI(buid),
+ BUID_LO(buid), page_shift, window_shift);
} while (rtas_busy_delay(ret));
dev_info(&dev->dev,
"ibm,create-pe-dma-window(%x) %x %x %x %x %x returned %d "
- "(liobn = 0x%x starting addr = %x %x)\n", ddw_avail[1],
- cfg_addr, BUID_HI(buid), BUID_LO(buid), page_shift,
- window_shift, ret, create->liobn, create->addr_hi, create->addr_lo);
+ "(liobn = 0x%x starting addr = %x %x)\n",
+ ddw_avail[DDW_CREATE_PE_DMA_WIN], cfg_addr, BUID_HI(buid),
+ BUID_LO(buid), page_shift, window_shift, ret, create->liobn,
+ create->addr_hi, create->addr_lo);
return ret;
}
@@ -969,6 +1138,94 @@ static phys_addr_t ddw_memory_hotplug_max(void)
}
/*
+ * Platforms supporting the DDW option starting with LoPAR level 2.7 implement
+ * ibm,ddw-extensions, which carries the rtas token for
+ * ibm,reset-pe-dma-windows.
+ * That rtas-call can be used to restore the default DMA window for the device.
+ */
+static void reset_dma_window(struct pci_dev *dev, struct device_node *par_dn)
+{
+ int ret;
+ u32 cfg_addr, reset_dma_win;
+ u64 buid;
+ struct device_node *dn;
+ struct pci_dn *pdn;
+
+ ret = ddw_read_ext(par_dn, DDW_EXT_RESET_DMA_WIN, &reset_dma_win);
+ if (ret)
+ return;
+
+ dn = pci_device_to_OF_node(dev);
+ pdn = PCI_DN(dn);
+ buid = pdn->phb->buid;
+ cfg_addr = (pdn->busno << 16) | (pdn->devfn << 8);
+
+ ret = rtas_call(reset_dma_win, 3, 1, NULL, cfg_addr, BUID_HI(buid),
+ BUID_LO(buid));
+ if (ret)
+ dev_info(&dev->dev,
+ "ibm,reset-pe-dma-windows(%x) %x %x %x returned %d ",
+ reset_dma_win, cfg_addr, BUID_HI(buid), BUID_LO(buid),
+ ret);
+}
+
+/* Return largest page shift based on "IO Page Sizes" output of ibm,query-pe-dma-window. */
+static int iommu_get_page_shift(u32 query_page_size)
+{
+ /* Supported IO page-sizes according to LoPAR, note that 2M is out of order */
+ const int shift[] = {
+ __builtin_ctzll(SZ_4K), __builtin_ctzll(SZ_64K), __builtin_ctzll(SZ_16M),
+ __builtin_ctzll(SZ_32M), __builtin_ctzll(SZ_64M), __builtin_ctzll(SZ_128M),
+ __builtin_ctzll(SZ_256M), __builtin_ctzll(SZ_16G), __builtin_ctzll(SZ_2M)
+ };
+
+ int i = ARRAY_SIZE(shift) - 1;
+ int ret = 0;
+
+ /*
+ * On LoPAR, ibm,query-pe-dma-window outputs "IO Page Sizes" using a bit field:
+ * - bit 31 means 4k pages are supported,
+ * - bit 30 means 64k pages are supported, and so on.
+ * Larger pagesizes map more memory with the same amount of TCEs, so start probing them.
+ */
+ for (; i >= 0 ; i--) {
+ if (query_page_size & (1 << i))
+ ret = max(ret, shift[i]);
+ }
+
+ return ret;
+}
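
In the u32 value tested above, bit 0 of the mask selects 4 KB pages, bit 1 selects 64 KB, and so on (LoPAR numbers the same bits from the other end, which is why its text says bit 31 means 4 KB), with 2 MB added out of order at bit 8. A standalone check of the same decoding:

    #include <stdio.h>

    /* Page-size shifts in the same slot order as the shift[] table above;
     * 2 MB (shift 21) sits out of order at index 8. */
    static const int toy_shift[] = { 12, 16, 24, 25, 26, 27, 28, 34, 21 };

    /* Pick the largest supported IO page shift from the query mask. */
    static int toy_get_page_shift(unsigned int query_page_size)
    {
            int best = 0;

            for (int i = (int)(sizeof(toy_shift) / sizeof(toy_shift[0])) - 1; i >= 0; i--)
                    if ((query_page_size & (1u << i)) && toy_shift[i] > best)
                            best = toy_shift[i];
            return best;
    }

    int main(void)
    {
            /* Example: 4 KB (bit 0), 64 KB (bit 1) and 16 MB (bit 2) supported. */
            printf("page shift = %d\n", toy_get_page_shift(0x7));   /* expect 24 */
            return 0;
    }
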
+
+static struct property *ddw_property_create(const char *propname, u32 liobn, u64 dma_addr,
+ u32 page_shift, u32 window_shift)
+{
+ struct dynamic_dma_window_prop *ddwprop;
+ struct property *win64;
+
+ win64 = kzalloc(sizeof(*win64), GFP_KERNEL);
+ if (!win64)
+ return NULL;
+
+ win64->name = kstrdup(propname, GFP_KERNEL);
+ ddwprop = kzalloc(sizeof(*ddwprop), GFP_KERNEL);
+ win64->value = ddwprop;
+ win64->length = sizeof(*ddwprop);
+ if (!win64->name || !win64->value) {
+ kfree(win64->name);
+ kfree(win64->value);
+ kfree(win64);
+ return NULL;
+ }
+
+ ddwprop->liobn = cpu_to_be32(liobn);
+ ddwprop->dma_base = cpu_to_be64(dma_addr);
+ ddwprop->tce_shift = cpu_to_be32(page_shift);
+ ddwprop->window_shift = cpu_to_be32(window_shift);
+
+ return win64;
+}
+
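
ddw_property_create() records the window geometry as shifts: the window spans 1ULL << window_shift bytes and is mapped by 1ULL << (window_shift - tce_shift) TCEs. A quick worked check of those sizes (standalone C, example numbers only):

    #include <stdio.h>

    int main(void)
    {
            unsigned int window_shift = 40;   /* 1 TB DMA window */
            unsigned int tce_shift = 24;      /* 16 MB IOMMU pages */

            unsigned long long window_bytes = 1ULL << window_shift;
            unsigned long long tce_entries = 1ULL << (window_shift - tce_shift);

            /* 1 TB window at 16 MB pages -> 65536 TCE entries */
            printf("%llu bytes, %llu TCEs\n", window_bytes, tce_entries);
            return 0;
    }
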
+/*
* If the PE supports dynamic dma windows, and there is space for a table
* that can map all pages in a linear offset, then setup such a table,
* and record the dma-offset in the struct device.
@@ -977,27 +1234,37 @@ static phys_addr_t ddw_memory_hotplug_max(void)
* pdn: the parent pe node with the ibm,dma_window property
* Future: also check if we can remap the base window for our base page size
*
- * returns the dma offset for use by the direct mapped DMA code.
+ * returns true if it can map all pages (direct mapping), false otherwise.
*/
-static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
+static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn)
{
- int len, ret;
+ int len = 0, ret;
+ int max_ram_len = order_base_2(ddw_memory_hotplug_max());
struct ddw_query_response query;
struct ddw_create_response create;
int page_shift;
- u64 dma_addr, max_addr;
+ u64 win_addr;
+ const char *win_name;
struct device_node *dn;
- u32 ddw_avail[3];
- struct direct_window *window;
+ u32 ddw_avail[DDW_APPLICABLE_SIZE];
+ struct dma_win *window;
struct property *win64;
- struct dynamic_dma_window_prop *ddwprop;
struct failed_ddw_pdn *fpdn;
+ bool default_win_removed = false, direct_mapping = false;
+ bool pmem_present;
+ struct pci_dn *pci = PCI_DN(pdn);
+ struct property *default_win = NULL;
- mutex_lock(&direct_window_init_mutex);
+ dn = of_find_node_by_type(NULL, "ibm,pmemory");
+ pmem_present = dn != NULL;
+ of_node_put(dn);
- dma_addr = find_existing_ddw(pdn);
- if (dma_addr != 0)
+ mutex_lock(&dma_win_init_mutex);
+
+ if (find_existing_ddw(pdn, &dev->dev.archdata.dma_offset, &len)) {
+ direct_mapping = (len >= max_ram_len);
goto out_unlock;
+ }
/*
* If we already went through this for a previous function of
@@ -1020,7 +1287,7 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
* the property is actually in the parent, not the PE
*/
ret = of_property_read_u32_array(pdn, "ibm,ddw-applicable",
- &ddw_avail[0], 3);
+ &ddw_avail[0], DDW_APPLICABLE_SIZE);
if (ret)
goto out_failed;
@@ -1031,108 +1298,194 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
* of page sizes: supported and supported for migrate-dma.
*/
dn = pci_device_to_OF_node(dev);
- ret = query_ddw(dev, ddw_avail, &query);
+ ret = query_ddw(dev, ddw_avail, &query, pdn);
if (ret != 0)
goto out_failed;
+ /*
+ * If there is no window available, remove the default DMA window,
+ * if it's present. This will make all the resources available to the
+ * new DDW window.
+ * If anything fails after this, we need to restore it, so also check
+ * for the extension's presence.
+ */
if (query.windows_available == 0) {
- /*
- * no additional windows are available for this device.
- * We might be able to reallocate the existing window,
- * trading in for a larger page size.
- */
- dev_dbg(&dev->dev, "no free dynamic windows");
- goto out_failed;
- }
- if (query.page_size & 4) {
- page_shift = 24; /* 16MB */
- } else if (query.page_size & 2) {
- page_shift = 16; /* 64kB */
- } else if (query.page_size & 1) {
- page_shift = 12; /* 4kB */
- } else {
- dev_dbg(&dev->dev, "no supported direct page size in mask %x",
- query.page_size);
- goto out_failed;
+ int reset_win_ext;
+
+ /* DDW + IOMMU on single window may fail if there is any allocation */
+ if (iommu_table_in_use(pci->table_group->tables[0])) {
+ dev_warn(&dev->dev, "current IOMMU table in use, can't be replaced.\n");
+ goto out_failed;
+ }
+
+ default_win = of_find_property(pdn, "ibm,dma-window", NULL);
+ if (!default_win)
+ goto out_failed;
+
+ reset_win_ext = ddw_read_ext(pdn, DDW_EXT_RESET_DMA_WIN, NULL);
+ if (reset_win_ext)
+ goto out_failed;
+
+ remove_dma_window(pdn, ddw_avail, default_win);
+ default_win_removed = true;
+
+ /* Query again, to check if the window is available */
+ ret = query_ddw(dev, ddw_avail, &query, pdn);
+ if (ret != 0)
+ goto out_failed;
+
+ if (query.windows_available == 0) {
+ /* no windows are available for this device. */
+ dev_dbg(&dev->dev, "no free dynamic windows");
+ goto out_failed;
+ }
}
- /* verify the window * number of ptes will map the partition */
- /* check largest block * page size > max memory hotplug addr */
- max_addr = ddw_memory_hotplug_max();
- if (query.largest_available_block < (max_addr >> page_shift)) {
- dev_dbg(&dev->dev, "can't map partition max 0x%llx with %u "
- "%llu-sized pages\n", max_addr, query.largest_available_block,
- 1ULL << page_shift);
+
+ page_shift = iommu_get_page_shift(query.page_size);
+ if (!page_shift) {
+ dev_dbg(&dev->dev, "no supported page size in mask %x",
+ query.page_size);
goto out_failed;
}
- len = order_base_2(max_addr);
- win64 = kzalloc(sizeof(struct property), GFP_KERNEL);
- if (!win64) {
- dev_info(&dev->dev,
- "couldn't allocate property for 64bit dma window\n");
- goto out_failed;
+
+
+ /*
+ * The "ibm,pmemory" can appear anywhere in the address space.
+ * Assuming it is still backed by page structs, try MAX_PHYSMEM_BITS
+ * for the upper limit and fallback to max RAM otherwise but this
+ * disables device::dma_ops_bypass.
+ */
+ len = max_ram_len;
+ if (pmem_present) {
+ if (query.largest_available_block >=
+ (1ULL << (MAX_PHYSMEM_BITS - page_shift)))
+ len = MAX_PHYSMEM_BITS;
+ else
+ dev_info(&dev->dev, "Skipping ibm,pmemory");
}
- win64->name = kstrdup(DIRECT64_PROPNAME, GFP_KERNEL);
- win64->value = ddwprop = kmalloc(sizeof(*ddwprop), GFP_KERNEL);
- win64->length = sizeof(*ddwprop);
- if (!win64->name || !win64->value) {
- dev_info(&dev->dev,
- "couldn't allocate property name and value\n");
- goto out_free_prop;
+
+ /* check if the available block * number of ptes will map everything */
+ if (query.largest_available_block < (1ULL << (len - page_shift))) {
+ dev_dbg(&dev->dev,
+ "can't map partition max 0x%llx with %llu %llu-sized pages\n",
+ 1ULL << len,
+ query.largest_available_block,
+ 1ULL << page_shift);
+
+ len = order_base_2(query.largest_available_block << page_shift);
+ win_name = DMA64_PROPNAME;
+ } else {
+ direct_mapping = !default_win_removed ||
+ (len == MAX_PHYSMEM_BITS) ||
+ (!pmem_present && (len == max_ram_len));
+ win_name = direct_mapping ? DIRECT64_PROPNAME : DMA64_PROPNAME;
}
ret = create_ddw(dev, ddw_avail, &create, page_shift, len);
if (ret != 0)
- goto out_free_prop;
-
- ddwprop->liobn = cpu_to_be32(create.liobn);
- ddwprop->dma_base = cpu_to_be64(((u64)create.addr_hi << 32) |
- create.addr_lo);
- ddwprop->tce_shift = cpu_to_be32(page_shift);
- ddwprop->window_shift = cpu_to_be32(len);
+ goto out_failed;
dev_dbg(&dev->dev, "created tce table LIOBN 0x%x for %pOF\n",
create.liobn, dn);
- window = kzalloc(sizeof(*window), GFP_KERNEL);
- if (!window)
- goto out_clear_window;
+ win_addr = ((u64)create.addr_hi << 32) | create.addr_lo;
+ win64 = ddw_property_create(win_name, create.liobn, win_addr, page_shift, len);
- ret = walk_system_ram_range(0, memblock_end_of_DRAM() >> PAGE_SHIFT,
- win64->value, tce_setrange_multi_pSeriesLP_walk);
- if (ret) {
- dev_info(&dev->dev, "failed to map direct window for %pOF: %d\n",
- dn, ret);
- goto out_free_window;
+ if (!win64) {
+ dev_info(&dev->dev,
+ "couldn't allocate property, property name, or value\n");
+ goto out_remove_win;
}
ret = of_add_property(pdn, win64);
if (ret) {
- dev_err(&dev->dev, "unable to add dma window property for %pOF: %d",
- pdn, ret);
- goto out_free_window;
+ dev_err(&dev->dev, "unable to add DMA window property for %pOF: %d",
+ pdn, ret);
+ goto out_free_prop;
}
- window->device = pdn;
- window->prop = ddwprop;
- spin_lock(&direct_window_list_lock);
- list_add(&window->list, &direct_window_list);
- spin_unlock(&direct_window_list_lock);
+ window = ddw_list_new_entry(pdn, win64->value);
+ if (!window)
+ goto out_del_prop;
+
+ if (direct_mapping) {
+ /* DDW maps the whole partition, so enable direct DMA mapping */
+ ret = walk_system_ram_range(0, memblock_end_of_DRAM() >> PAGE_SHIFT,
+ win64->value, tce_setrange_multi_pSeriesLP_walk);
+ if (ret) {
+ dev_info(&dev->dev, "failed to map DMA window for %pOF: %d\n",
+ dn, ret);
+
+ /* Make sure to clean DDW if any TCE was set */
+ clean_dma_window(pdn, win64->value);
+ goto out_del_list;
+ }
+ } else {
+ struct iommu_table *newtbl;
+ int i;
+ unsigned long start = 0, end = 0;
+
+ for (i = 0; i < ARRAY_SIZE(pci->phb->mem_resources); i++) {
+ const unsigned long mask = IORESOURCE_MEM_64 | IORESOURCE_MEM;
+
+ /* Look for MMIO32 */
+ if ((pci->phb->mem_resources[i].flags & mask) == IORESOURCE_MEM) {
+ start = pci->phb->mem_resources[i].start;
+ end = pci->phb->mem_resources[i].end;
+ break;
+ }
+ }
+
+ /* New table for using DDW instead of the default DMA window */
+ newtbl = iommu_pseries_alloc_table(pci->phb->node);
+ if (!newtbl) {
+ dev_dbg(&dev->dev, "couldn't create new IOMMU table\n");
+ goto out_del_list;
+ }
+
+ iommu_table_setparms_common(newtbl, pci->phb->bus->number, create.liobn, win_addr,
+ 1UL << len, page_shift, NULL, &iommu_table_lpar_multi_ops);
+ iommu_init_table(newtbl, pci->phb->node, start, end);
+
+ pci->table_group->tables[1] = newtbl;
+
+ set_iommu_table_base(&dev->dev, newtbl);
+ }
+
+ if (default_win_removed) {
+ iommu_tce_table_put(pci->table_group->tables[0]);
+ pci->table_group->tables[0] = NULL;
+
+ /* default_win is valid here because default_win_removed == true */
+ of_remove_property(pdn, default_win);
+ dev_info(&dev->dev, "Removed default DMA window for %pOF\n", pdn);
+ }
+
+ spin_lock(&dma_win_list_lock);
+ list_add(&window->list, &dma_win_list);
+ spin_unlock(&dma_win_list_lock);
- dma_addr = be64_to_cpu(ddwprop->dma_base);
+ dev->dev.archdata.dma_offset = win_addr;
goto out_unlock;
-out_free_window:
+out_del_list:
kfree(window);
-out_clear_window:
- remove_ddw(pdn, true);
+out_del_prop:
+ of_remove_property(pdn, win64);
out_free_prop:
kfree(win64->name);
kfree(win64->value);
kfree(win64);
+out_remove_win:
+ /* DDW is clean, so it's ok to call this directly. */
+ __remove_dma_window(pdn, ddw_avail, create.liobn);
+
out_failed:
+ if (default_win_removed)
+ reset_dma_window(dev, pdn);
fpdn = kzalloc(sizeof(*fpdn), GFP_KERNEL);
if (!fpdn)
@@ -1141,8 +1494,17 @@ out_failed:
list_add(&fpdn->list, &failed_ddw_pdn_list);
out_unlock:
- mutex_unlock(&direct_window_init_mutex);
- return dma_addr;
+ mutex_unlock(&dma_win_init_mutex);
+
+ /*
+ * If we have persistent memory and the window size is only as big
+ * as RAM, then we failed to create a window to cover persistent
+ * memory and need to set the DMA limit.
+ */
+ if (pmem_present && direct_mapping && len == max_ram_len)
+ dev->dev.bus_dma_limit = dev->dev.archdata.dma_offset + (1ULL << len);
+
+ return direct_mapping;
}
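
The direct-mapping test in enable_ddw() is pure arithmetic: a window of 2^len bytes can be linearly mapped only if the hypervisor can still hand out at least 2^(len - page_shift) TCEs in a single block. A hedged worked example of that check (standalone C, illustrative numbers only):

    #include <stdio.h>

    /* Returns 1 if a single free block of 'largest_available_block' TCEs,
     * each covering 2^page_shift bytes, can map 2^len bytes linearly. */
    static int can_map_all(unsigned long long largest_available_block,
                           unsigned int page_shift, unsigned int len)
    {
            return largest_available_block >= (1ULL << (len - page_shift));
    }

    int main(void)
    {
            /* Example: 64 GB of RAM (len = 36) with 16 MB IOMMU pages (shift 24):
             * direct mapping needs at least 4096 free TCEs in one block. */
            printf("direct mapping possible: %d\n", can_map_all(8192, 24, 36));
            return 0;
    }
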
static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev)
@@ -1163,13 +1525,7 @@ static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev)
dn = pci_device_to_OF_node(dev);
pr_debug(" node is %pOF\n", dn);
- for (pdn = dn; pdn && PCI_DN(pdn) && !PCI_DN(pdn)->table_group;
- pdn = pdn->parent) {
- dma_window = of_get_property(pdn, "ibm,dma-window", NULL);
- if (dma_window)
- break;
- }
-
+ pdn = pci_dma_find(dn, &dma_window);
if (!pdn || !PCI_DN(pdn)) {
printk(KERN_WARNING "pci_dma_dev_setup_pSeriesLP: "
"no DMA window found for pci dev=%s dn=%pOF\n",
@@ -1184,7 +1540,7 @@ static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev)
tbl = pci->table_group->tables[0];
iommu_table_setparms_lpar(pci->phb, pdn, tbl,
pci->table_group, dma_window);
- tbl->it_ops = &iommu_table_lpar_multi_ops;
+
iommu_init_table(tbl, pci->phb->node, 0, 0);
iommu_register_group(pci->table_group,
pci_domain_nr(pci->phb->bus), 0);
@@ -1200,7 +1556,6 @@ static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev)
static bool iommu_bypass_supported_pSeriesLP(struct pci_dev *pdev, u64 dma_mask)
{
struct device_node *dn = pci_device_to_OF_node(pdev), *pdn;
- const __be32 *dma_window = NULL;
/* only attempt to use a new window if 64-bit DMA is requested */
if (dma_mask < DMA_BIT_MASK(64))
@@ -1214,18 +1569,9 @@ static bool iommu_bypass_supported_pSeriesLP(struct pci_dev *pdev, u64 dma_mask)
* search upwards in the tree until we either hit a dma-window
* property, OR find a parent with a table already allocated.
*/
- for (pdn = dn; pdn && PCI_DN(pdn) && !PCI_DN(pdn)->table_group;
- pdn = pdn->parent) {
- dma_window = of_get_property(pdn, "ibm,dma-window", NULL);
- if (dma_window)
- break;
- }
-
- if (pdn && PCI_DN(pdn)) {
- pdev->dev.archdata.dma_offset = enable_ddw(pdev, pdn);
- if (pdev->dev.archdata.dma_offset)
- return true;
- }
+ pdn = pci_dma_find(dn, NULL);
+ if (pdn && PCI_DN(pdn))
+ return enable_ddw(pdev, pdn);
return false;
}
@@ -1233,29 +1579,29 @@ static bool iommu_bypass_supported_pSeriesLP(struct pci_dev *pdev, u64 dma_mask)
static int iommu_mem_notifier(struct notifier_block *nb, unsigned long action,
void *data)
{
- struct direct_window *window;
+ struct dma_win *window;
struct memory_notify *arg = data;
int ret = 0;
switch (action) {
case MEM_GOING_ONLINE:
- spin_lock(&direct_window_list_lock);
- list_for_each_entry(window, &direct_window_list, list) {
+ spin_lock(&dma_win_list_lock);
+ list_for_each_entry(window, &dma_win_list, list) {
ret |= tce_setrange_multi_pSeriesLP(arg->start_pfn,
arg->nr_pages, window->prop);
/* XXX log error */
}
- spin_unlock(&direct_window_list_lock);
+ spin_unlock(&dma_win_list_lock);
break;
case MEM_CANCEL_ONLINE:
case MEM_OFFLINE:
- spin_lock(&direct_window_list_lock);
- list_for_each_entry(window, &direct_window_list, list) {
+ spin_lock(&dma_win_list_lock);
+ list_for_each_entry(window, &dma_win_list, list) {
ret |= tce_clearrange_multi_pSeriesLP(arg->start_pfn,
arg->nr_pages, window->prop);
/* XXX log error */
}
- spin_unlock(&direct_window_list_lock);
+ spin_unlock(&dma_win_list_lock);
break;
default:
break;
@@ -1276,7 +1622,7 @@ static int iommu_reconfig_notifier(struct notifier_block *nb, unsigned long acti
struct of_reconfig_data *rd = data;
struct device_node *np = rd->dn;
struct pci_dn *pci = PCI_DN(np);
- struct direct_window *window;
+ struct dma_win *window;
switch (action) {
case OF_RECONFIG_DETACH_NODE:
@@ -1287,20 +1633,22 @@ static int iommu_reconfig_notifier(struct notifier_block *nb, unsigned long acti
* we have to remove the property when releasing
* the device node.
*/
- remove_ddw(np, false);
+ if (remove_ddw(np, false, DIRECT64_PROPNAME))
+ remove_ddw(np, false, DMA64_PROPNAME);
+
if (pci && pci->table_group)
iommu_pseries_free_group(pci->table_group,
np->full_name);
- spin_lock(&direct_window_list_lock);
- list_for_each_entry(window, &direct_window_list, list) {
+ spin_lock(&dma_win_list_lock);
+ list_for_each_entry(window, &dma_win_list, list) {
if (window->device == np) {
list_del(&window->list);
kfree(window);
break;
}
}
- spin_unlock(&direct_window_list_lock);
+ spin_unlock(&dma_win_list_lock);
break;
default:
err = NOTIFY_DONE;
@@ -1314,7 +1662,7 @@ static struct notifier_block iommu_reconfig_nb = {
};
/* These are called very early. */
-void iommu_init_early_pSeries(void)
+void __init iommu_init_early_pSeries(void)
{
if (of_chosen && of_get_property(of_chosen, "linux,iommu-off", NULL))
return;
diff --git a/arch/powerpc/platforms/pseries/kexec.c b/arch/powerpc/platforms/pseries/kexec.c
index 145fcfbc017f..096d09ed89f6 100644
--- a/arch/powerpc/platforms/pseries/kexec.c
+++ b/arch/powerpc/platforms/pseries/kexec.c
@@ -6,7 +6,7 @@
#include <linux/kernel.h>
#include <linux/interrupt.h>
-#include <asm/machdep.h>
+#include <asm/setup.h>
#include <asm/page.h>
#include <asm/firmware.h>
#include <asm/kexec.h>
@@ -61,3 +61,11 @@ void pseries_kexec_cpu_down(int crash_shutdown, int secondary)
} else
xics_kexec_teardown_cpu(secondary);
}
+
+void pseries_machine_kexec(struct kimage *image)
+{
+ if (firmware_has_feature(FW_FEATURE_SET_MODE))
+ pseries_disable_reloc_on_exc();
+
+ default_machine_kexec(image);
+}
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index 3c3da25b445c..97ef6499e501 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -21,15 +21,16 @@
#include <linux/cpuhotplug.h>
#include <linux/workqueue.h>
#include <linux/proc_fs.h>
+#include <linux/pgtable.h>
+#include <linux/debugfs.h>
+
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
-#include <asm/machdep.h>
+#include <asm/setup.h>
#include <asm/mmu_context.h>
#include <asm/iommu.h>
#include <asm/tlb.h>
-#include <asm/prom.h>
#include <asm/cputable.h>
#include <asm/udbg.h>
#include <asm/smp.h>
@@ -38,8 +39,7 @@
#include <asm/plpar_wrappers.h>
#include <asm/kexec.h>
#include <asm/fadump.h>
-#include <asm/asm-prototypes.h>
-#include <asm/debugfs.h>
+#include <asm/dtl.h>
#include "pseries.h"
@@ -56,6 +56,7 @@ EXPORT_SYMBOL(plpar_hcall);
EXPORT_SYMBOL(plpar_hcall9);
EXPORT_SYMBOL(plpar_hcall_norets);
+#ifdef CONFIG_PPC_64S_HASH_MMU
/*
* H_BLOCK_REMOVE supported block size for this page size in segment who's base
* page size is that page size.
@@ -64,6 +65,7 @@ EXPORT_SYMBOL(plpar_hcall_norets);
* page size.
*/
static int hblkrm_size[MMU_PAGE_COUNT][MMU_PAGE_COUNT] __ro_after_init;
+#endif
/*
* Due to the involved complexity, and that the current hypervisor is only
@@ -260,7 +262,7 @@ static int cpu_relative_dispatch_distance(int last_disp_cpu, int cur_disp_cpu)
if (!last_disp_cpu_assoc || !cur_disp_cpu_assoc)
return -EIO;
- return cpu_distance(last_disp_cpu_assoc, cur_disp_cpu_assoc);
+ return cpu_relative_distance(last_disp_cpu_assoc, cur_disp_cpu_assoc);
}
static int cpu_home_node_dispatch_distance(int disp_cpu)
@@ -280,7 +282,7 @@ static int cpu_home_node_dispatch_distance(int disp_cpu)
if (!disp_cpu_assoc || !vcpu_assoc)
return -EIO;
- return cpu_distance(disp_cpu_assoc, vcpu_assoc);
+ return cpu_relative_distance(disp_cpu_assoc, vcpu_assoc);
}
static void update_vcpu_disp_stat(int disp_cpu)
@@ -636,8 +638,16 @@ static const struct proc_ops vcpudispatch_stats_freq_proc_ops = {
static int __init vcpudispatch_stats_procfs_init(void)
{
- if (!lppaca_shared_proc(get_lppaca()))
+ /*
+ * Avoid smp_processor_id while preemptible. All CPUs should have
+ * the same value for lppaca_shared_proc.
+ */
+ preempt_disable();
+ if (!lppaca_shared_proc(get_lppaca())) {
+ preempt_enable();
return 0;
+ }
+ preempt_enable();
if (!proc_create("powerpc/vcpudispatch_stats", 0600, NULL,
&vcpudispatch_stats_proc_ops))
@@ -650,6 +660,17 @@ static int __init vcpudispatch_stats_procfs_init(void)
}
machine_device_initcall(pseries, vcpudispatch_stats_procfs_init);
+
+#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
+u64 pseries_paravirt_steal_clock(int cpu)
+{
+ struct lppaca *lppaca = &lppaca_of(cpu);
+
+ return be64_to_cpu(READ_ONCE(lppaca->enqueue_dispatch_tb)) +
+ be64_to_cpu(READ_ONCE(lppaca->ready_enqueue_tb));
+}
+#endif
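
The new steal-clock hook just adds two big-endian 64-bit counters that the hypervisor publishes in the lppaca. A user-space sketch of only that byte-order handling, with glibc's be64toh standing in for be64_to_cpu and a toy struct standing in for the real lppaca layout:

    #include <endian.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Toy stand-in for the two lppaca fields read by the steal-clock hook. */
    struct toy_lppaca {
            uint64_t enqueue_dispatch_tb;   /* big-endian, as the hypervisor writes it */
            uint64_t ready_enqueue_tb;      /* big-endian */
    };

    static uint64_t toy_steal_clock(const struct toy_lppaca *lp)
    {
            return be64toh(lp->enqueue_dispatch_tb) + be64toh(lp->ready_enqueue_tb);
    }

    int main(void)
    {
            struct toy_lppaca lp = {
                    .enqueue_dispatch_tb = htobe64(1000),
                    .ready_enqueue_tb = htobe64(234),
            };

            printf("stolen timebase ticks: %llu\n",
                   (unsigned long long)toy_steal_clock(&lp));
            return 0;
    }
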
+
#endif /* CONFIG_PPC_SPLPAR */
void vpa_init(int cpu)
@@ -679,7 +700,7 @@ void vpa_init(int cpu)
return;
}
-#ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_PPC_64S_HASH_MMU
/*
* PAPR says this feature is SLB-Buffer but firmware never
* reports that. All SPLPAR support SLB shadow buffer.
@@ -692,7 +713,7 @@ void vpa_init(int cpu)
"cpu %d (hw %d) of area %lx failed with %ld\n",
cpu, hwcpu, addr, ret);
}
-#endif /* CONFIG_PPC_BOOK3S_64 */
+#endif /* CONFIG_PPC_64S_HASH_MMU */
/*
* Register dispatch trace log, if one has been allocated.
@@ -702,6 +723,36 @@ void vpa_init(int cpu)
#ifdef CONFIG_PPC_BOOK3S_64
+static int __init pseries_lpar_register_process_table(unsigned long base,
+ unsigned long page_size, unsigned long table_size)
+{
+ long rc;
+ unsigned long flags = 0;
+
+ if (table_size)
+ flags |= PROC_TABLE_NEW;
+ if (radix_enabled()) {
+ flags |= PROC_TABLE_RADIX;
+ if (mmu_has_feature(MMU_FTR_GTSE))
+ flags |= PROC_TABLE_GTSE;
+ } else
+ flags |= PROC_TABLE_HPT_SLB;
+ for (;;) {
+ rc = plpar_hcall_norets(H_REGISTER_PROC_TBL, flags, base,
+ page_size, table_size);
+ if (!H_IS_LONG_BUSY(rc))
+ break;
+ mdelay(get_longbusy_msecs(rc));
+ }
+ if (rc != H_SUCCESS) {
+ pr_err("Failed to register process table (rc=%ld)\n", rc);
+ BUG();
+ }
+ return rc;
+}
+
+#ifdef CONFIG_PPC_64S_HASH_MMU
+
static long pSeries_lpar_hpte_insert(unsigned long hpte_group,
unsigned long vpn, unsigned long pa,
unsigned long rflags, unsigned long vflags,
@@ -792,7 +843,8 @@ static long pSeries_lpar_hpte_remove(unsigned long hpte_group)
return -1;
}
-static void manual_hpte_clear_all(void)
+/* Called during kexec sequence with MMU off */
+static notrace void manual_hpte_clear_all(void)
{
unsigned long size_bytes = 1UL << ppc64_pft_size;
unsigned long hpte_count = size_bytes >> 4;
@@ -825,7 +877,8 @@ static void manual_hpte_clear_all(void)
}
}
-static int hcall_hpte_clear_all(void)
+/* Called during kexec sequence with MMU off */
+static notrace int hcall_hpte_clear_all(void)
{
int rc;
@@ -836,7 +889,8 @@ static int hcall_hpte_clear_all(void)
return rc;
}
-static void pseries_hpte_clear_all(void)
+/* Called during kexec sequence with MMU off */
+static notrace void pseries_hpte_clear_all(void)
{
int rc;
@@ -878,7 +932,8 @@ static long pSeries_lpar_hpte_updatepp(unsigned long slot,
want_v = hpte_encode_avpn(vpn, psize, ssize);
- flags = (newpp & 7) | H_AVPN;
+ flags = (newpp & (HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_LO)) | H_AVPN;
+ flags |= (newpp & HPTE_R_KEY_HI) >> 48;
if (mmu_has_feature(MMU_FTR_KERNEL_RO))
/* Move pp0 into bit 8 (IBM 55) */
flags |= (newpp & HPTE_R_PP0) >> 55;
@@ -967,11 +1022,13 @@ static void pSeries_lpar_hpte_updateboltedpp(unsigned long newpp,
slot = pSeries_lpar_hpte_find(vpn, psize, ssize);
BUG_ON(slot == -1);
- flags = newpp & 7;
+ flags = newpp & (HPTE_R_PP | HPTE_R_N);
if (mmu_has_feature(MMU_FTR_KERNEL_RO))
/* Move pp0 into bit 8 (IBM 55) */
flags |= (newpp & HPTE_R_PP0) >> 55;
+ flags |= ((newpp & HPTE_R_KEY_HI) >> 48) | (newpp & HPTE_R_KEY_LO);
+
lpar_rc = plpar_pte_protect(flags, slot, 0);
BUG_ON(lpar_rc != H_SUCCESS);
@@ -1620,7 +1677,7 @@ static int pseries_lpar_resize_hpt(unsigned long shift)
}
msleep(delay);
rc = plpar_resize_hpt_prepare(0, shift);
- };
+ }
switch (rc) {
case H_SUCCESS:
@@ -1664,32 +1721,6 @@ static int pseries_lpar_resize_hpt(unsigned long shift)
return 0;
}
-static int pseries_lpar_register_process_table(unsigned long base,
- unsigned long page_size, unsigned long table_size)
-{
- long rc;
- unsigned long flags = 0;
-
- if (table_size)
- flags |= PROC_TABLE_NEW;
- if (radix_enabled())
- flags |= PROC_TABLE_RADIX | PROC_TABLE_GTSE;
- else
- flags |= PROC_TABLE_HPT_SLB;
- for (;;) {
- rc = plpar_hcall_norets(H_REGISTER_PROC_TBL, flags, base,
- page_size, table_size);
- if (!H_IS_LONG_BUSY(rc))
- break;
- mdelay(get_longbusy_msecs(rc));
- }
- if (rc != H_SUCCESS) {
- pr_err("Failed to register process table (rc=%ld)\n", rc);
- BUG();
- }
- return rc;
-}
-
void __init hpte_init_pseries(void)
{
mmu_hash_ops.hpte_invalidate = pSeries_lpar_hpte_invalidate;
@@ -1712,14 +1743,17 @@ void __init hpte_init_pseries(void)
if (cpu_has_feature(CPU_FTR_ARCH_300))
pseries_lpar_register_process_table(0, 0, 0);
}
+#endif /* CONFIG_PPC_64S_HASH_MMU */
-void radix_init_pseries(void)
+#ifdef CONFIG_PPC_RADIX_MMU
+void __init radix_init_pseries(void)
{
pr_info("Using radix MMU under hypervisor\n");
pseries_lpar_register_process_table(__pa(process_tb),
0, PRTB_SIZE_SHIFT - 12);
}
+#endif
#ifdef CONFIG_PPC_SMLPAR
#define CMO_FREE_HINT_DEFAULT 1
@@ -1813,30 +1847,28 @@ void hcall_tracepoint_unregfunc(void)
#endif
/*
- * Since the tracing code might execute hcalls we need to guard against
- * recursion. One example of this are spinlocks calling H_YIELD on
- * shared processor partitions.
+ * Keep track of hcall tracing depth and prevent recursion. Warn if any is
+ * detected because it may indicate a problem. This will not catch all
+ * problems with tracing code making hcalls, because the tracing might have
+ * been invoked from a non-hcall, so the first hcall could recurse into it
+ * without warning here, but this is better than nothing.
+ *
+ * Hcalls with specific problems being traced should use the _notrace
+ * plpar_hcall variants.
*/
static DEFINE_PER_CPU(unsigned int, hcall_trace_depth);
-void __trace_hcall_entry(unsigned long opcode, unsigned long *args)
+notrace void __trace_hcall_entry(unsigned long opcode, unsigned long *args)
{
unsigned long flags;
unsigned int *depth;
- /*
- * We cannot call tracepoints inside RCU idle regions which
- * means we must not trace H_CEDE.
- */
- if (opcode == H_CEDE)
- return;
-
local_irq_save(flags);
depth = this_cpu_ptr(&hcall_trace_depth);
- if (*depth)
+ if (WARN_ON_ONCE(*depth))
goto out;
(*depth)++;
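
The depth counter above is the standard guard against a trace handler re-entering itself. The kernel version is a per-CPU counter manipulated with interrupts disabled; the closest self-contained user-space analogue is a per-thread counter, sketched here purely to illustrate the technique:

    #include <stdio.h>

    /* Per-thread recursion guard, analogous to the per-CPU hcall_trace_depth:
     * refuse to trace if a trace handler itself triggers another traced call. */
    static _Thread_local unsigned int trace_depth;

    static void trace_event(const char *what)
    {
            if (trace_depth)        /* already inside a trace handler: bail out */
                    return;
            trace_depth++;
            printf("trace: %s\n", what);   /* pretend this could itself be traced */
            trace_depth--;
    }

    int main(void)
    {
            trace_event("hcall entry");
            trace_event("hcall exit");
            return 0;
    }
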
@@ -1848,19 +1880,16 @@ out:
local_irq_restore(flags);
}
-void __trace_hcall_exit(long opcode, long retval, unsigned long *retbuf)
+notrace void __trace_hcall_exit(long opcode, long retval, unsigned long *retbuf)
{
unsigned long flags;
unsigned int *depth;
- if (opcode == H_CEDE)
- return;
-
local_irq_save(flags);
depth = this_cpu_ptr(&hcall_trace_depth);
- if (*depth)
+ if (*depth) /* Don't warn again on the way out */
goto out;
(*depth)++;
@@ -1917,7 +1946,8 @@ int h_get_mpp_x(struct hvcall_mpp_x_data *mpp_x_data)
return rc;
}
-static unsigned long vsid_unscramble(unsigned long vsid, int ssize)
+#ifdef CONFIG_PPC_64S_HASH_MMU
+static unsigned long __init vsid_unscramble(unsigned long vsid, int ssize)
{
unsigned long protovsid;
unsigned long va_bits = VA_BITS;
@@ -1977,6 +2007,7 @@ static int __init reserve_vrma_context_id(void)
return 0;
}
machine_device_initcall(pseries, reserve_vrma_context_id);
+#endif
#ifdef CONFIG_DEBUG_FS
/* debugfs file interface for vpa data */
@@ -2005,7 +2036,7 @@ static int __init vpa_debugfs_init(void)
if (!firmware_has_feature(FW_FEATURE_SPLPAR))
return 0;
- vpa_dir = debugfs_create_dir("vpa", powerpc_debugfs_root);
+ vpa_dir = debugfs_create_dir("vpa", arch_debugfs_dir);
/* set up the per-cpu vpa file*/
for_each_possible_cpu(i) {
diff --git a/arch/powerpc/platforms/pseries/lparcfg.c b/arch/powerpc/platforms/pseries/lparcfg.c
index b8d28ab88178..63fd925ccbb8 100644
--- a/arch/powerpc/platforms/pseries/lparcfg.c
+++ b/arch/powerpc/platforms/pseries/lparcfg.c
@@ -28,7 +28,6 @@
#include <asm/firmware.h>
#include <asm/rtas.h>
#include <asm/time.h>
-#include <asm/prom.h>
#include <asm/vdso_datapage.h>
#include <asm/vio.h>
#include <asm/mmu.h>
@@ -36,6 +35,7 @@
#include <asm/drmem.h>
#include "pseries.h"
+#include "vas.h" /* pseries_vas_dlpar_cpu() */
/*
* This isn't a module but we expose that to userspace
@@ -136,6 +136,39 @@ static unsigned int h_get_ppp(struct hvcall_ppp_data *ppp_data)
return rc;
}
+static void show_gpci_data(struct seq_file *m)
+{
+ struct hv_gpci_request_buffer *buf;
+ unsigned int affinity_score;
+ long ret;
+
+ buf = kmalloc(sizeof(*buf), GFP_KERNEL);
+ if (buf == NULL)
+ return;
+
+ /*
+ * Show the local LPAR's affinity score.
+ *
+ * 0xB1 selects the Affinity_Domain_Info_By_Partition subcall.
+ * The score is at byte 0xB in the output buffer.
+ */
+ memset(&buf->params, 0, sizeof(buf->params));
+ buf->params.counter_request = cpu_to_be32(0xB1);
+ buf->params.starting_index = cpu_to_be32(-1); /* local LPAR */
+ buf->params.counter_info_version_in = 0x5; /* v5+ for score */
+ ret = plpar_hcall_norets(H_GET_PERF_COUNTER_INFO, virt_to_phys(buf),
+ sizeof(*buf));
+ if (ret != H_SUCCESS) {
+ pr_debug("hcall failed: H_GET_PERF_COUNTER_INFO: %ld, %x\n",
+ ret, be32_to_cpu(buf->params.detail_rc));
+ goto out;
+ }
+ affinity_score = buf->bytes[0xB];
+ seq_printf(m, "partition_affinity_score=%u\n", affinity_score);
+out:
+ kfree(buf);
+}
+
static unsigned h_pic(unsigned long *pool_idle_time,
unsigned long *num_procs)
{
@@ -278,6 +311,92 @@ static void parse_mpp_x_data(struct seq_file *m)
seq_printf(m, "coalesce_pool_spurr=%ld\n", mpp_x_data.pool_spurr_cycles);
}
+/*
+ * PAPR defines, in section "7.3.16 System Parameters Option", the token 55 to
+ * read the LPAR name, and the largest output data to 4000 + 2 bytes length.
+ */
+#define SPLPAR_LPAR_NAME_TOKEN 55
+#define GET_SYS_PARM_BUF_SIZE 4002
+#if GET_SYS_PARM_BUF_SIZE > RTAS_DATA_BUF_SIZE
+#error "GET_SYS_PARM_BUF_SIZE is larger than RTAS_DATA_BUF_SIZE"
+#endif
+
+/*
+ * Read the lpar name using the RTAS ibm,get-system-parameter call.
+ *
+ * The name read through this call is updated if changes are made by the end
+ * user on the hypervisor side.
+ *
+ * Some hypervisors (like QEMU) may not provide this value. In that case, a
+ * non-zero value is returned.
+ */
+static int read_rtas_lpar_name(struct seq_file *m)
+{
+ int rc, len, token;
+ union {
+ char raw_buffer[GET_SYS_PARM_BUF_SIZE];
+ struct {
+ __be16 len;
+ char name[GET_SYS_PARM_BUF_SIZE-2];
+ };
+ } *local_buffer;
+
+ token = rtas_token("ibm,get-system-parameter");
+ if (token == RTAS_UNKNOWN_SERVICE)
+ return -EINVAL;
+
+ local_buffer = kmalloc(sizeof(*local_buffer), GFP_KERNEL);
+ if (!local_buffer)
+ return -ENOMEM;
+
+ do {
+ spin_lock(&rtas_data_buf_lock);
+ memset(rtas_data_buf, 0, sizeof(*local_buffer));
+ rc = rtas_call(token, 3, 1, NULL, SPLPAR_LPAR_NAME_TOKEN,
+ __pa(rtas_data_buf), sizeof(*local_buffer));
+ if (!rc)
+ memcpy(local_buffer->raw_buffer, rtas_data_buf,
+ sizeof(local_buffer->raw_buffer));
+ spin_unlock(&rtas_data_buf_lock);
+ } while (rtas_busy_delay(rc));
+
+ if (!rc) {
+ /* Force end of string */
+ len = min((int) be16_to_cpu(local_buffer->len),
+ (int) sizeof(local_buffer->name)-1);
+ local_buffer->name[len] = '\0';
+
+ seq_printf(m, "partition_name=%s\n", local_buffer->name);
+ } else
+ rc = -ENODATA;
+
+ kfree(local_buffer);
+ return rc;
+}
+
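
The ibm,get-system-parameter output buffer used above starts with a big-endian 16-bit length followed by the parameter data, which is not necessarily NUL-terminated; that is why the code clamps the length and forces a terminator. A standalone parse of that layout (plain C, assuming the buffer holds at least the 2-byte length header):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Extract a NUL-terminated string from an RTAS system-parameter buffer:
     * bytes 0-1 are a big-endian length, the value follows immediately. */
    static void toy_parse_sysparm(const uint8_t *buf, size_t bufsize,
                                  char *out, size_t outsize)
    {
            size_t len = ((size_t)buf[0] << 8) | buf[1];   /* big-endian u16 */

            if (len > bufsize - 2)
                    len = bufsize - 2;
            if (len > outsize - 1)
                    len = outsize - 1;
            memcpy(out, buf + 2, len);
            out[len] = '\0';                               /* force end of string */
    }

    int main(void)
    {
            const uint8_t buf[] = { 0x00, 0x05, 'l', 'p', 'a', 'r', '1' };
            char name[32];

            toy_parse_sysparm(buf, sizeof(buf), name, sizeof(name));
            printf("partition_name=%s\n", name);
            return 0;
    }
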
+/*
+ * Read the LPAR name from the Device Tree.
+ *
+ * The value read in the DT is not updated if the end-user is touching the LPAR
+ * name on the hypervisor side.
+ */
+static int read_dt_lpar_name(struct seq_file *m)
+{
+ const char *name;
+
+ if (of_property_read_string(of_root, "ibm,partition-name", &name))
+ return -ENOENT;
+
+ seq_printf(m, "partition_name=%s\n", name);
+ return 0;
+}
+
+static void read_lpar_name(struct seq_file *m)
+{
+ if (read_rtas_lpar_name(m) && read_dt_lpar_name(m))
+ pr_err_once("Error can't get the LPAR name");
+}
+
#define SPLPAR_CHARACTERISTICS_TOKEN 20
#define SPLPAR_MAXLENGTH 1026*(sizeof(char))
@@ -463,6 +582,7 @@ static int pseries_lparcfg_data(struct seq_file *m, void *v)
if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
/* this call handles the ibm,get-system-parameter contents */
+ read_lpar_name(m);
parse_system_parameter_string(m);
parse_ppp_data(m);
parse_mpp_data(m);
@@ -487,6 +607,8 @@ static int pseries_lparcfg_data(struct seq_file *m, void *v)
partition_active_processors * 100);
}
+ show_gpci_data(m);
+
seq_printf(m, "partition_active_processors=%d\n",
partition_active_processors);
@@ -496,12 +618,15 @@ static int pseries_lparcfg_data(struct seq_file *m, void *v)
seq_printf(m, "shared_processor_mode=%d\n",
lppaca_shared_proc(get_lppaca()));
-#ifdef CONFIG_PPC_BOOK3S_64
- seq_printf(m, "slb_size=%d\n", mmu_slb_size);
+#ifdef CONFIG_PPC_64S_HASH_MMU
+ if (!radix_enabled())
+ seq_printf(m, "slb_size=%d\n", mmu_slb_size);
#endif
parse_em_data(m);
maxmem_data(m);
+ seq_printf(m, "security_flavor=%u\n", pseries_security_flavor);
+
return 0;
}
@@ -624,6 +749,16 @@ static ssize_t lparcfg_write(struct file *file, const char __user * buf,
return -EINVAL;
retval = update_ppp(new_entitled_ptr, NULL);
+
+ if (retval == H_SUCCESS || retval == H_CONSTRAINED) {
+ /*
+ * The hypervisor assigns VAS resources based
+ * on entitled capacity for shared mode.
+ * Reconfig VAS windows based on DLPAR CPU events.
+ */
+ if (pseries_vas_dlpar_cpu() != 0)
+ retval = H_HARDWARE;
+ }
} else if (!strcmp(kbuf, "capacity_weight")) {
char *endp;
*new_weight_ptr = (u8) simple_strtoul(tmp, &endp, 10);
diff --git a/arch/powerpc/platforms/pseries/mobility.c b/arch/powerpc/platforms/pseries/mobility.c
index b571285f6c14..634fac5db3f9 100644
--- a/arch/powerpc/platforms/pseries/mobility.c
+++ b/arch/powerpc/platforms/pseries/mobility.c
@@ -6,12 +6,17 @@
* Copyright (C) 2010 IBM Corporation
*/
+
+#define pr_fmt(fmt) "mobility: " fmt
+
#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/kobject.h>
+#include <linux/nmi.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/stat.h>
+#include <linux/stop_machine.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/delay.h>
@@ -21,6 +26,7 @@
#include <asm/machdep.h>
#include <asm/rtas.h>
#include "pseries.h"
+#include "vas.h" /* vas_migration_handler() */
#include "../../kernel/cacheinfo.h"
static struct kobject *mobility_kobj;
@@ -42,6 +48,39 @@ struct update_props_workarea {
#define MIGRATION_SCOPE (1)
#define PRRN_SCOPE -2
+#ifdef CONFIG_PPC_WATCHDOG
+static unsigned int nmi_wd_lpm_factor = 200;
+
+#ifdef CONFIG_SYSCTL
+static struct ctl_table nmi_wd_lpm_factor_ctl_table[] = {
+ {
+ .procname = "nmi_wd_lpm_factor",
+ .data = &nmi_wd_lpm_factor,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_douintvec_minmax,
+ },
+ {}
+};
+static struct ctl_table nmi_wd_lpm_factor_sysctl_root[] = {
+ {
+ .procname = "kernel",
+ .mode = 0555,
+ .child = nmi_wd_lpm_factor_ctl_table,
+ },
+ {}
+};
+
+static int __init register_nmi_wd_lpm_factor_sysctl(void)
+{
+ register_sysctl_table(nmi_wd_lpm_factor_sysctl_root);
+
+ return 0;
+}
+device_initcall(register_nmi_wd_lpm_factor_sysctl);
+#endif /* CONFIG_SYSCTL */
+#endif /* CONFIG_PPC_WATCHDOG */
+
static int mobility_rtas_call(int token, char *buf, s32 scope)
{
int rc;
@@ -56,16 +95,31 @@ static int mobility_rtas_call(int token, char *buf, s32 scope)
return rc;
}
-static int delete_dt_node(__be32 phandle)
+static int delete_dt_node(struct device_node *dn)
{
- struct device_node *dn;
+ struct device_node *pdn;
+ bool is_platfac;
- dn = of_find_node_by_phandle(be32_to_cpu(phandle));
- if (!dn)
- return -ENOENT;
+ pdn = of_get_parent(dn);
+ is_platfac = of_node_is_type(dn, "ibm,platform-facilities") ||
+ of_node_is_type(pdn, "ibm,platform-facilities");
+ of_node_put(pdn);
+
+ /*
+ * The drivers that bind to nodes in the platform-facilities
+ * hierarchy don't support node removal, and the removal directive
+ * from firmware is always followed by an add of an equivalent
+ * node. The capability (e.g. RNG, encryption, compression)
+ * represented by the node is never interrupted by the migration.
+ * So ignore changes to this part of the tree.
+ */
+ if (is_platfac) {
+ pr_notice("ignoring remove operation for %pOFfp\n", dn);
+ return 0;
+ }
+ pr_debug("removing node %pOFfp\n", dn);
dlpar_detach_node(dn);
- of_node_put(dn);
return 0;
}
@@ -122,6 +176,7 @@ static int update_dt_property(struct device_node *dn, struct property **prop,
}
if (!more) {
+ pr_debug("updating node %pOF property %s\n", dn, name);
of_update_property(dn, new_prop);
*prop = NULL;
}
@@ -129,10 +184,9 @@ static int update_dt_property(struct device_node *dn, struct property **prop,
return 0;
}
-static int update_dt_node(__be32 phandle, s32 scope)
+static int update_dt_node(struct device_node *dn, s32 scope)
{
struct update_props_workarea *upwa;
- struct device_node *dn;
struct property *prop = NULL;
int i, rc, rtas_rc;
char *prop_data;
@@ -149,14 +203,8 @@ static int update_dt_node(__be32 phandle, s32 scope)
if (!rtas_buf)
return -ENOMEM;
- dn = of_find_node_by_phandle(be32_to_cpu(phandle));
- if (!dn) {
- kfree(rtas_buf);
- return -ENOENT;
- }
-
upwa = (struct update_props_workarea *)&rtas_buf[0];
- upwa->phandle = phandle;
+ upwa->phandle = cpu_to_be32(dn->phandle);
do {
rtas_rc = mobility_rtas_call(update_properties_token, rtas_buf,
@@ -168,7 +216,7 @@ static int update_dt_node(__be32 phandle, s32 scope)
nprops = be32_to_cpu(upwa->nprops);
/* On the first call to ibm,update-properties for a node the
- * the first property value descriptor contains an empty
+ * first property value descriptor contains an empty
* property name, the property value length encoded as u32,
* and the property value is the node path being updated.
*/
@@ -202,11 +250,12 @@ static int update_dt_node(__be32 phandle, s32 scope)
rc = update_dt_property(dn, &prop, prop_name,
vd, prop_data);
if (rc) {
- printk(KERN_ERR "Could not update %s"
- " property\n", prop_name);
+ pr_err("updating %s property failed: %d\n",
+ prop_name, rc);
}
prop_data += vd;
+ break;
}
cond_resched();
@@ -215,59 +264,42 @@ static int update_dt_node(__be32 phandle, s32 scope)
cond_resched();
} while (rtas_rc == 1);
- of_node_put(dn);
kfree(rtas_buf);
return 0;
}
-static int add_dt_node(__be32 parent_phandle, __be32 drc_index)
+static int add_dt_node(struct device_node *parent_dn, __be32 drc_index)
{
struct device_node *dn;
- struct device_node *parent_dn;
int rc;
- parent_dn = of_find_node_by_phandle(be32_to_cpu(parent_phandle));
- if (!parent_dn)
- return -ENOENT;
-
dn = dlpar_configure_connector(drc_index, parent_dn);
- if (!dn) {
- of_node_put(parent_dn);
+ if (!dn)
return -ENOENT;
+
+ /*
+ * Since delete_dt_node() ignores this node type, this is the
+ * necessary counterpart. We also know that a platform-facilities
+ * node returned from dlpar_configure_connector() has children
+ * attached, and dlpar_attach_node() only adds the parent, leaking
+ * the children. So ignore these on the add side for now.
+ */
+ if (of_node_is_type(dn, "ibm,platform-facilities")) {
+ pr_notice("ignoring add operation for %pOF\n", dn);
+ dlpar_free_cc_nodes(dn);
+ return 0;
}
rc = dlpar_attach_node(dn, parent_dn);
if (rc)
dlpar_free_cc_nodes(dn);
- of_node_put(parent_dn);
- return rc;
-}
+ pr_debug("added node %pOFfp\n", dn);
-static void prrn_update_node(__be32 phandle)
-{
- struct pseries_hp_errorlog hp_elog;
- struct device_node *dn;
-
- /*
- * If a node is found from a the given phandle, the phandle does not
- * represent the drc index of an LMB and we can ignore.
- */
- dn = of_find_node_by_phandle(be32_to_cpu(phandle));
- if (dn) {
- of_node_put(dn);
- return;
- }
-
- hp_elog.resource = PSERIES_HP_ELOG_RESOURCE_MEM;
- hp_elog.action = PSERIES_HP_ELOG_ACTION_READD;
- hp_elog.id_type = PSERIES_HP_ELOG_ID_DRC_INDEX;
- hp_elog._drc_u.drc_index = phandle;
-
- handle_dlpar_errorlog(&hp_elog);
+ return rc;
}
-int pseries_devicetree_update(s32 scope)
+static int pseries_devicetree_update(s32 scope)
{
char *rtas_buf;
__be32 *data;
@@ -276,7 +308,7 @@ int pseries_devicetree_update(s32 scope)
update_nodes_token = rtas_token("ibm,update-nodes");
if (update_nodes_token == RTAS_UNKNOWN_SERVICE)
- return -EINVAL;
+ return 0;
rtas_buf = kzalloc(RTAS_DATA_BUF_SIZE, GFP_KERNEL);
if (!rtas_buf)
@@ -296,26 +328,31 @@ int pseries_devicetree_update(s32 scope)
data++;
for (i = 0; i < node_count; i++) {
+ struct device_node *np;
__be32 phandle = *data++;
__be32 drc_index;
+ np = of_find_node_by_phandle(be32_to_cpu(phandle));
+ if (!np) {
+ pr_warn("Failed lookup: phandle 0x%x for action 0x%x\n",
+ be32_to_cpu(phandle), action);
+ continue;
+ }
+
switch (action) {
case DELETE_DT_NODE:
- delete_dt_node(phandle);
+ delete_dt_node(np);
break;
case UPDATE_DT_NODE:
- update_dt_node(phandle, scope);
-
- if (scope == PRRN_SCOPE)
- prrn_update_node(phandle);
-
+ update_dt_node(np, scope);
break;
case ADD_DT_NODE:
drc_index = *data++;
- add_dt_node(phandle, drc_index);
+ add_dt_node(np, drc_index);
break;
}
+ of_node_put(np);
cond_resched();
}
}
@@ -330,21 +367,8 @@ int pseries_devicetree_update(s32 scope)
void post_mobility_fixup(void)
{
int rc;
- int activate_fw_token;
- activate_fw_token = rtas_token("ibm,activate-firmware");
- if (activate_fw_token == RTAS_UNKNOWN_SERVICE) {
- printk(KERN_ERR "Could not make post-mobility "
- "activate-fw call.\n");
- return;
- }
-
- do {
- rc = rtas_call(activate_fw_token, 0, 1, NULL);
- } while (rtas_busy_delay(rc));
-
- if (rc)
- printk(KERN_ERR "Post-mobility activate-fw failed: %d\n", rc);
+ rtas_activate_firmware();
/*
* We don't want CPUs to go online/offline while the device
@@ -361,19 +385,405 @@ void post_mobility_fixup(void)
rc = pseries_devicetree_update(MIGRATION_SCOPE);
if (rc)
- printk(KERN_ERR "Post-mobility device tree update "
- "failed: %d\n", rc);
+ pr_err("device tree update failed: %d\n", rc);
cacheinfo_rebuild();
cpus_read_unlock();
- /* Possibly switch to a new RFI flush type */
- pseries_setup_rfi_flush();
+ /* Possibly switch to a new L1 flush type */
+ pseries_setup_security_mitigations();
+
+ /* Reinitialise system information for hv-24x7 */
+ read_24x7_sys_info();
return;
}
+static int poll_vasi_state(u64 handle, unsigned long *res)
+{
+ unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+ long hvrc;
+ int ret;
+
+ hvrc = plpar_hcall(H_VASI_STATE, retbuf, handle);
+ switch (hvrc) {
+ case H_SUCCESS:
+ ret = 0;
+ *res = retbuf[0];
+ break;
+ case H_PARAMETER:
+ ret = -EINVAL;
+ break;
+ case H_FUNCTION:
+ ret = -EOPNOTSUPP;
+ break;
+ case H_HARDWARE:
+ default:
+ pr_err("unexpected H_VASI_STATE result %ld\n", hvrc);
+ ret = -EIO;
+ break;
+ }
+ return ret;
+}
+
+static int wait_for_vasi_session_suspending(u64 handle)
+{
+ unsigned long state;
+ int ret;
+
+ /*
+ * Wait for transition from H_VASI_ENABLED to
+ * H_VASI_SUSPENDING. Treat anything else as an error.
+ */
+ while (true) {
+ ret = poll_vasi_state(handle, &state);
+
+ if (ret != 0 || state == H_VASI_SUSPENDING) {
+ break;
+ } else if (state == H_VASI_ENABLED) {
+ ssleep(1);
+ } else {
+ pr_err("unexpected H_VASI_STATE result %lu\n", state);
+ ret = -EIO;
+ break;
+ }
+ }
+
+ /*
+ * Proceed even if H_VASI_STATE is unavailable. If H_JOIN or
+ * ibm,suspend-me are also unimplemented, we'll recover then.
+ */
+ if (ret == -EOPNOTSUPP)
+ ret = 0;
+
+ return ret;
+}
+
+static void wait_for_vasi_session_completed(u64 handle)
+{
+ unsigned long state = 0;
+ int ret;
+
+ pr_info("waiting for memory transfer to complete...\n");
+
+ /*
+ * Wait for transition from H_VASI_RESUMED to H_VASI_COMPLETED.
+ */
+ while (true) {
+ ret = poll_vasi_state(handle, &state);
+
+ /*
+	 * If the memory transfer is already complete and the migration has
+	 * been cleaned up by the hypervisor, H_PARAMETER is returned, which
+	 * poll_vasi_state() translates into -EINVAL.
+ */
+ if (ret == -EINVAL || (!ret && state == H_VASI_COMPLETED)) {
+ pr_info("memory transfer completed.\n");
+ break;
+ }
+
+ if (ret) {
+ pr_err("H_VASI_STATE return error (%d)\n", ret);
+ break;
+ }
+
+ if (state != H_VASI_RESUMED) {
+ pr_err("unexpected H_VASI_STATE result %lu\n", state);
+ break;
+ }
+
+ msleep(500);
+ }
+}
+
+static void prod_single(unsigned int target_cpu)
+{
+ long hvrc;
+ int hwid;
+
+ hwid = get_hard_smp_processor_id(target_cpu);
+ hvrc = plpar_hcall_norets(H_PROD, hwid);
+ if (hvrc == H_SUCCESS)
+ return;
+ pr_err_ratelimited("H_PROD of CPU %u (hwid %d) error: %ld\n",
+ target_cpu, hwid, hvrc);
+}
+
+static void prod_others(void)
+{
+ unsigned int cpu;
+
+ for_each_online_cpu(cpu) {
+ if (cpu != smp_processor_id())
+ prod_single(cpu);
+ }
+}
+
+static u16 clamp_slb_size(void)
+{
+#ifdef CONFIG_PPC_64S_HASH_MMU
+ u16 prev = mmu_slb_size;
+
+ slb_set_size(SLB_MIN_SIZE);
+
+ return prev;
+#else
+ return 0;
+#endif
+}
+
+static int do_suspend(void)
+{
+ u16 saved_slb_size;
+ int status;
+ int ret;
+
+ pr_info("calling ibm,suspend-me on CPU %i\n", smp_processor_id());
+
+ /*
+ * The destination processor model may have fewer SLB entries
+ * than the source. We reduce mmu_slb_size to a safe minimum
+ * before suspending in order to minimize the possibility of
+ * programming non-existent entries on the destination. If
+ * suspend fails, we restore it before returning. On success
+ * the OF reconfig path will update it from the new device
+ * tree after resuming on the destination.
+ */
+ saved_slb_size = clamp_slb_size();
+
+ ret = rtas_ibm_suspend_me(&status);
+ if (ret != 0) {
+ pr_err("ibm,suspend-me error: %d\n", status);
+ slb_set_size(saved_slb_size);
+ }
+
+ return ret;
+}
+
+/**
+ * struct pseries_suspend_info - State shared between CPUs for join/suspend.
+ * @counter: Threads are to increment this upon resuming from suspend
+ * or if an error is received from H_JOIN. The thread which performs
+ * the first increment (i.e. sets it to 1) is responsible for
+ * waking the other threads.
+ * @done: False if join/suspend is in progress. True if the operation is
+ * complete (successful or not).
+ */
+struct pseries_suspend_info {
+ atomic_t counter;
+ bool done;
+};
+
+static int do_join(void *arg)
+{
+ struct pseries_suspend_info *info = arg;
+ atomic_t *counter = &info->counter;
+ long hvrc;
+ int ret;
+
+retry:
+ /* Must ensure MSR.EE off for H_JOIN. */
+ hard_irq_disable();
+ hvrc = plpar_hcall_norets(H_JOIN);
+
+ switch (hvrc) {
+ case H_CONTINUE:
+ /*
+ * All other CPUs are offline or in H_JOIN. This CPU
+ * attempts the suspend.
+ */
+ ret = do_suspend();
+ break;
+ case H_SUCCESS:
+ /*
+ * The suspend is complete and this cpu has received a
+ * prod, or we've received a stray prod from unrelated
+ * code (e.g. paravirt spinlocks) and we need to join
+ * again.
+ *
+ * This barrier orders the return from H_JOIN above vs
+ * the load of info->done. It pairs with the barrier
+ * in the wakeup/prod path below.
+ */
+ smp_mb();
+ if (READ_ONCE(info->done) == false) {
+ pr_info_ratelimited("premature return from H_JOIN on CPU %i, retrying",
+ smp_processor_id());
+ goto retry;
+ }
+ ret = 0;
+ break;
+ case H_BAD_MODE:
+ case H_HARDWARE:
+ default:
+ ret = -EIO;
+ pr_err_ratelimited("H_JOIN error %ld on CPU %i\n",
+ hvrc, smp_processor_id());
+ break;
+ }
+
+ if (atomic_inc_return(counter) == 1) {
+ pr_info("CPU %u waking all threads\n", smp_processor_id());
+ WRITE_ONCE(info->done, true);
+ /*
+ * This barrier orders the store to info->done vs subsequent
+ * H_PRODs to wake the other CPUs. It pairs with the barrier
+ * in the H_SUCCESS case above.
+ */
+ smp_mb();
+ prod_others();
+ }
+ /*
+ * Execution may have been suspended for several seconds, so
+ * reset the watchdog.
+ */
+ touch_nmi_watchdog();
+ return ret;
+}
+
+/*
+ * Abort reason code byte 0. We use only the 'Migrating partition' value.
+ */
+enum vasi_aborting_entity {
+ ORCHESTRATOR = 1,
+ VSP_SOURCE = 2,
+ PARTITION_FIRMWARE = 3,
+ PLATFORM_FIRMWARE = 4,
+ VSP_TARGET = 5,
+ MIGRATING_PARTITION = 6,
+};
+
+static void pseries_cancel_migration(u64 handle, int err)
+{
+ u32 reason_code;
+ u32 detail;
+ u8 entity;
+ long hvrc;
+
+ entity = MIGRATING_PARTITION;
+ detail = abs(err) & 0xffffff;
+ reason_code = (entity << 24) | detail;
+
+ hvrc = plpar_hcall_norets(H_VASI_SIGNAL, handle,
+ H_VASI_SIGNAL_CANCEL, reason_code);
+ if (hvrc)
+ pr_err("H_VASI_SIGNAL error: %ld\n", hvrc);
+}
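
As a worked example of the reason-code encoding above (hypothetical values, not part of the patch): byte 0 carries the aborting entity and the low three bytes carry the absolute errno, so cancelling with -EBUSY from the migrating partition would be signalled as 0x06000010:

	int err = -EBUSY;                                                 /* 16 */
	u32 reason_code = (MIGRATING_PARTITION << 24) | (abs(err) & 0xffffff);
	/* reason_code == 0x06000010 */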
+
+static int pseries_suspend(u64 handle)
+{
+ const unsigned int max_attempts = 5;
+ unsigned int retry_interval_ms = 1;
+ unsigned int attempt = 1;
+ int ret;
+
+ while (true) {
+ struct pseries_suspend_info info;
+ unsigned long vasi_state;
+ int vasi_err;
+
+ info = (struct pseries_suspend_info) {
+ .counter = ATOMIC_INIT(0),
+ .done = false,
+ };
+
+ ret = stop_machine(do_join, &info, cpu_online_mask);
+ if (ret == 0)
+ break;
+ /*
+ * Encountered an error. If the VASI stream is still
+ * in Suspending state, it's likely a transient
+ * condition related to some device in the partition
+ * and we can retry in the hope that the cause has
+ * cleared after some delay.
+ *
+ * A better design would allow drivers etc to prepare
+ * for the suspend and avoid conditions which prevent
+ * the suspend from succeeding. For now, we have this
+ * mitigation.
+ */
+ pr_notice("Partition suspend attempt %u of %u error: %d\n",
+ attempt, max_attempts, ret);
+
+ if (attempt == max_attempts)
+ break;
+
+ vasi_err = poll_vasi_state(handle, &vasi_state);
+ if (vasi_err == 0) {
+ if (vasi_state != H_VASI_SUSPENDING) {
+ pr_notice("VASI state %lu after failed suspend\n",
+ vasi_state);
+ break;
+ }
+ } else if (vasi_err != -EOPNOTSUPP) {
+ pr_err("VASI state poll error: %d", vasi_err);
+ break;
+ }
+
+ pr_notice("Will retry partition suspend after %u ms\n",
+ retry_interval_ms);
+
+ msleep(retry_interval_ms);
+ retry_interval_ms *= 10;
+ attempt++;
+ }
+
+ return ret;
+}
+
+static int pseries_migrate_partition(u64 handle)
+{
+ int ret;
+ unsigned int factor = 0;
+
+#ifdef CONFIG_PPC_WATCHDOG
+ factor = nmi_wd_lpm_factor;
+#endif
+ /*
+	 * When the migration is initiated, the hypervisor changes VAS
+	 * mappings in preparation before the OS gets the notification
+	 * and closes all VAS windows. NX generates continuous faults
+	 * during this time, and user space cannot differentiate these
+	 * faults from the migration event. So reduce this window by
+	 * closing VAS windows at the beginning of this function.
+ */
+ vas_migration_handler(VAS_SUSPEND);
+
+ ret = wait_for_vasi_session_suspending(handle);
+ if (ret)
+ goto out;
+
+ if (factor)
+ watchdog_nmi_set_timeout_pct(factor);
+
+ ret = pseries_suspend(handle);
+ if (ret == 0) {
+ post_mobility_fixup();
+ /*
+ * Wait until the memory transfer is complete, so that the user
+ * space process returns from the syscall after the transfer is
+ * complete. This allows the user hooks to be executed at the
+ * right time.
+ */
+ wait_for_vasi_session_completed(handle);
+ } else
+ pseries_cancel_migration(handle, ret);
+
+ if (factor)
+ watchdog_nmi_set_timeout_pct(0);
+
+out:
+ vas_migration_handler(VAS_RESUME);
+
+ return ret;
+}
+
+int rtas_syscall_dispatch_ibm_suspend_me(u64 handle)
+{
+ return pseries_migrate_partition(handle);
+}
+
static ssize_t migration_store(struct class *class,
struct class_attribute *attr, const char *buf,
size_t count)
@@ -385,21 +795,10 @@ static ssize_t migration_store(struct class *class,
if (rc)
return rc;
- stop_topology_update();
-
- do {
- rc = rtas_ibm_suspend_me(streamid);
- if (rc == -EAGAIN)
- ssleep(1);
- } while (rc == -EAGAIN);
-
+ rc = pseries_migrate_partition(streamid);
if (rc)
return rc;
- post_mobility_fixup();
-
- start_topology_update();
-
return count;
}
@@ -424,11 +823,11 @@ static int __init mobility_sysfs_init(void)
rc = sysfs_create_file(mobility_kobj, &class_attr_migration.attr);
if (rc)
- pr_err("mobility: unable to create migration sysfs file (%d)\n", rc);
+ pr_err("unable to create migration sysfs file (%d)\n", rc);
rc = sysfs_create_file(mobility_kobj, &class_attr_api_version.attr.attr);
if (rc)
- pr_err("mobility: unable to create api_version sysfs file (%d)\n", rc);
+ pr_err("unable to create api_version sysfs file (%d)\n", rc);
return 0;
}
diff --git a/arch/powerpc/platforms/pseries/msi.c b/arch/powerpc/platforms/pseries/msi.c
index 133f6adcb39c..a3a71d37cb9a 100644
--- a/arch/powerpc/platforms/pseries/msi.c
+++ b/arch/powerpc/platforms/pseries/msi.c
@@ -4,14 +4,17 @@
* Copyright 2006-2007 Michael Ellerman, IBM Corp.
*/
+#include <linux/crash_dump.h>
#include <linux/device.h>
#include <linux/irq.h>
+#include <linux/irqdomain.h>
#include <linux/msi.h>
#include <asm/rtas.h>
#include <asm/hw_irq.h>
#include <asm/ppc-pci.h>
#include <asm/machdep.h>
+#include <asm/xive.h>
#include "pseries.h"
@@ -109,21 +112,6 @@ static int rtas_query_irq_number(struct pci_dn *pdn, int offset)
return rtas_ret[0];
}
-static void rtas_teardown_msi_irqs(struct pci_dev *pdev)
-{
- struct msi_desc *entry;
-
- for_each_pci_msi_entry(entry, pdev) {
- if (!entry->irq)
- continue;
-
- irq_set_msi_desc(entry->irq, NULL);
- irq_dispose_mapping(entry->irq);
- }
-
- rtas_disable_msi(pdev);
-}
-
static int check_req(struct pci_dev *pdev, int nvec, char *prop_name)
{
struct device_node *dn;
@@ -163,12 +151,12 @@ static int check_req_msix(struct pci_dev *pdev, int nvec)
/* Quota calculation */
-static struct device_node *find_pe_total_msi(struct pci_dev *dev, int *total)
+static struct device_node *__find_pe_total_msi(struct device_node *node, int *total)
{
struct device_node *dn;
const __be32 *p;
- dn = of_node_get(pci_device_to_OF_node(dev));
+ dn = of_node_get(node);
while (dn) {
p = of_get_property(dn, "ibm,pe-total-#msi", NULL);
if (p) {
@@ -184,6 +172,11 @@ static struct device_node *find_pe_total_msi(struct pci_dev *dev, int *total)
return NULL;
}
+static struct device_node *find_pe_total_msi(struct pci_dev *dev, int *total)
+{
+ return __find_pe_total_msi(pci_device_to_OF_node(dev), total);
+}
+
static struct device_node *find_pe_dn(struct pci_dev *dev, int *total)
{
struct device_node *dn;
@@ -329,27 +322,6 @@ out:
return request;
}
-static int check_msix_entries(struct pci_dev *pdev)
-{
- struct msi_desc *entry;
- int expected;
-
- /* There's no way for us to express to firmware that we want
- * a discontiguous, or non-zero based, range of MSI-X entries.
- * So we must reject such requests. */
-
- expected = 0;
- for_each_pci_msi_entry(entry, pdev) {
- if (entry->msi_attrib.entry_nr != expected) {
- pr_debug("rtas_msi: bad MSI-X entries.\n");
- return -EINVAL;
- }
- expected++;
- }
-
- return 0;
-}
-
static void rtas_hack_32bit_msi_gen2(struct pci_dev *pdev)
{
u32 addr_hi, addr_lo;
@@ -367,12 +339,11 @@ static void rtas_hack_32bit_msi_gen2(struct pci_dev *pdev)
pci_write_config_dword(pdev, pdev->msi_cap + PCI_MSI_ADDRESS_HI, 0);
}
-static int rtas_setup_msi_irqs(struct pci_dev *pdev, int nvec_in, int type)
+static int rtas_prepare_msi_irqs(struct pci_dev *pdev, int nvec_in, int type,
+ msi_alloc_info_t *arg)
{
struct pci_dn *pdn;
- int hwirq, virq, i, quota, rc;
- struct msi_desc *entry;
- struct msi_msg msg;
+ int quota, rc;
int nvec = nvec_in;
int use_32bit_msi_hack = 0;
@@ -389,9 +360,6 @@ static int rtas_setup_msi_irqs(struct pci_dev *pdev, int nvec_in, int type)
if (quota && quota < nvec)
return quota;
- if (type == PCI_CAP_ID_MSIX && check_msix_entries(pdev))
- return -EINVAL;
-
/*
* Firmware currently refuse any non power of two allocation
* so we round up if the quota will allow it.
@@ -450,30 +418,248 @@ again:
return rc;
}
- i = 0;
- for_each_pci_msi_entry(entry, pdev) {
- hwirq = rtas_query_irq_number(pdn, i++);
- if (hwirq < 0) {
- pr_debug("rtas_msi: error (%d) getting hwirq\n", rc);
- return hwirq;
- }
+ return 0;
+}
- virq = irq_create_mapping(NULL, hwirq);
+static int pseries_msi_ops_prepare(struct irq_domain *domain, struct device *dev,
+ int nvec, msi_alloc_info_t *arg)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ int type = pdev->msix_enabled ? PCI_CAP_ID_MSIX : PCI_CAP_ID_MSI;
- if (!virq) {
- pr_debug("rtas_msi: Failed mapping hwirq %d\n", hwirq);
- return -ENOSPC;
- }
+ return rtas_prepare_msi_irqs(pdev, nvec, type, arg);
+}
- dev_dbg(&pdev->dev, "rtas_msi: allocated virq %d\n", virq);
- irq_set_msi_desc(virq, entry);
+/*
+ * ->msi_free() is called before irq_domain_free_irqs_top() when the
+ * handler data is still available. Use that to clear the XIVE
+ * controller data.
+ */
+static void pseries_msi_ops_msi_free(struct irq_domain *domain,
+ struct msi_domain_info *info,
+ unsigned int irq)
+{
+ if (xive_enabled())
+ xive_irq_free_data(irq);
+}
+
+/*
+ * RTAS can not disable one MSI at a time. It's all or nothing. Do it
+ * at the end after all IRQs have been freed.
+ */
+static void pseries_msi_domain_free_irqs(struct irq_domain *domain,
+ struct device *dev)
+{
+ if (WARN_ON_ONCE(!dev_is_pci(dev)))
+ return;
+
+ __msi_domain_free_irqs(domain, dev);
+
+ rtas_disable_msi(to_pci_dev(dev));
+}
+
+static struct msi_domain_ops pseries_pci_msi_domain_ops = {
+ .msi_prepare = pseries_msi_ops_prepare,
+ .msi_free = pseries_msi_ops_msi_free,
+ .domain_free_irqs = pseries_msi_domain_free_irqs,
+};
- /* Read config space back so we can restore after reset */
- __pci_read_msi_msg(entry, &msg);
- entry->msg = msg;
+static void pseries_msi_shutdown(struct irq_data *d)
+{
+ d = d->parent_data;
+ if (d->chip->irq_shutdown)
+ d->chip->irq_shutdown(d);
+}
+
+static void pseries_msi_mask(struct irq_data *d)
+{
+ pci_msi_mask_irq(d);
+ irq_chip_mask_parent(d);
+}
+
+static void pseries_msi_unmask(struct irq_data *d)
+{
+ pci_msi_unmask_irq(d);
+ irq_chip_unmask_parent(d);
+}
+
+static void pseries_msi_write_msg(struct irq_data *data, struct msi_msg *msg)
+{
+ struct msi_desc *entry = irq_data_get_msi_desc(data);
+
+ /*
+	 * Do not update the MSI-X vector table. It's not strictly necessary
+	 * because the table is initialized by the underlying hypervisor, PowerVM
+	 * or QEMU/KVM. However, if the MSI-X vector entry is cleared, any further
+	 * activation will fail. This can happen in some drivers (e.g. IPR) which
+	 * deactivate an IRQ used for testing MSI support.
+ */
+ entry->msg = *msg;
+}
+
+static struct irq_chip pseries_pci_msi_irq_chip = {
+ .name = "pSeries-PCI-MSI",
+ .irq_shutdown = pseries_msi_shutdown,
+ .irq_mask = pseries_msi_mask,
+ .irq_unmask = pseries_msi_unmask,
+ .irq_eoi = irq_chip_eoi_parent,
+ .irq_write_msi_msg = pseries_msi_write_msg,
+};
+
+
+/*
+ * Set MSI_FLAG_MSIX_CONTIGUOUS as there is no way to express to
+ * firmware to request a discontiguous or non-zero based range of
+ * MSI-X entries. Core code will reject such setup attempts.
+ */
+static struct msi_domain_info pseries_msi_domain_info = {
+ .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX |
+ MSI_FLAG_MSIX_CONTIGUOUS),
+ .ops = &pseries_pci_msi_domain_ops,
+ .chip = &pseries_pci_msi_irq_chip,
+};
+
+static void pseries_msi_compose_msg(struct irq_data *data, struct msi_msg *msg)
+{
+ __pci_read_msi_msg(irq_data_get_msi_desc(data), msg);
+}
+
+static struct irq_chip pseries_msi_irq_chip = {
+ .name = "pSeries-MSI",
+ .irq_shutdown = pseries_msi_shutdown,
+ .irq_mask = irq_chip_mask_parent,
+ .irq_unmask = irq_chip_unmask_parent,
+ .irq_eoi = irq_chip_eoi_parent,
+ .irq_set_affinity = irq_chip_set_affinity_parent,
+ .irq_compose_msi_msg = pseries_msi_compose_msg,
+};
+
+static int pseries_irq_parent_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ irq_hw_number_t hwirq)
+{
+ struct irq_fwspec parent_fwspec;
+ int ret;
+
+ parent_fwspec.fwnode = domain->parent->fwnode;
+ parent_fwspec.param_count = 2;
+ parent_fwspec.param[0] = hwirq;
+ parent_fwspec.param[1] = IRQ_TYPE_EDGE_RISING;
+
+ ret = irq_domain_alloc_irqs_parent(domain, virq, 1, &parent_fwspec);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int pseries_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *arg)
+{
+ struct pci_controller *phb = domain->host_data;
+ msi_alloc_info_t *info = arg;
+ struct msi_desc *desc = info->desc;
+ struct pci_dev *pdev = msi_desc_to_pci_dev(desc);
+ int hwirq;
+ int i, ret;
+
+ hwirq = rtas_query_irq_number(pci_get_pdn(pdev), desc->msi_index);
+ if (hwirq < 0) {
+ dev_err(&pdev->dev, "Failed to query HW IRQ: %d\n", hwirq);
+ return hwirq;
+ }
+
+ dev_dbg(&pdev->dev, "%s bridge %pOF %d/%x #%d\n", __func__,
+ phb->dn, virq, hwirq, nr_irqs);
+
+ for (i = 0; i < nr_irqs; i++) {
+ ret = pseries_irq_parent_domain_alloc(domain, virq + i, hwirq + i);
+ if (ret)
+ goto out;
+
+ irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
+ &pseries_msi_irq_chip, domain->host_data);
}
return 0;
+
+out:
+ /* TODO: handle RTAS cleanup in ->msi_finish() ? */
+ irq_domain_free_irqs_parent(domain, virq, i - 1);
+ return ret;
+}
+
+static void pseries_irq_domain_free(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs)
+{
+ struct irq_data *d = irq_domain_get_irq_data(domain, virq);
+ struct pci_controller *phb = irq_data_get_irq_chip_data(d);
+
+ pr_debug("%s bridge %pOF %d #%d\n", __func__, phb->dn, virq, nr_irqs);
+
+ /* XIVE domain data is cleared through ->msi_free() */
+}
+
+static const struct irq_domain_ops pseries_irq_domain_ops = {
+ .alloc = pseries_irq_domain_alloc,
+ .free = pseries_irq_domain_free,
+};
+
+static int __pseries_msi_allocate_domains(struct pci_controller *phb,
+ unsigned int count)
+{
+ struct irq_domain *parent = irq_get_default_host();
+
+ phb->fwnode = irq_domain_alloc_named_id_fwnode("pSeries-MSI",
+ phb->global_number);
+ if (!phb->fwnode)
+ return -ENOMEM;
+
+ phb->dev_domain = irq_domain_create_hierarchy(parent, 0, count,
+ phb->fwnode,
+ &pseries_irq_domain_ops, phb);
+ if (!phb->dev_domain) {
+ pr_err("PCI: failed to create IRQ domain bridge %pOF (domain %d)\n",
+ phb->dn, phb->global_number);
+ irq_domain_free_fwnode(phb->fwnode);
+ return -ENOMEM;
+ }
+
+ phb->msi_domain = pci_msi_create_irq_domain(of_node_to_fwnode(phb->dn),
+ &pseries_msi_domain_info,
+ phb->dev_domain);
+ if (!phb->msi_domain) {
+ pr_err("PCI: failed to create MSI IRQ domain bridge %pOF (domain %d)\n",
+ phb->dn, phb->global_number);
+ irq_domain_free_fwnode(phb->fwnode);
+ irq_domain_remove(phb->dev_domain);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
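
The resulting layering can be pictured as below (a sketch of the assumed hierarchy, not normative): the XIVE/XICS controller is the parent of the per-PHB device domain, which in turn backs the generic PCI/MSI domain.

	/*
	 *   pci_msi_create_irq_domain()    ->  phb->msi_domain  (PCI/MSI layer)
	 *                                          |
	 *   irq_domain_create_hierarchy()  ->  phb->dev_domain  ("pSeries-MSI")
	 *                                          |
	 *   irq_get_default_host()         ->  XIVE/XICS parent domain
	 */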
+
+int pseries_msi_allocate_domains(struct pci_controller *phb)
+{
+ int count;
+
+ if (!__find_pe_total_msi(phb->dn, &count)) {
+ pr_err("PCI: failed to find MSIs for bridge %pOF (domain %d)\n",
+ phb->dn, phb->global_number);
+ return -ENOSPC;
+ }
+
+ return __pseries_msi_allocate_domains(phb, count);
+}
+
+void pseries_msi_free_domains(struct pci_controller *phb)
+{
+ if (phb->msi_domain)
+ irq_domain_remove(phb->msi_domain);
+ if (phb->dev_domain)
+ irq_domain_remove(phb->dev_domain);
+ if (phb->fwnode)
+ irq_domain_free_fwnode(phb->fwnode);
}
static void rtas_msi_pci_irq_fixup(struct pci_dev *pdev)
@@ -496,8 +682,6 @@ static void rtas_msi_pci_irq_fixup(struct pci_dev *pdev)
static int rtas_msi_init(void)
{
- struct pci_controller *phb;
-
query_token = rtas_token("ibm,query-interrupt-source-number");
change_token = rtas_token("ibm,change-msi");
@@ -509,16 +693,6 @@ static int rtas_msi_init(void)
pr_debug("rtas_msi: Registering RTAS MSI callbacks.\n");
- WARN_ON(pseries_pci_controller_ops.setup_msi_irqs);
- pseries_pci_controller_ops.setup_msi_irqs = rtas_setup_msi_irqs;
- pseries_pci_controller_ops.teardown_msi_irqs = rtas_teardown_msi_irqs;
-
- list_for_each_entry(phb, &hose_list, list_node) {
- WARN_ON(phb->controller_ops.setup_msi_irqs);
- phb->controller_ops.setup_msi_irqs = rtas_setup_msi_irqs;
- phb->controller_ops.teardown_msi_irqs = rtas_teardown_msi_irqs;
- }
-
WARN_ON(ppc_md.pci_irq_fixup);
ppc_md.pci_irq_fixup = rtas_msi_pci_irq_fixup;
diff --git a/arch/powerpc/platforms/pseries/nvram.c b/arch/powerpc/platforms/pseries/nvram.c
index 69db2eca367f..cbf1720eb4aa 100644
--- a/arch/powerpc/platforms/pseries/nvram.c
+++ b/arch/powerpc/platforms/pseries/nvram.c
@@ -13,9 +13,9 @@
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/uaccess.h>
+#include <linux/of.h>
#include <asm/nvram.h>
#include <asm/rtas.h>
-#include <asm/prom.h>
#include <asm/machdep.h>
/* Max bytes to read/write in one go */
diff --git a/arch/powerpc/platforms/pseries/of_helpers.c b/arch/powerpc/platforms/pseries/of_helpers.c
index 66dfd8256712..23241c71ef37 100644
--- a/arch/powerpc/platforms/pseries/of_helpers.c
+++ b/arch/powerpc/platforms/pseries/of_helpers.c
@@ -88,7 +88,7 @@ int of_read_drc_info_cell(struct property **prop, const __be32 **curval,
return -EINVAL;
/* Should now know end of current entry */
- (*curval) = (void *)p2;
+ (*curval) = (void *)(++p2);
data->last_drc_index = data->drc_index_start +
((data->num_sequential_elems - 1) * data->sequential_inc);
diff --git a/arch/powerpc/platforms/pseries/offline_states.h b/arch/powerpc/platforms/pseries/offline_states.h
deleted file mode 100644
index 51414aee2862..000000000000
--- a/arch/powerpc/platforms/pseries/offline_states.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _OFFLINE_STATES_H_
-#define _OFFLINE_STATES_H_
-
-/* Cpu offline states go here */
-enum cpu_state_vals {
- CPU_STATE_OFFLINE,
- CPU_STATE_INACTIVE,
- CPU_STATE_ONLINE,
- CPU_MAX_OFFLINE_STATES
-};
-
-#ifdef CONFIG_HOTPLUG_CPU
-extern enum cpu_state_vals get_cpu_current_state(int cpu);
-extern void set_cpu_current_state(int cpu, enum cpu_state_vals state);
-extern void set_preferred_offline_state(int cpu, enum cpu_state_vals state);
-extern void set_default_offline_state(int cpu);
-#else
-static inline enum cpu_state_vals get_cpu_current_state(int cpu)
-{
- return CPU_STATE_ONLINE;
-}
-
-static inline void set_cpu_current_state(int cpu, enum cpu_state_vals state)
-{
-}
-
-static inline void set_preferred_offline_state(int cpu, enum cpu_state_vals state)
-{
-}
-
-static inline void set_default_offline_state(int cpu)
-{
-}
-#endif
-
-extern enum cpu_state_vals get_preferred_offline_state(int cpu);
-#endif
diff --git a/arch/powerpc/platforms/pseries/papr_platform_attributes.c b/arch/powerpc/platforms/pseries/papr_platform_attributes.c
new file mode 100644
index 000000000000..526c621b098b
--- /dev/null
+++ b/arch/powerpc/platforms/pseries/papr_platform_attributes.c
@@ -0,0 +1,362 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Platform energy and frequency attributes driver
+ *
+ * This driver creates a sysfs directory at /sys/firmware/papr/ which contains
+ * a directory structure of files in keyword-value pairs that specify the
+ * energy and frequency configuration of the system.
+ *
+ * The format of exposing the sysfs information is as follows:
+ * /sys/firmware/papr/energy_scale_info/
+ * |-- <id>/
+ * |-- desc
+ * |-- value
+ * |-- value_desc (if exists)
+ * |-- <id>/
+ * |-- desc
+ * |-- value
+ * |-- value_desc (if exists)
+ *
+ * Copyright 2022 IBM Corp.
+ */
+
+#include <asm/hvcall.h>
+#include <asm/machdep.h>
+#include <asm/firmware.h>
+
+#include "pseries.h"
+
+/*
+ * Flag attributes to fetch either all or one attribute from the HCALL
+ * flag = BE(0) => fetch all attributes with firstAttributeId = 0
+ * flag = BE(1) => fetch a single attribute with firstAttributeId = id
+ */
+#define ESI_FLAGS_ALL 0
+#define ESI_FLAGS_SINGLE (1ull << 63)
+
+#define KOBJ_MAX_ATTRS 3
+
+#define ESI_HDR_SIZE sizeof(struct h_energy_scale_info_hdr)
+#define ESI_ATTR_SIZE sizeof(struct energy_scale_attribute)
+#define CURR_MAX_ESI_ATTRS 8
+
+struct energy_scale_attribute {
+ __be64 id;
+ __be64 val;
+ u8 desc[64];
+ u8 value_desc[64];
+} __packed;
+
+struct h_energy_scale_info_hdr {
+ __be64 num_attrs;
+ __be64 array_offset;
+ u8 data_header_version;
+} __packed;
+
+struct papr_attr {
+ u64 id;
+ struct kobj_attribute kobj_attr;
+};
+
+struct papr_group {
+ struct attribute_group pg;
+ struct papr_attr pgattrs[KOBJ_MAX_ATTRS];
+};
+
+static struct papr_group *papr_groups;
+/* /sys/firmware/papr */
+static struct kobject *papr_kobj;
+/* /sys/firmware/papr/energy_scale_info */
+static struct kobject *esi_kobj;
+
+/*
+ * Energy modes can change dynamically, hence a new hcall is made each time
+ * the information needs to be retrieved.
+ */
+static int papr_get_attr(u64 id, struct energy_scale_attribute *esi)
+{
+ int esi_buf_size = ESI_HDR_SIZE + (CURR_MAX_ESI_ATTRS * ESI_ATTR_SIZE);
+ int ret, max_esi_attrs = CURR_MAX_ESI_ATTRS;
+ struct energy_scale_attribute *curr_esi;
+ struct h_energy_scale_info_hdr *hdr;
+ char *buf;
+
+ buf = kmalloc(esi_buf_size, GFP_KERNEL);
+ if (buf == NULL)
+ return -ENOMEM;
+
+retry:
+ ret = plpar_hcall_norets(H_GET_ENERGY_SCALE_INFO, ESI_FLAGS_SINGLE,
+ id, virt_to_phys(buf),
+ esi_buf_size);
+
+ /*
+ * If the hcall fails with not enough memory for either the
+ * header or data, attempt to allocate more
+ */
+ if (ret == H_PARTIAL || ret == H_P4) {
+ char *temp_buf;
+
+ max_esi_attrs += 4;
+ esi_buf_size = ESI_HDR_SIZE + (CURR_MAX_ESI_ATTRS * max_esi_attrs);
+
+ temp_buf = krealloc(buf, esi_buf_size, GFP_KERNEL);
+ if (temp_buf)
+ buf = temp_buf;
+ else
+ return -ENOMEM;
+
+ goto retry;
+ }
+
+ if (ret != H_SUCCESS) {
+ pr_warn("hcall failed: H_GET_ENERGY_SCALE_INFO");
+ ret = -EIO;
+ goto out_buf;
+ }
+
+ hdr = (struct h_energy_scale_info_hdr *) buf;
+ curr_esi = (struct energy_scale_attribute *)
+ (buf + be64_to_cpu(hdr->array_offset));
+
+ if (esi_buf_size <
+ be64_to_cpu(hdr->array_offset) + (be64_to_cpu(hdr->num_attrs)
+ * sizeof(struct energy_scale_attribute))) {
+ ret = -EIO;
+ goto out_buf;
+ }
+
+ *esi = *curr_esi;
+
+out_buf:
+ kfree(buf);
+
+ return ret;
+}
+
+/*
+ * Extract and export the description of the energy scale attributes
+ */
+static ssize_t desc_show(struct kobject *kobj,
+ struct kobj_attribute *kobj_attr,
+ char *buf)
+{
+ struct papr_attr *pattr = container_of(kobj_attr, struct papr_attr,
+ kobj_attr);
+ struct energy_scale_attribute esi;
+ int ret;
+
+ ret = papr_get_attr(pattr->id, &esi);
+ if (ret)
+ return ret;
+
+ return sysfs_emit(buf, "%s\n", esi.desc);
+}
+
+/*
+ * Extract and export the numeric value of the energy scale attributes
+ */
+static ssize_t val_show(struct kobject *kobj,
+ struct kobj_attribute *kobj_attr,
+ char *buf)
+{
+ struct papr_attr *pattr = container_of(kobj_attr, struct papr_attr,
+ kobj_attr);
+ struct energy_scale_attribute esi;
+ int ret;
+
+ ret = papr_get_attr(pattr->id, &esi);
+ if (ret)
+ return ret;
+
+ return sysfs_emit(buf, "%llu\n", be64_to_cpu(esi.val));
+}
+
+/*
+ * Extract and export the value description in string format of the energy
+ * scale attributes
+ */
+static ssize_t val_desc_show(struct kobject *kobj,
+ struct kobj_attribute *kobj_attr,
+ char *buf)
+{
+ struct papr_attr *pattr = container_of(kobj_attr, struct papr_attr,
+ kobj_attr);
+ struct energy_scale_attribute esi;
+ int ret;
+
+ ret = papr_get_attr(pattr->id, &esi);
+ if (ret)
+ return ret;
+
+ return sysfs_emit(buf, "%s\n", esi.value_desc);
+}
+
+static struct papr_ops_info {
+ const char *attr_name;
+ ssize_t (*show)(struct kobject *kobj, struct kobj_attribute *kobj_attr,
+ char *buf);
+} ops_info[KOBJ_MAX_ATTRS] = {
+ { "desc", desc_show },
+ { "value", val_show },
+ { "value_desc", val_desc_show },
+};
+
+static void add_attr(u64 id, int index, struct papr_attr *attr)
+{
+ attr->id = id;
+ sysfs_attr_init(&attr->kobj_attr.attr);
+ attr->kobj_attr.attr.name = ops_info[index].attr_name;
+ attr->kobj_attr.attr.mode = 0444;
+ attr->kobj_attr.show = ops_info[index].show;
+}
+
+static int add_attr_group(u64 id, struct papr_group *pg, bool show_val_desc)
+{
+ int i;
+
+ for (i = 0; i < KOBJ_MAX_ATTRS; i++) {
+ if (!strcmp(ops_info[i].attr_name, "value_desc") &&
+ !show_val_desc) {
+ continue;
+ }
+ add_attr(id, i, &pg->pgattrs[i]);
+ pg->pg.attrs[i] = &pg->pgattrs[i].kobj_attr.attr;
+ }
+
+ return sysfs_create_group(esi_kobj, &pg->pg);
+}
+
+
+static int __init papr_init(void)
+{
+ int esi_buf_size = ESI_HDR_SIZE + (CURR_MAX_ESI_ATTRS * ESI_ATTR_SIZE);
+ int ret, idx, i, max_esi_attrs = CURR_MAX_ESI_ATTRS;
+ struct h_energy_scale_info_hdr *esi_hdr;
+ struct energy_scale_attribute *esi_attrs;
+ uint64_t num_attrs;
+ char *esi_buf;
+
+ if (!firmware_has_feature(FW_FEATURE_LPAR) ||
+ !firmware_has_feature(FW_FEATURE_ENERGY_SCALE_INFO)) {
+ return -ENXIO;
+ }
+
+ esi_buf = kmalloc(esi_buf_size, GFP_KERNEL);
+ if (esi_buf == NULL)
+ return -ENOMEM;
+ /*
+ * hcall(
+ * uint64 H_GET_ENERGY_SCALE_INFO, // Get energy scale info
+ * uint64 flags, // Per the flag request
+ * uint64 firstAttributeId, // The attribute id
+ * uint64 bufferAddress, // Guest physical address of the output buffer
+ * uint64 bufferSize); // The size in bytes of the output buffer
+ */
+retry:
+
+ ret = plpar_hcall_norets(H_GET_ENERGY_SCALE_INFO, ESI_FLAGS_ALL, 0,
+ virt_to_phys(esi_buf), esi_buf_size);
+
+ /*
+ * If the hcall fails with not enough memory for either the
+ * header or data, attempt to allocate more
+ */
+ if (ret == H_PARTIAL || ret == H_P4) {
+ char *temp_esi_buf;
+
+ max_esi_attrs += 4;
+ esi_buf_size = ESI_HDR_SIZE + (CURR_MAX_ESI_ATTRS * max_esi_attrs);
+
+ temp_esi_buf = krealloc(esi_buf, esi_buf_size, GFP_KERNEL);
+ if (temp_esi_buf)
+ esi_buf = temp_esi_buf;
+ else
+ return -ENOMEM;
+
+ goto retry;
+ }
+
+ if (ret != H_SUCCESS) {
+ pr_warn("hcall failed: H_GET_ENERGY_SCALE_INFO, ret: %d\n", ret);
+ goto out_free_esi_buf;
+ }
+
+ esi_hdr = (struct h_energy_scale_info_hdr *) esi_buf;
+ num_attrs = be64_to_cpu(esi_hdr->num_attrs);
+ esi_attrs = (struct energy_scale_attribute *)
+ (esi_buf + be64_to_cpu(esi_hdr->array_offset));
+
+ if (esi_buf_size <
+ be64_to_cpu(esi_hdr->array_offset) +
+ (num_attrs * sizeof(struct energy_scale_attribute))) {
+ goto out_free_esi_buf;
+ }
+
+ papr_groups = kcalloc(num_attrs, sizeof(*papr_groups), GFP_KERNEL);
+ if (!papr_groups)
+ goto out_free_esi_buf;
+
+ papr_kobj = kobject_create_and_add("papr", firmware_kobj);
+ if (!papr_kobj) {
+ pr_warn("kobject_create_and_add papr failed\n");
+ goto out_papr_groups;
+ }
+
+ esi_kobj = kobject_create_and_add("energy_scale_info", papr_kobj);
+ if (!esi_kobj) {
+ pr_warn("kobject_create_and_add energy_scale_info failed\n");
+ goto out_kobj;
+ }
+
+ /* Allocate the groups before registering */
+ for (idx = 0; idx < num_attrs; idx++) {
+ papr_groups[idx].pg.attrs = kcalloc(KOBJ_MAX_ATTRS + 1,
+ sizeof(*papr_groups[idx].pg.attrs),
+ GFP_KERNEL);
+ if (!papr_groups[idx].pg.attrs)
+ goto out_pgattrs;
+
+ papr_groups[idx].pg.name = kasprintf(GFP_KERNEL, "%lld",
+ be64_to_cpu(esi_attrs[idx].id));
+ if (papr_groups[idx].pg.name == NULL)
+ goto out_pgattrs;
+ }
+
+ for (idx = 0; idx < num_attrs; idx++) {
+ bool show_val_desc = true;
+
+ /* Do not add the value desc attr if it does not exist */
+ if (strnlen(esi_attrs[idx].value_desc,
+ sizeof(esi_attrs[idx].value_desc)) == 0)
+ show_val_desc = false;
+
+ if (add_attr_group(be64_to_cpu(esi_attrs[idx].id),
+ &papr_groups[idx],
+ show_val_desc)) {
+ pr_warn("Failed to create papr attribute group %s\n",
+ papr_groups[idx].pg.name);
+ idx = num_attrs;
+ goto out_pgattrs;
+ }
+ }
+
+ kfree(esi_buf);
+ return 0;
+out_pgattrs:
+ for (i = 0; i < idx ; i++) {
+ kfree(papr_groups[i].pg.attrs);
+ kfree(papr_groups[i].pg.name);
+ }
+ kobject_put(esi_kobj);
+out_kobj:
+ kobject_put(papr_kobj);
+out_papr_groups:
+ kfree(papr_groups);
+out_free_esi_buf:
+ kfree(esi_buf);
+
+ return -ENOMEM;
+}
+
+machine_device_initcall(pseries, papr_init);
diff --git a/arch/powerpc/platforms/pseries/papr_scm.c b/arch/powerpc/platforms/pseries/papr_scm.c
index 0b4467e378e5..2f8385523a13 100644
--- a/arch/powerpc/platforms/pseries/papr_scm.c
+++ b/arch/powerpc/platforms/pseries/papr_scm.c
@@ -12,16 +12,81 @@
#include <linux/libnvdimm.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
+#include <linux/seq_buf.h>
+#include <linux/nd.h>
#include <asm/plpar_wrappers.h>
+#include <asm/papr_pdsm.h>
+#include <asm/mce.h>
+#include <asm/unaligned.h>
+#include <linux/perf_event.h>
#define BIND_ANY_ADDR (~0ul)
#define PAPR_SCM_DIMM_CMD_MASK \
((1ul << ND_CMD_GET_CONFIG_SIZE) | \
(1ul << ND_CMD_GET_CONFIG_DATA) | \
- (1ul << ND_CMD_SET_CONFIG_DATA))
-
+ (1ul << ND_CMD_SET_CONFIG_DATA) | \
+ (1ul << ND_CMD_CALL))
+
+/* DIMM health bitmap indicators */
+/* SCM device is unable to persist memory contents */
+#define PAPR_PMEM_UNARMED (1ULL << (63 - 0))
+/* SCM device failed to persist memory contents */
+#define PAPR_PMEM_SHUTDOWN_DIRTY (1ULL << (63 - 1))
+/* SCM device contents are persisted from previous IPL */
+#define PAPR_PMEM_SHUTDOWN_CLEAN (1ULL << (63 - 2))
+/* SCM device contents are not persisted from previous IPL */
+#define PAPR_PMEM_EMPTY (1ULL << (63 - 3))
+/* SCM device memory life remaining is critically low */
+#define PAPR_PMEM_HEALTH_CRITICAL (1ULL << (63 - 4))
+/* SCM device will be garded off next IPL due to failure */
+#define PAPR_PMEM_HEALTH_FATAL (1ULL << (63 - 5))
+/* SCM contents cannot persist due to current platform health status */
+#define PAPR_PMEM_HEALTH_UNHEALTHY (1ULL << (63 - 6))
+/* SCM device is unable to persist memory contents in certain conditions */
+#define PAPR_PMEM_HEALTH_NON_CRITICAL (1ULL << (63 - 7))
+/* SCM device is encrypted */
+#define PAPR_PMEM_ENCRYPTED (1ULL << (63 - 8))
+/* SCM device has been scrubbed and locked */
+#define PAPR_PMEM_SCRUBBED_AND_LOCKED (1ULL << (63 - 9))
+
+/* Bits status indicators for health bitmap indicating unarmed dimm */
+#define PAPR_PMEM_UNARMED_MASK (PAPR_PMEM_UNARMED | \
+ PAPR_PMEM_HEALTH_UNHEALTHY)
+
+/* Bits status indicators for health bitmap indicating unflushed dimm */
+#define PAPR_PMEM_BAD_SHUTDOWN_MASK (PAPR_PMEM_SHUTDOWN_DIRTY)
+
+/* Bits status indicators for health bitmap indicating unrestored dimm */
+#define PAPR_PMEM_BAD_RESTORE_MASK (PAPR_PMEM_EMPTY)
+
+/* Bit status indicators for smart event notification */
+#define PAPR_PMEM_SMART_EVENT_MASK (PAPR_PMEM_HEALTH_CRITICAL | \
+ PAPR_PMEM_HEALTH_FATAL | \
+ PAPR_PMEM_HEALTH_UNHEALTHY)
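
As a hedged illustration of how these masks combine (hypothetical values, not part of the patch), a health bitmap reporting both an unarmed device and a dirty shutdown decodes as follows:

	u64 bitmap = PAPR_PMEM_UNARMED | PAPR_PMEM_SHUTDOWN_DIRTY;
	bool unarmed      = !!(bitmap & PAPR_PMEM_UNARMED_MASK);       /* true  */
	bool bad_shutdown = !!(bitmap & PAPR_PMEM_BAD_SHUTDOWN_MASK);  /* true  */
	bool bad_restore  = !!(bitmap & PAPR_PMEM_BAD_RESTORE_MASK);   /* false */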
+
+#define PAPR_SCM_PERF_STATS_EYECATCHER __stringify(SCMSTATS)
+#define PAPR_SCM_PERF_STATS_VERSION 0x1
+
+/* Struct holding a single performance metric */
+struct papr_scm_perf_stat {
+ u8 stat_id[8];
+ __be64 stat_val;
+} __packed;
+
+/* Struct exchanged between kernel and PHYP for fetching drc perf stats */
+struct papr_scm_perf_stats {
+ u8 eye_catcher[8];
+ /* Should be PAPR_SCM_PERF_STATS_VERSION */
+ __be32 stats_version;
+ /* Number of stats following */
+ __be32 num_statistics;
+	/* zero or more performance metrics */
+ struct papr_scm_perf_stat scm_statistic[];
+} __packed;
+
+/* private struct associated with each region */
struct papr_scm_priv {
struct platform_device *pdev;
struct device_node *dn;
@@ -30,6 +95,7 @@ struct papr_scm_priv {
uint64_t block_size;
int metadata_size;
bool is_volatile;
+ bool hcall_flush_required;
uint64_t bound_addr;
@@ -39,8 +105,62 @@ struct papr_scm_priv {
struct resource res;
struct nd_region *region;
struct nd_interleave_set nd_set;
+ struct list_head region_list;
+
+ /* Protect dimm health data from concurrent read/writes */
+ struct mutex health_mutex;
+
+ /* Last time the health information of the dimm was updated */
+ unsigned long lasthealth_jiffies;
+
+ /* Health information for the dimm */
+ u64 health_bitmap;
+
+ /* Holds the last known dirty shutdown counter value */
+ u64 dirty_shutdown_counter;
+
+ /* length of the stat buffer as expected by phyp */
+ size_t stat_buffer_len;
+
+ /* The bits which needs to be overridden */
+ u64 health_bitmap_inject_mask;
};
+static int papr_scm_pmem_flush(struct nd_region *nd_region,
+ struct bio *bio __maybe_unused)
+{
+ struct papr_scm_priv *p = nd_region_provider_data(nd_region);
+ unsigned long ret_buf[PLPAR_HCALL_BUFSIZE], token = 0;
+ long rc;
+
+ dev_dbg(&p->pdev->dev, "flush drc 0x%x", p->drc_index);
+
+ do {
+ rc = plpar_hcall(H_SCM_FLUSH, ret_buf, p->drc_index, token);
+ token = ret_buf[0];
+
+ /* Check if we are stalled for some time */
+ if (H_IS_LONG_BUSY(rc)) {
+ msleep(get_longbusy_msecs(rc));
+ rc = H_BUSY;
+ } else if (rc == H_BUSY) {
+ cond_resched();
+ }
+ } while (rc == H_BUSY);
+
+ if (rc) {
+ dev_err(&p->pdev->dev, "flush error: %ld", rc);
+ rc = -EIO;
+ } else {
+ dev_dbg(&p->pdev->dev, "flush drc 0x%x complete", p->drc_index);
+ }
+
+ return rc;
+}
+
+static LIST_HEAD(papr_nd_regions);
+static DEFINE_MUTEX(papr_ndr_lock);
+
static int drc_pmem_bind(struct papr_scm_priv *p)
{
unsigned long ret[PLPAR_HCALL_BUFSIZE];
@@ -144,6 +264,336 @@ err_out:
return drc_pmem_bind(p);
}
+/*
+ * Query the DIMM performance stats from PHYP and copy them (if returned) to
+ * the provided struct papr_scm_perf_stats instance 'buff_stats', which must
+ * be able to hold at least (num_stats + header) bytes.
+ * - If buff_stats == NULL, the return value is the size in bytes of the
+ *   buffer needed to hold all supported performance statistics.
+ * - If buff_stats != NULL and num_stats == 0, copy all known performance
+ *   statistics to 'buff_stats', which is expected to be large enough to
+ *   hold them.
+ * - If buff_stats != NULL and num_stats > 0, copy the requested performance
+ *   statistics to 'buff_stats'.
+ */
+static ssize_t drc_pmem_query_stats(struct papr_scm_priv *p,
+ struct papr_scm_perf_stats *buff_stats,
+ unsigned int num_stats)
+{
+ unsigned long ret[PLPAR_HCALL_BUFSIZE];
+ size_t size;
+ s64 rc;
+
+ /* Setup the out buffer */
+ if (buff_stats) {
+ memcpy(buff_stats->eye_catcher,
+ PAPR_SCM_PERF_STATS_EYECATCHER, 8);
+ buff_stats->stats_version =
+ cpu_to_be32(PAPR_SCM_PERF_STATS_VERSION);
+ buff_stats->num_statistics =
+ cpu_to_be32(num_stats);
+
+ /*
+ * Calculate the buffer size based on num-stats provided
+ * or use the prefetched max buffer length
+ */
+ if (num_stats)
+ /* Calculate size from the num_stats */
+ size = sizeof(struct papr_scm_perf_stats) +
+ num_stats * sizeof(struct papr_scm_perf_stat);
+ else
+ size = p->stat_buffer_len;
+ } else {
+ /* In case of no out buffer ignore the size */
+ size = 0;
+ }
+
+ /* Do the HCALL asking PHYP for info */
+ rc = plpar_hcall(H_SCM_PERFORMANCE_STATS, ret, p->drc_index,
+ buff_stats ? virt_to_phys(buff_stats) : 0,
+ size);
+
+ /* Check if the error was due to an unknown stat-id */
+ if (rc == H_PARTIAL) {
+ dev_err(&p->pdev->dev,
+ "Unknown performance stats, Err:0x%016lX\n", ret[0]);
+ return -ENOENT;
+ } else if (rc == H_AUTHORITY) {
+ dev_info(&p->pdev->dev,
+ "Permission denied while accessing performance stats");
+ return -EPERM;
+ } else if (rc == H_UNSUPPORTED) {
+ dev_dbg(&p->pdev->dev, "Performance stats unsupported\n");
+ return -EOPNOTSUPP;
+ } else if (rc != H_SUCCESS) {
+ dev_err(&p->pdev->dev,
+ "Failed to query performance stats, Err:%lld\n", rc);
+ return -EIO;
+
+ } else if (!size) {
+ /* Handle case where stat buffer size was requested */
+ dev_dbg(&p->pdev->dev,
+ "Performance stats size %ld\n", ret[0]);
+ return ret[0];
+ }
+
+ /* Successfully fetched the requested stats from phyp */
+ dev_dbg(&p->pdev->dev,
+ "Performance stats returned %d stats\n",
+ be32_to_cpu(buff_stats->num_statistics));
+ return 0;
+}
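
A hedged usage sketch of the calling modes documented above (not part of the patch; it assumes p->stat_buffer_len has already been initialized, as the driver's probe path does): first query the required buffer size, then fetch every supported statistic in one call.

	ssize_t size = drc_pmem_query_stats(p, NULL, 0);   /* size-only query */
	if (size > 0) {
		struct papr_scm_perf_stats *all = kzalloc(size, GFP_KERNEL);

		if (all && drc_pmem_query_stats(p, all, 0) == 0)  /* fetch all stats */
			dev_dbg(&p->pdev->dev, "%u stats returned\n",
				be32_to_cpu(all->num_statistics));
		kfree(all);
	}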
+
+#ifdef CONFIG_PERF_EVENTS
+#define to_nvdimm_pmu(_pmu) container_of(_pmu, struct nvdimm_pmu, pmu)
+
+static const char * const nvdimm_events_map[] = {
+ [1] = "CtlResCt",
+ [2] = "CtlResTm",
+ [3] = "PonSecs ",
+ [4] = "MemLife ",
+ [5] = "CritRscU",
+ [6] = "HostLCnt",
+ [7] = "HostSCnt",
+ [8] = "HostSDur",
+ [9] = "HostLDur",
+ [10] = "MedRCnt ",
+ [11] = "MedWCnt ",
+ [12] = "MedRDur ",
+ [13] = "MedWDur ",
+ [14] = "CchRHCnt",
+ [15] = "CchWHCnt",
+ [16] = "FastWCnt",
+};
+
+static int papr_scm_pmu_get_value(struct perf_event *event, struct device *dev, u64 *count)
+{
+ struct papr_scm_perf_stat *stat;
+ struct papr_scm_perf_stats *stats;
+ struct papr_scm_priv *p = dev_get_drvdata(dev);
+ int rc, size;
+
+ /* Invalid eventcode */
+ if (event->attr.config == 0 || event->attr.config >= ARRAY_SIZE(nvdimm_events_map))
+ return -EINVAL;
+
+	/* Allocate a request buffer large enough to hold a single performance stat */
+ size = sizeof(struct papr_scm_perf_stats) +
+ sizeof(struct papr_scm_perf_stat);
+
+ if (!p)
+ return -EINVAL;
+
+ stats = kzalloc(size, GFP_KERNEL);
+ if (!stats)
+ return -ENOMEM;
+
+ stat = &stats->scm_statistic[0];
+ memcpy(&stat->stat_id,
+ nvdimm_events_map[event->attr.config],
+ sizeof(stat->stat_id));
+ stat->stat_val = 0;
+
+ rc = drc_pmem_query_stats(p, stats, 1);
+ if (rc < 0) {
+ kfree(stats);
+ return rc;
+ }
+
+ *count = be64_to_cpu(stat->stat_val);
+ kfree(stats);
+ return 0;
+}
+
+static int papr_scm_pmu_event_init(struct perf_event *event)
+{
+ struct nvdimm_pmu *nd_pmu = to_nvdimm_pmu(event->pmu);
+ struct papr_scm_priv *p;
+
+ if (!nd_pmu)
+ return -EINVAL;
+
+ /* test the event attr type for PMU enumeration */
+ if (event->attr.type != event->pmu->type)
+ return -ENOENT;
+
+ /* it does not support event sampling mode */
+ if (is_sampling_event(event))
+ return -EOPNOTSUPP;
+
+ /* no branch sampling */
+ if (has_branch_stack(event))
+ return -EOPNOTSUPP;
+
+ p = (struct papr_scm_priv *)nd_pmu->dev->driver_data;
+ if (!p)
+ return -EINVAL;
+
+ /* Invalid eventcode */
+ if (event->attr.config == 0 || event->attr.config > 16)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int papr_scm_pmu_add(struct perf_event *event, int flags)
+{
+ u64 count;
+ int rc;
+ struct nvdimm_pmu *nd_pmu = to_nvdimm_pmu(event->pmu);
+
+ if (!nd_pmu)
+ return -EINVAL;
+
+ if (flags & PERF_EF_START) {
+ rc = papr_scm_pmu_get_value(event, nd_pmu->dev, &count);
+ if (rc)
+ return rc;
+
+ local64_set(&event->hw.prev_count, count);
+ }
+
+ return 0;
+}
+
+static void papr_scm_pmu_read(struct perf_event *event)
+{
+ u64 prev, now;
+ int rc;
+ struct nvdimm_pmu *nd_pmu = to_nvdimm_pmu(event->pmu);
+
+ if (!nd_pmu)
+ return;
+
+ rc = papr_scm_pmu_get_value(event, nd_pmu->dev, &now);
+ if (rc)
+ return;
+
+ prev = local64_xchg(&event->hw.prev_count, now);
+ local64_add(now - prev, &event->count);
+}
+
+static void papr_scm_pmu_del(struct perf_event *event, int flags)
+{
+ papr_scm_pmu_read(event);
+}
+
+static void papr_scm_pmu_register(struct papr_scm_priv *p)
+{
+ struct nvdimm_pmu *nd_pmu;
+ int rc, nodeid;
+
+ nd_pmu = kzalloc(sizeof(*nd_pmu), GFP_KERNEL);
+ if (!nd_pmu) {
+ rc = -ENOMEM;
+ goto pmu_err_print;
+ }
+
+ if (!p->stat_buffer_len) {
+ rc = -ENOENT;
+ goto pmu_check_events_err;
+ }
+
+ nd_pmu->pmu.task_ctx_nr = perf_invalid_context;
+ nd_pmu->pmu.name = nvdimm_name(p->nvdimm);
+ nd_pmu->pmu.event_init = papr_scm_pmu_event_init;
+ nd_pmu->pmu.read = papr_scm_pmu_read;
+ nd_pmu->pmu.add = papr_scm_pmu_add;
+ nd_pmu->pmu.del = papr_scm_pmu_del;
+
+ nd_pmu->pmu.capabilities = PERF_PMU_CAP_NO_INTERRUPT |
+ PERF_PMU_CAP_NO_EXCLUDE;
+
+	/* update the cpumask variable */
+ nodeid = numa_map_to_online_node(dev_to_node(&p->pdev->dev));
+ nd_pmu->arch_cpumask = *cpumask_of_node(nodeid);
+
+ rc = register_nvdimm_pmu(nd_pmu, p->pdev);
+ if (rc)
+ goto pmu_check_events_err;
+
+ /*
+ * Set archdata.priv value to nvdimm_pmu structure, to handle the
+ * unregistering of pmu device.
+ */
+ p->pdev->archdata.priv = nd_pmu;
+ return;
+
+pmu_check_events_err:
+ kfree(nd_pmu);
+pmu_err_print:
+ dev_info(&p->pdev->dev, "nvdimm pmu didn't register rc=%d\n", rc);
+}
+
+#else
+static void papr_scm_pmu_register(struct papr_scm_priv *p) { }
+#endif
+
+/*
+ * Issue hcall to retrieve dimm health info and populate papr_scm_priv with the
+ * health information.
+ */
+static int __drc_pmem_query_health(struct papr_scm_priv *p)
+{
+ unsigned long ret[PLPAR_HCALL_BUFSIZE];
+ u64 bitmap = 0;
+ long rc;
+
+ /* issue the hcall */
+ rc = plpar_hcall(H_SCM_HEALTH, ret, p->drc_index);
+ if (rc == H_SUCCESS)
+ bitmap = ret[0] & ret[1];
+ else if (rc == H_FUNCTION)
+ dev_info_once(&p->pdev->dev,
+ "Hcall H_SCM_HEALTH not implemented, assuming empty health bitmap");
+ else {
+
+ dev_err(&p->pdev->dev,
+ "Failed to query health information, Err:%ld\n", rc);
+ return -ENXIO;
+ }
+
+ p->lasthealth_jiffies = jiffies;
+ /* Allow injecting specific health bits via inject mask. */
+ if (p->health_bitmap_inject_mask)
+ bitmap = (bitmap & ~p->health_bitmap_inject_mask) |
+ p->health_bitmap_inject_mask;
+ WRITE_ONCE(p->health_bitmap, bitmap);
+ dev_dbg(&p->pdev->dev,
+ "Queried dimm health info. Bitmap:0x%016lx Mask:0x%016lx\n",
+ ret[0], ret[1]);
+
+ return 0;
+}
+
+/* Min interval in seconds for assuming stable dimm health */
+#define MIN_HEALTH_QUERY_INTERVAL 60
+
+/* Query cached health info and, if needed, call __drc_pmem_query_health() */
+static int drc_pmem_query_health(struct papr_scm_priv *p)
+{
+ unsigned long cache_timeout;
+ int rc;
+
+ /* Protect concurrent modifications to papr_scm_priv */
+ rc = mutex_lock_interruptible(&p->health_mutex);
+ if (rc)
+ return rc;
+
+	/* Jiffies value until which the cached health data is assumed to be valid */
+ cache_timeout = p->lasthealth_jiffies +
+ msecs_to_jiffies(MIN_HEALTH_QUERY_INTERVAL * 1000);
+
+	/* Fetch new health info if the cached data is older than MIN_HEALTH_QUERY_INTERVAL */
+ if (time_after(jiffies, cache_timeout))
+ rc = __drc_pmem_query_health(p);
+ else
+ /* Assume cached health data is valid */
+ rc = 0;
+
+ mutex_unlock(&p->health_mutex);
+ return rc;
+}
static int papr_scm_meta_get(struct papr_scm_priv *p,
struct nd_cmd_get_config_data_hdr *hdr)
@@ -246,16 +696,368 @@ static int papr_scm_meta_set(struct papr_scm_priv *p,
return 0;
}
-int papr_scm_ndctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
- unsigned int cmd, void *buf, unsigned int buf_len, int *cmd_rc)
+/*
+ * Do sanity checks on the input args to the dimm-control function and return
+ * '0' if valid. Validation of PDSM payloads happens later in
+ * papr_scm_service_pdsm.
+ */
+static int is_cmd_valid(struct nvdimm *nvdimm, unsigned int cmd, void *buf,
+ unsigned int buf_len)
{
- struct nd_cmd_get_config_size *get_size_hdr;
+ unsigned long cmd_mask = PAPR_SCM_DIMM_CMD_MASK;
+ struct nd_cmd_pkg *nd_cmd;
struct papr_scm_priv *p;
+ enum papr_pdsm pdsm;
/* Only dimm-specific calls are supported atm */
if (!nvdimm)
return -EINVAL;
+ /* get the provider data from struct nvdimm */
+ p = nvdimm_provider_data(nvdimm);
+
+ if (!test_bit(cmd, &cmd_mask)) {
+ dev_dbg(&p->pdev->dev, "Unsupported cmd=%u\n", cmd);
+ return -EINVAL;
+ }
+
+ /* For CMD_CALL verify pdsm request */
+ if (cmd == ND_CMD_CALL) {
+		/* Verify the envelope and envelope size */
+ if (!buf ||
+ buf_len < (sizeof(struct nd_cmd_pkg) + ND_PDSM_HDR_SIZE)) {
+ dev_dbg(&p->pdev->dev, "Invalid pkg size=%u\n",
+ buf_len);
+ return -EINVAL;
+ }
+
+ /* Verify that the nd_cmd_pkg.nd_family is correct */
+ nd_cmd = (struct nd_cmd_pkg *)buf;
+
+ if (nd_cmd->nd_family != NVDIMM_FAMILY_PAPR) {
+ dev_dbg(&p->pdev->dev, "Invalid pkg family=0x%llx\n",
+ nd_cmd->nd_family);
+ return -EINVAL;
+ }
+
+ pdsm = (enum papr_pdsm)nd_cmd->nd_command;
+
+ /* Verify if the pdsm command is valid */
+ if (pdsm <= PAPR_PDSM_MIN || pdsm >= PAPR_PDSM_MAX) {
+ dev_dbg(&p->pdev->dev, "PDSM[0x%x]: Invalid PDSM\n",
+ pdsm);
+ return -EINVAL;
+ }
+
+		/* Ensure there is enough space to hold the returned 'nd_pkg_pdsm' header */
+ if (nd_cmd->nd_size_out < ND_PDSM_HDR_SIZE) {
+ dev_dbg(&p->pdev->dev, "PDSM[0x%x]: Invalid payload\n",
+ pdsm);
+ return -EINVAL;
+ }
+ }
+
+ /* Let the command be further processed */
+ return 0;
+}
+
+static int papr_pdsm_fuel_gauge(struct papr_scm_priv *p,
+ union nd_pdsm_payload *payload)
+{
+ int rc, size;
+ u64 statval;
+ struct papr_scm_perf_stat *stat;
+ struct papr_scm_perf_stats *stats;
+
+ /* Silently fail if fetching performance metrics isn't supported */
+ if (!p->stat_buffer_len)
+ return 0;
+
+	/* Allocate a request buffer large enough to hold a single performance stat */
+ size = sizeof(struct papr_scm_perf_stats) +
+ sizeof(struct papr_scm_perf_stat);
+
+ stats = kzalloc(size, GFP_KERNEL);
+ if (!stats)
+ return -ENOMEM;
+
+ stat = &stats->scm_statistic[0];
+ memcpy(&stat->stat_id, "MemLife ", sizeof(stat->stat_id));
+ stat->stat_val = 0;
+
+ /* Fetch the fuel gauge and populate it in payload */
+ rc = drc_pmem_query_stats(p, stats, 1);
+ if (rc < 0) {
+ dev_dbg(&p->pdev->dev, "Err(%d) fetching fuel gauge\n", rc);
+ goto free_stats;
+ }
+
+ statval = be64_to_cpu(stat->stat_val);
+ dev_dbg(&p->pdev->dev,
+ "Fetched fuel-gauge %llu", statval);
+ payload->health.extension_flags |=
+ PDSM_DIMM_HEALTH_RUN_GAUGE_VALID;
+ payload->health.dimm_fuel_gauge = statval;
+
+ rc = sizeof(struct nd_papr_pdsm_health);
+
+free_stats:
+ kfree(stats);
+ return rc;
+}
+
+/* Add the dirty-shutdown-counter value to the pdsm */
+static int papr_pdsm_dsc(struct papr_scm_priv *p,
+ union nd_pdsm_payload *payload)
+{
+ payload->health.extension_flags |= PDSM_DIMM_DSC_VALID;
+ payload->health.dimm_dsc = p->dirty_shutdown_counter;
+
+ return sizeof(struct nd_papr_pdsm_health);
+}
+
+/* Fetch the DIMM health info and populate it in provided package. */
+static int papr_pdsm_health(struct papr_scm_priv *p,
+ union nd_pdsm_payload *payload)
+{
+ int rc;
+
+ /* Ensure dimm health mutex is taken preventing concurrent access */
+ rc = mutex_lock_interruptible(&p->health_mutex);
+ if (rc)
+ goto out;
+
+	/* Always fetch up-to-date dimm health data, ignoring cached values */
+ rc = __drc_pmem_query_health(p);
+ if (rc) {
+ mutex_unlock(&p->health_mutex);
+ goto out;
+ }
+
+ /* update health struct with various flags derived from health bitmap */
+ payload->health = (struct nd_papr_pdsm_health) {
+ .extension_flags = 0,
+ .dimm_unarmed = !!(p->health_bitmap & PAPR_PMEM_UNARMED_MASK),
+ .dimm_bad_shutdown = !!(p->health_bitmap & PAPR_PMEM_BAD_SHUTDOWN_MASK),
+ .dimm_bad_restore = !!(p->health_bitmap & PAPR_PMEM_BAD_RESTORE_MASK),
+ .dimm_scrubbed = !!(p->health_bitmap & PAPR_PMEM_SCRUBBED_AND_LOCKED),
+ .dimm_locked = !!(p->health_bitmap & PAPR_PMEM_SCRUBBED_AND_LOCKED),
+ .dimm_encrypted = !!(p->health_bitmap & PAPR_PMEM_ENCRYPTED),
+ .dimm_health = PAPR_PDSM_DIMM_HEALTHY,
+ };
+
+ /* Update field dimm_health based on health_bitmap flags */
+ if (p->health_bitmap & PAPR_PMEM_HEALTH_FATAL)
+ payload->health.dimm_health = PAPR_PDSM_DIMM_FATAL;
+ else if (p->health_bitmap & PAPR_PMEM_HEALTH_CRITICAL)
+ payload->health.dimm_health = PAPR_PDSM_DIMM_CRITICAL;
+ else if (p->health_bitmap & PAPR_PMEM_HEALTH_UNHEALTHY)
+ payload->health.dimm_health = PAPR_PDSM_DIMM_UNHEALTHY;
+
+ /* struct populated hence can release the mutex now */
+ mutex_unlock(&p->health_mutex);
+
+ /* Populate the fuel gauge meter in the payload */
+ papr_pdsm_fuel_gauge(p, payload);
+ /* Populate the dirty-shutdown-counter field */
+ papr_pdsm_dsc(p, payload);
+
+ rc = sizeof(struct nd_papr_pdsm_health);
+
+out:
+ return rc;
+}
+
+/* Inject or clear a smart error by overriding bits in the cached health bitmap */
+static int papr_pdsm_smart_inject(struct papr_scm_priv *p,
+ union nd_pdsm_payload *payload)
+{
+ int rc;
+ u32 supported_flags = 0;
+ u64 inject_mask = 0, clear_mask = 0;
+ u64 mask;
+
+ /* Check for individual smart error flags and update inject/clear masks */
+ if (payload->smart_inject.flags & PDSM_SMART_INJECT_HEALTH_FATAL) {
+ supported_flags |= PDSM_SMART_INJECT_HEALTH_FATAL;
+ if (payload->smart_inject.fatal_enable)
+ inject_mask |= PAPR_PMEM_HEALTH_FATAL;
+ else
+ clear_mask |= PAPR_PMEM_HEALTH_FATAL;
+ }
+
+ if (payload->smart_inject.flags & PDSM_SMART_INJECT_BAD_SHUTDOWN) {
+ supported_flags |= PDSM_SMART_INJECT_BAD_SHUTDOWN;
+ if (payload->smart_inject.unsafe_shutdown_enable)
+ inject_mask |= PAPR_PMEM_SHUTDOWN_DIRTY;
+ else
+ clear_mask |= PAPR_PMEM_SHUTDOWN_DIRTY;
+ }
+
+ dev_dbg(&p->pdev->dev, "[Smart-inject] inject_mask=%#llx clear_mask=%#llx\n",
+ inject_mask, clear_mask);
+
+ /* Prevent concurrent access to dimm health bitmap related members */
+ rc = mutex_lock_interruptible(&p->health_mutex);
+ if (rc)
+ return rc;
+
+ /* Use inject/clear masks to set health_bitmap_inject_mask */
+ mask = READ_ONCE(p->health_bitmap_inject_mask);
+ mask = (mask & ~clear_mask) | inject_mask;
+ WRITE_ONCE(p->health_bitmap_inject_mask, mask);
+
+ /* Invalidate cached health bitmap */
+ p->lasthealth_jiffies = 0;
+
+ mutex_unlock(&p->health_mutex);
+
+ /* Return the supported flags back to userspace */
+ payload->smart_inject.flags = supported_flags;
+
+ return sizeof(struct nd_papr_pdsm_health);
+}
+
+/*
+ * 'struct pdsm_cmd_desc'
+ * Identifies supported PDSMs' expected length of in/out payloads
+ * and pdsm service function.
+ *
+ * size_in : Size of input payload if any in the PDSM request.
+ * size_out : Size of output payload if any in the PDSM request.
+ * service : Service function for the PDSM request. Return semantics:
+ * rc < 0 : Error servicing PDSM and rc indicates the error.
+ * rc >=0 : Serviced successfully and 'rc' indicate number of
+ * bytes written to payload.
+ */
+struct pdsm_cmd_desc {
+ u32 size_in;
+ u32 size_out;
+ int (*service)(struct papr_scm_priv *dimm,
+ union nd_pdsm_payload *payload);
+};
+
+/* Holds all supported PDSMs' command descriptors */
+static const struct pdsm_cmd_desc __pdsm_cmd_descriptors[] = {
+ [PAPR_PDSM_MIN] = {
+ .size_in = 0,
+ .size_out = 0,
+ .service = NULL,
+ },
+ /* New PDSM command descriptors to be added below */
+
+ [PAPR_PDSM_HEALTH] = {
+ .size_in = 0,
+ .size_out = sizeof(struct nd_papr_pdsm_health),
+ .service = papr_pdsm_health,
+ },
+
+ [PAPR_PDSM_SMART_INJECT] = {
+ .size_in = sizeof(struct nd_papr_pdsm_smart_inject),
+ .size_out = sizeof(struct nd_papr_pdsm_smart_inject),
+ .service = papr_pdsm_smart_inject,
+ },
+ /* Empty */
+ [PAPR_PDSM_MAX] = {
+ .size_in = 0,
+ .size_out = 0,
+ .service = NULL,
+ },
+};
+
+/* Given a valid pdsm cmd return its command descriptor else return NULL */
+static inline const struct pdsm_cmd_desc *pdsm_cmd_desc(enum papr_pdsm cmd)
+{
+ if (cmd >= 0 && cmd < ARRAY_SIZE(__pdsm_cmd_descriptors))
+ return &__pdsm_cmd_descriptors[cmd];
+
+ return NULL;
+}
+
+/*
+ * For a given pdsm request call an appropriate service function.
+ * Returns errors if any while handling the pdsm command package.
+ */
+static int papr_scm_service_pdsm(struct papr_scm_priv *p,
+ struct nd_cmd_pkg *pkg)
+{
+ /* Get the PDSM header and PDSM command */
+ struct nd_pkg_pdsm *pdsm_pkg = (struct nd_pkg_pdsm *)pkg->nd_payload;
+ enum papr_pdsm pdsm = (enum papr_pdsm)pkg->nd_command;
+ const struct pdsm_cmd_desc *pdsc;
+ int rc;
+
+ /* Fetch corresponding pdsm descriptor for validation and servicing */
+ pdsc = pdsm_cmd_desc(pdsm);
+ if (!pdsc)
+ return -EINVAL;
+
+ /* Validate pdsm descriptor */
+ /* Ensure that reserved fields are 0 */
+ if (pdsm_pkg->reserved[0] || pdsm_pkg->reserved[1]) {
+ dev_dbg(&p->pdev->dev, "PDSM[0x%x]: Invalid reserved field\n",
+ pdsm);
+ return -EINVAL;
+ }
+
+ /* If pdsm expects some input, then ensure that the size_in matches */
+ if (pdsc->size_in &&
+ pkg->nd_size_in != (pdsc->size_in + ND_PDSM_HDR_SIZE)) {
+ dev_dbg(&p->pdev->dev, "PDSM[0x%x]: Mismatched size_in=%d\n",
+ pdsm, pkg->nd_size_in);
+ return -EINVAL;
+ }
+
+ /* If pdsm wants to return data, then ensure that size_out matches */
+ if (pdsc->size_out &&
+ pkg->nd_size_out != (pdsc->size_out + ND_PDSM_HDR_SIZE)) {
+ dev_dbg(&p->pdev->dev, "PDSM[0x%x]: Mismatched size_out=%d\n",
+ pdsm, pkg->nd_size_out);
+ return -EINVAL;
+ }
+
+ /* Service the pdsm */
+ if (pdsc->service) {
+ dev_dbg(&p->pdev->dev, "PDSM[0x%x]: Servicing..\n", pdsm);
+
+ rc = pdsc->service(p, &pdsm_pkg->payload);
+
+ if (rc < 0) {
+ /* error encountered while servicing pdsm */
+ pdsm_pkg->cmd_status = rc;
+ pkg->nd_fw_size = ND_PDSM_HDR_SIZE;
+ } else {
+ /* pdsm serviced and 'rc' bytes written to payload */
+ pdsm_pkg->cmd_status = 0;
+ pkg->nd_fw_size = ND_PDSM_HDR_SIZE + rc;
+ }
+ } else {
+ dev_dbg(&p->pdev->dev, "PDSM[0x%x]: Unsupported PDSM request\n",
+ pdsm);
+ pdsm_pkg->cmd_status = -ENOENT;
+ pkg->nd_fw_size = ND_PDSM_HDR_SIZE;
+ }
+
+ return pdsm_pkg->cmd_status;
+}
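+
+/*
+ * Illustrative layout of the ND_CMD_CALL package handled above (a sketch;
+ * see the nd_cmd_pkg and nd_pkg_pdsm definitions for the authoritative
+ * layout):
+ *
+ *   struct nd_cmd_pkg                        <- envelope from libnvdimm
+ *     .nd_command  = PAPR_PDSM_*             <- selects the PDSM
+ *     .nd_size_in  = ND_PDSM_HDR_SIZE + request payload size (if any)
+ *     .nd_size_out = ND_PDSM_HDR_SIZE + response payload size (if any)
+ *     .nd_payload  -> struct nd_pkg_pdsm
+ *                       .cmd_status          <- PDSM servicing result (out)
+ *                       .reserved[2]         <- must be zero
+ *                       .payload             <- union nd_pdsm_payload (in/out)
+ */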
+
+static int papr_scm_ndctl(struct nvdimm_bus_descriptor *nd_desc,
+ struct nvdimm *nvdimm, unsigned int cmd, void *buf,
+ unsigned int buf_len, int *cmd_rc)
+{
+ struct nd_cmd_get_config_size *get_size_hdr;
+ struct nd_cmd_pkg *call_pkg = NULL;
+ struct papr_scm_priv *p;
+ int rc;
+
+ rc = is_cmd_valid(nvdimm, cmd, buf, buf_len);
+ if (rc) {
+ pr_debug("Invalid cmd=0x%x. Err=%d\n", cmd, rc);
+ return rc;
+ }
+
+ /* Use a local variable in case cmd_rc pointer is NULL */
+ if (!cmd_rc)
+ cmd_rc = &rc;
+
p = nvdimm_provider_data(nvdimm);
switch (cmd) {
@@ -276,7 +1078,13 @@ int papr_scm_ndctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
*cmd_rc = papr_scm_meta_set(p, buf);
break;
+ case ND_CMD_CALL:
+ call_pkg = (struct nd_cmd_pkg *)buf;
+ *cmd_rc = papr_scm_service_pdsm(p, call_pkg);
+ break;
+
default:
+ dev_dbg(&p->pdev->dev, "Unknown command = %d\n", cmd);
return -EINVAL;
}
@@ -285,24 +1093,147 @@ int papr_scm_ndctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
return 0;
}
-static inline int papr_scm_node(int node)
+static ssize_t health_bitmap_inject_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
- int min_dist = INT_MAX, dist;
- int nid, min_node;
+ struct nvdimm *dimm = to_nvdimm(dev);
+ struct papr_scm_priv *p = nvdimm_provider_data(dimm);
- if ((node == NUMA_NO_NODE) || node_online(node))
- return node;
+ return sprintf(buf, "%#llx\n",
+ READ_ONCE(p->health_bitmap_inject_mask));
+}
- min_node = first_online_node;
- for_each_online_node(nid) {
- dist = node_distance(node, nid);
- if (dist < min_dist) {
- min_dist = dist;
- min_node = nid;
- }
+static DEVICE_ATTR_ADMIN_RO(health_bitmap_inject);
+
+static ssize_t perf_stats_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int index;
+ ssize_t rc;
+ struct seq_buf s;
+ struct papr_scm_perf_stat *stat;
+ struct papr_scm_perf_stats *stats;
+ struct nvdimm *dimm = to_nvdimm(dev);
+ struct papr_scm_priv *p = nvdimm_provider_data(dimm);
+
+ if (!p->stat_buffer_len)
+ return -ENOENT;
+
+ /* Allocate the buffer for phyp where stats are written */
+ stats = kzalloc(p->stat_buffer_len, GFP_KERNEL);
+ if (!stats)
+ return -ENOMEM;
+
+ /* Ask phyp to return all dimm perf stats */
+ rc = drc_pmem_query_stats(p, stats, 0);
+ if (rc)
+ goto free_stats;
+ /*
+ * Go through the returned output buffer and print stats and
+ * values. Since stat_id is essentially a char string of
+ * 8 bytes, simply use the string format specifier to print it.
+ */
+ seq_buf_init(&s, buf, PAGE_SIZE);
+ for (index = 0, stat = stats->scm_statistic;
+ index < be32_to_cpu(stats->num_statistics);
+ ++index, ++stat) {
+ seq_buf_printf(&s, "%.8s = 0x%016llX\n",
+ stat->stat_id,
+ be64_to_cpu(stat->stat_val));
}
- return min_node;
+
+free_stats:
+ kfree(stats);
+ return rc ? rc : (ssize_t)seq_buf_used(&s);
+}
+static DEVICE_ATTR_ADMIN_RO(perf_stats);
+
+static ssize_t flags_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nvdimm *dimm = to_nvdimm(dev);
+ struct papr_scm_priv *p = nvdimm_provider_data(dimm);
+ struct seq_buf s;
+ u64 health;
+ int rc;
+
+ rc = drc_pmem_query_health(p);
+ if (rc)
+ return rc;
+
+ /* Copy health_bitmap locally, check masks & update out buffer */
+ health = READ_ONCE(p->health_bitmap);
+
+ seq_buf_init(&s, buf, PAGE_SIZE);
+ if (health & PAPR_PMEM_UNARMED_MASK)
+ seq_buf_printf(&s, "not_armed ");
+
+ if (health & PAPR_PMEM_BAD_SHUTDOWN_MASK)
+ seq_buf_printf(&s, "flush_fail ");
+
+ if (health & PAPR_PMEM_BAD_RESTORE_MASK)
+ seq_buf_printf(&s, "restore_fail ");
+
+ if (health & PAPR_PMEM_ENCRYPTED)
+ seq_buf_printf(&s, "encrypted ");
+
+ if (health & PAPR_PMEM_SMART_EVENT_MASK)
+ seq_buf_printf(&s, "smart_notify ");
+
+ if (health & PAPR_PMEM_SCRUBBED_AND_LOCKED)
+ seq_buf_printf(&s, "scrubbed locked ");
+
+ if (seq_buf_used(&s))
+ seq_buf_printf(&s, "\n");
+
+ return seq_buf_used(&s);
+}
+DEVICE_ATTR_RO(flags);
+
+static ssize_t dirty_shutdown_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nvdimm *dimm = to_nvdimm(dev);
+ struct papr_scm_priv *p = nvdimm_provider_data(dimm);
+
+ return sysfs_emit(buf, "%llu\n", p->dirty_shutdown_counter);
}
+DEVICE_ATTR_RO(dirty_shutdown);
+
+static umode_t papr_nd_attribute_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct nvdimm *nvdimm = to_nvdimm(dev);
+ struct papr_scm_priv *p = nvdimm_provider_data(nvdimm);
+
+ /* Hide the perf_stats attribute if perf-stats are not available */
+ if (attr == &dev_attr_perf_stats.attr && p->stat_buffer_len == 0)
+ return 0;
+
+ return attr->mode;
+}
+
+/* papr_scm specific dimm attributes */
+static struct attribute *papr_nd_attributes[] = {
+ &dev_attr_flags.attr,
+ &dev_attr_perf_stats.attr,
+ &dev_attr_dirty_shutdown.attr,
+ &dev_attr_health_bitmap_inject.attr,
+ NULL,
+};
+
+static const struct attribute_group papr_nd_attribute_group = {
+ .name = "papr",
+ .is_visible = papr_nd_attribute_visible,
+ .attrs = papr_nd_attributes,
+};
+
+static const struct attribute_group *papr_nd_attr_groups[] = {
+ &papr_nd_attribute_group,
+ NULL,
+};
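+
+/*
+ * With the "papr" attribute group above, these attributes typically appear
+ * under the nvdimm device in sysfs (paths illustrative):
+ *   /sys/bus/nd/devices/nmemN/papr/flags
+ *   /sys/bus/nd/devices/nmemN/papr/perf_stats
+ *   /sys/bus/nd/devices/nmemN/papr/dirty_shutdown
+ *   /sys/bus/nd/devices/nmemN/papr/health_bitmap_inject
+ */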
static int papr_scm_nvdimm_init(struct papr_scm_priv *p)
{
@@ -317,6 +1248,9 @@ static int papr_scm_nvdimm_init(struct papr_scm_priv *p)
p->bus_desc.of_node = p->pdev->dev.of_node;
p->bus_desc.provider_name = kstrdup(p->pdev->name, GFP_KERNEL);
+ /* Set the dimm command family mask to accept PDSMs */
+ set_bit(NVDIMM_FAMILY_PAPR, &p->bus_desc.dimm_family_mask);
+
if (!p->bus_desc.provider_name)
return -ENOMEM;
@@ -328,10 +1262,19 @@ static int papr_scm_nvdimm_init(struct papr_scm_priv *p)
}
dimm_flags = 0;
- set_bit(NDD_ALIASING, &dimm_flags);
+ set_bit(NDD_LABELING, &dimm_flags);
+
+ /*
+ * Check if the nvdimm is unarmed. No locking needed as we are still
+ * initializing. Ignore error encountered if any.
+ */
+ __drc_pmem_query_health(p);
+
+ if (p->health_bitmap & PAPR_PMEM_UNARMED_MASK)
+ set_bit(NDD_UNARMED, &dimm_flags);
- p->nvdimm = nvdimm_create(p->bus, p, NULL, dimm_flags,
- PAPR_SCM_DIMM_CMD_MASK, 0, NULL);
+ p->nvdimm = nvdimm_create(p->bus, p, papr_nd_attr_groups,
+ dimm_flags, PAPR_SCM_DIMM_CMD_MASK, 0, NULL);
if (!p->nvdimm) {
dev_err(dev, "Error creating DIMM object for %pOF\n", p->dn);
goto err;
@@ -349,7 +1292,7 @@ static int papr_scm_nvdimm_init(struct papr_scm_priv *p)
memset(&ndr_desc, 0, sizeof(ndr_desc));
target_nid = dev_to_node(&p->pdev->dev);
- online_nid = papr_scm_node(target_nid);
+ online_nid = numa_map_to_online_node(target_nid);
ndr_desc.numa_node = online_nid;
ndr_desc.target_node = target_nid;
ndr_desc.res = &p->res;
@@ -359,10 +1302,17 @@ static int papr_scm_nvdimm_init(struct papr_scm_priv *p)
ndr_desc.num_mappings = 1;
ndr_desc.nd_set = &p->nd_set;
+ if (p->hcall_flush_required) {
+ set_bit(ND_REGION_ASYNC, &ndr_desc.flags);
+ ndr_desc.flush = papr_scm_pmem_flush;
+ }
+
if (p->is_volatile)
p->region = nvdimm_volatile_region_create(p->bus, &ndr_desc);
- else
+ else {
+ set_bit(ND_REGION_PERSIST_MEMCTRL, &ndr_desc.flags);
p->region = nvdimm_pmem_region_create(p->bus, &ndr_desc);
+ }
if (!p->region) {
dev_err(dev, "Error registering region %pR from %pOF\n",
ndr_desc.res, p->dn);
@@ -372,6 +1322,10 @@ static int papr_scm_nvdimm_init(struct papr_scm_priv *p)
dev_info(dev, "Region registered with target node %d and online node %d",
target_nid, online_nid);
+ mutex_lock(&papr_ndr_lock);
+ list_add_tail(&p->region_list, &papr_nd_regions);
+ mutex_unlock(&papr_ndr_lock);
+
return 0;
err: nvdimm_bus_unregister(p->bus);
@@ -379,14 +1333,78 @@ err: nvdimm_bus_unregister(p->bus);
return -ENXIO;
}
+static void papr_scm_add_badblock(struct nd_region *region,
+ struct nvdimm_bus *bus, u64 phys_addr)
+{
+ u64 aligned_addr = ALIGN_DOWN(phys_addr, L1_CACHE_BYTES);
+
+ if (nvdimm_bus_add_badrange(bus, aligned_addr, L1_CACHE_BYTES)) {
+ pr_err("Bad block registration for 0x%llx failed\n", phys_addr);
+ return;
+ }
+
+ pr_debug("Add memory range (0x%llx - 0x%llx) as bad range\n",
+ aligned_addr, aligned_addr + L1_CACHE_BYTES);
+
+ nvdimm_region_notify(region, NVDIMM_REVALIDATE_POISON);
+}
+
+static int handle_mce_ue(struct notifier_block *nb, unsigned long val,
+ void *data)
+{
+ struct machine_check_event *evt = data;
+ struct papr_scm_priv *p;
+ u64 phys_addr;
+ bool found = false;
+
+ if (evt->error_type != MCE_ERROR_TYPE_UE)
+ return NOTIFY_DONE;
+
+ if (list_empty(&papr_nd_regions))
+ return NOTIFY_DONE;
+
+ /*
+ * The physical address obtained here is PAGE_SIZE aligned, so get the
+ * exact address from the effective address
+ */
+ phys_addr = evt->u.ue_error.physical_address +
+ (evt->u.ue_error.effective_address & ~PAGE_MASK);
+
+ if (!evt->u.ue_error.physical_address_provided ||
+ !is_zone_device_page(pfn_to_page(phys_addr >> PAGE_SHIFT)))
+ return NOTIFY_DONE;
+
+ /* mce notifier is called from a process context, so mutex is safe */
+ mutex_lock(&papr_ndr_lock);
+ list_for_each_entry(p, &papr_nd_regions, region_list) {
+ if (phys_addr >= p->res.start && phys_addr <= p->res.end) {
+ found = true;
+ break;
+ }
+ }
+
+ if (found)
+ papr_scm_add_badblock(p->region, p->bus, phys_addr);
+
+ mutex_unlock(&papr_ndr_lock);
+
+ return found ? NOTIFY_OK : NOTIFY_DONE;
+}
+
+static struct notifier_block mce_ue_nb = {
+ .notifier_call = handle_mce_ue
+};
+
static int papr_scm_probe(struct platform_device *pdev)
{
struct device_node *dn = pdev->dev.of_node;
u32 drc_index, metadata_size;
u64 blocks, block_size;
struct papr_scm_priv *p;
+ u8 uuid_raw[UUID_SIZE];
const char *uuid_str;
- u64 uuid[2];
+ ssize_t stat_size;
+ uuid_t uuid;
int rc;
/* check we have all the required DT properties */
@@ -415,6 +1433,9 @@ static int papr_scm_probe(struct platform_device *pdev)
if (!p)
return -ENOMEM;
+ /* Initialize the dimm mutex */
+ mutex_init(&p->health_mutex);
+
/* optional DT properties */
of_property_read_u32(dn, "ibm,metadata-size", &metadata_size);
@@ -423,18 +1444,30 @@ static int papr_scm_probe(struct platform_device *pdev)
p->block_size = block_size;
p->blocks = blocks;
p->is_volatile = !of_property_read_bool(dn, "ibm,cache-flush-required");
+ p->hcall_flush_required = of_property_read_bool(dn, "ibm,hcall-flush-required");
+
+ if (of_property_read_u64(dn, "ibm,persistence-failed-count",
+ &p->dirty_shutdown_counter))
+ p->dirty_shutdown_counter = 0;
/* We just need to ensure that set cookies are unique across */
- uuid_parse(uuid_str, (uuid_t *) uuid);
+ uuid_parse(uuid_str, &uuid);
+
/*
- * cookie1 and cookie2 are not really little endian
- * we store a little endian representation of the
- * uuid str so that we can compare this with the label
- * area cookie irrespective of the endian config with which
- * the kernel is built.
+ * The cookie1 and cookie2 are not really little endian.
+ * We store a raw buffer representation of the
+ * uuid string so that we can compare this with the label
+ * area cookie irrespective of the endian configuration
+ * with which the kernel is built.
+ *
+ * Historically we stored the cookie in the below format.
+ * for a uuid string 72511b67-0b3b-42fd-8d1d-5be3cae8bcaa
+ * cookie1 was 0xfd423b0b671b5172
+ * cookie2 was 0xaabce8cae35b1d8d
*/
- p->nd_set.cookie1 = cpu_to_le64(uuid[0]);
- p->nd_set.cookie2 = cpu_to_le64(uuid[1]);
+ export_uuid(uuid_raw, &uuid);
+ p->nd_set.cookie1 = get_unaligned_le64(&uuid_raw[0]);
+ p->nd_set.cookie2 = get_unaligned_le64(&uuid_raw[8]);
/* might be zero */
p->metadata_size = metadata_size;
@@ -459,11 +1492,20 @@ static int papr_scm_probe(struct platform_device *pdev)
p->res.name = pdev->name;
p->res.flags = IORESOURCE_MEM;
+ /* Try retrieving the stat buffer and see if it's supported */
+ stat_size = drc_pmem_query_stats(p, NULL, 0);
+ if (stat_size > 0) {
+ p->stat_buffer_len = stat_size;
+ dev_dbg(&p->pdev->dev, "Max perf-stat size %lu-bytes\n",
+ p->stat_buffer_len);
+ }
+
rc = papr_scm_nvdimm_init(p);
if (rc)
goto err2;
platform_set_drvdata(pdev, p);
+ papr_scm_pmu_register(p);
return 0;
@@ -476,8 +1518,17 @@ static int papr_scm_remove(struct platform_device *pdev)
{
struct papr_scm_priv *p = platform_get_drvdata(pdev);
+ mutex_lock(&papr_ndr_lock);
+ list_del(&p->region_list);
+ mutex_unlock(&papr_ndr_lock);
+
nvdimm_bus_unregister(p->bus);
drc_pmem_unbind(p);
+
+ if (pdev->archdata.priv)
+ unregister_nvdimm_pmu(pdev->archdata.priv);
+
+ pdev->archdata.priv = NULL;
kfree(p->bus_desc.provider_name);
kfree(p);
@@ -486,6 +1537,7 @@ static int papr_scm_remove(struct platform_device *pdev)
static const struct of_device_id papr_scm_match[] = {
{ .compatible = "ibm,pmemory" },
+ { .compatible = "ibm,pmemory-v2" },
{ },
};
@@ -498,7 +1550,25 @@ static struct platform_driver papr_scm_driver = {
},
};
-module_platform_driver(papr_scm_driver);
+static int __init papr_scm_init(void)
+{
+ int ret;
+
+ ret = platform_driver_register(&papr_scm_driver);
+ if (!ret)
+ mce_register_notifier(&mce_ue_nb);
+
+ return ret;
+}
+module_init(papr_scm_init);
+
+static void __exit papr_scm_exit(void)
+{
+ mce_unregister_notifier(&mce_ue_nb);
+ platform_driver_unregister(&papr_scm_driver);
+}
+module_exit(papr_scm_exit);
+
MODULE_DEVICE_TABLE(of, papr_scm_match);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("IBM Corporation");
diff --git a/arch/powerpc/platforms/pseries/pci.c b/arch/powerpc/platforms/pseries/pci.c
index 911534b89c85..6e671c3809ec 100644
--- a/arch/powerpc/platforms/pseries/pci.c
+++ b/arch/powerpc/platforms/pseries/pci.c
@@ -14,7 +14,6 @@
#include <asm/eeh.h>
#include <asm/pci-bridge.h>
-#include <asm/prom.h>
#include <asm/ppc-pci.h>
#include <asm/pci.h>
#include "pseries.h"
@@ -55,9 +54,8 @@ struct pe_map_bar_entry {
__be32 reserved; /* Reserved Space */
};
-int pseries_send_map_pe(struct pci_dev *pdev,
- u16 num_vfs,
- struct pe_map_bar_entry *vf_pe_array)
+static int pseries_send_map_pe(struct pci_dev *pdev, u16 num_vfs,
+ struct pe_map_bar_entry *vf_pe_array)
{
struct pci_dn *pdn;
int rc;
@@ -88,7 +86,7 @@ int pseries_send_map_pe(struct pci_dev *pdev,
return rc;
}
-void pseries_set_pe_num(struct pci_dev *pdev, u16 vf_index, __be16 pe_num)
+static void pseries_set_pe_num(struct pci_dev *pdev, u16 vf_index, __be16 pe_num)
{
struct pci_dn *pdn;
@@ -102,7 +100,7 @@ void pseries_set_pe_num(struct pci_dev *pdev, u16 vf_index, __be16 pe_num)
pdn->pe_num_map[vf_index]);
}
-int pseries_associate_pes(struct pci_dev *pdev, u16 num_vfs)
+static int pseries_associate_pes(struct pci_dev *pdev, u16 num_vfs)
{
struct pci_dn *pdn;
int i, rc, vf_index;
@@ -146,7 +144,7 @@ int pseries_associate_pes(struct pci_dev *pdev, u16 num_vfs)
return rc;
}
-int pseries_pci_sriov_enable(struct pci_dev *pdev, u16 num_vfs)
+static int pseries_pci_sriov_enable(struct pci_dev *pdev, u16 num_vfs)
{
struct pci_dn *pdn;
int rc;
@@ -189,14 +187,14 @@ int pseries_pci_sriov_enable(struct pci_dev *pdev, u16 num_vfs)
return rc;
}
-int pseries_pcibios_sriov_enable(struct pci_dev *pdev, u16 num_vfs)
+static int pseries_pcibios_sriov_enable(struct pci_dev *pdev, u16 num_vfs)
{
/* Allocate PCI data */
add_sriov_vf_pdns(pdev);
return pseries_pci_sriov_enable(pdev, num_vfs);
}
-int pseries_pcibios_sriov_disable(struct pci_dev *pdev)
+static int pseries_pcibios_sriov_disable(struct pci_dev *pdev)
{
struct pci_dn *pdn;
@@ -225,8 +223,6 @@ static void __init pSeries_request_regions(void)
void __init pSeries_final_fixup(void)
{
- struct pci_controller *hose;
-
pSeries_request_regions();
eeh_show_enabled();
@@ -235,27 +231,6 @@ void __init pSeries_final_fixup(void)
ppc_md.pcibios_sriov_enable = pseries_pcibios_sriov_enable;
ppc_md.pcibios_sriov_disable = pseries_pcibios_sriov_disable;
#endif
- list_for_each_entry(hose, &hose_list, list_node) {
- struct device_node *dn = hose->dn, *nvdn;
-
- while (1) {
- dn = of_find_all_nodes(dn);
- if (!dn)
- break;
- nvdn = of_parse_phandle(dn, "ibm,nvlink", 0);
- if (!nvdn)
- continue;
- if (!of_device_is_compatible(nvdn, "ibm,npu-link"))
- continue;
- if (!of_device_is_compatible(nvdn->parent,
- "ibm,power9-npu"))
- continue;
-#ifdef CONFIG_PPC_POWERNV
- WARN_ON_ONCE(pnv_npu2_init(hose));
-#endif
- break;
- }
- }
}
/*
@@ -290,6 +265,25 @@ static void fixup_winbond_82c105(struct pci_dev* dev)
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_WINBOND, PCI_DEVICE_ID_WINBOND_82C105,
fixup_winbond_82c105);
+static enum pci_bus_speed prop_to_pci_speed(u32 prop)
+{
+ switch (prop) {
+ case 0x01:
+ return PCIE_SPEED_2_5GT;
+ case 0x02:
+ return PCIE_SPEED_5_0GT;
+ case 0x04:
+ return PCIE_SPEED_8_0GT;
+ case 0x08:
+ return PCIE_SPEED_16_0GT;
+ case 0x10:
+ return PCIE_SPEED_32_0GT;
+ default:
+ pr_debug("Unexpected PCI link speed property value\n");
+ return PCI_SPEED_UNKNOWN;
+ }
+}
+
int pseries_root_bridge_prepare(struct pci_host_bridge *bridge)
{
struct device_node *dn, *pdn;
@@ -322,35 +316,7 @@ int pseries_root_bridge_prepare(struct pci_host_bridge *bridge)
return 0;
}
- switch (pcie_link_speed_stats[0]) {
- case 0x01:
- bus->max_bus_speed = PCIE_SPEED_2_5GT;
- break;
- case 0x02:
- bus->max_bus_speed = PCIE_SPEED_5_0GT;
- break;
- case 0x04:
- bus->max_bus_speed = PCIE_SPEED_8_0GT;
- break;
- default:
- bus->max_bus_speed = PCI_SPEED_UNKNOWN;
- break;
- }
-
- switch (pcie_link_speed_stats[1]) {
- case 0x01:
- bus->cur_bus_speed = PCIE_SPEED_2_5GT;
- break;
- case 0x02:
- bus->cur_bus_speed = PCIE_SPEED_5_0GT;
- break;
- case 0x04:
- bus->cur_bus_speed = PCIE_SPEED_8_0GT;
- break;
- default:
- bus->cur_bus_speed = PCI_SPEED_UNKNOWN;
- break;
- }
-
+ bus->max_bus_speed = prop_to_pci_speed(pcie_link_speed_stats[0]);
+ bus->cur_bus_speed = prop_to_pci_speed(pcie_link_speed_stats[1]);
return 0;
}
diff --git a/arch/powerpc/platforms/pseries/pci_dlpar.c b/arch/powerpc/platforms/pseries/pci_dlpar.c
index 361986e4354e..4ba824568119 100644
--- a/arch/powerpc/platforms/pseries/pci_dlpar.c
+++ b/arch/powerpc/platforms/pseries/pci_dlpar.c
@@ -33,11 +33,13 @@ struct pci_controller *init_phb_dynamic(struct device_node *dn)
pci_devs_phb_init_dynamic(phb);
+ pseries_msi_allocate_domains(phb);
+
/* Create EEH devices for the PHB */
- eeh_dev_phb_init_dynamic(phb);
+ eeh_phb_pe_create(phb);
if (dn->child)
- eeh_add_device_tree_early(PCI_DN(dn));
+ pseries_eeh_init_edev_recursive(PCI_DN(dn));
pcibios_scan_phb(phb);
pcibios_finish_adding_to_bus(phb->bus);
@@ -50,6 +52,7 @@ EXPORT_SYMBOL_GPL(init_phb_dynamic);
int remove_phb_dynamic(struct pci_controller *phb)
{
struct pci_bus *b = phb->bus;
+ struct pci_host_bridge *host_bridge = to_pci_host_bridge(b->bridge);
struct resource *res;
int rc, i;
@@ -73,10 +76,16 @@ int remove_phb_dynamic(struct pci_controller *phb)
}
}
+ pseries_msi_free_domains(phb);
+
+ /* Keep a reference so phb isn't freed yet */
+ get_device(&host_bridge->dev);
+
/* Remove the PCI bus and unregister the bridge device from sysfs */
phb->bus = NULL;
pci_remove_bus(b);
- device_unregister(b->bridge);
+ host_bridge->bus = NULL;
+ device_unregister(&host_bridge->dev);
/* Now release the IO resource */
if (res->flags & IORESOURCE_IO)
@@ -95,6 +104,7 @@ int remove_phb_dynamic(struct pci_controller *phb)
* the pcibios_free_controller_deferred() callback;
* see pseries_root_bridge_prepare().
*/
+ put_device(&host_bridge->dev);
return 0;
}
diff --git a/arch/powerpc/platforms/pseries/plpks.c b/arch/powerpc/platforms/pseries/plpks.c
new file mode 100644
index 000000000000..f4b5b5a64db3
--- /dev/null
+++ b/arch/powerpc/platforms/pseries/plpks.c
@@ -0,0 +1,461 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * POWER LPAR Platform KeyStore (PLPKS)
+ * Copyright (C) 2022 IBM Corporation
+ * Author: Nayna Jain <nayna@linux.ibm.com>
+ *
+ * Provides access to variables stored in the POWER LPAR Platform KeyStore (PLPKS).
+ */
+
+#define pr_fmt(fmt) "plpks: " fmt
+
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/printk.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <asm/hvcall.h>
+#include <asm/machdep.h>
+
+#include "plpks.h"
+
+#define PKS_FW_OWNER 0x1
+#define PKS_BOOTLOADER_OWNER 0x2
+#define PKS_OS_OWNER 0x3
+
+#define LABEL_VERSION 0
+#define MAX_LABEL_ATTR_SIZE 16
+#define MAX_NAME_SIZE 239
+#define MAX_DATA_SIZE 4000
+
+#define PKS_FLUSH_MAX_TIMEOUT 5000 //msec
+#define PKS_FLUSH_SLEEP 10 //msec
+#define PKS_FLUSH_SLEEP_RANGE 400
+
+static u8 *ospassword;
+static u16 ospasswordlength;
+
+// Retrieved with H_PKS_GET_CONFIG
+static u16 maxpwsize;
+static u16 maxobjsize;
+
+struct plpks_auth {
+ u8 version;
+ u8 consumer;
+ __be64 rsvd0;
+ __be32 rsvd1;
+ __be16 passwordlength;
+ u8 password[];
+} __packed __aligned(16);
+
+struct label_attr {
+ u8 prefix[8];
+ u8 version;
+ u8 os;
+ u8 length;
+ u8 reserved[5];
+};
+
+struct label {
+ struct label_attr attr;
+ u8 name[MAX_NAME_SIZE];
+ size_t size;
+};
+
+static int pseries_status_to_err(int rc)
+{
+ int err;
+
+ switch (rc) {
+ case H_SUCCESS:
+ err = 0;
+ break;
+ case H_FUNCTION:
+ err = -ENXIO;
+ break;
+ case H_P1:
+ case H_P2:
+ case H_P3:
+ case H_P4:
+ case H_P5:
+ case H_P6:
+ err = -EINVAL;
+ break;
+ case H_NOT_FOUND:
+ err = -ENOENT;
+ break;
+ case H_BUSY:
+ err = -EBUSY;
+ break;
+ case H_AUTHORITY:
+ err = -EPERM;
+ break;
+ case H_NO_MEM:
+ err = -ENOMEM;
+ break;
+ case H_RESOURCE:
+ err = -EEXIST;
+ break;
+ case H_TOO_BIG:
+ err = -EFBIG;
+ break;
+ case H_STATE:
+ err = -EIO;
+ break;
+ case H_R_STATE:
+ err = -EIO;
+ break;
+ case H_IN_USE:
+ err = -EEXIST;
+ break;
+ case H_ABORTED:
+ err = -EINTR;
+ break;
+ default:
+ err = -EINVAL;
+ }
+
+ return err;
+}
+
+static int plpks_gen_password(void)
+{
+ unsigned long retbuf[PLPAR_HCALL_BUFSIZE] = { 0 };
+ u8 *password, consumer = PKS_OS_OWNER;
+ int rc;
+
+ password = kzalloc(maxpwsize, GFP_KERNEL);
+ if (!password)
+ return -ENOMEM;
+
+ rc = plpar_hcall(H_PKS_GEN_PASSWORD, retbuf, consumer, 0,
+ virt_to_phys(password), maxpwsize);
+
+ if (!rc) {
+ ospasswordlength = maxpwsize;
+ ospassword = kzalloc(maxpwsize, GFP_KERNEL);
+ if (!ospassword) {
+ kfree(password);
+ return -ENOMEM;
+ }
+ memcpy(ospassword, password, ospasswordlength);
+ } else {
+ if (rc == H_IN_USE) {
+ pr_warn("Password is already set for POWER LPAR Platform KeyStore\n");
+ rc = 0;
+ } else {
+ goto out;
+ }
+ }
+out:
+ kfree(password);
+
+ return pseries_status_to_err(rc);
+}
+
+static struct plpks_auth *construct_auth(u8 consumer)
+{
+ struct plpks_auth *auth;
+
+ if (consumer > PKS_OS_OWNER)
+ return ERR_PTR(-EINVAL);
+
+ auth = kmalloc(struct_size(auth, password, maxpwsize), GFP_KERNEL);
+ if (!auth)
+ return ERR_PTR(-ENOMEM);
+
+ auth->version = 1;
+ auth->consumer = consumer;
+ auth->rsvd0 = 0;
+ auth->rsvd1 = 0;
+
+ if (consumer == PKS_FW_OWNER || consumer == PKS_BOOTLOADER_OWNER) {
+ auth->passwordlength = 0;
+ return auth;
+ }
+
+ memcpy(auth->password, ospassword, ospasswordlength);
+
+ auth->passwordlength = cpu_to_be16(ospasswordlength);
+
+ return auth;
+}
+
+/**
+ * Label is combination of label attributes + name.
+ * Label attributes are used internally by kernel and not exposed to the user.
+ */
+static struct label *construct_label(char *component, u8 varos, u8 *name,
+ u16 namelen)
+{
+ struct label *label;
+ size_t slen;
+
+ if (!name || namelen > MAX_NAME_SIZE)
+ return ERR_PTR(-EINVAL);
+
+ slen = strlen(component);
+ if (component && slen > sizeof(label->attr.prefix))
+ return ERR_PTR(-EINVAL);
+
+ label = kzalloc(sizeof(*label), GFP_KERNEL);
+ if (!label)
+ return ERR_PTR(-ENOMEM);
+
+ if (component)
+ memcpy(&label->attr.prefix, component, slen);
+
+ label->attr.version = LABEL_VERSION;
+ label->attr.os = varos;
+ label->attr.length = MAX_LABEL_ATTR_SIZE;
+ memcpy(&label->name, name, namelen);
+
+ label->size = sizeof(struct label_attr) + namelen;
+
+ return label;
+}
+
+static int _plpks_get_config(void)
+{
+ unsigned long retbuf[PLPAR_HCALL_BUFSIZE] = { 0 };
+ struct {
+ u8 version;
+ u8 flags;
+ __be32 rsvd0;
+ __be16 maxpwsize;
+ __be16 maxobjlabelsize;
+ __be16 maxobjsize;
+ __be32 totalsize;
+ __be32 usedspace;
+ __be32 supportedpolicies;
+ __be64 rsvd1;
+ } __packed config;
+ size_t size;
+ int rc;
+
+ size = sizeof(config);
+
+ rc = plpar_hcall(H_PKS_GET_CONFIG, retbuf, virt_to_phys(&config), size);
+
+ if (rc != H_SUCCESS)
+ return pseries_status_to_err(rc);
+
+ maxpwsize = be16_to_cpu(config.maxpwsize);
+ maxobjsize = be16_to_cpu(config.maxobjsize);
+
+ return 0;
+}
+
+static int plpks_confirm_object_flushed(struct label *label,
+ struct plpks_auth *auth)
+{
+ unsigned long retbuf[PLPAR_HCALL_BUFSIZE] = { 0 };
+ u64 timeout = 0;
+ u8 status;
+ int rc;
+
+ do {
+ rc = plpar_hcall(H_PKS_CONFIRM_OBJECT_FLUSHED, retbuf,
+ virt_to_phys(auth), virt_to_phys(label),
+ label->size);
+
+ status = retbuf[0];
+ if (rc) {
+ if (rc == H_NOT_FOUND && status == 1)
+ rc = 0;
+ break;
+ }
+
+ if (!rc && status == 1)
+ break;
+
+ usleep_range(PKS_FLUSH_SLEEP,
+ PKS_FLUSH_SLEEP + PKS_FLUSH_SLEEP_RANGE);
+ timeout = timeout + PKS_FLUSH_SLEEP;
+ } while (timeout < PKS_FLUSH_MAX_TIMEOUT);
+
+ rc = pseries_status_to_err(rc);
+
+ return rc;
+}
+
+int plpks_write_var(struct plpks_var var)
+{
+ unsigned long retbuf[PLPAR_HCALL_BUFSIZE] = { 0 };
+ struct plpks_auth *auth;
+ struct label *label;
+ int rc;
+
+ if (!var.component || !var.data || var.datalen <= 0 ||
+ var.namelen > MAX_NAME_SIZE || var.datalen > MAX_DATA_SIZE)
+ return -EINVAL;
+
+ if (var.policy & SIGNEDUPDATE)
+ return -EINVAL;
+
+ auth = construct_auth(PKS_OS_OWNER);
+ if (IS_ERR(auth))
+ return PTR_ERR(auth);
+
+ label = construct_label(var.component, var.os, var.name, var.namelen);
+ if (IS_ERR(label)) {
+ rc = PTR_ERR(label);
+ goto out;
+ }
+
+ rc = plpar_hcall(H_PKS_WRITE_OBJECT, retbuf, virt_to_phys(auth),
+ virt_to_phys(label), label->size, var.policy,
+ virt_to_phys(var.data), var.datalen);
+
+ if (!rc)
+ rc = plpks_confirm_object_flushed(label, auth);
+
+ if (rc)
+ pr_err("Failed to write variable %s for component %s with error %d\n",
+ var.name, var.component, rc);
+
+ rc = pseries_status_to_err(rc);
+ kfree(label);
+out:
+ kfree(auth);
+
+ return rc;
+}
+
+int plpks_remove_var(char *component, u8 varos, struct plpks_var_name vname)
+{
+ unsigned long retbuf[PLPAR_HCALL_BUFSIZE] = { 0 };
+ struct plpks_auth *auth;
+ struct label *label;
+ int rc;
+
+ if (!component || vname.namelen > MAX_NAME_SIZE)
+ return -EINVAL;
+
+ auth = construct_auth(PKS_OS_OWNER);
+ if (IS_ERR(auth))
+ return PTR_ERR(auth);
+
+ label = construct_label(component, varos, vname.name, vname.namelen);
+ if (IS_ERR(label)) {
+ rc = PTR_ERR(label);
+ goto out;
+ }
+
+ rc = plpar_hcall(H_PKS_REMOVE_OBJECT, retbuf, virt_to_phys(auth),
+ virt_to_phys(label), label->size);
+
+ if (!rc)
+ rc = plpks_confirm_object_flushed(label, auth);
+
+ if (rc)
+ pr_err("Failed to remove variable %s for component %s with error %d\n",
+ vname.name, component, rc);
+
+ rc = pseries_status_to_err(rc);
+ kfree(label);
+out:
+ kfree(auth);
+
+ return rc;
+}
+
+static int plpks_read_var(u8 consumer, struct plpks_var *var)
+{
+ unsigned long retbuf[PLPAR_HCALL_BUFSIZE] = { 0 };
+ struct plpks_auth *auth;
+ struct label *label;
+ u8 *output;
+ int rc;
+
+ if (var->namelen > MAX_NAME_SIZE)
+ return -EINVAL;
+
+ auth = construct_auth(consumer);
+ if (IS_ERR(auth))
+ return PTR_ERR(auth);
+
+ label = construct_label(var->component, var->os, var->name,
+ var->namelen);
+ if (IS_ERR(label)) {
+ rc = PTR_ERR(label);
+ goto out_free_auth;
+ }
+
+ output = kzalloc(maxobjsize, GFP_KERNEL);
+ if (!output) {
+ rc = -ENOMEM;
+ goto out_free_label;
+ }
+
+ rc = plpar_hcall(H_PKS_READ_OBJECT, retbuf, virt_to_phys(auth),
+ virt_to_phys(label), label->size, virt_to_phys(output),
+ maxobjsize);
+
+ if (rc != H_SUCCESS) {
+ pr_err("Failed to read variable %s for component %s with error %d\n",
+ var->name, var->component, rc);
+ rc = pseries_status_to_err(rc);
+ goto out_free_output;
+ }
+
+ if (var->datalen == 0 || var->datalen > retbuf[0])
+ var->datalen = retbuf[0];
+
+ var->data = kzalloc(var->datalen, GFP_KERNEL);
+ if (!var->data) {
+ rc = -ENOMEM;
+ goto out_free_output;
+ }
+ var->policy = retbuf[1];
+
+ memcpy(var->data, output, var->datalen);
+ rc = 0;
+
+out_free_output:
+ kfree(output);
+out_free_label:
+ kfree(label);
+out_free_auth:
+ kfree(auth);
+
+ return rc;
+}
+
+int plpks_read_os_var(struct plpks_var *var)
+{
+ return plpks_read_var(PKS_OS_OWNER, var);
+}
+
+int plpks_read_fw_var(struct plpks_var *var)
+{
+ return plpks_read_var(PKS_FW_OWNER, var);
+}
+
+int plpks_read_bootloader_var(struct plpks_var *var)
+{
+ return plpks_read_var(PKS_BOOTLOADER_OWNER, var);
+}
+
+static __init int pseries_plpks_init(void)
+{
+ int rc;
+
+ rc = _plpks_get_config();
+
+ if (rc) {
+ pr_err("POWER LPAR Platform KeyStore is not supported or enabled\n");
+ return rc;
+ }
+
+ rc = plpks_gen_password();
+ if (rc)
+ pr_err("Failed setting POWER LPAR Platform KeyStore Password\n");
+ else
+ pr_info("POWER LPAR Platform KeyStore initialized successfully\n");
+
+ return rc;
+}
+machine_arch_initcall(pseries, pseries_plpks_init);
diff --git a/arch/powerpc/platforms/pseries/plpks.h b/arch/powerpc/platforms/pseries/plpks.h
new file mode 100644
index 000000000000..c6a291367bb1
--- /dev/null
+++ b/arch/powerpc/platforms/pseries/plpks.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2022 IBM Corporation
+ * Author: Nayna Jain <nayna@linux.ibm.com>
+ *
+ * Platform keystore for pseries LPAR (PLPKS).
+ */
+
+#ifndef _PSERIES_PLPKS_H
+#define _PSERIES_PLPKS_H
+
+#include <linux/types.h>
+#include <linux/list.h>
+
+#define OSSECBOOTAUDIT 0x40000000
+#define OSSECBOOTENFORCE 0x20000000
+#define WORLDREADABLE 0x08000000
+#define SIGNEDUPDATE 0x01000000
+
+#define PLPKS_VAR_LINUX 0x01
+#define PLPKS_VAR_COMMON 0x04
+
+struct plpks_var {
+ char *component;
+ u8 *name;
+ u8 *data;
+ u32 policy;
+ u16 namelen;
+ u16 datalen;
+ u8 os;
+};
+
+struct plpks_var_name {
+ u8 *name;
+ u16 namelen;
+};
+
+struct plpks_var_name_list {
+ u32 varcount;
+ struct plpks_var_name varlist[];
+};
+
+/**
+ * Writes the specified var and its data to PKS.
+ * Any caller of the PKS driver should present a valid component type for
+ * its variable.
+ */
+int plpks_write_var(struct plpks_var var);
+
+/**
+ * Removes the specified var and its data from PKS.
+ */
+int plpks_remove_var(char *component, u8 varos,
+ struct plpks_var_name vname);
+
+/**
+ * Returns the data for the specified os variable.
+ */
+int plpks_read_os_var(struct plpks_var *var);
+
+/**
+ * Returns the data for the specified firmware variable.
+ */
+int plpks_read_fw_var(struct plpks_var *var);
+
+/**
+ * Returns the data for the specified bootloader variable.
+ */
+int plpks_read_bootloader_var(struct plpks_var *var);
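+
+/*
+ * Minimal usage sketch (component, name and data values are hypothetical):
+ *
+ *   struct plpks_var var = {
+ *           .component = "certs",
+ *           .name      = (u8 *)"db",
+ *           .namelen   = 2,
+ *           .os        = PLPKS_VAR_LINUX,
+ *           .policy    = WORLDREADABLE,
+ *           .data      = data,
+ *           .datalen   = datalen,
+ *   };
+ *   rc = plpks_write_var(var);
+ */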
+
+#endif
diff --git a/arch/powerpc/platforms/pseries/pmem.c b/arch/powerpc/platforms/pseries/pmem.c
index f860a897a9e0..3c290b9ed01b 100644
--- a/arch/powerpc/platforms/pseries/pmem.c
+++ b/arch/powerpc/platforms/pseries/pmem.c
@@ -15,7 +15,6 @@
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/slab.h>
-#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/firmware.h>
#include <asm/machdep.h>
@@ -24,7 +23,6 @@
#include <asm/topology.h>
#include "pseries.h"
-#include "offline_states.h"
static struct device_node *pmem_node;
@@ -140,13 +138,19 @@ int dlpar_hp_pmem(struct pseries_hp_errorlog *hp_elog)
return rc;
}
-const struct of_device_id drc_pmem_match[] = {
+static const struct of_device_id drc_pmem_match[] = {
{ .type = "ibm,persistent-memory", },
{}
};
static int pseries_pmem_init(void)
{
+ /*
+ * Only supported on POWER8 and above.
+ */
+ if (!cpu_has_feature(CPU_FTR_ARCH_207S))
+ return 0;
+
pmem_node = of_find_node_by_type(NULL, "ibm,persistent-memory");
if (!pmem_node)
return 0;
diff --git a/arch/powerpc/platforms/pseries/power.c b/arch/powerpc/platforms/pseries/power.c
index ee343ec6ab94..3676cb297767 100644
--- a/arch/powerpc/platforms/pseries/power.c
+++ b/arch/powerpc/platforms/pseries/power.c
@@ -51,7 +51,7 @@ static struct attribute *g[] = {
NULL,
};
-static struct attribute_group attr_group = {
+static const struct attribute_group attr_group = {
.attrs = g,
};
diff --git a/arch/powerpc/platforms/pseries/pseries.h b/arch/powerpc/platforms/pseries/pseries.h
index 13fa370a87e4..1d75b7742ef0 100644
--- a/arch/powerpc/platforms/pseries/pseries.h
+++ b/arch/powerpc/platforms/pseries/pseries.h
@@ -11,7 +11,7 @@
struct device_node;
-extern void request_event_sources_irqs(struct device_node *np,
+void __init request_event_sources_irqs(struct device_node *np,
irq_handler_t handler, const char *name);
#include <linux/of.h>
@@ -21,6 +21,7 @@ struct pt_regs;
extern int pSeries_system_reset_exception(struct pt_regs *regs);
extern int pSeries_machine_check_exception(struct pt_regs *regs);
extern long pseries_machine_check_realmode(struct pt_regs *regs);
+void pSeries_machine_check_log_err(void);
#ifdef CONFIG_SMP
extern void smp_init_pseries(void);
@@ -33,19 +34,17 @@ int smp_query_cpu_stopped(unsigned int pcpu);
#define QCSS_HARDWARE_ERROR -1
#define QCSS_HARDWARE_BUSY -2
#else
-static inline void smp_init_pseries(void) { };
+static inline void smp_init_pseries(void) { }
#endif
extern void pseries_kexec_cpu_down(int crash_shutdown, int secondary);
+void pseries_machine_kexec(struct kimage *image);
extern void pSeries_final_fixup(void);
/* Poweron flag used for enabling auto ups restart */
extern unsigned long rtas_poweron_auto;
-/* Provided by HVC VIO */
-extern void hvc_vio_init_early(void);
-
/* Dynamic logical Partitioning/Mobility */
extern void dlpar_free_cc_nodes(struct device_node *);
extern void dlpar_free_cc_property(struct property *);
@@ -55,6 +54,7 @@ extern int dlpar_attach_node(struct device_node *, struct device_node *);
extern int dlpar_detach_node(struct device_node *);
extern int dlpar_acquire_drc(u32 drc_index);
extern int dlpar_release_drc(u32 drc_index);
+extern int dlpar_unisolate_drc(u32 drc_index);
void queue_hotplug_event(struct pseries_hp_errorlog *hp_errlog);
int handle_dlpar_errorlog(struct pseries_hp_errorlog *hp_errlog);
@@ -87,6 +87,8 @@ struct pci_host_bridge;
int pseries_root_bridge_prepare(struct pci_host_bridge *bridge);
extern struct pci_controller_ops pseries_pci_controller_ops;
+int pseries_msi_allocate_domains(struct pci_controller *phb);
+void pseries_msi_free_domains(struct pci_controller *phb);
unsigned long pseries_memory_block_size(void);
@@ -111,7 +113,15 @@ static inline unsigned long cmo_get_page_size(void)
int dlpar_workqueue_init(void);
-void pseries_setup_rfi_flush(void);
+extern u32 pseries_security_flavor;
+void pseries_setup_security_mitigations(void);
+
+#ifdef CONFIG_PPC_64S_HASH_MMU
void pseries_lpar_read_hblkrm_characteristics(void);
+#else
+static inline void pseries_lpar_read_hblkrm_characteristics(void) { }
+#endif
+
+void pseries_rng_init(void);
#endif /* _PSERIES_PSERIES_H */
diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c
index 1d7f973c647b..f12516c3998c 100644
--- a/arch/powerpc/platforms/pseries/ras.c
+++ b/arch/powerpc/platforms/pseries/ras.c
@@ -23,11 +23,6 @@ static DEFINE_SPINLOCK(ras_log_buf_lock);
static int ras_check_exception_token;
-static void mce_process_errlog_event(struct irq_work *work);
-static struct irq_work mce_errlog_process_work = {
- .func = mce_process_errlog_event,
-};
-
#define EPOW_SENSOR_TOKEN 9
#define EPOW_SENSOR_INDEX 0
@@ -60,11 +55,17 @@ struct pseries_mc_errorlog {
* XX 2: Reserved.
* XXX 3: Type of UE error.
*
- * For error_type != MC_ERROR_TYPE_UE
+ * For error_type == MC_ERROR_TYPE_SLB/ERAT/TLB
* XXXXXXXX
* X 1: Effective address provided.
* XXXXX 5: Reserved.
* XX 2: Type of SLB/ERAT/TLB error.
+ *
+ * For error_type == MC_ERROR_TYPE_CTRL_MEM_ACCESS
+ * XXXXXXXX
+ * X 1: Error causing address provided.
+ * XXX 3: Type of error.
+ * XXXX 4: Reserved.
*/
u8 sub_err_type;
u8 reserved_1[6];
@@ -80,6 +81,7 @@ struct pseries_mc_errorlog {
#define MC_ERROR_TYPE_TLB 0x04
#define MC_ERROR_TYPE_D_CACHE 0x05
#define MC_ERROR_TYPE_I_CACHE 0x07
+#define MC_ERROR_TYPE_CTRL_MEM_ACCESS 0x08
/* RTAS pseries MCE error sub types */
#define MC_ERROR_UE_INDETERMINATE 0
@@ -90,6 +92,7 @@ struct pseries_mc_errorlog {
#define UE_EFFECTIVE_ADDR_PROVIDED 0x40
#define UE_LOGICAL_ADDR_PROVIDED 0x20
+#define MC_EFFECTIVE_ADDR_PROVIDED 0x80
#define MC_ERROR_SLB_PARITY 0
#define MC_ERROR_SLB_MULTIHIT 1
@@ -103,6 +106,9 @@ struct pseries_mc_errorlog {
#define MC_ERROR_TLB_MULTIHIT 2
#define MC_ERROR_TLB_INDETERMINATE 3
+#define MC_ERROR_CTRL_MEM_ACCESS_PTABLE_WALK 0
+#define MC_ERROR_CTRL_MEM_ACCESS_OP_ACCESS 1
+
static inline u8 rtas_mc_error_sub_type(const struct pseries_mc_errorlog *mlog)
{
switch (mlog->error_type) {
@@ -112,6 +118,8 @@ static inline u8 rtas_mc_error_sub_type(const struct pseries_mc_errorlog *mlog)
case MC_ERROR_TYPE_ERAT:
case MC_ERROR_TYPE_TLB:
return (mlog->sub_err_type & 0x03);
+ case MC_ERROR_TYPE_CTRL_MEM_ACCESS:
+ return (mlog->sub_err_type & 0x70) >> 4;
default:
return 0;
}
@@ -122,7 +130,7 @@ static inline u8 rtas_mc_error_sub_type(const struct pseries_mc_errorlog *mlog)
* devices or systems (e.g. hugepages) that have not been initialized at the
* subsys stage.
*/
-int __init init_ras_hotplug_IRQ(void)
+static int __init init_ras_hotplug_IRQ(void)
{
struct device_node *np;
@@ -184,7 +192,6 @@ static void handle_system_shutdown(char event_modifier)
case EPOW_SHUTDOWN_ON_UPS:
pr_emerg("Loss of system power detected. System is running on"
" UPS/battery. Check RTAS error log for details\n");
- orderly_poweroff(true);
break;
case EPOW_SHUTDOWN_LOSS_OF_CRITICAL_FUNCTIONS:
@@ -316,12 +323,10 @@ static irqreturn_t ras_hotplug_interrupt(int irq, void *dev_id)
/* Handle environmental and power warning (EPOW) interrupts. */
static irqreturn_t ras_epow_interrupt(int irq, void *dev_id)
{
- int status;
int state;
int critical;
- status = rtas_get_sensor_fast(EPOW_SENSOR_TOKEN, EPOW_SENSOR_INDEX,
- &state);
+ rtas_get_sensor_fast(EPOW_SENSOR_TOKEN, EPOW_SENSOR_INDEX, &state);
if (state > 3)
critical = 1; /* Time Critical */
@@ -330,12 +335,9 @@ static irqreturn_t ras_epow_interrupt(int irq, void *dev_id)
spin_lock(&ras_log_buf_lock);
- status = rtas_call(ras_check_exception_token, 6, 1, NULL,
- RTAS_VECTOR_EXTERNAL_INTERRUPT,
- virq_to_hw(irq),
- RTAS_EPOW_WARNING,
- critical, __pa(&ras_log_buf),
- rtas_get_error_log_max());
+ rtas_call(ras_check_exception_token, 6, 1, NULL, RTAS_VECTOR_EXTERNAL_INTERRUPT,
+ virq_to_hw(irq), RTAS_EPOW_WARNING, critical, __pa(&ras_log_buf),
+ rtas_get_error_log_max());
log_error(ras_log_buf, ERR_TYPE_RTAS_LOG, 0);
@@ -395,16 +397,31 @@ static irqreturn_t ras_error_interrupt(int irq, void *dev_id)
/*
* Some versions of FWNMI place the buffer inside the 4kB page starting at
* 0x7000. Other versions place it inside the rtas buffer. We check both.
+ * Minimum size of the buffer is 16 bytes.
*/
#define VALID_FWNMI_BUFFER(A) \
- ((((A) >= 0x7000) && ((A) < 0x7ff0)) || \
- (((A) >= rtas.base) && ((A) < (rtas.base + rtas.size - 16))))
+ ((((A) >= 0x7000) && ((A) <= 0x8000 - 16)) || \
+ (((A) >= rtas.base) && ((A) <= (rtas.base + rtas.size - 16))))
static inline struct rtas_error_log *fwnmi_get_errlog(void)
{
return (struct rtas_error_log *)local_paca->mce_data_buf;
}
+static __be64 *fwnmi_get_savep(struct pt_regs *regs)
+{
+ unsigned long savep_ra;
+
+ /* Mask top two bits */
+ savep_ra = regs->gpr[3] & ~(0x3UL << 62);
+ if (!VALID_FWNMI_BUFFER(savep_ra)) {
+ printk(KERN_ERR "FWNMI: corrupt r3 0x%016lx\n", regs->gpr[3]);
+ return NULL;
+ }
+
+ return __va(savep_ra);
+}
+
/*
* Get the error information for errors coming through the
* FWNMI vectors. The pt_regs' r3 will be updated to reflect
@@ -422,19 +439,14 @@ static inline struct rtas_error_log *fwnmi_get_errlog(void)
*/
static struct rtas_error_log *fwnmi_get_errinfo(struct pt_regs *regs)
{
- unsigned long *savep;
struct rtas_error_log *h;
+ __be64 *savep;
- /* Mask top two bits */
- regs->gpr[3] &= ~(0x3UL << 62);
-
- if (!VALID_FWNMI_BUFFER(regs->gpr[3])) {
- printk(KERN_ERR "FWNMI: corrupt r3 0x%016lx\n", regs->gpr[3]);
+ savep = fwnmi_get_savep(regs);
+ if (!savep)
return NULL;
- }
- savep = __va(regs->gpr[3]);
- regs->gpr[3] = be64_to_cpu(savep[0]); /* restore original r3 */
+ regs->gpr[3] = be64_to_cpu(savep[0]); /* restore original r3 */
h = (struct rtas_error_log *)&savep[1];
/* Use the per cpu buffer from paca to store rtas error log */
@@ -458,7 +470,15 @@ static struct rtas_error_log *fwnmi_get_errinfo(struct pt_regs *regs)
*/
static void fwnmi_release_errinfo(void)
{
- int ret = rtas_call(rtas_token("ibm,nmi-interlock"), 0, 1, NULL);
+ struct rtas_args rtas_args;
+ int ret;
+
+ /*
+ * On pseries, the machine check stack is limited to under 4GB, so
+ * args can be on-stack.
+ */
+ rtas_call_unlocked(&rtas_args, ibm_nmi_interlock_token, 0, 1, NULL);
+ ret = be32_to_cpu(rtas_args.rets[0]);
if (ret != 0)
printk(KERN_ERR "FWNMI: nmi-interlock failed: %d\n", ret);
}
@@ -475,17 +495,27 @@ int pSeries_system_reset_exception(struct pt_regs *regs)
if ((be64_to_cpu(regs->msr) &
(MSR_LE|MSR_RI|MSR_DR|MSR_IR|MSR_ME|MSR_PR|
MSR_ILE|MSR_HV|MSR_SF)) == (MSR_DR|MSR_SF)) {
- regs->nip = be64_to_cpu((__be64)regs->nip);
- regs->msr = 0;
+ regs_set_return_ip(regs, be64_to_cpu((__be64)regs->nip));
+ regs_set_return_msr(regs, 0);
}
#endif
if (fwnmi_active) {
- struct rtas_error_log *errhdr = fwnmi_get_errinfo(regs);
- if (errhdr) {
- /* XXX Should look at FWNMI information */
- }
- fwnmi_release_errinfo();
+ __be64 *savep;
+
+ /*
+ * Firmware (PowerVM and KVM) saves r3 to a save area like
+ * machine check, which is not exactly what PAPR (2.9)
+ * suggests but there is no way to detect otherwise, so this
+ * is the interface now.
+ *
+ * System resets do not save any error log or require an
+ * "ibm,nmi-interlock" rtas call to release.
+ */
+
+ savep = fwnmi_get_savep(regs);
+ if (savep)
+ regs->gpr[3] = be64_to_cpu(savep[0]); /* restore original r3 */
}
if (smp_handle_nmi_ipi(regs))
@@ -494,18 +524,60 @@ int pSeries_system_reset_exception(struct pt_regs *regs)
return 0; /* need to perform reset */
}
+static int mce_handle_err_realmode(int disposition, u8 error_type)
+{
+#ifdef CONFIG_PPC_BOOK3S_64
+ if (disposition == RTAS_DISP_NOT_RECOVERED) {
+ switch (error_type) {
+ case MC_ERROR_TYPE_ERAT:
+ flush_erat();
+ disposition = RTAS_DISP_FULLY_RECOVERED;
+ break;
+ case MC_ERROR_TYPE_SLB:
+#ifdef CONFIG_PPC_64S_HASH_MMU
+ /*
+ * Store the old slb content in paca before flushing.
+ * Print this when we go to virtual mode.
+ * There are chances that we may hit MCE again if there
+ * is a parity error on the SLB entry we are trying to read
+ * for saving. Hence limit the slb saving to a single
+ * level of recursion.
+ */
+ if (local_paca->in_mce == 1)
+ slb_save_contents(local_paca->mce_faulty_slbs);
+ flush_and_reload_slb();
+ disposition = RTAS_DISP_FULLY_RECOVERED;
+#endif
+ break;
+ default:
+ break;
+ }
+ } else if (disposition == RTAS_DISP_LIMITED_RECOVERY) {
+ /* Platform corrected itself but could be degraded */
+ pr_err("MCE: limited recovery, system may be degraded\n");
+ disposition = RTAS_DISP_FULLY_RECOVERED;
+ }
+#endif
+ return disposition;
+}
-static int mce_handle_error(struct pt_regs *regs, struct rtas_error_log *errp)
+static int mce_handle_err_virtmode(struct pt_regs *regs,
+ struct rtas_error_log *errp,
+ struct pseries_mc_errorlog *mce_log,
+ int disposition)
{
struct mce_error_info mce_err = { 0 };
- unsigned long eaddr = 0, paddr = 0;
- struct pseries_errorlog *pseries_log;
- struct pseries_mc_errorlog *mce_log;
- int disposition = rtas_error_disposition(errp);
int initiator = rtas_error_initiator(errp);
int severity = rtas_error_severity(errp);
+ unsigned long eaddr = 0, paddr = 0;
u8 error_type, err_sub_type;
+ if (!mce_log)
+ goto out;
+
+ error_type = mce_log->error_type;
+ err_sub_type = rtas_mc_error_sub_type(mce_log);
+
if (initiator == RTAS_INITIATOR_UNKNOWN)
mce_err.initiator = MCE_INITIATOR_UNKNOWN;
else if (initiator == RTAS_INITIATOR_CPU)
@@ -531,8 +603,6 @@ static int mce_handle_error(struct pt_regs *regs, struct rtas_error_log *errp)
mce_err.severity = MCE_SEV_SEVERE;
else if (severity == RTAS_SEVERITY_ERROR)
mce_err.severity = MCE_SEV_SEVERE;
- else if (severity == RTAS_SEVERITY_FATAL)
- mce_err.severity = MCE_SEV_FATAL;
else
mce_err.severity = MCE_SEV_FATAL;
@@ -544,20 +614,12 @@ static int mce_handle_error(struct pt_regs *regs, struct rtas_error_log *errp)
mce_err.error_type = MCE_ERROR_TYPE_UNKNOWN;
mce_err.error_class = MCE_ECLASS_UNKNOWN;
- if (!rtas_error_extended(errp))
- goto out;
-
- pseries_log = get_pseries_errorlog(errp, PSERIES_ELOG_SECT_ID_MCE);
- if (pseries_log == NULL)
- goto out;
-
- mce_log = (struct pseries_mc_errorlog *)pseries_log->data;
- error_type = mce_log->error_type;
- err_sub_type = rtas_mc_error_sub_type(mce_log);
-
- switch (mce_log->error_type) {
+ switch (error_type) {
case MC_ERROR_TYPE_UE:
mce_err.error_type = MCE_ERROR_TYPE_UE;
+ mce_common_process_ue(regs, &mce_err);
+ if (mce_err.ignore_event)
+ disposition = RTAS_DISP_FULLY_RECOVERED;
switch (err_sub_type) {
case MC_ERROR_UE_IFETCH:
mce_err.u.ue_error_type = MCE_UE_ERROR_IFETCH;
@@ -604,7 +666,7 @@ static int mce_handle_error(struct pt_regs *regs, struct rtas_error_log *errp)
mce_err.u.slb_error_type = MCE_SLB_ERROR_INDETERMINATE;
break;
}
- if (mce_log->sub_err_type & 0x80)
+ if (mce_log->sub_err_type & MC_EFFECTIVE_ADDR_PROVIDED)
eaddr = be64_to_cpu(mce_log->effective_address);
break;
case MC_ERROR_TYPE_ERAT:
@@ -621,7 +683,7 @@ static int mce_handle_error(struct pt_regs *regs, struct rtas_error_log *errp)
mce_err.u.erat_error_type = MCE_ERAT_ERROR_INDETERMINATE;
break;
}
- if (mce_log->sub_err_type & 0x80)
+ if (mce_log->sub_err_type & MC_EFFECTIVE_ADDR_PROVIDED)
eaddr = be64_to_cpu(mce_log->effective_address);
break;
case MC_ERROR_TYPE_TLB:
@@ -638,61 +700,69 @@ static int mce_handle_error(struct pt_regs *regs, struct rtas_error_log *errp)
mce_err.u.tlb_error_type = MCE_TLB_ERROR_INDETERMINATE;
break;
}
- if (mce_log->sub_err_type & 0x80)
+ if (mce_log->sub_err_type & MC_EFFECTIVE_ADDR_PROVIDED)
eaddr = be64_to_cpu(mce_log->effective_address);
break;
case MC_ERROR_TYPE_D_CACHE:
mce_err.error_type = MCE_ERROR_TYPE_DCACHE;
break;
case MC_ERROR_TYPE_I_CACHE:
- mce_err.error_type = MCE_ERROR_TYPE_DCACHE;
+ mce_err.error_type = MCE_ERROR_TYPE_ICACHE;
+ break;
+ case MC_ERROR_TYPE_CTRL_MEM_ACCESS:
+ mce_err.error_type = MCE_ERROR_TYPE_RA;
+ switch (err_sub_type) {
+ case MC_ERROR_CTRL_MEM_ACCESS_PTABLE_WALK:
+ mce_err.u.ra_error_type =
+ MCE_RA_ERROR_PAGE_TABLE_WALK_LOAD_STORE_FOREIGN;
+ break;
+ case MC_ERROR_CTRL_MEM_ACCESS_OP_ACCESS:
+ mce_err.u.ra_error_type =
+ MCE_RA_ERROR_LOAD_STORE_FOREIGN;
+ break;
+ }
+ if (mce_log->sub_err_type & MC_EFFECTIVE_ADDR_PROVIDED)
+ eaddr = be64_to_cpu(mce_log->effective_address);
break;
case MC_ERROR_TYPE_UNKNOWN:
default:
mce_err.error_type = MCE_ERROR_TYPE_UNKNOWN;
break;
}
-
-#ifdef CONFIG_PPC_BOOK3S_64
- if (disposition == RTAS_DISP_NOT_RECOVERED) {
- switch (error_type) {
- case MC_ERROR_TYPE_SLB:
- case MC_ERROR_TYPE_ERAT:
- /*
- * Store the old slb content in paca before flushing.
- * Print this when we go to virtual mode.
- * There are chances that we may hit MCE again if there
- * is a parity error on the SLB entry we trying to read
- * for saving. Hence limit the slb saving to single
- * level of recursion.
- */
- if (local_paca->in_mce == 1)
- slb_save_contents(local_paca->mce_faulty_slbs);
- flush_and_reload_slb();
- disposition = RTAS_DISP_FULLY_RECOVERED;
- break;
- default:
- break;
- }
- } else if (disposition == RTAS_DISP_LIMITED_RECOVERY) {
- /* Platform corrected itself but could be degraded */
- printk(KERN_ERR "MCE: limited recovery, system may "
- "be degraded\n");
- disposition = RTAS_DISP_FULLY_RECOVERED;
- }
-#endif
-
out:
save_mce_event(regs, disposition == RTAS_DISP_FULLY_RECOVERED,
- &mce_err, regs->nip, eaddr, paddr);
+ &mce_err, regs->nip, eaddr, paddr);
+ return disposition;
+}
+
+static int mce_handle_error(struct pt_regs *regs, struct rtas_error_log *errp)
+{
+ struct pseries_errorlog *pseries_log;
+ struct pseries_mc_errorlog *mce_log = NULL;
+ int disposition = rtas_error_disposition(errp);
+ u8 error_type;
+
+ if (!rtas_error_extended(errp))
+ goto out;
+ pseries_log = get_pseries_errorlog(errp, PSERIES_ELOG_SECT_ID_MCE);
+ if (!pseries_log)
+ goto out;
+
+ mce_log = (struct pseries_mc_errorlog *)pseries_log->data;
+ error_type = mce_log->error_type;
+
+ disposition = mce_handle_err_realmode(disposition, error_type);
+out:
+ disposition = mce_handle_err_virtmode(regs, errp, mce_log,
+ disposition);
return disposition;
}
/*
* Process MCE rtas errlog event.
*/
-static void mce_process_errlog_event(struct irq_work *work)
+void pSeries_machine_check_log_err(void)
{
struct rtas_error_log *err;
@@ -713,7 +783,7 @@ static int recover_mce(struct pt_regs *regs, struct machine_check_event *evt)
{
int recovered = 0;
- if (!(regs->msr & MSR_RI)) {
+ if (regs_is_unrecoverable(regs)) {
/* If MSR_RI isn't set, we cannot recover */
pr_err("Machine check interrupt unrecoverable: MSR(RI=0)\n");
recovered = 0;
@@ -749,7 +819,7 @@ static int recover_mce(struct pt_regs *regs, struct machine_check_event *evt)
*/
recovered = 0;
} else {
- die("Machine check", regs, SIGBUS);
+ die_mce("Machine check", regs, SIGBUS);
recovered = 1;
}
}
@@ -801,10 +871,8 @@ long pseries_machine_check_realmode(struct pt_regs *regs)
* virtual mode.
*/
disposition = mce_handle_error(regs, errp);
- fwnmi_release_errinfo();
- /* Queue irq work to log this rtas event later. */
- irq_work_queue(&mce_errlog_process_work);
+ fwnmi_release_errinfo();
if (disposition == RTAS_DISP_FULLY_RECOVERED)
return 1;
diff --git a/arch/powerpc/platforms/pseries/reconfig.c b/arch/powerpc/platforms/pseries/reconfig.c
index 7f7369fec46b..599bd2c78514 100644
--- a/arch/powerpc/platforms/pseries/reconfig.c
+++ b/arch/powerpc/platforms/pseries/reconfig.c
@@ -10,10 +10,10 @@
#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/proc_fs.h>
+#include <linux/security.h>
#include <linux/slab.h>
#include <linux/of.h>
-#include <asm/prom.h>
#include <asm/machdep.h>
#include <linux/uaccess.h>
#include <asm/mmu.h>
@@ -362,6 +362,10 @@ static ssize_t ofdt_write(struct file *file, const char __user *buf, size_t coun
char *kbuf;
char *tmp;
+ rv = security_locked_down(LOCKDOWN_DEVICE_TREE);
+ if (rv)
+ return rv;
+
kbuf = memdup_user_nul(buf, count);
if (IS_ERR(kbuf))
return PTR_ERR(kbuf);
diff --git a/arch/powerpc/platforms/pseries/rng.c b/arch/powerpc/platforms/pseries/rng.c
index bbb97169bf63..6ddfdeaace9e 100644
--- a/arch/powerpc/platforms/pseries/rng.c
+++ b/arch/powerpc/platforms/pseries/rng.c
@@ -10,6 +10,7 @@
#include <asm/archrandom.h>
#include <asm/machdep.h>
#include <asm/plpar_wrappers.h>
+#include "pseries.h"
static int pseries_get_random_long(unsigned long *v)
@@ -24,18 +25,13 @@ static int pseries_get_random_long(unsigned long *v)
return 0;
}
-static __init int rng_init(void)
+void __init pseries_rng_init(void)
{
struct device_node *dn;
dn = of_find_compatible_node(NULL, NULL, "ibm,random");
if (!dn)
- return -ENODEV;
-
- pr_info("Registering arch random hook.\n");
-
+ return;
ppc_md.get_random_seed = pseries_get_random_long;
-
- return 0;
+ of_node_put(dn);
}
-machine_subsys_initcall(pseries, rng_init);
diff --git a/arch/powerpc/platforms/pseries/rtas-fadump.c b/arch/powerpc/platforms/pseries/rtas-fadump.c
index 70c3013fdd07..b5853e9fcc3c 100644
--- a/arch/powerpc/platforms/pseries/rtas-fadump.c
+++ b/arch/powerpc/platforms/pseries/rtas-fadump.c
@@ -13,9 +13,10 @@
#include <linux/delay.h>
#include <linux/seq_file.h>
#include <linux/crash_dump.h>
+#include <linux/of.h>
+#include <linux/of_fdt.h>
#include <asm/page.h>
-#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/fadump.h>
#include <asm/fadump-internal.h>
@@ -39,7 +40,7 @@ static void rtas_fadump_update_config(struct fw_dump *fadump_conf,
* This function is called in the capture kernel to get configuration details
* setup in the first kernel and passed to the f/w.
*/
-static void rtas_fadump_get_config(struct fw_dump *fadump_conf,
+static void __init rtas_fadump_get_config(struct fw_dump *fadump_conf,
const struct rtas_fadump_mem_struct *fdm)
{
fadump_conf->boot_mem_addr[0] =
@@ -108,6 +109,12 @@ static u64 rtas_fadump_init_mem_struct(struct fw_dump *fadump_conf)
fdm.hpte_region.destination_address = cpu_to_be64(addr);
addr += fadump_conf->hpte_region_size;
+ /*
+ * Align boot memory area destination address to page boundary to
+ * be able to mmap read this area in the vmcore.
+ */
+ addr = PAGE_ALIGN(addr);
+
/* RMA region section */
fdm.rmr_region.request_flag = cpu_to_be32(RTAS_FADUMP_REQUEST_FLAG);
fdm.rmr_region.source_data_type =
@@ -247,7 +254,7 @@ static inline int rtas_fadump_gpr_index(u64 id)
return i;
}
-void rtas_fadump_set_regval(struct pt_regs *regs, u64 reg_id, u64 reg_val)
+static void __init rtas_fadump_set_regval(struct pt_regs *regs, u64 reg_id, u64 reg_val)
{
int i;
@@ -272,7 +279,7 @@ void rtas_fadump_set_regval(struct pt_regs *regs, u64 reg_id, u64 reg_val)
regs->dsisr = (unsigned long)reg_val;
}
-static struct rtas_fadump_reg_entry*
+static struct rtas_fadump_reg_entry* __init
rtas_fadump_read_regs(struct rtas_fadump_reg_entry *reg_entry,
struct pt_regs *regs)
{
@@ -351,7 +358,7 @@ static int __init rtas_fadump_build_cpu_notes(struct fw_dump *fadump_conf)
/* Lower 4 bytes of reg_value contains logical cpu id */
cpu = (be64_to_cpu(reg_entry->reg_value) &
RTAS_FADUMP_CPU_ID_MASK);
- if (fdh && !cpumask_test_cpu(cpu, &fdh->online_mask)) {
+ if (fdh && !cpumask_test_cpu(cpu, &fdh->cpu_mask)) {
RTAS_FADUMP_SKIP_TO_NEXT_CPU(reg_entry);
continue;
}
@@ -462,10 +469,10 @@ static void rtas_fadump_region_show(struct fw_dump *fadump_conf,
be64_to_cpu(fdm_ptr->rmr_region.source_len),
be64_to_cpu(fdm_ptr->rmr_region.bytes_dumped));
- /* Dump is active. Show reserved area start address. */
+ /* Dump is active. Show preserved area start address. */
if (fdm_active) {
- seq_printf(m, "\nMemory above %#016lx is reserved for saving crash dump\n",
- fadump_conf->reserve_dump_area_start);
+ seq_printf(m, "\nMemory above %#016llx is reserved for saving crash dump\n",
+ fadump_conf->boot_mem_top);
}
}
@@ -506,7 +513,7 @@ void __init rtas_fadump_dt_scan(struct fw_dump *fadump_conf, u64 node)
fadump_conf->fadump_supported = 1;
/* Firmware supports 64-bit value for size, align it to pagesize. */
- fadump_conf->max_copy_size = _ALIGN_DOWN(U64_MAX, PAGE_SIZE);
+ fadump_conf->max_copy_size = ALIGN_DOWN(U64_MAX, PAGE_SIZE);
/*
* The 'ibm,kernel-dump' rtas node is present only if there is
diff --git a/arch/powerpc/platforms/pseries/scanlog.c b/arch/powerpc/platforms/pseries/scanlog.c
deleted file mode 100644
index 2879c4f0ceb7..000000000000
--- a/arch/powerpc/platforms/pseries/scanlog.c
+++ /dev/null
@@ -1,195 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * c 2001 PPC 64 Team, IBM Corp
- *
- * scan-log-data driver for PPC64 Todd Inglett <tinglett@vnet.ibm.com>
- *
- * When ppc64 hardware fails the service processor dumps internal state
- * of the system. After a reboot the operating system can access a dump
- * of this data using this driver. A dump exists if the device-tree
- * /chosen/ibm,scan-log-data property exists.
- *
- * This driver exports /proc/powerpc/scan-log-dump which can be read.
- * The driver supports only sequential reads.
- *
- * The driver looks at a write to the driver for the single word "reset".
- * If given, the driver will reset the scanlog so the platform can free it.
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/proc_fs.h>
-#include <linux/init.h>
-#include <linux/delay.h>
-#include <linux/slab.h>
-#include <linux/uaccess.h>
-#include <asm/rtas.h>
-#include <asm/prom.h>
-
-#define MODULE_VERS "1.0"
-#define MODULE_NAME "scanlog"
-
-/* Status returns from ibm,scan-log-dump */
-#define SCANLOG_COMPLETE 0
-#define SCANLOG_HWERROR -1
-#define SCANLOG_CONTINUE 1
-
-
-static unsigned int ibm_scan_log_dump; /* RTAS token */
-static unsigned int *scanlog_buffer; /* The data buffer */
-
-static ssize_t scanlog_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
-{
- unsigned int *data = scanlog_buffer;
- int status;
- unsigned long len, off;
- unsigned int wait_time;
-
- if (count > RTAS_DATA_BUF_SIZE)
- count = RTAS_DATA_BUF_SIZE;
-
- if (count < 1024) {
- /* This is the min supported by this RTAS call. Rather
- * than do all the buffering we insist the user code handle
- * larger reads. As long as cp works... :)
- */
- printk(KERN_ERR "scanlog: cannot perform a small read (%ld)\n", count);
- return -EINVAL;
- }
-
- if (!access_ok(buf, count))
- return -EFAULT;
-
- for (;;) {
- wait_time = 500; /* default wait if no data */
- spin_lock(&rtas_data_buf_lock);
- memcpy(rtas_data_buf, data, RTAS_DATA_BUF_SIZE);
- status = rtas_call(ibm_scan_log_dump, 2, 1, NULL,
- (u32) __pa(rtas_data_buf), (u32) count);
- memcpy(data, rtas_data_buf, RTAS_DATA_BUF_SIZE);
- spin_unlock(&rtas_data_buf_lock);
-
- pr_debug("scanlog: status=%d, data[0]=%x, data[1]=%x, " \
- "data[2]=%x\n", status, data[0], data[1], data[2]);
- switch (status) {
- case SCANLOG_COMPLETE:
- pr_debug("scanlog: hit eof\n");
- return 0;
- case SCANLOG_HWERROR:
- pr_debug("scanlog: hardware error reading data\n");
- return -EIO;
- case SCANLOG_CONTINUE:
- /* We may or may not have data yet */
- len = data[1];
- off = data[2];
- if (len > 0) {
- if (copy_to_user(buf, ((char *)data)+off, len))
- return -EFAULT;
- return len;
- }
- /* Break to sleep default time */
- break;
- default:
- /* Assume extended busy */
- wait_time = rtas_busy_delay_time(status);
- if (!wait_time) {
- printk(KERN_ERR "scanlog: unknown error " \
- "from rtas: %d\n", status);
- return -EIO;
- }
- }
- /* Apparently no data yet. Wait and try again. */
- msleep_interruptible(wait_time);
- }
- /*NOTREACHED*/
-}
-
-static ssize_t scanlog_write(struct file * file, const char __user * buf,
- size_t count, loff_t *ppos)
-{
- char stkbuf[20];
- int status;
-
- if (count > 19) count = 19;
- if (copy_from_user (stkbuf, buf, count)) {
- return -EFAULT;
- }
- stkbuf[count] = 0;
-
- if (buf) {
- if (strncmp(stkbuf, "reset", 5) == 0) {
- pr_debug("scanlog: reset scanlog\n");
- status = rtas_call(ibm_scan_log_dump, 2, 1, NULL, 0, 0);
- pr_debug("scanlog: rtas returns %d\n", status);
- }
- }
- return count;
-}
-
-static int scanlog_open(struct inode * inode, struct file * file)
-{
- unsigned int *data = scanlog_buffer;
-
- if (data[0] != 0) {
- /* This imperfect test stops a second copy of the
- * data (or a reset while data is being copied)
- */
- return -EBUSY;
- }
-
- data[0] = 0; /* re-init so we restart the scan */
-
- return 0;
-}
-
-static int scanlog_release(struct inode * inode, struct file * file)
-{
- unsigned int *data = scanlog_buffer;
-
- data[0] = 0;
- return 0;
-}
-
-static const struct proc_ops scanlog_proc_ops = {
- .proc_read = scanlog_read,
- .proc_write = scanlog_write,
- .proc_open = scanlog_open,
- .proc_release = scanlog_release,
- .proc_lseek = noop_llseek,
-};
-
-static int __init scanlog_init(void)
-{
- struct proc_dir_entry *ent;
- int err = -ENOMEM;
-
- ibm_scan_log_dump = rtas_token("ibm,scan-log-dump");
- if (ibm_scan_log_dump == RTAS_UNKNOWN_SERVICE)
- return -ENODEV;
-
- /* Ideally we could allocate a buffer < 4G */
- scanlog_buffer = kzalloc(RTAS_DATA_BUF_SIZE, GFP_KERNEL);
- if (!scanlog_buffer)
- goto err;
-
- ent = proc_create("powerpc/rtas/scan-log-dump", 0400, NULL,
- &scanlog_proc_ops);
- if (!ent)
- goto err;
- return 0;
-err:
- kfree(scanlog_buffer);
- return err;
-}
-
-static void __exit scanlog_cleanup(void)
-{
- remove_proc_entry("powerpc/rtas/scan-log-dump", NULL);
- kfree(scanlog_buffer);
-}
-
-module_init(scanlog_init);
-module_exit(scanlog_cleanup);
-MODULE_LICENSE("GPL");
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index 0c8421dd01ab..8ef3270515a9 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -14,6 +14,7 @@
#include <linux/cpu.h>
#include <linux/errno.h>
+#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
@@ -36,15 +37,15 @@
#include <linux/seq_file.h>
#include <linux/root_dev.h>
#include <linux/of.h>
+#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/memblock.h>
#include <linux/swiotlb.h>
+#include <linux/seq_buf.h>
#include <asm/mmu.h>
#include <asm/processor.h>
#include <asm/io.h>
-#include <asm/pgtable.h>
-#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/pci-bridge.h>
#include <asm/iommu.h>
@@ -68,14 +69,31 @@
#include <asm/isa-bridge.h>
#include <asm/security_features.h>
#include <asm/asm-const.h>
+#include <asm/idle.h>
#include <asm/swiotlb.h>
#include <asm/svm.h>
+#include <asm/dtl.h>
+#include <asm/hvconsole.h>
+#include <asm/setup.h>
#include "pseries.h"
-#include "../../../../drivers/pci/pci.h"
DEFINE_STATIC_KEY_FALSE(shared_processor);
-EXPORT_SYMBOL_GPL(shared_processor);
+EXPORT_SYMBOL(shared_processor);
+
+#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
+struct static_key paravirt_steal_enabled;
+struct static_key paravirt_steal_rq_enabled;
+
+static bool steal_acc = true;
+static int __init parse_no_stealacc(char *arg)
+{
+ steal_acc = false;
+ return 0;
+}
+
+early_param("no-steal-acc", parse_no_stealacc);
+#endif
int CMO_PrPSP = -1;
int CMO_SecPSP = -1;
@@ -83,6 +101,8 @@ unsigned long CMO_PageSize = (ASM_CONST(1) << IOMMU_PAGE_SHIFT_4K);
EXPORT_SYMBOL(CMO_PageSize);
int fwnmi_active; /* TRUE if an FWNMI handler is present */
+int ibm_nmi_interlock_token;
+u32 pseries_security_flavor;
static void pSeries_show_cpuinfo(struct seq_file *m)
{
@@ -109,13 +129,18 @@ static void __init fwnmi_init(void)
u8 *mce_data_buf;
unsigned int i;
int nr_cpus = num_possible_cpus();
-#ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_PPC_64S_HASH_MMU
struct slb_entry *slb_ptr;
size_t size;
#endif
+ int ibm_nmi_register_token;
+
+ ibm_nmi_register_token = rtas_token("ibm,nmi-register");
+ if (ibm_nmi_register_token == RTAS_UNKNOWN_SERVICE)
+ return;
- int ibm_nmi_register = rtas_token("ibm,nmi-register");
- if (ibm_nmi_register == RTAS_UNKNOWN_SERVICE)
+ ibm_nmi_interlock_token = rtas_token("ibm,nmi-interlock");
+ if (WARN_ON(ibm_nmi_interlock_token == RTAS_UNKNOWN_SERVICE))
return;
/* If the kernel's not linked at zero we point the firmware at low
@@ -123,8 +148,8 @@ static void __init fwnmi_init(void)
system_reset_addr = __pa(system_reset_fwnmi) - PHYSICAL_START;
machine_check_addr = __pa(machine_check_fwnmi) - PHYSICAL_START;
- if (0 == rtas_call(ibm_nmi_register, 2, 1, NULL, system_reset_addr,
- machine_check_addr))
+ if (0 == rtas_call(ibm_nmi_register_token, 2, 1, NULL,
+ system_reset_addr, machine_check_addr))
fwnmi_active = 1;
/*
@@ -144,7 +169,7 @@ static void __init fwnmi_init(void)
(RTAS_ERROR_LOG_MAX * i);
}
-#ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_PPC_64S_HASH_MMU
if (!radix_enabled()) {
/* Allocate per cpu area to save old slb contents during MCE */
size = sizeof(struct slb_entry) * mmu_slb_size * nr_cpus;
@@ -161,6 +186,18 @@ static void __init fwnmi_init(void)
#endif
}
+/*
+ * Affix a device for the first timer to the platform bus if
+ * we have firmware support for the H_WATCHDOG hypercall.
+ */
+static __init int pseries_wdt_init(void)
+{
+ if (firmware_has_feature(FW_FEATURE_WATCHDOG))
+ platform_device_register_simple("pseries-wdt", 0, NULL, 0);
+ return 0;
+}
+machine_subsys_initcall(pseries, pseries_wdt_init);
+
static void pseries_8259_cascade(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
@@ -317,6 +354,9 @@ static int alloc_dispatch_log_kmem_cache(void)
}
machine_early_initcall(pseries, alloc_dispatch_log_kmem_cache);
+DEFINE_PER_CPU(u64, idle_spurr_cycles);
+DEFINE_PER_CPU(u64, idle_entry_purr_snap);
+DEFINE_PER_CPU(u64, idle_entry_spurr_snap);
static void pseries_lpar_idle(void)
{
/*
@@ -328,7 +368,7 @@ static void pseries_lpar_idle(void)
return;
/* Indicate to hypervisor that we are idle. */
- get_lppaca()->idle = 1;
+ pseries_idle_prolog();
/*
* Yield the processor to the hypervisor. We return if
@@ -339,9 +379,17 @@ static void pseries_lpar_idle(void)
*/
cede_processor();
- get_lppaca()->idle = 0;
+ pseries_idle_epilog();
}
+static bool pseries_reloc_on_exception_enabled;
+
+bool pseries_reloc_on_exception(void)
+{
+ return pseries_reloc_on_exception_enabled;
+}
+EXPORT_SYMBOL_GPL(pseries_reloc_on_exception);
+
/*
* Enable relocation on during exceptions. This has partition wide scope and
* may take a while to complete, if it takes longer than one second we will
@@ -349,7 +397,7 @@ static void pseries_lpar_idle(void)
* to ever be a problem in practice we can move this into a kernel thread to
* finish off the process later in boot.
*/
-void pseries_enable_reloc_on_exc(void)
+bool pseries_enable_reloc_on_exc(void)
{
long rc;
unsigned int delay, total_delay = 0;
@@ -360,11 +408,14 @@ void pseries_enable_reloc_on_exc(void)
if (rc == H_P2) {
pr_info("Relocation on exceptions not"
" supported\n");
+ return false;
} else if (rc != H_SUCCESS) {
pr_warn("Unable to enable relocation"
" on exceptions: %ld\n", rc);
+ return false;
}
- break;
+ pseries_reloc_on_exception_enabled = true;
+ return true;
}
delay = get_longbusy_msecs(rc);
@@ -373,7 +424,7 @@ void pseries_enable_reloc_on_exc(void)
pr_warn("Warning: Giving up waiting to enable "
"relocation on exceptions (%u msec)!\n",
total_delay);
- return;
+ return false;
}
mdelay(delay);
@@ -391,22 +442,14 @@ void pseries_disable_reloc_on_exc(void)
break;
mdelay(get_longbusy_msecs(rc));
}
- if (rc != H_SUCCESS)
+ if (rc == H_SUCCESS)
+ pseries_reloc_on_exception_enabled = false;
+ else
pr_warn("Warning: Failed to disable relocation on exceptions: %ld\n",
rc);
}
EXPORT_SYMBOL(pseries_disable_reloc_on_exc);
-#ifdef CONFIG_KEXEC_CORE
-static void pSeries_machine_kexec(struct kimage *image)
-{
- if (firmware_has_feature(FW_FEATURE_SET_MODE))
- pseries_disable_reloc_on_exc();
-
- default_machine_kexec(image);
-}
-#endif
-
#ifdef __LITTLE_ENDIAN__
void pseries_big_endian_exceptions(void)
{
@@ -434,7 +477,7 @@ void pseries_big_endian_exceptions(void)
panic("Could not enable big endian exceptions");
}
-void pseries_little_endian_exceptions(void)
+void __init pseries_little_endian_exceptions(void)
{
long rc;
@@ -451,7 +494,7 @@ void pseries_little_endian_exceptions(void)
}
#endif
-static void __init find_and_init_phbs(void)
+static void __init pSeries_discover_phbs(void)
{
struct device_node *node;
struct pci_controller *phb;
@@ -469,6 +512,11 @@ static void __init find_and_init_phbs(void)
pci_process_bridge_OF_ranges(phb, node, 0);
isa_bridge_find_early(phb);
phb->controller_ops = pseries_pci_controller_ops;
+
+ /* create pci_dn's for DT nodes under this PHB */
+ pci_devs_phb_init_dynamic(phb);
+
+ pseries_msi_allocate_domains(phb);
}
of_node_put(root);
@@ -507,24 +555,46 @@ static void init_cpu_char_feature_flags(struct h_cpu_char_result *result)
if (result->character & H_CPU_CHAR_BCCTR_FLUSH_ASSIST)
security_ftr_set(SEC_FTR_BCCTR_FLUSH_ASSIST);
+ if (result->character & H_CPU_CHAR_BCCTR_LINK_FLUSH_ASSIST)
+ security_ftr_set(SEC_FTR_BCCTR_LINK_FLUSH_ASSIST);
+
if (result->behaviour & H_CPU_BEHAV_FLUSH_COUNT_CACHE)
security_ftr_set(SEC_FTR_FLUSH_COUNT_CACHE);
+ if (result->behaviour & H_CPU_BEHAV_FLUSH_LINK_STACK)
+ security_ftr_set(SEC_FTR_FLUSH_LINK_STACK);
+
/*
* The features below are enabled by default, so we instead look to see
* if firmware has *disabled* them, and clear them if so.
+ * H_CPU_BEHAV_FAVOUR_SECURITY_H could be set only if
+ * H_CPU_BEHAV_FAVOUR_SECURITY is.
*/
- if (!(result->behaviour & H_CPU_BEHAV_FAVOUR_SECURITY))
+ if (!(result->behaviour & H_CPU_BEHAV_FAVOUR_SECURITY)) {
security_ftr_clear(SEC_FTR_FAVOUR_SECURITY);
+ pseries_security_flavor = 0;
+ } else if (result->behaviour & H_CPU_BEHAV_FAVOUR_SECURITY_H)
+ pseries_security_flavor = 1;
+ else
+ pseries_security_flavor = 2;
if (!(result->behaviour & H_CPU_BEHAV_L1D_FLUSH_PR))
security_ftr_clear(SEC_FTR_L1D_FLUSH_PR);
+ if (result->behaviour & H_CPU_BEHAV_NO_L1D_FLUSH_ENTRY)
+ security_ftr_clear(SEC_FTR_L1D_FLUSH_ENTRY);
+
+ if (result->behaviour & H_CPU_BEHAV_NO_L1D_FLUSH_UACCESS)
+ security_ftr_clear(SEC_FTR_L1D_FLUSH_UACCESS);
+
+ if (result->behaviour & H_CPU_BEHAV_NO_STF_BARRIER)
+ security_ftr_clear(SEC_FTR_STF_BARRIER);
+
if (!(result->behaviour & H_CPU_BEHAV_BNDS_CHK_SPEC_BAR))
security_ftr_clear(SEC_FTR_BNDS_CHK_SPEC_BAR);
}
-void pseries_setup_rfi_flush(void)
+void pseries_setup_security_mitigations(void)
{
struct h_cpu_char_result result;
enum l1d_flush_type types;
@@ -561,6 +631,16 @@ void pseries_setup_rfi_flush(void)
setup_rfi_flush(types, enable);
setup_count_cache_flush();
+
+ enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
+ security_ftr_enabled(SEC_FTR_L1D_FLUSH_ENTRY);
+ setup_entry_flush(enable);
+
+ enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
+ security_ftr_enabled(SEC_FTR_L1D_FLUSH_UACCESS);
+ setup_uaccess_flush(enable);
+
+ setup_stf_barrier();
}
#ifdef CONFIG_PCI_IOV
@@ -579,8 +659,8 @@ enum get_iov_fw_value_index {
WDW_SIZE = 3 /* Get Window Size */
};
-resource_size_t pseries_get_iov_fw_value(struct pci_dev *dev, int resno,
- enum get_iov_fw_value_index value)
+static resource_size_t pseries_get_iov_fw_value(struct pci_dev *dev, int resno,
+ enum get_iov_fw_value_index value)
{
const int *indexes;
struct device_node *dn = pci_device_to_OF_node(dev);
@@ -597,7 +677,7 @@ resource_size_t pseries_get_iov_fw_value(struct pci_dev *dev, int resno,
*/
num_res = of_read_number(&indexes[NUM_RES_PROPERTY], 1);
if (resno >= num_res)
- return 0; /* or an errror */
+ return 0; /* or an error */
i = START_OF_ENTRIES + NEXT_ENTRY * resno;
switch (value) {
@@ -615,7 +695,7 @@ resource_size_t pseries_get_iov_fw_value(struct pci_dev *dev, int resno,
return ret;
}
-void of_pci_set_vf_bar_size(struct pci_dev *dev, const int *indexes)
+static void of_pci_set_vf_bar_size(struct pci_dev *dev, const int *indexes)
{
struct resource *res;
resource_size_t base, size;
@@ -637,7 +717,7 @@ void of_pci_set_vf_bar_size(struct pci_dev *dev, const int *indexes)
}
}
-void of_pci_parse_iov_addrs(struct pci_dev *dev, const int *indexes)
+static void of_pci_parse_iov_addrs(struct pci_dev *dev, const int *indexes)
{
struct resource *res, *root, *conflict;
resource_size_t base, size;
@@ -699,9 +779,9 @@ static void pseries_pci_fixup_iov_resources(struct pci_dev *pdev)
const int *indexes;
struct device_node *dn = pci_device_to_OF_node(pdev);
- if (!pdev->is_physfn || pci_dev_is_added(pdev))
+ if (!pdev->is_physfn)
return;
- /*Firmware must support open sriov otherwise dont configure*/
+ /*Firmware must support open sriov otherwise don't configure*/
indexes = of_get_property(dn, "ibm,open-sriov-vf-bar-info", NULL);
if (indexes)
of_pci_parse_iov_addrs(pdev, indexes);
@@ -736,6 +816,11 @@ static void __init pSeries_setup_arch(void)
smp_init_pseries();
+ if (radix_enabled() && !mmu_has_feature(MMU_FTR_GTSE))
+ if (!firmware_has_feature(FW_FEATURE_RPT_INVALIDATE))
+ panic("BUG: Radix support requires either GTSE or RPT_INVALIDATE\n");
+
+
/* openpic global configuration register (64-bit format). */
/* openpic Interrupt Source Unit pointer (64-bit format). */
/* python0 facility area (mmio) (64-bit format) REAL address. */
@@ -745,16 +830,15 @@ static void __init pSeries_setup_arch(void)
fwnmi_init();
- pseries_setup_rfi_flush();
- setup_stf_barrier();
- pseries_lpar_read_hblkrm_characteristics();
+ pseries_setup_security_mitigations();
+ if (!radix_enabled())
+ pseries_lpar_read_hblkrm_characteristics();
/* By default, only probe PCI (can be overridden by rtas_pci) */
pci_add_flags(PCI_PROBE_ONLY);
/* Find and initialize PCI host bridges */
init_pci_config_tokens();
- find_and_init_phbs();
of_reconfig_notifier_register(&pci_dn_reconfig_nb);
pSeries_nvram_init();
@@ -762,8 +846,15 @@ static void __init pSeries_setup_arch(void)
if (firmware_has_feature(FW_FEATURE_LPAR)) {
vpa_init(boot_cpuid);
- if (lppaca_shared_proc(get_lppaca()))
+ if (lppaca_shared_proc(get_lppaca())) {
static_branch_enable(&shared_processor);
+ pv_spinlocks_init();
+#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
+ static_key_slow_inc(&paravirt_steal_enabled);
+ if (steal_acc)
+ static_key_slow_inc(&paravirt_steal_rq_enabled);
+#endif
+ }
ppc_md.power_save = pseries_lpar_idle;
ppc_md.enable_pmcs = pseries_lpar_enable_pmcs;
@@ -781,9 +872,7 @@ static void __init pSeries_setup_arch(void)
}
ppc_md.pcibios_root_bridge_prepare = pseries_root_bridge_prepare;
-
- if (swiotlb_force == SWIOTLB_FORCE)
- ppc_swiotlb_enable = 1;
+ pseries_rng_init();
}
static void pseries_panic(char *str)
@@ -822,12 +911,15 @@ static int pseries_set_xdabr(unsigned long dabr, unsigned long dabrx)
return plpar_hcall_norets(H_SET_XDABR, dabr, dabrx);
}
-static int pseries_set_dawr(unsigned long dawr, unsigned long dawrx)
+static int pseries_set_dawr(int nr, unsigned long dawr, unsigned long dawrx)
{
/* PAPR says we can't set HYP */
dawrx &= ~DAWRX_HYP;
- return plpar_set_watchpoint0(dawr, dawrx);
+ if (nr == 0)
+ return plpar_set_watchpoint0(dawr, dawrx);
+ else
+ return plpar_set_watchpoint1(dawr, dawrx);
}
#define CMO_CHARACTERISTICS_TOKEN 44
@@ -847,7 +939,7 @@ void pSeries_coalesce_init(void)
* fw_cmo_feature_init - FW_FEATURE_CMO is not stored in ibm,hypertas-functions,
* handle that here. (Stolen from parse_system_parameter_string)
*/
-static void pSeries_cmo_feature_init(void)
+static void __init pSeries_cmo_feature_init(void)
{
char *ptr, *key, *value, *end;
int call_status;
@@ -920,6 +1012,33 @@ static void pSeries_cmo_feature_init(void)
pr_debug(" <- fw_cmo_feature_init()\n");
}
+static void __init pseries_add_hw_description(void)
+{
+ struct device_node *dn;
+ const char *s;
+
+ dn = of_find_node_by_path("/openprom");
+ if (dn) {
+ if (of_property_read_string(dn, "model", &s) == 0)
+ seq_buf_printf(&ppc_hw_desc, "of:%s ", s);
+
+ of_node_put(dn);
+ }
+
+ dn = of_find_node_by_path("/hypervisor");
+ if (dn) {
+ if (of_property_read_string(dn, "compatible", &s) == 0)
+ seq_buf_printf(&ppc_hw_desc, "hv:%s ", s);
+
+ of_node_put(dn);
+ return;
+ }
+
+ if (of_property_read_bool(of_root, "ibm,powervm-partition") ||
+ of_property_read_bool(of_root, "ibm,fw-net-version"))
+ seq_buf_printf(&ppc_hw_desc, "hv:phyp ");
+}
+
/*
* Early initialization. Relocation is on but do not reference unbolted pages
*/
@@ -927,6 +1046,8 @@ static void __init pseries_init(void)
{
pr_debug(" -> pseries_init()\n");
+ pseries_add_hw_description();
+
#ifdef CONFIG_HVC_CONSOLE
if (firmware_has_feature(FW_FEATURE_LPAR))
hvc_vio_init_early();
@@ -1013,6 +1134,7 @@ define_machine(pseries) {
.init_IRQ = pseries_init_irq,
.show_cpuinfo = pSeries_show_cpuinfo,
.log_error = pSeries_log_error,
+ .discover_phbs = pSeries_discover_phbs,
.pcibios_fixup = pSeries_final_fixup,
.restart = rtas_restart,
.halt = rtas_halt,
@@ -1025,11 +1147,12 @@ define_machine(pseries) {
.system_reset_exception = pSeries_system_reset_exception,
.machine_check_early = pseries_machine_check_realmode,
.machine_check_exception = pSeries_machine_check_exception,
+ .machine_check_log_err = pSeries_machine_check_log_err,
#ifdef CONFIG_KEXEC_CORE
- .machine_kexec = pSeries_machine_kexec,
+ .machine_kexec = pseries_machine_kexec,
.kexec_cpu_down = pseries_kexec_cpu_down,
#endif
-#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
+#ifdef CONFIG_MEMORY_HOTPLUG
.memory_block_size = pseries_memory_block_size,
#endif
};
diff --git a/arch/powerpc/platforms/pseries/smp.c b/arch/powerpc/platforms/pseries/smp.c
index ad61e90032da..fd2174edfa1d 100644
--- a/arch/powerpc/platforms/pseries/smp.c
+++ b/arch/powerpc/platforms/pseries/smp.c
@@ -20,14 +20,13 @@
#include <linux/err.h>
#include <linux/device.h>
#include <linux/cpu.h>
+#include <linux/pgtable.h>
#include <asm/ptrace.h>
#include <linux/atomic.h>
#include <asm/irq.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/io.h>
-#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/paca.h>
#include <asm/machdep.h>
@@ -42,10 +41,9 @@
#include <asm/plpar_wrappers.h>
#include <asm/code-patching.h>
#include <asm/svm.h>
+#include <asm/kvm_guest.h>
#include "pseries.h"
-#include "offline_states.h"
-
/*
* The Primary thread of each non-boot processor was started from the OF client
@@ -106,12 +104,6 @@ static inline int smp_startup_cpu(unsigned int lcpu)
return 1;
}
- /* Fixup atomic count: it exited inside IRQ handler. */
- task_thread_info(paca_ptrs[lcpu]->__current)->preempt_count = 0;
-#ifdef CONFIG_HOTPLUG_CPU
- if (get_cpu_current_state(lcpu) == CPU_STATE_INACTIVE)
- goto out;
-#endif
/*
* If the RTAS start-cpu token does not exist then presume the
* cpu is already spinning.
@@ -126,9 +118,6 @@ static inline int smp_startup_cpu(unsigned int lcpu)
return 0;
}
-#ifdef CONFIG_HOTPLUG_CPU
-out:
-#endif
return 1;
}
@@ -143,10 +132,6 @@ static void smp_setup_cpu(int cpu)
vpa_init(cpu);
cpumask_clear_cpu(cpu, of_spin_mask);
-#ifdef CONFIG_HOTPLUG_CPU
- set_cpu_current_state(cpu, CPU_STATE_ONLINE);
- set_default_offline_state(cpu);
-#endif
}
static int smp_pSeries_kick_cpu(int nr)
@@ -163,20 +148,6 @@ static int smp_pSeries_kick_cpu(int nr)
* the processor will continue on to secondary_start
*/
paca_ptrs[nr]->cpu_start = 1;
-#ifdef CONFIG_HOTPLUG_CPU
- set_preferred_offline_state(nr, CPU_STATE_ONLINE);
-
- if (get_cpu_current_state(nr) == CPU_STATE_INACTIVE) {
- long rc;
- unsigned long hcpuid;
-
- hcpuid = get_hard_smp_processor_id(nr);
- rc = plpar_hcall_norets(H_PROD, hcpuid);
- if (rc != H_SUCCESS)
- printk(KERN_ERR "Error: Prod to wake up processor %d "
- "Ret= %ld\n", nr, rc);
- }
-#endif
return 0;
}
@@ -188,13 +159,16 @@ static int pseries_smp_prepare_cpu(int cpu)
return 0;
}
-static void smp_pseries_cause_ipi(int cpu)
+/* Cause IPI as set up by the interrupt controller (xics or xive) */
+static void (*ic_cause_ipi)(int cpu) __ro_after_init;
+
+/* Use msgsndp doorbells if the target is a sibling, else use the interrupt controller */
+static void dbell_or_ic_cause_ipi(int cpu)
{
- /* POWER9 should not use this handler */
if (doorbell_try_core_ipi(cpu))
return;
- icp_ops->cause_ipi(cpu);
+ ic_cause_ipi(cpu);
}
static int pseries_cause_nmi_ipi(int cpu)
@@ -218,26 +192,51 @@ static int pseries_cause_nmi_ipi(int cpu)
return 0;
}
-static __init void pSeries_smp_probe_xics(void)
-{
- xics_smp_probe();
-
- if (cpu_has_feature(CPU_FTR_DBELL) && !is_secure_guest())
- smp_ops->cause_ipi = smp_pseries_cause_ipi;
- else
- smp_ops->cause_ipi = icp_ops->cause_ipi;
-}
-
static __init void pSeries_smp_probe(void)
{
if (xive_enabled())
- /*
- * Don't use P9 doorbells when XIVE is enabled. IPIs
- * using MMIOs should be faster
- */
xive_smp_probe();
else
- pSeries_smp_probe_xics();
+ xics_smp_probe();
+
+ /* No doorbell facility, must use the interrupt controller for IPIs */
+ if (!cpu_has_feature(CPU_FTR_DBELL))
+ return;
+
+ /* Doorbells can only be used for IPIs between SMT siblings */
+ if (!cpu_has_feature(CPU_FTR_SMT))
+ return;
+
+ check_kvm_guest();
+
+ if (is_kvm_guest()) {
+ /*
+ * KVM emulates doorbells by disabling FSCR[MSGP] so msgsndp
+ * faults to the hypervisor which then reads the instruction
+ * from guest memory, which tends to be slower than using XIVE.
+ */
+ if (xive_enabled())
+ return;
+
+ /*
+ * XICS hcalls aren't as fast, so we can use msgsndp (which
+ * also helps exercise KVM emulation), however KVM can't
+ * emulate secure guests because it can't read the instruction
+ * out of their memory.
+ */
+ if (is_secure_guest())
+ return;
+ }
+
+ /*
+ * Under PowerVM, FSCR[MSGP] is enabled as guest vCPU siblings are
+ * gang scheduled on the same physical core, so doorbells are always
+ * faster than the interrupt controller, and they can be used by
+ * secure guests.
+ */
+
+ ic_cause_ipi = smp_ops->cause_ipi;
+ smp_ops->cause_ipi = dbell_or_ic_cause_ipi;
}
static struct smp_ops_t pseries_smp_ops = {
diff --git a/arch/powerpc/platforms/pseries/suspend.c b/arch/powerpc/platforms/pseries/suspend.c
index 0a24a5a185f0..1b902cbf85c5 100644
--- a/arch/powerpc/platforms/pseries/suspend.c
+++ b/arch/powerpc/platforms/pseries/suspend.c
@@ -13,13 +13,8 @@
#include <asm/mmu.h>
#include <asm/rtas.h>
#include <asm/topology.h>
-#include "../../kernel/cacheinfo.h"
-static u64 stream_id;
static struct device suspend_dev;
-static DECLARE_COMPLETION(suspend_work);
-static struct rtas_suspend_me_data suspend_data;
-static atomic_t suspending;
/**
* pseries_suspend_begin - First phase of hibernation
@@ -29,7 +24,7 @@ static atomic_t suspending;
* Return value:
* 0 on success / other on failure
**/
-static int pseries_suspend_begin(suspend_state_t state)
+static int pseries_suspend_begin(u64 stream_id)
{
long vasi_state, rc;
unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
@@ -49,41 +44,10 @@ static int pseries_suspend_begin(suspend_state_t state)
vasi_state);
return -EIO;
}
-
- return 0;
-}
-
-/**
- * pseries_suspend_cpu - Suspend a single CPU
- *
- * Makes the H_JOIN call to suspend the CPU
- *
- **/
-static int pseries_suspend_cpu(void)
-{
- if (atomic_read(&suspending))
- return rtas_suspend_cpu(&suspend_data);
return 0;
}
/**
- * pseries_suspend_enable_irqs
- *
- * Post suspend configuration updates
- *
- **/
-static void pseries_suspend_enable_irqs(void)
-{
- /*
- * Update configuration which can be modified based on device tree
- * changes during resume.
- */
- cacheinfo_cpu_offline(smp_processor_id());
- post_mobility_fixup();
- cacheinfo_cpu_online(smp_processor_id());
-}
-
-/**
* pseries_suspend_enter - Final phase of hibernation
*
* Return value:
@@ -91,28 +55,7 @@ static void pseries_suspend_enable_irqs(void)
**/
static int pseries_suspend_enter(suspend_state_t state)
{
- int rc = rtas_suspend_last_cpu(&suspend_data);
-
- atomic_set(&suspending, 0);
- atomic_set(&suspend_data.done, 1);
- return rc;
-}
-
-/**
- * pseries_prepare_late - Prepare to suspend all other CPUs
- *
- * Return value:
- * 0 on success / other on failure
- **/
-static int pseries_prepare_late(void)
-{
- atomic_set(&suspending, 1);
- atomic_set(&suspend_data.working, 0);
- atomic_set(&suspend_data.done, 0);
- atomic_set(&suspend_data.error, 0);
- suspend_data.complete = &suspend_work;
- reinit_completion(&suspend_work);
- return 0;
+ return rtas_ibm_suspend_me(NULL);
}
/**
@@ -132,50 +75,29 @@ static ssize_t store_hibernate(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- cpumask_var_t offline_mask;
+ u64 stream_id;
int rc;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- if (!alloc_cpumask_var(&offline_mask, GFP_KERNEL))
- return -ENOMEM;
-
stream_id = simple_strtoul(buf, NULL, 16);
do {
- rc = pseries_suspend_begin(PM_SUSPEND_MEM);
+ rc = pseries_suspend_begin(stream_id);
if (rc == -EAGAIN)
ssleep(1);
} while (rc == -EAGAIN);
- if (!rc) {
- /* All present CPUs must be online */
- cpumask_andnot(offline_mask, cpu_present_mask,
- cpu_online_mask);
- rc = rtas_online_cpus_mask(offline_mask);
- if (rc) {
- pr_err("%s: Could not bring present CPUs online.\n",
- __func__);
- goto out;
- }
-
- stop_topology_update();
+ if (!rc)
rc = pm_suspend(PM_SUSPEND_MEM);
- start_topology_update();
- /* Take down CPUs not online prior to suspend */
- if (!rtas_offline_cpus_mask(offline_mask))
- pr_warn("%s: Could not restore CPUs to offline "
- "state.\n", __func__);
+ if (!rc) {
+ rc = count;
+ post_mobility_fixup();
}
- stream_id = 0;
- if (!rc)
- rc = count;
-out:
- free_cpumask_var(offline_mask);
return rc;
}
@@ -210,8 +132,6 @@ static struct bus_type suspend_subsys = {
static const struct platform_suspend_ops pseries_suspend_ops = {
.valid = suspend_valid_only_mem,
- .begin = pseries_suspend_begin,
- .prepare_late = pseries_prepare_late,
.enter = pseries_suspend_enter,
};
@@ -254,15 +174,9 @@ static int __init pseries_suspend_init(void)
if (!firmware_has_feature(FW_FEATURE_LPAR))
return 0;
- suspend_data.token = rtas_token("ibm,suspend-me");
- if (suspend_data.token == RTAS_UNKNOWN_SERVICE)
- return 0;
-
if ((rc = pseries_suspend_sysfs_register(&suspend_dev)))
return rc;
- ppc_md.suspend_disable_cpu = pseries_suspend_cpu;
- ppc_md.suspend_enable_irqs = pseries_suspend_enable_irqs;
suspend_set_ops(&pseries_suspend_ops);
return 0;
}
diff --git a/arch/powerpc/platforms/pseries/svm.c b/arch/powerpc/platforms/pseries/svm.c
index 40c0637203d5..3b4045d508ec 100644
--- a/arch/powerpc/platforms/pseries/svm.c
+++ b/arch/powerpc/platforms/pseries/svm.c
@@ -7,10 +7,13 @@
*/
#include <linux/mm.h>
+#include <linux/memblock.h>
+#include <linux/cc_platform.h>
#include <asm/machdep.h>
#include <asm/svm.h>
#include <asm/swiotlb.h>
#include <asm/ultravisor.h>
+#include <asm/dtl.h>
static int __init init_svm(void)
{
@@ -25,7 +28,7 @@ static int __init init_svm(void)
* need to use the SWIOTLB buffer for DMA even if dma_capable() says
* otherwise.
*/
- swiotlb_force = SWIOTLB_FORCE;
+ ppc_swiotlb_flags |= SWIOTLB_ANY | SWIOTLB_FORCE;
/* Share the SWIOTLB buffer with the host. */
swiotlb_update_mem_attributes();
@@ -36,6 +39,9 @@ machine_early_initcall(pseries, init_svm);
int set_memory_encrypted(unsigned long addr, int numpages)
{
+ if (!cc_platform_has(CC_ATTR_MEM_ENCRYPT))
+ return 0;
+
if (!PAGE_ALIGNED(addr))
return -EINVAL;
@@ -46,6 +52,9 @@ int set_memory_encrypted(unsigned long addr, int numpages)
int set_memory_decrypted(unsigned long addr, int numpages)
{
+ if (!cc_platform_has(CC_ATTR_MEM_ENCRYPT))
+ return 0;
+
if (!PAGE_ALIGNED(addr))
return -EINVAL;
diff --git a/arch/powerpc/platforms/pseries/vas-sysfs.c b/arch/powerpc/platforms/pseries/vas-sysfs.c
new file mode 100644
index 000000000000..f9f682724e77
--- /dev/null
+++ b/arch/powerpc/platforms/pseries/vas-sysfs.c
@@ -0,0 +1,281 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright 2022-23 IBM Corp.
+ */
+
+#define pr_fmt(fmt) "vas: " fmt
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/miscdevice.h>
+#include <linux/kobject.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+
+#include "vas.h"
+
+#ifdef CONFIG_SYSFS
+static struct kobject *pseries_vas_kobj;
+static struct kobject *gzip_caps_kobj;
+
+struct vas_caps_entry {
+ struct kobject kobj;
+ struct vas_cop_feat_caps *caps;
+};
+
+#define to_caps_entry(entry) container_of(entry, struct vas_caps_entry, kobj)
+
+/*
+ * This function is used to get the notification from the drmgr when
+ * QoS credits are changed.
+ */
+static ssize_t update_total_credits_store(struct vas_cop_feat_caps *caps,
+ const char *buf, size_t count)
+{
+ int err;
+ u16 creds;
+
+ err = kstrtou16(buf, 0, &creds);
+ /*
+ * The user space interface from the management console
+ * notifies the OS with the new QoS credits and then the
+ * hypervisor. So the OS has to use this new credit value
+ * and reconfigure VAS windows (close or reopen depending
+ * on the credits available) instead of relying on the VAS
+ * QoS capabilities from the hypervisor.
+ */
+ if (!err)
+ err = vas_reconfig_capabilties(caps->win_type, creds);
+
+ if (err)
+ return -EINVAL;
+
+ pr_info("Set QoS total credits %u\n", creds);
+
+ return count;
+}
+
+#define sysfs_caps_entry_read(_name) \
+static ssize_t _name##_show(struct vas_cop_feat_caps *caps, char *buf) \
+{ \
+ return sprintf(buf, "%d\n", atomic_read(&caps->_name)); \
+}
+
+struct vas_sysfs_entry {
+ struct attribute attr;
+ ssize_t (*show)(struct vas_cop_feat_caps *, char *);
+ ssize_t (*store)(struct vas_cop_feat_caps *, const char *, size_t);
+};
+
+#define VAS_ATTR_RO(_name) \
+ sysfs_caps_entry_read(_name); \
+ static struct vas_sysfs_entry _name##_attribute = __ATTR(_name, \
+ 0444, _name##_show, NULL);
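For reference, a sketch of what the VAS_ATTR_RO() wrapper expands to for the nr_total_credits attribute used further below; this expansion is illustrative only and is not part of the patch:

	/* Illustrative expansion of VAS_ATTR_RO(nr_total_credits) */
	static ssize_t nr_total_credits_show(struct vas_cop_feat_caps *caps, char *buf)
	{
		return sprintf(buf, "%d\n", atomic_read(&caps->nr_total_credits));
	}
	static struct vas_sysfs_entry nr_total_credits_attribute =
		__ATTR(nr_total_credits, 0444, nr_total_credits_show, NULL);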
+
+/*
+ * Create sysfs interface:
+ * /sys/devices/virtual/misc/vas/vas0/gzip/default_capabilities
+ * This directory contains the following VAS GZIP capabilities
+ * for the default credit type.
+ * /sys/devices/virtual/misc/vas/vas0/gzip/default_capabilities/nr_total_credits
+ * Total number of default credits assigned to the LPAR which
+ * can be changed with DLPAR operation.
+ * /sys/devices/virtual/misc/vas/vas0/gzip/default_capabilities/nr_used_credits
+ * Number of credits used by the user space. One credit will
+ * be assigned for each window open.
+ *
+ * /sys/devices/virtual/misc/vas/vas0/gzip/qos_capabilities
+ * This directory contains the following VAS GZIP capabilities
+ * for the Quality of Service (QoS) credit type.
+ * /sys/devices/virtual/misc/vas/vas0/gzip/qos_capabilities/nr_total_credits
+ * Total number of QoS credits assigned to the LPAR. The user
+ * has to define this value using HMC interface. It can be
+ * changed dynamically by the user.
+ * /sys/devices/virtual/misc/vas/vas0/gzip/qos_capabilities/nr_used_credits
+ * Number of credits used by the user space.
+ * /sys/devices/virtual/misc/vas/vas0/gzip/qos_capabilities/update_total_credits
+ * Update total QoS credits dynamically
+ */
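As a usage illustration of this interface (an assumed userspace sequence, not part of this patch; the credit value is arbitrary), a management tool such as drmgr could push a new QoS credit total like this:

	/* Hypothetical userspace sketch: write the new QoS credit total. */
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		const char *path =
			"/sys/devices/virtual/misc/vas/vas0/gzip/qos_capabilities/update_total_credits";
		int fd = open(path, O_WRONLY);

		if (fd < 0) {
			perror("open");
			return 1;
		}
		/* New total as decimal text, e.g. 10 QoS credits */
		if (write(fd, "10", 2) != 2)
			perror("write");
		close(fd);
		return 0;
	}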
+
+VAS_ATTR_RO(nr_total_credits);
+VAS_ATTR_RO(nr_used_credits);
+
+static struct vas_sysfs_entry update_total_credits_attribute =
+ __ATTR(update_total_credits, 0200, NULL, update_total_credits_store);
+
+static struct attribute *vas_def_capab_attrs[] = {
+ &nr_total_credits_attribute.attr,
+ &nr_used_credits_attribute.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(vas_def_capab);
+
+static struct attribute *vas_qos_capab_attrs[] = {
+ &nr_total_credits_attribute.attr,
+ &nr_used_credits_attribute.attr,
+ &update_total_credits_attribute.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(vas_qos_capab);
+
+static ssize_t vas_type_show(struct kobject *kobj, struct attribute *attr,
+ char *buf)
+{
+ struct vas_caps_entry *centry;
+ struct vas_cop_feat_caps *caps;
+ struct vas_sysfs_entry *entry;
+
+ centry = to_caps_entry(kobj);
+ caps = centry->caps;
+ entry = container_of(attr, struct vas_sysfs_entry, attr);
+
+ if (!entry->show)
+ return -EIO;
+
+ return entry->show(caps, buf);
+}
+
+static ssize_t vas_type_store(struct kobject *kobj, struct attribute *attr,
+ const char *buf, size_t count)
+{
+ struct vas_caps_entry *centry;
+ struct vas_cop_feat_caps *caps;
+ struct vas_sysfs_entry *entry;
+
+ centry = to_caps_entry(kobj);
+ caps = centry->caps;
+ entry = container_of(attr, struct vas_sysfs_entry, attr);
+ if (!entry->store)
+ return -EIO;
+
+ return entry->store(caps, buf, count);
+}
+
+static void vas_type_release(struct kobject *kobj)
+{
+ struct vas_caps_entry *centry = to_caps_entry(kobj);
+ kfree(centry);
+}
+
+static const struct sysfs_ops vas_sysfs_ops = {
+ .show = vas_type_show,
+ .store = vas_type_store,
+};
+
+static struct kobj_type vas_def_attr_type = {
+ .release = vas_type_release,
+ .sysfs_ops = &vas_sysfs_ops,
+ .default_groups = vas_def_capab_groups,
+};
+
+static struct kobj_type vas_qos_attr_type = {
+ .release = vas_type_release,
+ .sysfs_ops = &vas_sysfs_ops,
+ .default_groups = vas_qos_capab_groups,
+};
+
+static char *vas_caps_kobj_name(struct vas_caps_entry *centry,
+ struct kobject **kobj)
+{
+ struct vas_cop_feat_caps *caps = centry->caps;
+
+ if (caps->descriptor == VAS_GZIP_QOS_CAPABILITIES) {
+ kobject_init(&centry->kobj, &vas_qos_attr_type);
+ *kobj = gzip_caps_kobj;
+ return "qos_capabilities";
+ } else if (caps->descriptor == VAS_GZIP_DEFAULT_CAPABILITIES) {
+ kobject_init(&centry->kobj, &vas_def_attr_type);
+ *kobj = gzip_caps_kobj;
+ return "default_capabilities";
+ } else
+ return "Unknown";
+}
+
+/*
+ * Add feature specific capability dir entry.
+ * Ex: VDefGzip or VQosGzip
+ */
+int sysfs_add_vas_caps(struct vas_cop_feat_caps *caps)
+{
+ struct vas_caps_entry *centry;
+ struct kobject *kobj = NULL;
+ int ret = 0;
+ char *name;
+
+ centry = kzalloc(sizeof(*centry), GFP_KERNEL);
+ if (!centry)
+ return -ENOMEM;
+
+ centry->caps = caps;
+ name = vas_caps_kobj_name(centry, &kobj);
+
+ if (kobj) {
+ ret = kobject_add(&centry->kobj, kobj, "%s", name);
+
+ if (ret) {
+ pr_err("VAS: sysfs kobject add / event failed %d\n",
+ ret);
+ kobject_put(&centry->kobj);
+ }
+ }
+
+ return ret;
+}
+
+static struct miscdevice vas_miscdev = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "vas",
+};
+
+/*
+ * Add VAS and VasCaps (overall capabilities) dir entries.
+ */
+int __init sysfs_pseries_vas_init(struct vas_all_caps *vas_caps)
+{
+ int ret;
+
+ ret = misc_register(&vas_miscdev);
+ if (ret < 0) {
+ pr_err("%s: register vas misc device failed\n", __func__);
+ return ret;
+ }
+
+ /*
+ * The hypervisor does not expose multiple VAS instances, although
+ * multiple VAS instances can be seen on PowerNV. So create a 'vas0'
+ * directory on pseries.
+ */
+ pseries_vas_kobj = kobject_create_and_add("vas0",
+ &vas_miscdev.this_device->kobj);
+ if (!pseries_vas_kobj) {
+ misc_deregister(&vas_miscdev);
+ pr_err("Failed to create VAS sysfs entry\n");
+ return -ENOMEM;
+ }
+
+ if ((vas_caps->feat_type & VAS_GZIP_QOS_FEAT_BIT) ||
+ (vas_caps->feat_type & VAS_GZIP_DEF_FEAT_BIT)) {
+ gzip_caps_kobj = kobject_create_and_add("gzip",
+ pseries_vas_kobj);
+ if (!gzip_caps_kobj) {
+ pr_err("Failed to create VAS GZIP capability entry\n");
+ kobject_put(pseries_vas_kobj);
+ misc_deregister(&vas_miscdev);
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+#else
+int sysfs_add_vas_caps(struct vas_cop_feat_caps *caps)
+{
+ return 0;
+}
+
+int __init sysfs_pseries_vas_init(struct vas_all_caps *vas_caps)
+{
+ return 0;
+}
+#endif
diff --git a/arch/powerpc/platforms/pseries/vas.c b/arch/powerpc/platforms/pseries/vas.c
new file mode 100644
index 000000000000..4ad6e510d405
--- /dev/null
+++ b/arch/powerpc/platforms/pseries/vas.c
@@ -0,0 +1,1071 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright 2020-21 IBM Corp.
+ */
+
+#define pr_fmt(fmt) "vas: " fmt
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
+#include <asm/machdep.h>
+#include <asm/hvcall.h>
+#include <asm/plpar_wrappers.h>
+#include <asm/firmware.h>
+#include <asm/vas.h>
+#include "vas.h"
+
+#define VAS_INVALID_WIN_ADDRESS 0xFFFFFFFFFFFFFFFFul
+#define VAS_DEFAULT_DOMAIN_ID 0xFFFFFFFFFFFFFFFFul
+/* The hypervisor allows one credit per window right now */
+#define DEF_WIN_CREDS 1
+
+static struct vas_all_caps caps_all;
+static bool copypaste_feat;
+static struct hv_vas_cop_feat_caps hv_cop_caps;
+
+static struct vas_caps vascaps[VAS_MAX_FEAT_TYPE];
+static DEFINE_MUTEX(vas_pseries_mutex);
+static bool migration_in_progress;
+
+static long hcall_return_busy_check(long rc)
+{
+ /* Check if we are stalled for some time */
+ if (H_IS_LONG_BUSY(rc)) {
+ msleep(get_longbusy_msecs(rc));
+ rc = H_BUSY;
+ } else if (rc == H_BUSY) {
+ cond_resched();
+ }
+
+ return rc;
+}
+
+/*
+ * Allocate VAS window hcall
+ */
+static int h_allocate_vas_window(struct pseries_vas_window *win, u64 *domain,
+ u8 wintype, u16 credits)
+{
+ long retbuf[PLPAR_HCALL9_BUFSIZE] = {0};
+ long rc;
+
+ do {
+ rc = plpar_hcall9(H_ALLOCATE_VAS_WINDOW, retbuf, wintype,
+ credits, domain[0], domain[1], domain[2],
+ domain[3], domain[4], domain[5]);
+
+ rc = hcall_return_busy_check(rc);
+ } while (rc == H_BUSY);
+
+ if (rc == H_SUCCESS) {
+ if (win->win_addr == VAS_INVALID_WIN_ADDRESS) {
+ pr_err("H_ALLOCATE_VAS_WINDOW: COPY/PASTE is not supported\n");
+ return -ENOTSUPP;
+ }
+ win->vas_win.winid = retbuf[0];
+ win->win_addr = retbuf[1];
+ win->complete_irq = retbuf[2];
+ win->fault_irq = retbuf[3];
+ return 0;
+ }
+
+ pr_err("H_ALLOCATE_VAS_WINDOW error: %ld, wintype: %u, credits: %u\n",
+ rc, wintype, credits);
+
+ return -EIO;
+}
+
+/*
+ * Deallocate VAS window hcall.
+ */
+static int h_deallocate_vas_window(u64 winid)
+{
+ long rc;
+
+ do {
+ rc = plpar_hcall_norets(H_DEALLOCATE_VAS_WINDOW, winid);
+
+ rc = hcall_return_busy_check(rc);
+ } while (rc == H_BUSY);
+
+ if (rc == H_SUCCESS)
+ return 0;
+
+ pr_err("H_DEALLOCATE_VAS_WINDOW error: %ld, winid: %llu\n",
+ rc, winid);
+ return -EIO;
+}
+
+/*
+ * Modify VAS window.
+ * After the window is opened with allocate window hcall, configure it
+ * with flags and LPAR PID before using.
+ */
+static int h_modify_vas_window(struct pseries_vas_window *win)
+{
+ long rc;
+
+ /*
+ * AMR value is not supported in Linux VAS implementation.
+ * The hypervisor ignores it if 0 is passed.
+ */
+ do {
+ rc = plpar_hcall_norets(H_MODIFY_VAS_WINDOW,
+ win->vas_win.winid, win->pid, 0,
+ VAS_MOD_WIN_FLAGS, 0);
+
+ rc = hcall_return_busy_check(rc);
+ } while (rc == H_BUSY);
+
+ if (rc == H_SUCCESS)
+ return 0;
+
+ pr_err("H_MODIFY_VAS_WINDOW error: %ld, winid %u pid %u\n",
+ rc, win->vas_win.winid, win->pid);
+ return -EIO;
+}
+
+/*
+ * This hcall is used to determine the capabilities from the hypervisor.
+ * @hcall: H_QUERY_VAS_CAPABILITIES or H_QUERY_NX_CAPABILITIES
+ * @query_type: If 0 is passed, the hypervisor returns the overall
+ * capabilities which provide all feature(s) that are
+ * available. Then query the hypervisor to get the
+ * corresponding capabilities for the specific feature.
+ * Example: H_QUERY_VAS_CAPABILITIES provides VAS GZIP QoS
+ * and VAS GZIP Default capabilities.
+ * H_QUERY_NX_CAPABILITIES provides NX GZIP
+ * capabilities.
+ * @result: Return buffer to save capabilities.
+ */
+int h_query_vas_capabilities(const u64 hcall, u8 query_type, u64 result)
+{
+ long rc;
+
+ rc = plpar_hcall_norets(hcall, query_type, result);
+
+ if (rc == H_SUCCESS)
+ return 0;
+
+ /* H_FUNCTION means HV does not support VAS so don't print an error */
+ if (rc != H_FUNCTION) {
+ pr_err("%s error %ld, query_type %u, result buffer 0x%llx\n",
+ (hcall == H_QUERY_VAS_CAPABILITIES) ?
+ "H_QUERY_VAS_CAPABILITIES" :
+ "H_QUERY_NX_CAPABILITIES",
+ rc, query_type, result);
+ }
+
+ return -EIO;
+}
+EXPORT_SYMBOL_GPL(h_query_vas_capabilities);
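For context, a minimal sketch of the two-step query this helper supports (illustrative only, not part of this patch; it assumes the hv_vas_all_caps buffer layout and reuses the caps_all, hv_cop_caps and VAS_GZIP_DEF_FEAT* definitions from this file):

	/* Illustrative sketch: query overall capabilities, then one feature. */
	static struct hv_vas_all_caps hv_caps_all;	/* assumed HV result buffer */

	static int __init example_query_vas_caps(void)
	{
		int rc;

		/* query_type 0: overall capabilities, i.e. which features exist */
		rc = h_query_vas_capabilities(H_QUERY_VAS_CAPABILITIES, 0,
					      (u64)virt_to_phys(&hv_caps_all));
		if (rc)
			return rc;

		caps_all.descriptor = be64_to_cpu(hv_caps_all.descriptor);
		caps_all.feat_type = be64_to_cpu(hv_caps_all.feat_type);

		/* then query the advertised feature for its own capabilities */
		if (caps_all.feat_type & VAS_GZIP_DEF_FEAT_BIT)
			rc = h_query_vas_capabilities(H_QUERY_VAS_CAPABILITIES,
						      VAS_GZIP_DEF_FEAT,
						      (u64)virt_to_phys(&hv_cop_caps));
		return rc;
	}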
+
+/*
+ * hcall to get fault CRB from the hypervisor.
+ */
+static int h_get_nx_fault(u32 winid, u64 buffer)
+{
+ long rc;
+
+ rc = plpar_hcall_norets(H_GET_NX_FAULT, winid, buffer);
+
+ if (rc == H_SUCCESS)
+ return 0;
+
+ pr_err("H_GET_NX_FAULT error: %ld, winid %u, buffer 0x%llx\n",
+ rc, winid, buffer);
+ return -EIO;
+
+}
+
+/*
+ * Handle the fault interrupt.
+ * When the fault interrupt is received for each window, query the
+ * hypervisor to get the fault CRB on the specific fault. Then
+ * process the CRB by updating the CSB, or send a signal if the
+ * user space CSB is invalid.
+ * Note: The hypervisor forwards an interrupt for each fault request.
+ * So there is one fault CRB to process for each H_GET_NX_FAULT hcall.
+ */
+static irqreturn_t pseries_vas_fault_thread_fn(int irq, void *data)
+{
+ struct pseries_vas_window *txwin = data;
+ struct coprocessor_request_block crb;
+ struct vas_user_win_ref *tsk_ref;
+ int rc;
+
+ while (atomic_read(&txwin->pending_faults)) {
+ rc = h_get_nx_fault(txwin->vas_win.winid, (u64)virt_to_phys(&crb));
+ if (!rc) {
+ tsk_ref = &txwin->vas_win.task_ref;
+ vas_dump_crb(&crb);
+ vas_update_csb(&crb, tsk_ref);
+ }
+ atomic_dec(&txwin->pending_faults);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * irq_default_primary_handler() can be used only with IRQF_ONESHOT
+ * which disables IRQ before executing the thread handler and enables
+ * it after. But disabling the interrupt sets the VAS IRQ OFF
+ * state in the hypervisor. If NX generates a fault interrupt
+ * during this window, the hypervisor will not deliver this
+ * interrupt to the LPAR. So use VAS specific IRQ handler instead
+ * of calling the default primary handler.
+ */
+static irqreturn_t pseries_vas_irq_handler(int irq, void *data)
+{
+ struct pseries_vas_window *txwin = data;
+
+ /*
+ * The thread handler will process this interrupt if it is
+ * already running.
+ */
+ atomic_inc(&txwin->pending_faults);
+
+ return IRQ_WAKE_THREAD;
+}
+
+/*
+ * Allocate window and setup IRQ mapping.
+ */
+static int allocate_setup_window(struct pseries_vas_window *txwin,
+ u64 *domain, u8 wintype)
+{
+ int rc;
+
+ rc = h_allocate_vas_window(txwin, domain, wintype, DEF_WIN_CREDS);
+ if (rc)
+ return rc;
+ /*
+ * On PowerVM, the hypervisor sets up and forwards the fault
+ * interrupt per window. So the IRQ setup and fault handling
+ * will be done for each open window separately.
+ */
+ txwin->fault_virq = irq_create_mapping(NULL, txwin->fault_irq);
+ if (!txwin->fault_virq) {
+ pr_err("Failed irq mapping %d\n", txwin->fault_irq);
+ rc = -EINVAL;
+ goto out_win;
+ }
+
+ txwin->name = kasprintf(GFP_KERNEL, "vas-win-%d",
+ txwin->vas_win.winid);
+ if (!txwin->name) {
+ rc = -ENOMEM;
+ goto out_irq;
+ }
+
+ rc = request_threaded_irq(txwin->fault_virq,
+ pseries_vas_irq_handler,
+ pseries_vas_fault_thread_fn, 0,
+ txwin->name, txwin);
+ if (rc) {
+ pr_err("VAS-Window[%d]: Request IRQ(%u) failed with %d\n",
+ txwin->vas_win.winid, txwin->fault_virq, rc);
+ goto out_free;
+ }
+
+ txwin->vas_win.wcreds_max = DEF_WIN_CREDS;
+
+ return 0;
+out_free:
+ kfree(txwin->name);
+out_irq:
+ irq_dispose_mapping(txwin->fault_virq);
+out_win:
+ h_deallocate_vas_window(txwin->vas_win.winid);
+ return rc;
+}
+
+static inline void free_irq_setup(struct pseries_vas_window *txwin)
+{
+ free_irq(txwin->fault_virq, txwin);
+ kfree(txwin->name);
+ irq_dispose_mapping(txwin->fault_virq);
+}
+
+static struct vas_window *vas_allocate_window(int vas_id, u64 flags,
+ enum vas_cop_type cop_type)
+{
+ long domain[PLPAR_HCALL9_BUFSIZE] = {VAS_DEFAULT_DOMAIN_ID};
+ struct vas_cop_feat_caps *cop_feat_caps;
+ struct vas_caps *caps;
+ struct pseries_vas_window *txwin;
+ int rc;
+
+ txwin = kzalloc(sizeof(*txwin), GFP_KERNEL);
+ if (!txwin)
+ return ERR_PTR(-ENOMEM);
+
+ /*
+ * A VAS window can have many credits which means that many
+ * requests can be issued simultaneously. But the hypervisor
+ * restricts one credit per window.
+ * The hypervisor introduces 2 different types of credits:
+ * Default credit type (Uses normal priority FIFO):
+ * A limited number of credits are assigned to partitions
+ * based on processor entitlement. But these credits may be
+ * over-committed on a system depending on whether the CPUs
+ * are in shared or dedicated modes - that is, more requests
+ * may be issued across the system than NX can service at
+ * once, which can result in paste command failure (RMA_busy).
+ * Then the process has to resend requests or fall back to
+ * SW compression.
+ * Quality of Service (QoS) credit type (Uses high priority FIFO):
+ * To avoid NX HW contention, the system admins can assign
+ * QoS credits for each LPAR so that this partition is
+ * guaranteed access to NX resources. These credits are
+ * assigned to partitions via the HMC.
+ * Refer PAPR for more information.
+ *
+ * Allocate window with QoS credits if user requested. Otherwise
+ * default credits are used.
+ */
+ if (flags & VAS_TX_WIN_FLAG_QOS_CREDIT)
+ caps = &vascaps[VAS_GZIP_QOS_FEAT_TYPE];
+ else
+ caps = &vascaps[VAS_GZIP_DEF_FEAT_TYPE];
+
+ cop_feat_caps = &caps->caps;
+
+ if (atomic_inc_return(&cop_feat_caps->nr_used_credits) >
+ atomic_read(&cop_feat_caps->nr_total_credits)) {
+ pr_err("Credits are not available to allocate window\n");
+ rc = -EINVAL;
+ goto out;
+ }
+
+ if (vas_id == -1) {
+ /*
+ * The user space is requesting to allocate a window on
+ * a VAS instance where the process is executing.
+ * On PowerVM, domain values are passed to the hypervisor
+ * to select the VAS instance. Useful if the process has
+ * affinity to a NUMA node.
+ * The hypervisor selects the VAS instance if
+ * VAS_DEFAULT_DOMAIN_ID (-1) is passed for the domain values.
+ * The h_allocate_vas_window hcall is defined to take
+ * domain values as specified by h_home_node_associativity,
+ * so no unpacking needs to be done.
+ */
+ rc = plpar_hcall9(H_HOME_NODE_ASSOCIATIVITY, domain,
+ VPHN_FLAG_VCPU, hard_smp_processor_id());
+ if (rc != H_SUCCESS) {
+ pr_err("H_HOME_NODE_ASSOCIATIVITY error: %d\n", rc);
+ goto out;
+ }
+ }
+
+ txwin->pid = mfspr(SPRN_PID);
+
+ /*
+ * Allocate / Deallocate window hcalls and setup / free IRQs
+ * have to be protected with mutex.
+ * Open VAS window: Allocate window hcall and setup IRQ
+ * Close VAS window: Deallocate window hcall and free IRQ
+ * The hypervisor waits until all NX requests are
+ * completed before closing the window, so it expects the OS
+ * to handle NX faults; this means the IRQ can be freed only
+ * after the deallocate window hcall has returned.
+ * If the window is closed with the deallocate hcall before
+ * the IRQ is freed, the hypervisor can assign the same fault
+ * IRQ to a new allocate hcall. IRQ setup for the new window
+ * can then fail because the OS has not yet freed that
+ * fault IRQ.
+ */
+ mutex_lock(&vas_pseries_mutex);
+ if (migration_in_progress)
+ rc = -EBUSY;
+ else
+ rc = allocate_setup_window(txwin, (u64 *)&domain[0],
+ cop_feat_caps->win_type);
+ mutex_unlock(&vas_pseries_mutex);
+ if (rc)
+ goto out;
+
+ /*
+ * Modify window and it is ready to use.
+ */
+ rc = h_modify_vas_window(txwin);
+ if (!rc)
+ rc = get_vas_user_win_ref(&txwin->vas_win.task_ref);
+ if (rc)
+ goto out_free;
+
+ txwin->win_type = cop_feat_caps->win_type;
+ mutex_lock(&vas_pseries_mutex);
+ /*
+ * Possible to lose the acquired credit with DLPAR core
+ * removal after the window is opened. So if there are any
+ * closed windows (means with lost credits), do not give new
+ * window to user space. New windows will be opened only
+ * after the existing windows are reopened when credits are
+ * available.
+ */
+ if (!caps->nr_close_wins) {
+ list_add(&txwin->win_list, &caps->list);
+ caps->nr_open_windows++;
+ mutex_unlock(&vas_pseries_mutex);
+ vas_user_win_add_mm_context(&txwin->vas_win.task_ref);
+ return &txwin->vas_win;
+ }
+ mutex_unlock(&vas_pseries_mutex);
+
+ put_vas_user_win_ref(&txwin->vas_win.task_ref);
+ rc = -EBUSY;
+ pr_err("No credit is available to allocate window\n");
+
+out_free:
+ /*
+ * Window is not operational. Free IRQ before closing
+ * window so that do not have to hold mutex.
+ */
+ free_irq_setup(txwin);
+ h_deallocate_vas_window(txwin->vas_win.winid);
+out:
+ atomic_dec(&cop_feat_caps->nr_used_credits);
+ kfree(txwin);
+ return ERR_PTR(rc);
+}
+
+static u64 vas_paste_address(struct vas_window *vwin)
+{
+ struct pseries_vas_window *win;
+
+ win = container_of(vwin, struct pseries_vas_window, vas_win);
+ return win->win_addr;
+}
+
+static int deallocate_free_window(struct pseries_vas_window *win)
+{
+ int rc = 0;
+
+ /*
+ * The hypervisor waits for all requests, including faults,
+ * to be processed before closing the window, which means all
+ * credits have to be returned. In the case of a fault
+ * request, a credit is returned after the OS issues the
+ * H_GET_NX_FAULT hcall.
+ * So free the IRQ after executing the H_DEALLOCATE_VAS_WINDOW
+ * hcall.
+ */
+ rc = h_deallocate_vas_window(win->vas_win.winid);
+ if (!rc)
+ free_irq_setup(win);
+
+ return rc;
+}
+
+static int vas_deallocate_window(struct vas_window *vwin)
+{
+ struct pseries_vas_window *win;
+ struct vas_cop_feat_caps *caps;
+ int rc = 0;
+
+ if (!vwin)
+ return -EINVAL;
+
+ win = container_of(vwin, struct pseries_vas_window, vas_win);
+
+ /* Should not happen */
+ if (win->win_type >= VAS_MAX_FEAT_TYPE) {
+ pr_err("Window (%u): Invalid window type %u\n",
+ vwin->winid, win->win_type);
+ return -EINVAL;
+ }
+
+ caps = &vascaps[win->win_type].caps;
+ mutex_lock(&vas_pseries_mutex);
+ /*
+ * The VAS window is already closed in the hypervisor when
+ * the credit was lost or during migration. So just remove the
+ * entry from the list, drop the task references and free the
+ * vas_window struct.
+ */
+ if (!(win->vas_win.status & VAS_WIN_NO_CRED_CLOSE) &&
+ !(win->vas_win.status & VAS_WIN_MIGRATE_CLOSE)) {
+ rc = deallocate_free_window(win);
+ if (rc) {
+ mutex_unlock(&vas_pseries_mutex);
+ return rc;
+ }
+ } else
+ vascaps[win->win_type].nr_close_wins--;
+
+ list_del(&win->win_list);
+ atomic_dec(&caps->nr_used_credits);
+ vascaps[win->win_type].nr_open_windows--;
+ mutex_unlock(&vas_pseries_mutex);
+
+ put_vas_user_win_ref(&vwin->task_ref);
+ mm_context_remove_vas_window(vwin->task_ref.mm);
+
+ kfree(win);
+ return 0;
+}
+
+static const struct vas_user_win_ops vops_pseries = {
+ .open_win = vas_allocate_window, /* Open and configure window */
+ .paste_addr = vas_paste_address, /* To do copy/paste */
+ .close_win = vas_deallocate_window, /* Close window */
+};
+
+/*
+ * Only the nx-gzip coprocessor type is supported now, but this API
+ * code can be extended to other coprocessor types later.
+ */
+int vas_register_api_pseries(struct module *mod, enum vas_cop_type cop_type,
+ const char *name)
+{
+ if (!copypaste_feat)
+ return -ENOTSUPP;
+
+ return vas_register_coproc_api(mod, cop_type, name, &vops_pseries);
+}
+EXPORT_SYMBOL_GPL(vas_register_api_pseries);
+
+void vas_unregister_api_pseries(void)
+{
+ vas_unregister_coproc_api();
+}
+EXPORT_SYMBOL_GPL(vas_unregister_api_pseries);
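As a usage note (a hypothetical caller, not part of this patch; the coprocessor type and device name are assumptions), a coprocessor driver would pair these calls roughly as follows:

	/* Hypothetical nx-gzip driver hooks using the pseries VAS API */
	static int __init example_nx_gzip_init(void)
	{
		return vas_register_api_pseries(THIS_MODULE, VAS_COP_TYPE_GZIP,
						"nx-gzip");
	}

	static void __exit example_nx_gzip_exit(void)
	{
		vas_unregister_api_pseries();
	}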
+
+/*
+ * Get the specific capabilities based on the feature type.
+ * Right now supports GZIP default and GZIP QoS capabilities.
+ */
+static int __init get_vas_capabilities(u8 feat, enum vas_cop_feat_type type,
+ struct hv_vas_cop_feat_caps *hv_caps)
+{
+ struct vas_cop_feat_caps *caps;
+ struct vas_caps *vcaps;
+ int rc = 0;
+
+ vcaps = &vascaps[type];
+ memset(vcaps, 0, sizeof(*vcaps));
+ INIT_LIST_HEAD(&vcaps->list);
+
+ vcaps->feat = feat;
+ caps = &vcaps->caps;
+
+ rc = h_query_vas_capabilities(H_QUERY_VAS_CAPABILITIES, feat,
+ (u64)virt_to_phys(hv_caps));
+ if (rc)
+ return rc;
+
+ caps->user_mode = hv_caps->user_mode;
+ if (!(caps->user_mode & VAS_COPY_PASTE_USER_MODE)) {
+ pr_err("User space COPY/PASTE is not supported\n");
+ return -ENOTSUPP;
+ }
+
+ caps->descriptor = be64_to_cpu(hv_caps->descriptor);
+ caps->win_type = hv_caps->win_type;
+ if (caps->win_type >= VAS_MAX_FEAT_TYPE) {
+ pr_err("Unsupported window type %u\n", caps->win_type);
+ return -EINVAL;
+ }
+ caps->max_lpar_creds = be16_to_cpu(hv_caps->max_lpar_creds);
+ caps->max_win_creds = be16_to_cpu(hv_caps->max_win_creds);
+ atomic_set(&caps->nr_total_credits,
+ be16_to_cpu(hv_caps->target_lpar_creds));
+ if (feat == VAS_GZIP_DEF_FEAT) {
+ caps->def_lpar_creds = be16_to_cpu(hv_caps->def_lpar_creds);
+
+ if (caps->max_win_creds < DEF_WIN_CREDS) {
+ pr_err("Window creds(%u) > max allowed window creds(%u)\n",
+ DEF_WIN_CREDS, caps->max_win_creds);
+ return -EINVAL;
+ }
+ }
+
+ rc = sysfs_add_vas_caps(caps);
+ if (rc)
+ return rc;
+
+ copypaste_feat = true;
+
+ return 0;
+}
+
+/*
+ * VAS windows can be closed due to lost credits when the core is
+ * removed. So reopen them if credits are available due to DLPAR
+ * core add and set the window active status. When NX sees the page
+ * fault on the unmapped paste address, the kernel handles the fault
+ * by setting the remapping to new paste address if the window is
+ * active.
+ */
+static int reconfig_open_windows(struct vas_caps *vcaps, int creds,
+ bool migrate)
+{
+ long domain[PLPAR_HCALL9_BUFSIZE] = {VAS_DEFAULT_DOMAIN_ID};
+ struct vas_cop_feat_caps *caps = &vcaps->caps;
+ struct pseries_vas_window *win = NULL, *tmp;
+ int rc, mv_ents = 0;
+ int flag;
+
+ /*
+ * Nothing to do if there are no closed windows.
+ */
+ if (!vcaps->nr_close_wins)
+ return 0;
+
+ /*
+ * For core removal, the hypervisor reduces the credits
+ * assigned to the LPAR and the kernel closes VAS windows
+ * in the hypervisor based on the reduced credits. The kernel
+ * uses LIFO (the last windows that are opened will be closed
+ * first) and expects to reopen them in the same order when
+ * credits become available.
+ * For example, 40 windows are closed when the LPAR loses 2
+ * (dedicated) cores. If 1 core is added, this LPAR gets 20 more
+ * credits, so the kernel can reopen 20 windows. Skip the first
+ * 20 entries in the closed-windows list and reopen the next 20.
+ * For partition migration, reopen all windows that are closed
+ * during resume.
+ */
+ if ((vcaps->nr_close_wins > creds) && !migrate)
+ mv_ents = vcaps->nr_close_wins - creds;
+
+ list_for_each_entry_safe(win, tmp, &vcaps->list, win_list) {
+ if (!mv_ents)
+ break;
+
+ mv_ents--;
+ }
+
+ /*
+ * Reopen only the windows that were closed earlier due to
+ * migration or DLPAR (lost credit), depending on the event.
+ */
+ if (migrate)
+ flag = VAS_WIN_MIGRATE_CLOSE;
+ else
+ flag = VAS_WIN_NO_CRED_CLOSE;
+
+ list_for_each_entry_safe_from(win, tmp, &vcaps->list, win_list) {
+ /*
+ * This window was closed by both DLPAR and migration
+ * events, so reopen it with the last event.
+ * User space is not suspended by the current migration
+ * notifier, so it can issue DLPAR CPU hotplug while
+ * migration is in progress. In this case the window
+ * will be reopened with the last event.
+ */
+ if ((win->vas_win.status & VAS_WIN_NO_CRED_CLOSE) &&
+ (win->vas_win.status & VAS_WIN_MIGRATE_CLOSE)) {
+ win->vas_win.status &= ~flag;
+ continue;
+ }
+
+ /*
+ * Nothing to do on this window if it is not closed
+ * with this flag
+ */
+ if (!(win->vas_win.status & flag))
+ continue;
+
+ rc = allocate_setup_window(win, (u64 *)&domain[0],
+ caps->win_type);
+ if (rc)
+ return rc;
+
+ rc = h_modify_vas_window(win);
+ if (rc)
+ goto out;
+
+ mutex_lock(&win->vas_win.task_ref.mmap_mutex);
+ /*
+ * Set window status to active
+ */
+ win->vas_win.status &= ~flag;
+ mutex_unlock(&win->vas_win.task_ref.mmap_mutex);
+ win->win_type = caps->win_type;
+ if (!--vcaps->nr_close_wins)
+ break;
+ }
+
+ return 0;
+out:
+ /*
+ * The window modify HCALL failed. So close the window in the
+ * hypervisor and return.
+ */
+ free_irq_setup(win);
+ h_deallocate_vas_window(win->vas_win.winid);
+ return rc;
+}
+
+/*
+ * The hypervisor reduces the available credits if the LPAR loses a core.
+ * This means the excess windows should not be active and user space
+ * should not be using them to send compression requests to NX.
+ * So the kernel closes the excess windows and unmaps the paste address
+ * such that user space sees paste instruction failures. It is then up
+ * to user space to fall back to SW compression and manage with the
+ * remaining windows.
+ */
+static int reconfig_close_windows(struct vas_caps *vcap, int excess_creds,
+ bool migrate)
+{
+ struct pseries_vas_window *win, *tmp;
+ struct vas_user_win_ref *task_ref;
+ struct vm_area_struct *vma;
+ int rc = 0, flag;
+
+ if (migrate)
+ flag = VAS_WIN_MIGRATE_CLOSE;
+ else
+ flag = VAS_WIN_NO_CRED_CLOSE;
+
+ list_for_each_entry_safe(win, tmp, &vcap->list, win_list) {
+ /*
+ * This window is already closed due to a lost credit
+ * or an earlier migration. Move on to the next window.
+ * For migration, nothing to do since this window was
+ * closed for DLPAR and will be reopened, even on the
+ * destination system, only by another DLPAR operation.
+ */
+ if ((win->vas_win.status & VAS_WIN_MIGRATE_CLOSE) ||
+ (win->vas_win.status & VAS_WIN_NO_CRED_CLOSE)) {
+ win->vas_win.status |= flag;
+ continue;
+ }
+
+ task_ref = &win->vas_win.task_ref;
+ mutex_lock(&task_ref->mmap_mutex);
+ vma = task_ref->vma;
+ /*
+ * The number of available credits is reduced, so select
+ * and close windows.
+ */
+ win->vas_win.status |= flag;
+
+ mmap_write_lock(task_ref->mm);
+ /*
+ * vma is set in the original mapping. But this mapping
+ * is done with mmap() after the window is opened with ioctl.
+ * So we may not see the original mapping if the core removal
+ * is done before the original mmap() and after the ioctl.
+ */
+ if (vma)
+ zap_page_range(vma, vma->vm_start,
+ vma->vm_end - vma->vm_start);
+
+ mmap_write_unlock(task_ref->mm);
+ mutex_unlock(&task_ref->mmap_mutex);
+ /*
+ * Close the VAS window in the hypervisor, but do not
+ * free the vas_window struct since it may be reused
+ * when credits become available later (DLPAR with
+ * adding cores). This struct will be used later
+ * when the process issues close(FD).
+ */
+ rc = deallocate_free_window(win);
+ /*
+ * This failure is from the hypervisor.
+ * No way to stop migration for these failures.
+ * So ignore error and continue closing other windows.
+ */
+ if (rc && !migrate)
+ return rc;
+
+ vcap->nr_close_wins++;
+
+ /*
+ * For migration, do not depend on lpar_creds in case of a
+ * mismatch with the hypervisor value (should not happen).
+ * So close all active windows in the list; they will be
+ * reopened based on the new lpar_creds on the destination
+ * system during resume.
+ */
+ if (!migrate && !--excess_creds)
+ break;
+ }
+
+ return 0;
+}
+
+/*
+ * Get new VAS capabilities when the core add/removal configuration
+ * changes. Reconfigure windows based on the credit availability
+ * from these new capabilities.
+ */
+int vas_reconfig_capabilties(u8 type, int new_nr_creds)
+{
+ struct vas_cop_feat_caps *caps;
+ int old_nr_creds;
+ struct vas_caps *vcaps;
+ int rc = 0, nr_active_wins;
+
+ if (type >= VAS_MAX_FEAT_TYPE) {
+ pr_err("Invalid credit type %d\n", type);
+ return -EINVAL;
+ }
+
+ vcaps = &vascaps[type];
+ caps = &vcaps->caps;
+
+ mutex_lock(&vas_pseries_mutex);
+
+ old_nr_creds = atomic_read(&caps->nr_total_credits);
+
+ atomic_set(&caps->nr_total_credits, new_nr_creds);
+ /*
+ * The total number of available credits may be decreased or
+ * increased with a DLPAR operation. This means some windows have
+ * to be closed / reopened. Hold the vas_pseries_mutex so that
+ * user space cannot open new windows.
+ */
+ if (old_nr_creds < new_nr_creds) {
+ /*
+ * If the existing target credits are less than the new
+ * target, reopen windows that were closed due to a
+ * previous DLPAR (core removal).
+ */
+ rc = reconfig_open_windows(vcaps, new_nr_creds - old_nr_creds,
+ false);
+ } else {
+ /*
+ * The number of active windows is more than the new LPAR
+ * available credits. So close the excess windows.
+ * On pseries, each window has 1 credit.
+ */
+ nr_active_wins = vcaps->nr_open_windows - vcaps->nr_close_wins;
+ if (nr_active_wins > new_nr_creds)
+ rc = reconfig_close_windows(vcaps,
+ nr_active_wins - new_nr_creds,
+ false);
+ }
+
+ mutex_unlock(&vas_pseries_mutex);
+ return rc;
+}
+
+int pseries_vas_dlpar_cpu(void)
+{
+ int new_nr_creds, rc;
+
+ rc = h_query_vas_capabilities(H_QUERY_VAS_CAPABILITIES,
+ vascaps[VAS_GZIP_DEF_FEAT_TYPE].feat,
+ (u64)virt_to_phys(&hv_cop_caps));
+ if (!rc) {
+ new_nr_creds = be16_to_cpu(hv_cop_caps.target_lpar_creds);
+ rc = vas_reconfig_capabilties(VAS_GZIP_DEF_FEAT_TYPE, new_nr_creds);
+ }
+
+ if (rc)
+ pr_err("Failed reconfig VAS capabilities with DLPAR\n");
+
+ return rc;
+}
+
+/*
+ * The total number of default credits available (target_credits)
+ * in the LPAR depends on the number of cores configured. It varies
+ * based on whether processors are in shared or dedicated mode.
+ * Register a notifier so that, when the CPU configuration changes
+ * with a DLPAR operation, we get the new target_credits (VAS default
+ * capabilities) and then update the existing window usage if needed.
+ */
+static int pseries_vas_notifier(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct of_reconfig_data *rd = data;
+ struct device_node *dn = rd->dn;
+ const __be32 *intserv = NULL;
+ int len;
+
+ /*
+ * For a shared CPU partition, the hypervisor assigns total credits
+ * based on entitled core capacity. So VAS window updates are
+ * handled from lparcfg_write().
+ */
+ if (is_shared_processor())
+ return NOTIFY_OK;
+
+ if ((action == OF_RECONFIG_ATTACH_NODE) ||
+ (action == OF_RECONFIG_DETACH_NODE))
+ intserv = of_get_property(dn, "ibm,ppc-interrupt-server#s",
+ &len);
+ /*
+ * Processor config is not changed
+ */
+ if (!intserv)
+ return NOTIFY_OK;
+
+ return pseries_vas_dlpar_cpu();
+}
+
+static struct notifier_block pseries_vas_nb = {
+ .notifier_call = pseries_vas_notifier,
+};
+
+/*
+ * For LPM, all windows have to be closed on the source partition
+ * before migration and reopened on the destination partition
+ * after migration. So close windows during suspend and reopen
+ * them during resume.
+ */
+int vas_migration_handler(int action)
+{
+ struct vas_cop_feat_caps *caps;
+ int old_nr_creds, new_nr_creds = 0;
+ struct vas_caps *vcaps;
+ int i, rc = 0;
+
+ /*
+ * NX-GZIP is not enabled. Nothing to do for migration.
+ */
+ if (!copypaste_feat)
+ return rc;
+
+ mutex_lock(&vas_pseries_mutex);
+
+ if (action == VAS_SUSPEND)
+ migration_in_progress = true;
+ else
+ migration_in_progress = false;
+
+ for (i = 0; i < VAS_MAX_FEAT_TYPE; i++) {
+ vcaps = &vascaps[i];
+ caps = &vcaps->caps;
+ old_nr_creds = atomic_read(&caps->nr_total_credits);
+
+ rc = h_query_vas_capabilities(H_QUERY_VAS_CAPABILITIES,
+ vcaps->feat,
+ (u64)virt_to_phys(&hv_cop_caps));
+ if (!rc) {
+ new_nr_creds = be16_to_cpu(hv_cop_caps.target_lpar_creds);
+ /*
+ * Should not happen. But in case it does, print messages,
+ * close all windows in the list during suspend and reopen
+ * windows based on the new lpar_creds on the destination
+ * system.
+ */
+ if (old_nr_creds != new_nr_creds) {
+ pr_err("Target credits mismatch with the hypervisor\n");
+ pr_err("state(%d): lpar creds: %d HV lpar creds: %d\n",
+ action, old_nr_creds, new_nr_creds);
+ pr_err("Used creds: %d, Active creds: %d\n",
+ atomic_read(&caps->nr_used_credits),
+ vcaps->nr_open_windows - vcaps->nr_close_wins);
+ }
+ } else {
+ pr_err("state(%d): Get VAS capabilities failed with %d\n",
+ action, rc);
+ /*
+ * We cannot stop migration with the current LPM
+ * implementation. So continue closing all windows in
+ * the list (during suspend) and return without
+ * reopening windows (during resume) if the VAS
+ * capabilities HCALL failed.
+ */
+ if (action == VAS_RESUME)
+ goto out;
+ }
+
+ switch (action) {
+ case VAS_SUSPEND:
+ rc = reconfig_close_windows(vcaps, vcaps->nr_open_windows,
+ true);
+ break;
+ case VAS_RESUME:
+ atomic_set(&caps->nr_total_credits, new_nr_creds);
+ rc = reconfig_open_windows(vcaps, new_nr_creds, true);
+ break;
+ default:
+ /* should not happen */
+ pr_err("Invalid migration action %d\n", action);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ /*
+ * Ignore errors during suspend and return for resume.
+ */
+ if (rc && (action == VAS_RESUME))
+ goto out;
+ }
+
+out:
+ mutex_unlock(&vas_pseries_mutex);
+ return rc;
+}
+
+static int __init pseries_vas_init(void)
+{
+ struct hv_vas_all_caps *hv_caps;
+ int rc = 0;
+
+ /*
+ * Linux supports user space COPY/PASTE only with Radix
+ */
+ if (!radix_enabled()) {
+ pr_err("API is supported only with radix page tables\n");
+ return -ENOTSUPP;
+ }
+
+ hv_caps = kmalloc(sizeof(*hv_caps), GFP_KERNEL);
+ if (!hv_caps)
+ return -ENOMEM;
+ /*
+ * Get VAS overall capabilities by passing 0 to feature type.
+ */
+ rc = h_query_vas_capabilities(H_QUERY_VAS_CAPABILITIES, 0,
+ (u64)virt_to_phys(hv_caps));
+ if (rc)
+ goto out;
+
+ caps_all.descriptor = be64_to_cpu(hv_caps->descriptor);
+ caps_all.feat_type = be64_to_cpu(hv_caps->feat_type);
+
+ sysfs_pseries_vas_init(&caps_all);
+
+ /*
+ * QOS capabilities available
+ */
+ if (caps_all.feat_type & VAS_GZIP_QOS_FEAT_BIT) {
+ rc = get_vas_capabilities(VAS_GZIP_QOS_FEAT,
+ VAS_GZIP_QOS_FEAT_TYPE, &hv_cop_caps);
+
+ if (rc)
+ goto out;
+ }
+ /*
+ * Default capabilities available
+ */
+ if (caps_all.feat_type & VAS_GZIP_DEF_FEAT_BIT)
+ rc = get_vas_capabilities(VAS_GZIP_DEF_FEAT,
+ VAS_GZIP_DEF_FEAT_TYPE, &hv_cop_caps);
+
+ if (!rc && copypaste_feat) {
+ if (firmware_has_feature(FW_FEATURE_LPAR))
+ of_reconfig_notifier_register(&pseries_vas_nb);
+
+ pr_info("GZIP feature is available\n");
+ } else {
+ /*
+ * Should not happen; only possible if the get default
+ * capabilities HCALL failed. So disable the copy/paste
+ * feature.
+ */
+ copypaste_feat = false;
+ }
+
+out:
+ kfree(hv_caps);
+ return rc;
+}
+machine_device_initcall(pseries, pseries_vas_init);
diff --git a/arch/powerpc/platforms/pseries/vas.h b/arch/powerpc/platforms/pseries/vas.h
new file mode 100644
index 000000000000..7115043ec488
--- /dev/null
+++ b/arch/powerpc/platforms/pseries/vas.h
@@ -0,0 +1,155 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright 2020-21 IBM Corp.
+ */
+
+#ifndef _VAS_H
+#define _VAS_H
+#include <asm/vas.h>
+#include <linux/mutex.h>
+#include <linux/stringify.h>
+
+/*
+ * VAS window modify flags
+ */
+#define VAS_MOD_WIN_CLOSE PPC_BIT(0)
+#define VAS_MOD_WIN_JOBS_KILL PPC_BIT(1)
+#define VAS_MOD_WIN_DR PPC_BIT(3)
+#define VAS_MOD_WIN_PR PPC_BIT(4)
+#define VAS_MOD_WIN_SF PPC_BIT(5)
+#define VAS_MOD_WIN_TA PPC_BIT(6)
+#define VAS_MOD_WIN_FLAGS (VAS_MOD_WIN_JOBS_KILL | VAS_MOD_WIN_DR | \
+ VAS_MOD_WIN_PR | VAS_MOD_WIN_SF)
+
+#define VAS_WIN_ACTIVE 0x0
+#define VAS_WIN_CLOSED 0x1
+#define VAS_WIN_INACTIVE 0x2 /* Inactive due to HW failure */
+/* Process of being modified, deallocated, or quiesced */
+#define VAS_WIN_MOD_IN_PROCESS 0x3
+
+#define VAS_COPY_PASTE_USER_MODE 0x00000001
+#define VAS_COP_OP_USER_MODE 0x00000010
+
+#define VAS_GZIP_QOS_CAPABILITIES 0x56516F73477A6970
+#define VAS_GZIP_DEFAULT_CAPABILITIES 0x56446566477A6970
+
+enum vas_migrate_action {
+ VAS_SUSPEND,
+ VAS_RESUME,
+};
+
+/*
+ * Co-processor feature - GZIP QoS windows or GZIP default windows
+ */
+enum vas_cop_feat_type {
+ VAS_GZIP_QOS_FEAT_TYPE,
+ VAS_GZIP_DEF_FEAT_TYPE,
+ VAS_MAX_FEAT_TYPE,
+};
+
+/*
+ * Used to get feature-specific capabilities from the
+ * hypervisor.
+ */
+struct hv_vas_cop_feat_caps {
+ __be64 descriptor;
+ u8 win_type; /* Default or QoS type */
+ u8 user_mode;
+ __be16 max_lpar_creds;
+ __be16 max_win_creds;
+ union {
+ __be16 reserved;
+ __be16 def_lpar_creds; /* Used for default capabilities */
+ };
+ __be16 target_lpar_creds;
+} __packed __aligned(0x1000);
+
+/*
+ * Feature specific (QoS or default) capabilities.
+ */
+struct vas_cop_feat_caps {
+ u64 descriptor;
+ u8 win_type; /* Default or QoS type */
+ u8 user_mode; /* User mode copy/paste or COP HCALL */
+ u16 max_lpar_creds; /* Max credits available in LPAR */
+ /* Max credits can be assigned per window */
+ u16 max_win_creds;
+ union {
+ u16 reserved; /* Used for QoS credit type */
+ u16 def_lpar_creds; /* Used for default credit type */
+ };
+ /* Total LPAR available credits. Can be different from max LPAR */
+ /* credits due to DLPAR operation */
+ atomic_t nr_total_credits; /* Total credits assigned to LPAR */
+ atomic_t nr_used_credits; /* Used credits so far */
+};
+
+/*
+ * Per-feature (QoS or default) structure to store capabilities and
+ * the list of open windows.
+ */
+struct vas_caps {
+ struct vas_cop_feat_caps caps;
+ struct list_head list; /* List of open windows */
+ int nr_close_wins; /* closed windows in the hypervisor for DLPAR */
+ int nr_open_windows; /* Number of successful open windows */
+ u8 feat; /* Feature type */
+};
+
+/*
+ * To get window information from the hypervisor.
+ */
+struct hv_vas_win_lpar {
+ __be16 version;
+ u8 win_type;
+ u8 status;
+ __be16 credits; /* No of credits assigned to this window */
+ __be16 reserved;
+ __be32 pid; /* LPAR Process ID */
+ __be32 tid; /* LPAR Thread ID */
+ __be64 win_addr; /* Paste address */
+ __be32 interrupt; /* Interrupt when NX request completes */
+ __be32 fault; /* Interrupt when NX sees fault */
+ /* Associativity Domain Identifiers as returned in */
+ /* H_HOME_NODE_ASSOCIATIVITY */
+ __be64 domain[6];
+ __be64 win_util; /* Number of bytes processed */
+} __packed __aligned(0x1000);
+
+struct pseries_vas_window {
+ struct vas_window vas_win;
+ u64 win_addr; /* Physical paste address */
+ u8 win_type; /* QoS or Default window */
+ u32 complete_irq; /* Completion interrupt */
+ u32 fault_irq; /* Fault interrupt */
+ u64 domain[6]; /* Associativity domain Ids */
+ /* in which this window is allocated */
+ u64 util;
+ u32 pid; /* PID associated with this window */
+
+ /* List of opened windows, used for LPM */
+ struct list_head win_list;
+ u64 flags;
+ char *name;
+ int fault_virq;
+ atomic_t pending_faults; /* Number of pending faults */
+};
+
+int sysfs_add_vas_caps(struct vas_cop_feat_caps *caps);
+int vas_reconfig_capabilties(u8 type, int new_nr_creds);
+int __init sysfs_pseries_vas_init(struct vas_all_caps *vas_caps);
+
+#ifdef CONFIG_PPC_VAS
+int vas_migration_handler(int action);
+int pseries_vas_dlpar_cpu(void);
+#else
+static inline int vas_migration_handler(int action)
+{
+ return 0;
+}
+static inline int pseries_vas_dlpar_cpu(void)
+{
+ return 0;
+}
+#endif
+#endif /* _VAS_H */
diff --git a/arch/powerpc/platforms/pseries/vio.c b/arch/powerpc/platforms/pseries/vio.c
index f682b7babc09..00ecac2c205b 100644
--- a/arch/powerpc/platforms/pseries/vio.c
+++ b/arch/powerpc/platforms/pseries/vio.c
@@ -20,8 +20,10 @@
#include <linux/console.h>
#include <linux/export.h>
#include <linux/mm.h>
-#include <linux/dma-mapping.h>
+#include <linux/dma-map-ops.h>
#include <linux/kobject.h>
+#include <linux/kexec.h>
+#include <linux/of_irq.h>
#include <asm/iommu.h>
#include <asm/dma.h>
@@ -31,6 +33,7 @@
#include <asm/tce.h>
#include <asm/page.h>
#include <asm/hvcall.h>
+#include <asm/machdep.h>
static struct vio_dev vio_bus_device = { /* fake "parent" device */
.name = "vio",
@@ -558,7 +561,8 @@ static int vio_dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
for_each_sg(sglist, sgl, nelems, count)
alloc_size += roundup(sgl->length, IOMMU_PAGE_SIZE(tbl));
- if (vio_cmo_alloc(viodev, alloc_size))
+ ret = vio_cmo_alloc(viodev, alloc_size);
+ if (ret)
goto out_fail;
ret = ppc_iommu_map_sg(dev, tbl, sglist, nelems, dma_get_mask(dev),
direction, attrs);
@@ -575,7 +579,7 @@ out_deallocate:
vio_cmo_dealloc(viodev, alloc_size);
out_fail:
atomic_inc(&viodev->cmo.allocs_failed);
- return 0;
+ return ret;
}
static void vio_dma_iommu_unmap_sg(struct device *dev,
@@ -607,6 +611,8 @@ static const struct dma_map_ops vio_dma_mapping_ops = {
.get_required_mask = dma_iommu_get_required_mask,
.mmap = dma_common_mmap,
.get_sgtable = dma_common_get_sgtable,
+ .alloc_pages = dma_common_alloc_pages,
+ .free_pages = dma_common_free_pages,
};
/**
@@ -1056,7 +1062,7 @@ static struct attribute *vio_bus_attrs[] = {
};
ATTRIBUTE_GROUPS(vio_bus);
-static void vio_cmo_sysfs_init(void)
+static void __init vio_cmo_sysfs_init(void)
{
vio_bus_type.dev_groups = vio_cmo_dev_groups;
vio_bus_type.bus_groups = vio_bus_groups;
@@ -1068,7 +1074,7 @@ static int vio_cmo_bus_probe(struct vio_dev *viodev) { return 0; }
static void vio_cmo_bus_remove(struct vio_dev *viodev) {}
static void vio_cmo_set_dma_ops(struct vio_dev *viodev) {}
static void vio_cmo_bus_init(void) {}
-static void vio_cmo_sysfs_init(void) { }
+static void __init vio_cmo_sysfs_init(void) { }
#endif /* CONFIG_PPC_SMLPAR */
EXPORT_SYMBOL(vio_cmo_entitlement_update);
EXPORT_SYMBOL(vio_cmo_set_dev_desired);
@@ -1253,12 +1259,11 @@ static int vio_bus_probe(struct device *dev)
}
/* convert from struct device to struct vio_dev and pass to driver. */
-static int vio_bus_remove(struct device *dev)
+static void vio_bus_remove(struct device *dev)
{
struct vio_dev *viodev = to_vio_dev(dev);
struct vio_driver *viodrv = to_vio_driver(dev->driver);
struct device *devptr;
- int ret = 1;
/*
* Hold a reference to the device after the remove function is called
@@ -1267,13 +1272,26 @@ static int vio_bus_remove(struct device *dev)
devptr = get_device(dev);
if (viodrv->remove)
- ret = viodrv->remove(viodev);
+ viodrv->remove(viodev);
- if (!ret && firmware_has_feature(FW_FEATURE_CMO))
+ if (firmware_has_feature(FW_FEATURE_CMO))
vio_cmo_bus_remove(viodev);
put_device(devptr);
- return ret;
+}
+
+static void vio_bus_shutdown(struct device *dev)
+{
+ struct vio_dev *viodev = to_vio_dev(dev);
+ struct vio_driver *viodrv;
+
+ if (dev->driver) {
+ viodrv = to_vio_driver(dev->driver);
+ if (viodrv->shutdown)
+ viodrv->shutdown(viodev);
+ else if (kexec_in_progress)
+ vio_bus_remove(dev);
+ }
}
/**
@@ -1283,6 +1301,10 @@ static int vio_bus_remove(struct device *dev)
int __vio_register_driver(struct vio_driver *viodrv, struct module *owner,
const char *mod_name)
{
+ // vio_bus_type is only initialised for pseries
+ if (!machine_is(pseries))
+ return -ENODEV;
+
pr_debug("%s: driver %s registering\n", __func__, viodrv->name);
/* fill in 'struct driver' fields */
@@ -1458,7 +1480,7 @@ EXPORT_SYMBOL(vio_register_device_node);
* Starting from the root node provide, register the device node for
* each child beneath the root.
*/
-static void vio_bus_scan_register_devices(char *root_name)
+static void __init vio_bus_scan_register_devices(char *root_name)
{
struct device_node *node_root, *node_child;
@@ -1513,7 +1535,7 @@ static int __init vio_bus_init(void)
return 0;
}
-postcore_initcall(vio_bus_init);
+machine_postcore_initcall(pseries, vio_bus_init);
static int __init vio_device_init(void)
{
@@ -1522,7 +1544,7 @@ static int __init vio_device_init(void)
return 0;
}
-device_initcall(vio_device_init);
+machine_device_initcall(pseries, vio_device_init);
static ssize_t name_show(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -1611,6 +1633,7 @@ struct bus_type vio_bus_type = {
.match = vio_bus_match,
.probe = vio_bus_probe,
.remove = vio_bus_remove,
+ .shutdown = vio_bus_shutdown,
};
/**
@@ -1628,7 +1651,6 @@ const void *vio_get_attribute(struct vio_dev *vdev, char *which, int *length)
}
EXPORT_SYMBOL(vio_get_attribute);
-#ifdef CONFIG_PPC_PSERIES
/* vio_find_name() - internal because only vio.c knows how we formatted the
* kobject name
*/
@@ -1698,11 +1720,10 @@ int vio_disable_interrupts(struct vio_dev *dev)
return rc;
}
EXPORT_SYMBOL(vio_disable_interrupts);
-#endif /* CONFIG_PPC_PSERIES */
static int __init vio_init(void)
{
dma_debug_add_bus(&vio_bus_type);
return 0;
}
-fs_initcall(vio_init);
+machine_fs_initcall(pseries, vio_init);
diff --git a/arch/powerpc/platforms/pseries/vphn.c b/arch/powerpc/platforms/pseries/vphn.c
index 3f07bf6c670e..cca474a2c396 100644
--- a/arch/powerpc/platforms/pseries/vphn.c
+++ b/arch/powerpc/platforms/pseries/vphn.c
@@ -82,7 +82,8 @@ long hcall_vphn(unsigned long cpu, u64 flags, __be32 *associativity)
long retbuf[PLPAR_HCALL9_BUFSIZE] = {0};
rc = plpar_hcall9(H_HOME_NODE_ASSOCIATIVITY, retbuf, flags, cpu);
- vphn_unpack_associativity(retbuf, associativity);
+ if (rc == H_SUCCESS)
+ vphn_unpack_associativity(retbuf, associativity);
return rc;
}
diff --git a/arch/powerpc/purgatory/.gitignore b/arch/powerpc/purgatory/.gitignore
index e9e66f178a6d..5e40575c1f2b 100644
--- a/arch/powerpc/purgatory/.gitignore
+++ b/arch/powerpc/purgatory/.gitignore
@@ -1,2 +1,2 @@
-kexec-purgatory.c
+# SPDX-License-Identifier: GPL-2.0-only
purgatory.ro
diff --git a/arch/powerpc/purgatory/Makefile b/arch/powerpc/purgatory/Makefile
index 7c6d8b14f440..a81d155b89ae 100644
--- a/arch/powerpc/purgatory/Makefile
+++ b/arch/powerpc/purgatory/Makefile
@@ -2,17 +2,13 @@
KASAN_SANITIZE := n
-targets += trampoline.o purgatory.ro kexec-purgatory.c
+targets += trampoline_$(BITS).o purgatory.ro
LDFLAGS_purgatory.ro := -e purgatory_start -r --no-undefined
-$(obj)/purgatory.ro: $(obj)/trampoline.o FORCE
+$(obj)/purgatory.ro: $(obj)/trampoline_$(BITS).o FORCE
$(call if_changed,ld)
-quiet_cmd_bin2c = BIN2C $@
- cmd_bin2c = $(objtree)/scripts/bin2c kexec_purgatory < $< > $@
-
-$(obj)/kexec-purgatory.c: $(obj)/purgatory.ro FORCE
- $(call if_changed,bin2c)
+$(obj)/kexec-purgatory.o: $(obj)/purgatory.ro
obj-y += kexec-purgatory.o
diff --git a/arch/powerpc/purgatory/kexec-purgatory.S b/arch/powerpc/purgatory/kexec-purgatory.S
new file mode 100644
index 000000000000..f494fd5a0526
--- /dev/null
+++ b/arch/powerpc/purgatory/kexec-purgatory.S
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+ .section .rodata, "a"
+
+ .align 8
+kexec_purgatory:
+ .globl kexec_purgatory
+ .incbin "arch/powerpc/purgatory/purgatory.ro"
+.Lkexec_purgatory_end:
+
+ .align 8
+kexec_purgatory_size:
+ .globl kexec_purgatory_size
+ .quad .Lkexec_purgatory_end - kexec_purgatory
diff --git a/arch/powerpc/purgatory/trampoline.S b/arch/powerpc/purgatory/trampoline_64.S
index a5a83c3f53e6..b35837c13852 100644
--- a/arch/powerpc/purgatory/trampoline.S
+++ b/arch/powerpc/purgatory/trampoline_64.S
@@ -10,8 +10,8 @@
*/
#include <asm/asm-compat.h>
+#include <asm/crashdump-ppc64.h>
- .machine ppc64
.balign 256
.globl purgatory_start
purgatory_start:
@@ -43,14 +43,39 @@ master:
mr %r17,%r3 /* save cpu id to r17 */
mr %r15,%r4 /* save physical address in reg15 */
+ /* Work out where we're running */
+ bcl 20, 31, 0f
+0: mflr %r18
+
+ /*
+ * Copy BACKUP_SRC_SIZE bytes from BACKUP_SRC_START to
+ * backup_start 8 bytes at a time.
+ *
+ * Use r3 = dest, r4 = src, r5 = size, r6 = count
+ */
+ ld %r3, (backup_start - 0b)(%r18)
+ cmpdi %cr0, %r3, 0
+ beq .Lskip_copy /* skip if there is no backup region */
+ lis %r5, BACKUP_SRC_SIZE@h
+ ori %r5, %r5, BACKUP_SRC_SIZE@l
+ cmpdi %cr0, %r5, 0
+ beq .Lskip_copy /* skip if copy size is zero */
+ lis %r4, BACKUP_SRC_START@h
+ ori %r4, %r4, BACKUP_SRC_START@l
+ li %r6, 0
+.Lcopy_loop:
+ ldx %r0, %r6, %r4
+ stdx %r0, %r6, %r3
+ addi %r6, %r6, 8
+ cmpld %cr0, %r6, %r5
+ blt .Lcopy_loop
+
+.Lskip_copy:
or %r3,%r3,%r3 /* ok now to high priority, lets boot */
lis %r6,0x1
mtctr %r6 /* delay a bit for slaves to catch up */
bdnz . /* before we overwrite 0-100 again */
- bl 0f /* Work out where we're running */
-0: mflr %r18
-
/* load device-tree address */
ld %r3, (dt_offset - 0b)(%r18)
mr %r16,%r3 /* save dt address in reg16 */
@@ -61,6 +86,10 @@ master:
li %r4,28
STWX_BE %r17,%r3,%r4 /* Store my cpu as __be32 at byte 28 */
1:
+ /* Load opal base and entry values in r8 & r9 respectively */
+ ld %r8,(opal_base - 0b)(%r18)
+ ld %r9,(opal_entry - 0b)(%r18)
+
/* load the kernel address */
ld %r4,(kernel - 0b)(%r18)
@@ -89,7 +118,6 @@ master:
rfid /* update MSR and start kernel */
-
.balign 8
.globl kernel
kernel:
@@ -102,6 +130,23 @@ dt_offset:
.8byte 0x0
.size dt_offset, . - dt_offset
+ .balign 8
+ .globl backup_start
+backup_start:
+ .8byte 0x0
+ .size backup_start, . - backup_start
+
+ .balign 8
+ .globl opal_base
+opal_base:
+ .8byte 0x0
+ .size opal_base, . - opal_base
+
+ .balign 8
+ .globl opal_entry
+opal_entry:
+ .8byte 0x0
+ .size opal_entry, . - opal_entry
.data
.balign 8
diff --git a/arch/powerpc/sysdev/Kconfig b/arch/powerpc/sysdev/Kconfig
index 9ebcc1337560..5aa92ff3622d 100644
--- a/arch/powerpc/sysdev/Kconfig
+++ b/arch/powerpc/sysdev/Kconfig
@@ -12,17 +12,11 @@ config PPC4xx_HSTA_MSI
depends on PCI_MSI
depends on PCI && 4xx
-config PPC4xx_MSI
- bool
- depends on PCI_MSI
- depends on PCI && 4xx
-
config PPC_MSI_BITMAP
bool
depends on PCI_MSI
default y if MPIC
default y if FSL_PCI
- default y if PPC4xx_MSI
default y if PPC_POWERNV
source "arch/powerpc/sysdev/xics/Kconfig"
diff --git a/arch/powerpc/sysdev/Makefile b/arch/powerpc/sysdev/Makefile
index cb5a5bd2cef5..9cb1d029511a 100644
--- a/arch/powerpc/sysdev/Makefile
+++ b/arch/powerpc/sysdev/Makefile
@@ -23,7 +23,6 @@ obj-$(CONFIG_FSL_PMC) += fsl_pmc.o
obj-$(CONFIG_FSL_CORENET_RCPM) += fsl_rcpm.o
obj-$(CONFIG_FSL_LBC) += fsl_lbc.o
obj-$(CONFIG_FSL_GTM) += fsl_gtm.o
-obj-$(CONFIG_FSL_85XX_CACHE_SRAM) += fsl_85xx_l2ctlr.o fsl_85xx_cache_sram.o
obj-$(CONFIG_FSL_RIO) += fsl_rio.o fsl_rmu.o
obj-$(CONFIG_TSI108_BRIDGE) += tsi108_pci.o tsi108_dev.o
obj-$(CONFIG_RTC_DRV_CMOS) += rtc_cmos_setup.o
@@ -31,8 +30,6 @@ obj-$(CONFIG_RTC_DRV_CMOS) += rtc_cmos_setup.o
obj-$(CONFIG_PPC_INDIRECT_PCI) += indirect_pci.o
obj-$(CONFIG_PPC_I8259) += i8259.o
obj-$(CONFIG_IPIC) += ipic.o
-obj-$(CONFIG_XILINX_VIRTEX) += xilinx_intc.o
-obj-$(CONFIG_XILINX_PCI) += xilinx_pci.o
obj-$(CONFIG_OF_RTC) += of_rtc.o
obj-$(CONFIG_CPM) += cpm_common.o
diff --git a/arch/powerpc/sysdev/cpm2.c b/arch/powerpc/sysdev/cpm2.c
index 07718b9a2c99..915f4d3991c3 100644
--- a/arch/powerpc/sysdev/cpm2.c
+++ b/arch/powerpc/sysdev/cpm2.c
@@ -39,7 +39,6 @@
#include <asm/irq.h>
#include <asm/mpc8260.h>
#include <asm/page.h>
-#include <asm/pgtable.h>
#include <asm/cpm2.h>
#include <asm/rheap.h>
#include <asm/fs_pd.h>
@@ -108,7 +107,7 @@ EXPORT_SYMBOL(cpm_command);
* memory mapped space.
* The baud rate clock is the system clock divided by something.
* It was set up long ago during the initial boot phase and is
- * is given to us.
+ * given to us.
* Baud rate clocks are zero-based in the driver code (as that maps
* to port numbers). Documentation uses 1-based numbering.
*/
@@ -136,7 +135,7 @@ void __cpm2_setbrg(uint brg, uint rate, uint clk, int div16, int src)
}
EXPORT_SYMBOL(__cpm2_setbrg);
-int cpm2_clk_setup(enum cpm_clk_target target, int clock, int mode)
+int __init cpm2_clk_setup(enum cpm_clk_target target, int clock, int mode)
{
int ret = 0;
int shift;
@@ -266,7 +265,7 @@ int cpm2_clk_setup(enum cpm_clk_target target, int clock, int mode)
return ret;
}
-int cpm2_smc_clk_setup(enum cpm_clk_target target, int clock)
+int __init cpm2_smc_clk_setup(enum cpm_clk_target target, int clock)
{
int ret = 0;
int shift;
@@ -327,7 +326,7 @@ struct cpm2_ioports {
u32 res[3];
};
-void cpm2_set_pin(int port, int pin, int flags)
+void __init cpm2_set_pin(int port, int pin, int flags)
{
struct cpm2_ioports __iomem *iop =
(struct cpm2_ioports __iomem *)&cpm2_immr->im_ioport;
diff --git a/arch/powerpc/sysdev/cpm2_pic.c b/arch/powerpc/sysdev/cpm2_pic.c
index 9e86074719a9..cb9ba4ef557a 100644
--- a/arch/powerpc/sysdev/cpm2_pic.c
+++ b/arch/powerpc/sysdev/cpm2_pic.c
@@ -30,11 +30,11 @@
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/irq.h>
+#include <linux/irqdomain.h>
#include <asm/immap_cpm2.h>
#include <asm/mpc8260.h>
#include <asm/io.h>
-#include <asm/prom.h>
#include <asm/fs_pd.h>
#include "cpm2_pic.h"
diff --git a/arch/powerpc/sysdev/cpm_common.c b/arch/powerpc/sysdev/cpm_common.c
index 71660bacb264..7dc1960f8bdb 100644
--- a/arch/powerpc/sysdev/cpm_common.c
+++ b/arch/powerpc/sysdev/cpm_common.c
@@ -68,6 +68,8 @@ static void udbg_putc_cpm(char c)
void __init udbg_init_cpm(void)
{
#ifdef CONFIG_PPC_8xx
+ mmu_mapin_immr();
+
cpm_udbg_txdesc = (u32 __iomem __force *)
(CONFIG_PPC_EARLY_DEBUG_CPM_ADDR - PHYS_IMMR_BASE +
VIRT_IMMR_BASE);
diff --git a/arch/powerpc/sysdev/dart_iommu.c b/arch/powerpc/sysdev/dart_iommu.c
index 6b4a34b36d98..98096bbfd62e 100644
--- a/arch/powerpc/sysdev/dart_iommu.c
+++ b/arch/powerpc/sysdev/dart_iommu.c
@@ -25,8 +25,8 @@
#include <linux/memblock.h>
#include <linux/gfp.h>
#include <linux/kmemleak.h>
+#include <linux/of_address.h>
#include <asm/io.h>
-#include <asm/prom.h>
#include <asm/iommu.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
@@ -226,7 +226,7 @@ static void dart_free(struct iommu_table *tbl, long index, long npages)
dart_cache_sync(orig_dp, orig_npages);
}
-static void allocate_dart(void)
+static void __init allocate_dart(void)
{
unsigned long tmp;
@@ -344,7 +344,8 @@ static void iommu_table_dart_setup(void)
iommu_table_dart.it_index = 0;
iommu_table_dart.it_blocksize = 1;
iommu_table_dart.it_ops = &iommu_dart_ops;
- iommu_init_table(&iommu_table_dart, -1, 0, 0);
+ if (!iommu_init_table(&iommu_table_dart, -1, 0, 0))
+ panic("Failed to initialize iommu table");
/* Reserve the last page of the DART to avoid possible prefetch
* past the DART mapped area
@@ -403,9 +404,10 @@ void __init iommu_init_early_dart(struct pci_controller_ops *controller_ops)
}
/* Initialize the DART HW */
- if (dart_init(dn) != 0)
+ if (dart_init(dn) != 0) {
+ of_node_put(dn);
return;
-
+ }
/*
* U4 supports a DART bypass, we use it for 64-bit capable devices to
* improve performance. However, that only works for devices connected
@@ -418,6 +420,7 @@ void __init iommu_init_early_dart(struct pci_controller_ops *controller_ops)
/* Setup pci_dma ops */
set_pci_dma_ops(&dma_iommu_ops);
+ of_node_put(dn);
}
#ifdef CONFIG_PM
diff --git a/arch/powerpc/sysdev/dcr-low.S b/arch/powerpc/sysdev/dcr-low.S
index efeeb1b885a1..329b9c4ae542 100644
--- a/arch/powerpc/sysdev/dcr-low.S
+++ b/arch/powerpc/sysdev/dcr-low.S
@@ -11,7 +11,7 @@
#include <asm/export.h>
#define DCR_ACCESS_PROLOG(table) \
- cmpli cr0,r3,1024; \
+ cmplwi cr0,r3,1024; \
rlwinm r3,r3,4,18,27; \
lis r5,table@h; \
ori r5,r5,table@l; \
diff --git a/arch/powerpc/sysdev/dcr.c b/arch/powerpc/sysdev/dcr.c
index 22991e1128e3..3093f14111e6 100644
--- a/arch/powerpc/sysdev/dcr.c
+++ b/arch/powerpc/sysdev/dcr.c
@@ -8,7 +8,7 @@
#include <linux/kernel.h>
#include <linux/export.h>
-#include <asm/prom.h>
+#include <linux/of_address.h>
#include <asm/dcr.h>
#ifdef CONFIG_PPC_DCR_MMIO
diff --git a/arch/powerpc/sysdev/ehv_pic.c b/arch/powerpc/sysdev/ehv_pic.c
index 48866e6c1efb..00705258ecf9 100644
--- a/arch/powerpc/sysdev/ehv_pic.c
+++ b/arch/powerpc/sysdev/ehv_pic.c
@@ -14,6 +14,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/irq.h>
+#include <linux/irqdomain.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
diff --git a/arch/powerpc/sysdev/fsl_85xx_cache_ctlr.h b/arch/powerpc/sysdev/fsl_85xx_cache_ctlr.h
deleted file mode 100644
index ce370749add9..000000000000
--- a/arch/powerpc/sysdev/fsl_85xx_cache_ctlr.h
+++ /dev/null
@@ -1,88 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright 2009-2010, 2012 Freescale Semiconductor, Inc
- *
- * QorIQ based Cache Controller Memory Mapped Registers
- *
- * Author: Vivek Mahajan <vivek.mahajan@freescale.com>
- */
-
-#ifndef __FSL_85XX_CACHE_CTLR_H__
-#define __FSL_85XX_CACHE_CTLR_H__
-
-#define L2CR_L2FI 0x40000000 /* L2 flash invalidate */
-#define L2CR_L2IO 0x00200000 /* L2 instruction only */
-#define L2CR_SRAM_ZERO 0x00000000 /* L2SRAM zero size */
-#define L2CR_SRAM_FULL 0x00010000 /* L2SRAM full size */
-#define L2CR_SRAM_HALF 0x00020000 /* L2SRAM half size */
-#define L2CR_SRAM_TWO_HALFS 0x00030000 /* L2SRAM two half sizes */
-#define L2CR_SRAM_QUART 0x00040000 /* L2SRAM one quarter size */
-#define L2CR_SRAM_TWO_QUARTS 0x00050000 /* L2SRAM two quarter size */
-#define L2CR_SRAM_EIGHTH 0x00060000 /* L2SRAM one eighth size */
-#define L2CR_SRAM_TWO_EIGHTH 0x00070000 /* L2SRAM two eighth size */
-
-#define L2SRAM_OPTIMAL_SZ_SHIFT 0x00000003 /* Optimum size for L2SRAM */
-
-#define L2SRAM_BAR_MSK_LO18 0xFFFFC000 /* Lower 18 bits */
-#define L2SRAM_BARE_MSK_HI4 0x0000000F /* Upper 4 bits */
-
-enum cache_sram_lock_ways {
- LOCK_WAYS_ZERO,
- LOCK_WAYS_EIGHTH,
- LOCK_WAYS_TWO_EIGHTH,
- LOCK_WAYS_HALF = 4,
- LOCK_WAYS_FULL = 8,
-};
-
-struct mpc85xx_l2ctlr {
- u32 ctl; /* 0x000 - L2 control */
- u8 res1[0xC];
- u32 ewar0; /* 0x010 - External write address 0 */
- u32 ewarea0; /* 0x014 - External write address extended 0 */
- u32 ewcr0; /* 0x018 - External write ctrl */
- u8 res2[4];
- u32 ewar1; /* 0x020 - External write address 1 */
- u32 ewarea1; /* 0x024 - External write address extended 1 */
- u32 ewcr1; /* 0x028 - External write ctrl 1 */
- u8 res3[4];
- u32 ewar2; /* 0x030 - External write address 2 */
- u32 ewarea2; /* 0x034 - External write address extended 2 */
- u32 ewcr2; /* 0x038 - External write ctrl 2 */
- u8 res4[4];
- u32 ewar3; /* 0x040 - External write address 3 */
- u32 ewarea3; /* 0x044 - External write address extended 3 */
- u32 ewcr3; /* 0x048 - External write ctrl 3 */
- u8 res5[0xB4];
- u32 srbar0; /* 0x100 - SRAM base address 0 */
- u32 srbarea0; /* 0x104 - SRAM base addr reg ext address 0 */
- u32 srbar1; /* 0x108 - SRAM base address 1 */
- u32 srbarea1; /* 0x10C - SRAM base addr reg ext address 1 */
- u8 res6[0xCF0];
- u32 errinjhi; /* 0xE00 - Error injection mask high */
- u32 errinjlo; /* 0xE04 - Error injection mask low */
- u32 errinjctl; /* 0xE08 - Error injection tag/ecc control */
- u8 res7[0x14];
- u32 captdatahi; /* 0xE20 - Error data high capture */
- u32 captdatalo; /* 0xE24 - Error data low capture */
- u32 captecc; /* 0xE28 - Error syndrome */
- u8 res8[0x14];
- u32 errdet; /* 0xE40 - Error detect */
- u32 errdis; /* 0xE44 - Error disable */
- u32 errinten; /* 0xE48 - Error interrupt enable */
- u32 errattr; /* 0xE4c - Error attribute capture */
- u32 erradrrl; /* 0xE50 - Error address capture low */
- u32 erradrrh; /* 0xE54 - Error address capture high */
- u32 errctl; /* 0xE58 - Error control */
- u8 res9[0x1A4];
-};
-
-struct sram_parameters {
- unsigned int sram_size;
- phys_addr_t sram_offset;
-};
-
-extern int instantiate_cache_sram(struct platform_device *dev,
- struct sram_parameters sram_params);
-extern void remove_cache_sram(struct platform_device *dev);
-
-#endif /* __FSL_85XX_CACHE_CTLR_H__ */
diff --git a/arch/powerpc/sysdev/fsl_85xx_cache_sram.c b/arch/powerpc/sysdev/fsl_85xx_cache_sram.c
deleted file mode 100644
index f6c665dac725..000000000000
--- a/arch/powerpc/sysdev/fsl_85xx_cache_sram.c
+++ /dev/null
@@ -1,147 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright 2009-2010 Freescale Semiconductor, Inc.
- *
- * Simple memory allocator abstraction for QorIQ (P1/P2) based Cache-SRAM
- *
- * Author: Vivek Mahajan <vivek.mahajan@freescale.com>
- *
- * This file is derived from the original work done
- * by Sylvain Munaut for the Bestcomm SRAM allocator.
- */
-
-#include <linux/kernel.h>
-#include <linux/export.h>
-#include <linux/slab.h>
-#include <linux/err.h>
-#include <linux/of_platform.h>
-#include <asm/pgtable.h>
-#include <asm/fsl_85xx_cache_sram.h>
-
-#include "fsl_85xx_cache_ctlr.h"
-
-struct mpc85xx_cache_sram *cache_sram;
-
-void *mpc85xx_cache_sram_alloc(unsigned int size,
- phys_addr_t *phys, unsigned int align)
-{
- unsigned long offset;
- unsigned long flags;
-
- if (unlikely(cache_sram == NULL))
- return NULL;
-
- if (!size || (size > cache_sram->size) || (align > cache_sram->size)) {
- pr_err("%s(): size(=%x) or align(=%x) zero or too big\n",
- __func__, size, align);
- return NULL;
- }
-
- if ((align & (align - 1)) || align <= 1) {
- pr_err("%s(): align(=%x) must be power of two and >1\n",
- __func__, align);
- return NULL;
- }
-
- spin_lock_irqsave(&cache_sram->lock, flags);
- offset = rh_alloc_align(cache_sram->rh, size, align, NULL);
- spin_unlock_irqrestore(&cache_sram->lock, flags);
-
- if (IS_ERR_VALUE(offset))
- return NULL;
-
- *phys = cache_sram->base_phys + offset;
-
- return (unsigned char *)cache_sram->base_virt + offset;
-}
-EXPORT_SYMBOL(mpc85xx_cache_sram_alloc);
-
-void mpc85xx_cache_sram_free(void *ptr)
-{
- unsigned long flags;
- BUG_ON(!ptr);
-
- spin_lock_irqsave(&cache_sram->lock, flags);
- rh_free(cache_sram->rh, ptr - cache_sram->base_virt);
- spin_unlock_irqrestore(&cache_sram->lock, flags);
-}
-EXPORT_SYMBOL(mpc85xx_cache_sram_free);
-
-int __init instantiate_cache_sram(struct platform_device *dev,
- struct sram_parameters sram_params)
-{
- int ret = 0;
-
- if (cache_sram) {
- dev_err(&dev->dev, "Already initialized cache-sram\n");
- return -EBUSY;
- }
-
- cache_sram = kzalloc(sizeof(struct mpc85xx_cache_sram), GFP_KERNEL);
- if (!cache_sram) {
- dev_err(&dev->dev, "Out of memory for cache_sram structure\n");
- return -ENOMEM;
- }
-
- cache_sram->base_phys = sram_params.sram_offset;
- cache_sram->size = sram_params.sram_size;
-
- if (!request_mem_region(cache_sram->base_phys, cache_sram->size,
- "fsl_85xx_cache_sram")) {
- dev_err(&dev->dev, "%pOF: request memory failed\n",
- dev->dev.of_node);
- ret = -ENXIO;
- goto out_free;
- }
-
- cache_sram->base_virt = ioremap_coherent(cache_sram->base_phys,
- cache_sram->size);
- if (!cache_sram->base_virt) {
- dev_err(&dev->dev, "%pOF: ioremap_coherent failed\n",
- dev->dev.of_node);
- ret = -ENOMEM;
- goto out_release;
- }
-
- cache_sram->rh = rh_create(sizeof(unsigned int));
- if (IS_ERR(cache_sram->rh)) {
- dev_err(&dev->dev, "%pOF: Unable to create remote heap\n",
- dev->dev.of_node);
- ret = PTR_ERR(cache_sram->rh);
- goto out_unmap;
- }
-
- rh_attach_region(cache_sram->rh, 0, cache_sram->size);
- spin_lock_init(&cache_sram->lock);
-
- dev_info(&dev->dev, "[base:0x%llx, size:0x%x] configured and loaded\n",
- (unsigned long long)cache_sram->base_phys, cache_sram->size);
-
- return 0;
-
-out_unmap:
- iounmap(cache_sram->base_virt);
-
-out_release:
- release_mem_region(cache_sram->base_phys, cache_sram->size);
-
-out_free:
- kfree(cache_sram);
- return ret;
-}
-
-void remove_cache_sram(struct platform_device *dev)
-{
- BUG_ON(!cache_sram);
-
- rh_detach_region(cache_sram->rh, 0, cache_sram->size);
- rh_destroy(cache_sram->rh);
-
- iounmap(cache_sram->base_virt);
- release_mem_region(cache_sram->base_phys, cache_sram->size);
-
- kfree(cache_sram);
- cache_sram = NULL;
-
- dev_info(&dev->dev, "MPC85xx Cache-SRAM driver unloaded\n");
-}
diff --git a/arch/powerpc/sysdev/fsl_85xx_l2ctlr.c b/arch/powerpc/sysdev/fsl_85xx_l2ctlr.c
deleted file mode 100644
index 2d0af0c517bb..000000000000
--- a/arch/powerpc/sysdev/fsl_85xx_l2ctlr.c
+++ /dev/null
@@ -1,216 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright 2009-2010, 2012 Freescale Semiconductor, Inc.
- *
- * QorIQ (P1/P2) L2 controller init for Cache-SRAM instantiation
- *
- * Author: Vivek Mahajan <vivek.mahajan@freescale.com>
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/of_platform.h>
-#include <asm/io.h>
-
-#include "fsl_85xx_cache_ctlr.h"
-
-static char *sram_size;
-static char *sram_offset;
-struct mpc85xx_l2ctlr __iomem *l2ctlr;
-
-static int get_cache_sram_params(struct sram_parameters *sram_params)
-{
- unsigned long long addr;
- unsigned int size;
-
- if (!sram_size || (kstrtouint(sram_size, 0, &size) < 0))
- return -EINVAL;
-
- if (!sram_offset || (kstrtoull(sram_offset, 0, &addr) < 0))
- return -EINVAL;
-
- sram_params->sram_offset = addr;
- sram_params->sram_size = size;
-
- return 0;
-}
-
-static int __init get_size_from_cmdline(char *str)
-{
- if (!str)
- return 0;
-
- sram_size = str;
- return 1;
-}
-
-static int __init get_offset_from_cmdline(char *str)
-{
- if (!str)
- return 0;
-
- sram_offset = str;
- return 1;
-}
-
-__setup("cache-sram-size=", get_size_from_cmdline);
-__setup("cache-sram-offset=", get_offset_from_cmdline);
-
-static int mpc85xx_l2ctlr_of_probe(struct platform_device *dev)
-{
- long rval;
- unsigned int rem;
- unsigned char ways;
- const unsigned int *prop;
- unsigned int l2cache_size;
- struct sram_parameters sram_params;
-
- if (!dev->dev.of_node) {
- dev_err(&dev->dev, "Device's OF-node is NULL\n");
- return -EINVAL;
- }
-
- prop = of_get_property(dev->dev.of_node, "cache-size", NULL);
- if (!prop) {
- dev_err(&dev->dev, "Missing L2 cache-size\n");
- return -EINVAL;
- }
- l2cache_size = *prop;
-
- if (get_cache_sram_params(&sram_params))
- return 0; /* fall back to L2 cache only */
-
- rem = l2cache_size % sram_params.sram_size;
- ways = LOCK_WAYS_FULL * sram_params.sram_size / l2cache_size;
- if (rem || (ways & (ways - 1))) {
- dev_err(&dev->dev, "Illegal cache-sram-size in command line\n");
- return -EINVAL;
- }
-
- l2ctlr = of_iomap(dev->dev.of_node, 0);
- if (!l2ctlr) {
- dev_err(&dev->dev, "Can't map L2 controller\n");
- return -EINVAL;
- }
-
- /*
- * Write bits[0-17] to srbar0
- */
- out_be32(&l2ctlr->srbar0,
- lower_32_bits(sram_params.sram_offset) & L2SRAM_BAR_MSK_LO18);
-
- /*
- * Write bits[18-21] to srbare0
- */
-#ifdef CONFIG_PHYS_64BIT
- out_be32(&l2ctlr->srbarea0,
- upper_32_bits(sram_params.sram_offset) & L2SRAM_BARE_MSK_HI4);
-#endif
-
- clrsetbits_be32(&l2ctlr->ctl, L2CR_L2E, L2CR_L2FI);
-
- switch (ways) {
- case LOCK_WAYS_EIGHTH:
- setbits32(&l2ctlr->ctl,
- L2CR_L2E | L2CR_L2FI | L2CR_SRAM_EIGHTH);
- break;
-
- case LOCK_WAYS_TWO_EIGHTH:
- setbits32(&l2ctlr->ctl,
- L2CR_L2E | L2CR_L2FI | L2CR_SRAM_QUART);
- break;
-
- case LOCK_WAYS_HALF:
- setbits32(&l2ctlr->ctl,
- L2CR_L2E | L2CR_L2FI | L2CR_SRAM_HALF);
- break;
-
- case LOCK_WAYS_FULL:
- default:
- setbits32(&l2ctlr->ctl,
- L2CR_L2E | L2CR_L2FI | L2CR_SRAM_FULL);
- break;
- }
- eieio();
-
- rval = instantiate_cache_sram(dev, sram_params);
- if (rval < 0) {
- dev_err(&dev->dev, "Can't instantiate Cache-SRAM\n");
- iounmap(l2ctlr);
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int mpc85xx_l2ctlr_of_remove(struct platform_device *dev)
-{
- BUG_ON(!l2ctlr);
-
- iounmap(l2ctlr);
- remove_cache_sram(dev);
- dev_info(&dev->dev, "MPC85xx L2 controller unloaded\n");
-
- return 0;
-}
-
-static const struct of_device_id mpc85xx_l2ctlr_of_match[] = {
- {
- .compatible = "fsl,p2020-l2-cache-controller",
- },
- {
- .compatible = "fsl,p2010-l2-cache-controller",
- },
- {
- .compatible = "fsl,p1020-l2-cache-controller",
- },
- {
- .compatible = "fsl,p1011-l2-cache-controller",
- },
- {
- .compatible = "fsl,p1013-l2-cache-controller",
- },
- {
- .compatible = "fsl,p1022-l2-cache-controller",
- },
- {
- .compatible = "fsl,mpc8548-l2-cache-controller",
- },
- { .compatible = "fsl,mpc8544-l2-cache-controller",},
- { .compatible = "fsl,mpc8572-l2-cache-controller",},
- { .compatible = "fsl,mpc8536-l2-cache-controller",},
- { .compatible = "fsl,p1021-l2-cache-controller",},
- { .compatible = "fsl,p1012-l2-cache-controller",},
- { .compatible = "fsl,p1025-l2-cache-controller",},
- { .compatible = "fsl,p1016-l2-cache-controller",},
- { .compatible = "fsl,p1024-l2-cache-controller",},
- { .compatible = "fsl,p1015-l2-cache-controller",},
- { .compatible = "fsl,p1010-l2-cache-controller",},
- { .compatible = "fsl,bsc9131-l2-cache-controller",},
- {},
-};
-
-static struct platform_driver mpc85xx_l2ctlr_of_platform_driver = {
- .driver = {
- .name = "fsl-l2ctlr",
- .of_match_table = mpc85xx_l2ctlr_of_match,
- },
- .probe = mpc85xx_l2ctlr_of_probe,
- .remove = mpc85xx_l2ctlr_of_remove,
-};
-
-static __init int mpc85xx_l2ctlr_of_init(void)
-{
- return platform_driver_register(&mpc85xx_l2ctlr_of_platform_driver);
-}
-
-static void __exit mpc85xx_l2ctlr_of_exit(void)
-{
- platform_driver_unregister(&mpc85xx_l2ctlr_of_platform_driver);
-}
-
-subsys_initcall(mpc85xx_l2ctlr_of_init);
-module_exit(mpc85xx_l2ctlr_of_exit);
-
-MODULE_DESCRIPTION("Freescale MPC85xx L2 controller init");
-MODULE_LICENSE("GPL v2");
diff --git a/arch/powerpc/sysdev/fsl_gtm.c b/arch/powerpc/sysdev/fsl_gtm.c
index 8963eaffb1b7..39186ad6b3c3 100644
--- a/arch/powerpc/sysdev/fsl_gtm.c
+++ b/arch/powerpc/sysdev/fsl_gtm.c
@@ -86,7 +86,7 @@ static LIST_HEAD(gtms);
*/
struct gtm_timer *gtm_get_timer16(void)
{
- struct gtm *gtm = NULL;
+ struct gtm *gtm;
int i;
list_for_each_entry(gtm, &gtms, list_node) {
@@ -103,7 +103,7 @@ struct gtm_timer *gtm_get_timer16(void)
spin_unlock_irq(&gtm->lock);
}
- if (gtm)
+ if (!list_empty(&gtms))
return ERR_PTR(-EBUSY);
return ERR_PTR(-ENODEV);
}
diff --git a/arch/powerpc/sysdev/fsl_lbc.c b/arch/powerpc/sysdev/fsl_lbc.c
index 1985e067e952..217cea150987 100644
--- a/arch/powerpc/sysdev/fsl_lbc.c
+++ b/arch/powerpc/sysdev/fsl_lbc.c
@@ -18,13 +18,14 @@
#include <linux/types.h>
#include <linux/io.h>
#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/mod_devicetable.h>
#include <linux/syscore_ops.h>
-#include <asm/prom.h>
#include <asm/fsl_lbc.h>
static DEFINE_SPINLOCK(fsl_lbc_lock);
@@ -37,7 +38,7 @@ EXPORT_SYMBOL(fsl_lbc_ctrl_dev);
*
* This function converts a base address of lbc into the right format for the
* BR register. If the SOC has eLBC then it returns 32bit physical address
- * else it convers a 34bit local bus physical address to correct format of
+ * else it converts a 34bit local bus physical address to correct format of
* 32bit address for BR register (Example: MPC8641).
*/
u32 fsl_lbc_addr(phys_addr_t addr_base)
diff --git a/arch/powerpc/sysdev/fsl_mpic_err.c b/arch/powerpc/sysdev/fsl_mpic_err.c
index 13583bbc3e8e..df06bb6b838f 100644
--- a/arch/powerpc/sysdev/fsl_mpic_err.c
+++ b/arch/powerpc/sysdev/fsl_mpic_err.c
@@ -8,6 +8,7 @@
#include <linux/irq.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
#include <asm/io.h>
#include <asm/irq.h>
@@ -57,7 +58,7 @@ static struct irq_chip fsl_mpic_err_chip = {
.irq_unmask = fsl_mpic_unmask_err,
};
-int mpic_setup_error_int(struct mpic *mpic, int intvec)
+int __init mpic_setup_error_int(struct mpic *mpic, int intvec)
{
int i;
@@ -98,7 +99,6 @@ static irqreturn_t fsl_error_int_handler(int irq, void *data)
struct mpic *mpic = (struct mpic *) data;
u32 eisr, eimr;
int errint;
- unsigned int cascade_irq;
eisr = mpic_fsl_err_read(mpic->err_regs, MPIC_ERR_INT_EISR);
eimr = mpic_fsl_err_read(mpic->err_regs, MPIC_ERR_INT_EIMR);
@@ -107,13 +107,11 @@ static irqreturn_t fsl_error_int_handler(int irq, void *data)
return IRQ_NONE;
while (eisr) {
+ int ret;
errint = __builtin_clz(eisr);
- cascade_irq = irq_linear_revmap(mpic->irqhost,
- mpic->err_int_vecs[errint]);
- WARN_ON(!cascade_irq);
- if (cascade_irq) {
- generic_handle_irq(cascade_irq);
- } else {
+ ret = generic_handle_domain_irq(mpic->irqhost,
+ mpic->err_int_vecs[errint]);
+ if (WARN_ON(ret)) {
eimr |= 1 << (31 - errint);
mpic_fsl_err_write(mpic->err_regs, eimr);
}
@@ -123,7 +121,7 @@ static irqreturn_t fsl_error_int_handler(int irq, void *data)
return IRQ_HANDLED;
}
-void mpic_err_int_init(struct mpic *mpic, irq_hw_number_t irqnum)
+void __init mpic_err_int_init(struct mpic *mpic, irq_hw_number_t irqnum)
{
unsigned int virq;
int ret;
diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c
index 808e7118abfc..73c2d70706c0 100644
--- a/arch/powerpc/sysdev/fsl_msi.c
+++ b/arch/powerpc/sysdev/fsl_msi.c
@@ -11,11 +11,13 @@
#include <linux/msi.h>
#include <linux/pci.h>
#include <linux/slab.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
#include <linux/seq_file.h>
#include <sysdev/fsl_soc.h>
-#include <asm/prom.h>
#include <asm/hw_irq.h>
#include <asm/ppc-pci.h>
#include <asm/mpic.h>
@@ -125,17 +127,13 @@ static void fsl_teardown_msi_irqs(struct pci_dev *pdev)
struct fsl_msi *msi_data;
irq_hw_number_t hwirq;
- for_each_pci_msi_entry(entry, pdev) {
- if (!entry->irq)
- continue;
+ msi_for_each_desc(entry, &pdev->dev, MSI_DESC_ASSOCIATED) {
hwirq = virq_to_hw(entry->irq);
msi_data = irq_get_chip_data(entry->irq);
irq_set_msi_desc(entry->irq, NULL);
irq_dispose_mapping(entry->irq);
msi_bitmap_free_hwirqs(&msi_data->bitmap, hwirq, 1);
}
-
- return;
}
static void fsl_compose_msi_msg(struct pci_dev *pdev, int hwirq,
@@ -211,11 +209,13 @@ static int fsl_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
dev_err(&pdev->dev,
"node %pOF has an invalid fsl,msi phandle %u\n",
hose->dn, np->phandle);
+ of_node_put(np);
return -EINVAL;
}
+ of_node_put(np);
}
- for_each_pci_msi_entry(entry, pdev) {
+ msi_for_each_desc(entry, &pdev->dev, MSI_DESC_NOTASSOCIATED) {
/*
* Loop over all the MSI devices until we find one that has an
* available interrupt.
@@ -266,7 +266,6 @@ out_free:
static irqreturn_t fsl_msi_cascade(int irq, void *data)
{
- unsigned int cascade_irq;
struct fsl_msi *msi_data;
int msir_index = -1;
u32 msir_value = 0;
@@ -279,9 +278,6 @@ static irqreturn_t fsl_msi_cascade(int irq, void *data)
msir_index = cascade_data->index;
- if (msir_index >= NR_MSI_REG_MAX)
- cascade_irq = 0;
-
switch (msi_data->feature & FSL_PIC_IP_MASK) {
case FSL_PIC_IP_MPIC:
msir_value = fsl_msi_read(msi_data->msi_regs,
@@ -305,15 +301,15 @@ static irqreturn_t fsl_msi_cascade(int irq, void *data)
}
while (msir_value) {
+ int err;
intr_index = ffs(msir_value) - 1;
- cascade_irq = irq_linear_revmap(msi_data->irqhost,
+ err = generic_handle_domain_irq(msi_data->irqhost,
msi_hwirq(msi_data, msir_index,
intr_index + have_shift));
- if (cascade_irq) {
- generic_handle_irq(cascade_irq);
+ if (!err)
ret = IRQ_HANDLED;
- }
+
have_shift += intr_index + 1;
msir_value = msir_value >> (intr_index + 1);
}
diff --git a/arch/powerpc/sysdev/fsl_pci.c b/arch/powerpc/sysdev/fsl_pci.c
index 4a8874bc1057..974d3db6faab 100644
--- a/arch/powerpc/sysdev/fsl_pci.c
+++ b/arch/powerpc/sysdev/fsl_pci.c
@@ -22,6 +22,8 @@
#include <linux/interrupt.h>
#include <linux/memblock.h>
#include <linux/log2.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/suspend.h>
@@ -29,7 +31,6 @@
#include <linux/uaccess.h>
#include <asm/io.h>
-#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include <asm/ppc-pci.h>
#include <asm/machdep.h>
@@ -37,6 +38,7 @@
#include <asm/disassemble.h>
#include <asm/ppc-opcode.h>
#include <asm/swiotlb.h>
+#include <asm/setup.h>
#include <sysdev/fsl_soc.h>
#include <sysdev/fsl_pci.h>
@@ -55,7 +57,7 @@ static void quirk_fsl_pcie_early(struct pci_dev *dev)
if ((hdr_type & 0x7f) != PCI_HEADER_TYPE_BRIDGE)
return;
- dev->class = PCI_CLASS_BRIDGE_PCI << 8;
+ dev->class = PCI_CLASS_BRIDGE_PCI_NORMAL;
fsl_pcie_bus_fixup = 1;
return;
}
@@ -179,6 +181,7 @@ static int setup_one_atmu(struct ccsr_pci __iomem *pci,
static bool is_kdump(void)
{
struct device_node *node;
+ bool ret;
node = of_find_node_by_type(NULL, "memory");
if (!node) {
@@ -186,7 +189,10 @@ static bool is_kdump(void)
return false;
}
- return of_property_read_bool(node, "linux,usable-memory");
+ ret = of_property_read_bool(node, "linux,usable-memory");
+ of_node_put(node);
+
+ return ret;
}
/* atmu setup for fsl pci/pcie controller */
@@ -218,7 +224,7 @@ static void setup_pci_atmu(struct pci_controller *hose)
* windows have implemented the default target value as 0xf
* for CCSR space.In all Freescale legacy devices the target
* of 0xf is reserved for local memory space. 9132 Rev1.0
- * now has local mempry space mapped to target 0x0 instead of
+ * now has local memory space mapped to target 0x0 instead of
* 0xf. Hence adding a workaround to remove the target 0xf
* defined for memory space from Inbound window attributes.
*/
@@ -455,7 +461,7 @@ static void setup_pci_atmu(struct pci_controller *hose)
}
}
-static void __init setup_pci_cmd(struct pci_controller *hose)
+static void setup_pci_cmd(struct pci_controller *hose)
{
u16 cmd;
int cap_x;
@@ -520,6 +526,7 @@ int fsl_add_bridge(struct platform_device *pdev, int is_primary)
struct resource rsrc;
const int *bus_range;
u8 hdr_type, progif;
+ u32 class_code;
struct device_node *dev;
struct ccsr_pci __iomem *pci;
u16 temp;
@@ -593,6 +600,13 @@ int fsl_add_bridge(struct platform_device *pdev, int is_primary)
PPC_INDIRECT_TYPE_SURPRESS_PRIMARY_BUS;
if (fsl_pcie_check_link(hose))
hose->indirect_type |= PPC_INDIRECT_TYPE_NO_PCIE_LINK;
+ /* Fix Class Code to PCI_CLASS_BRIDGE_PCI_NORMAL for pre-3.0 controller */
+ if (in_be32(&pci->block_rev1) < PCIE_IP_REV_3_0) {
+ early_read_config_dword(hose, 0, 0, PCIE_FSL_CSR_CLASSCODE, &class_code);
+ class_code &= 0xff;
+ class_code |= PCI_CLASS_BRIDGE_PCI_NORMAL << 8;
+ early_write_config_dword(hose, 0, 0, PCIE_FSL_CSR_CLASSCODE, class_code);
+ }
} else {
/*
* Set PBFR(PCI Bus Function Register)[10] = 1 to
@@ -929,7 +943,7 @@ u64 fsl_pci_immrbar_base(struct pci_controller *hose)
return 0;
}
-#ifdef CONFIG_E500
+#ifdef CONFIG_PPC_E500
static int mcheck_handle_load(struct pt_regs *regs, u32 inst)
{
unsigned int rd, ra, rb, d;
@@ -1066,13 +1080,13 @@ int fsl_pci_mcheck_exception(struct pt_regs *regs)
if (is_in_pci_mem_space(addr)) {
if (user_mode(regs))
- ret = probe_user_read(&inst, (void __user *)regs->nip,
- sizeof(inst));
+ ret = copy_from_user_nofault(&inst,
+ (void __user *)regs->nip, sizeof(inst));
else
- ret = probe_kernel_address((void *)regs->nip, inst);
+ ret = get_kernel_nofault(inst, (void *)regs->nip);
if (!ret && mcheck_handle_load(regs, inst)) {
- regs->nip += 4;
+ regs_add_return_ip(regs, 4);
return 1;
}
}
@@ -1106,7 +1120,7 @@ static const struct of_device_id pci_ids[] = {
struct device_node *fsl_pci_primary;
-void fsl_pci_assign_primary(void)
+void __init fsl_pci_assign_primary(void)
{
struct device_node *np;
@@ -1132,7 +1146,6 @@ void fsl_pci_assign_primary(void)
for_each_matching_node(np, pci_ids) {
if (of_device_is_available(np)) {
fsl_pci_primary = np;
- of_node_put(np);
return;
}
}
diff --git a/arch/powerpc/sysdev/fsl_pci.h b/arch/powerpc/sysdev/fsl_pci.h
index 1d7a41205695..093a875d7d1e 100644
--- a/arch/powerpc/sysdev/fsl_pci.h
+++ b/arch/powerpc/sysdev/fsl_pci.h
@@ -18,6 +18,7 @@ struct platform_device;
#define PCIE_LTSSM 0x0404 /* PCIE Link Training and Status */
#define PCIE_LTSSM_L0 0x16 /* L0 state */
+#define PCIE_FSL_CSR_CLASSCODE 0x474 /* FSL GPEX CSR */
#define PCIE_IP_REV_2_2 0x02080202 /* PCIE IP block version Rev2.2 */
#define PCIE_IP_REV_3_0 0x02080300 /* PCIE IP block version Rev3.0 */
#define PIWAR_EN 0x80000000 /* Enable */
@@ -120,7 +121,7 @@ u64 fsl_pci_immrbar_base(struct pci_controller *hose);
extern struct device_node *fsl_pci_primary;
#ifdef CONFIG_PCI
-void fsl_pci_assign_primary(void);
+void __init fsl_pci_assign_primary(void);
#else
static inline void fsl_pci_assign_primary(void) {}
#endif
diff --git a/arch/powerpc/sysdev/fsl_rio.c b/arch/powerpc/sysdev/fsl_rio.c
index 07c164f7f8cf..c8f044d62fe2 100644
--- a/arch/powerpc/sysdev/fsl_rio.c
+++ b/arch/powerpc/sysdev/fsl_rio.c
@@ -69,10 +69,10 @@
static DEFINE_SPINLOCK(fsl_rio_config_lock);
-#define __fsl_read_rio_config(x, addr, err, op) \
+#define ___fsl_read_rio_config(x, addr, err, op, barrier) \
__asm__ __volatile__( \
"1: "op" %1,0(%2)\n" \
- " eieio\n" \
+ " "barrier"\n" \
"2:\n" \
".section .fixup,\"ax\"\n" \
"3: li %1,-1\n" \
@@ -83,6 +83,14 @@ static DEFINE_SPINLOCK(fsl_rio_config_lock);
: "=r" (err), "=r" (x) \
: "b" (addr), "i" (-EFAULT), "0" (err))
+#ifdef CONFIG_BOOKE
+#define __fsl_read_rio_config(x, addr, err, op) \
+ ___fsl_read_rio_config(x, addr, err, op, "mbar")
+#else
+#define __fsl_read_rio_config(x, addr, err, op) \
+ ___fsl_read_rio_config(x, addr, err, op, "eieio")
+#endif
+
void __iomem *rio_regs_win;
void __iomem *rmu_regs_win;
resource_size_t rio_law_start;
@@ -90,7 +98,7 @@ resource_size_t rio_law_start;
struct fsl_rio_dbell *dbell;
struct fsl_rio_pw *pw;
-#ifdef CONFIG_E500
+#ifdef CONFIG_PPC_E500
int fsl_rio_mcheck_exception(struct pt_regs *regs)
{
const struct exception_table_entry *entry;
@@ -108,8 +116,8 @@ int fsl_rio_mcheck_exception(struct pt_regs *regs)
__func__);
out_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR),
0);
- regs->msr |= MSR_RI;
- regs->nip = extable_fixup(entry);
+ regs_set_recoverable(regs);
+ regs_set_return_ip(regs, extable_fixup(entry));
return 1;
}
}
@@ -505,8 +513,10 @@ int fsl_rio_setup(struct platform_device *dev)
if (rc) {
dev_err(&dev->dev, "Can't get %pOF property 'reg'\n",
rmu_node);
+ of_node_put(rmu_node);
goto err_rmu;
}
+ of_node_put(rmu_node);
rmu_regs_win = ioremap(rmu_regs.start, resource_size(&rmu_regs));
if (!rmu_regs_win) {
dev_err(&dev->dev, "Unable to map rmu register window\n");
diff --git a/arch/powerpc/sysdev/fsl_soc.c b/arch/powerpc/sysdev/fsl_soc.c
index 90ad16161604..78118c188993 100644
--- a/arch/powerpc/sysdev/fsl_soc.c
+++ b/arch/powerpc/sysdev/fsl_soc.c
@@ -31,7 +31,6 @@
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/time.h>
-#include <asm/prom.h>
#include <asm/machdep.h>
#include <sysdev/fsl_soc.h>
#include <mm/mmu_decl.h>
diff --git a/arch/powerpc/sysdev/ge/ge_pic.c b/arch/powerpc/sysdev/ge/ge_pic.c
index 02553a8ce191..a6c424680c37 100644
--- a/arch/powerpc/sysdev/ge/ge_pic.c
+++ b/arch/powerpc/sysdev/ge/ge_pic.c
@@ -14,12 +14,14 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/irq.h>
+#include <linux/irqdomain.h>
#include <linux/interrupt.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/spinlock.h>
#include <asm/byteorder.h>
#include <asm/io.h>
-#include <asm/prom.h>
#include <asm/irq.h>
#include "ge_pic.h"
@@ -150,7 +152,7 @@ static struct irq_chip gef_pic_chip = {
};
-/* When an interrupt is being configured, this call allows some flexibilty
+/* When an interrupt is being configured, this call allows some flexibility
* in deciding which irq_chip structure is used
*/
static int gef_pic_host_map(struct irq_domain *h, unsigned int virq,
diff --git a/arch/powerpc/sysdev/grackle.c b/arch/powerpc/sysdev/grackle.c
index aaba0b809032..fd2f94a884f0 100644
--- a/arch/powerpc/sysdev/grackle.c
+++ b/arch/powerpc/sysdev/grackle.c
@@ -9,9 +9,9 @@
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/init.h>
+#include <linux/of.h>
#include <asm/io.h>
-#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include <asm/grackle.h>
diff --git a/arch/powerpc/sysdev/i8259.c b/arch/powerpc/sysdev/i8259.c
index c1d76c344351..06e391485da7 100644
--- a/arch/powerpc/sysdev/i8259.c
+++ b/arch/powerpc/sysdev/i8259.c
@@ -6,11 +6,11 @@
#include <linux/ioport.h>
#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <asm/io.h>
#include <asm/i8259.h>
-#include <asm/prom.h>
static volatile void __iomem *pci_intack; /* RO, gives us the irq vector */
@@ -208,7 +208,7 @@ static const struct irq_domain_ops i8259_host_ops = {
.xlate = i8259_host_xlate,
};
-struct irq_domain *i8259_get_host(void)
+struct irq_domain *__init i8259_get_host(void)
{
return i8259_host;
}
@@ -260,7 +260,8 @@ void i8259_init(struct device_node *node, unsigned long intack_addr)
raw_spin_unlock_irqrestore(&i8259_lock, flags);
/* create a legacy host */
- i8259_host = irq_domain_add_legacy_isa(node, &i8259_host_ops, NULL);
+ i8259_host = irq_domain_add_legacy(node, NR_IRQS_LEGACY, 0, 0,
+ &i8259_host_ops, NULL);
if (i8259_host == NULL) {
printk(KERN_ERR "i8259: failed to allocate irq host !\n");
return;
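
The i8259 conversion above replaces the removed ISA-specific helper with the generic legacy-domain call; the old wrapper amounted to mapping the 16 legacy interrupts starting at hw number 0. A compatibility sketch (not part of the patch, name invented) of what the former helper boiled down to:

#include <linux/irqdomain.h>

static inline struct irq_domain *
example_add_legacy_isa_domain(struct device_node *node,
			      const struct irq_domain_ops *ops,
			      void *host_data)
{
	/* 16 legacy sources, virq and hwirq numbering both starting at 0 */
	return irq_domain_add_legacy(node, NR_IRQS_LEGACY, 0, 0,
				     ops, host_data);
}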
diff --git a/arch/powerpc/sysdev/indirect_pci.c b/arch/powerpc/sysdev/indirect_pci.c
index 09b36617425e..1aacb403a010 100644
--- a/arch/powerpc/sysdev/indirect_pci.c
+++ b/arch/powerpc/sysdev/indirect_pci.c
@@ -12,7 +12,6 @@
#include <linux/init.h>
#include <asm/io.h>
-#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
diff --git a/arch/powerpc/sysdev/ipic.c b/arch/powerpc/sysdev/ipic.c
index 7638a50a7c38..5f69e2d50f26 100644
--- a/arch/powerpc/sysdev/ipic.c
+++ b/arch/powerpc/sysdev/ipic.c
@@ -18,9 +18,10 @@
#include <linux/device.h>
#include <linux/spinlock.h>
#include <linux/fsl_devices.h>
+#include <linux/irqdomain.h>
+#include <linux/of_address.h>
#include <asm/irq.h>
#include <asm/io.h>
-#include <asm/prom.h>
#include <asm/ipic.h>
#include "ipic.h"
@@ -767,7 +768,7 @@ struct ipic * __init ipic_init(struct device_node *node, unsigned int flags)
return ipic;
}
-void ipic_set_default_priority(void)
+void __init ipic_set_default_priority(void)
{
ipic_write(primary_ipic->regs, IPIC_SIPRR_A, IPIC_PRIORITY_DEFAULT);
ipic_write(primary_ipic->regs, IPIC_SIPRR_B, IPIC_PRIORITY_DEFAULT);
diff --git a/arch/powerpc/sysdev/mmio_nvram.c b/arch/powerpc/sysdev/mmio_nvram.c
index 628f9b759c84..eb48210ef98e 100644
--- a/arch/powerpc/sysdev/mmio_nvram.c
+++ b/arch/powerpc/sysdev/mmio_nvram.c
@@ -10,12 +10,12 @@
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kernel.h>
+#include <linux/of_address.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <asm/machdep.h>
#include <asm/nvram.h>
-#include <asm/prom.h>
static void __iomem *mmio_nvram_start;
static long mmio_nvram_len;
diff --git a/arch/powerpc/sysdev/mpc5xxx_clocks.c b/arch/powerpc/sysdev/mpc5xxx_clocks.c
index 834a6d7fbd88..c5bf7e1b3780 100644
--- a/arch/powerpc/sysdev/mpc5xxx_clocks.c
+++ b/arch/powerpc/sysdev/mpc5xxx_clocks.c
@@ -1,31 +1,34 @@
// SPDX-License-Identifier: GPL-2.0
-/**
- * mpc5xxx_get_bus_frequency - Find the bus frequency for a device
- * @node: device node
- *
- * Returns bus frequency (IPS on MPC512x, IPB on MPC52xx),
- * or 0 if the bus frequency cannot be found.
- */
#include <linux/kernel.h>
-#include <linux/of_platform.h>
#include <linux/export.h>
+#include <linux/property.h>
+
#include <asm/mpc5xxx.h>
-unsigned long mpc5xxx_get_bus_frequency(struct device_node *node)
+/**
+ * mpc5xxx_fwnode_get_bus_frequency - Find the bus frequency for a firmware node
+ * @fwnode: firmware node
+ *
+ * Returns bus frequency (IPS on MPC512x, IPB on MPC52xx),
+ * or 0 if the bus frequency cannot be found.
+ */
+unsigned long mpc5xxx_fwnode_get_bus_frequency(struct fwnode_handle *fwnode)
{
- const unsigned int *p_bus_freq = NULL;
+ struct fwnode_handle *parent;
+ u32 bus_freq;
+ int ret;
- of_node_get(node);
- while (node) {
- p_bus_freq = of_get_property(node, "bus-frequency", NULL);
- if (p_bus_freq)
- break;
+ ret = fwnode_property_read_u32(fwnode, "bus-frequency", &bus_freq);
+ if (!ret)
+ return bus_freq;
- node = of_get_next_parent(node);
+ fwnode_for_each_parent_node(fwnode, parent) {
+ ret = fwnode_property_read_u32(parent, "bus-frequency", &bus_freq);
+ if (!ret)
+ return bus_freq;
}
- of_node_put(node);
- return p_bus_freq ? *p_bus_freq : 0;
+ return 0;
}
-EXPORT_SYMBOL(mpc5xxx_get_bus_frequency);
+EXPORT_SYMBOL(mpc5xxx_fwnode_get_bus_frequency);
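
With the conversion above, callers no longer walk struct device_node by hand; the lookup, including the walk up the parent nodes, goes through the fwnode property API. A hedged usage sketch from a driver's point of view (the example_ function name is invented):

#include <linux/device.h>
#include <linux/property.h>
#include <asm/mpc5xxx.h>

static unsigned long example_get_bus_freq(struct device *dev)
{
	/* Returns the IPS/IPB frequency in Hz, or 0 if neither the node
	 * nor any of its parents carries a "bus-frequency" property.
	 */
	return mpc5xxx_fwnode_get_bus_frequency(dev_fwnode(dev));
}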
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
index a3a72b780e67..9a9381f102d6 100644
--- a/arch/powerpc/sysdev/mpic.c
+++ b/arch/powerpc/sysdev/mpic.c
@@ -29,11 +29,13 @@
#include <linux/slab.h>
#include <linux/syscore_ops.h>
#include <linux/ratelimit.h>
+#include <linux/pgtable.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <asm/ptrace.h>
#include <asm/signal.h>
#include <asm/io.h>
-#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/machdep.h>
#include <asm/mpic.h>
@@ -602,7 +604,7 @@ static void __init mpic_scan_ht_pics(struct mpic *mpic)
/* Find an mpic associated with a given linux interrupt */
static struct mpic *mpic_find(unsigned int irq)
{
- if (irq < NUM_ISA_INTERRUPTS)
+ if (irq < NR_IRQS_LEGACY)
return NULL;
return irq_get_chip_data(irq);
@@ -1323,8 +1325,7 @@ struct mpic * __init mpic_alloc(struct device_node *node,
psrc = of_get_property(mpic->node, "protected-sources", &psize);
if (psrc) {
/* Allocate a bitmap with one bit per interrupt */
- unsigned int mapsize = BITS_TO_LONGS(intvec_top + 1);
- mpic->protected = kcalloc(mapsize, sizeof(long), GFP_KERNEL);
+ mpic->protected = bitmap_zalloc(intvec_top + 1, GFP_KERNEL);
BUG_ON(mpic->protected == NULL);
for (i = 0; i < psize/sizeof(u32); i++) {
if (psrc[i] > intvec_top)
@@ -1405,10 +1406,8 @@ struct mpic * __init mpic_alloc(struct device_node *node,
* with device trees generated by older versions of QEMU.
* fsl_version will be zero if MPIC_FSL is not set.
*/
- if (fsl_version < 0x400 && (flags & MPIC_ENABLE_COREINT)) {
- WARN_ON(ppc_md.get_irq != mpic_get_coreint_irq);
+ if (fsl_version < 0x400 && (flags & MPIC_ENABLE_COREINT))
ppc_md.get_irq = mpic_get_irq;
- }
/* Reset */
@@ -1840,7 +1839,7 @@ unsigned int mpic_get_mcirq(void)
}
#ifdef CONFIG_SMP
-void mpic_request_ipis(void)
+void __init mpic_request_ipis(void)
{
struct mpic *mpic = mpic_primary;
int i;
diff --git a/arch/powerpc/sysdev/mpic.h b/arch/powerpc/sysdev/mpic.h
index 73a31a429d46..bb460ff57a06 100644
--- a/arch/powerpc/sysdev/mpic.h
+++ b/arch/powerpc/sysdev/mpic.h
@@ -8,8 +8,8 @@
#ifdef CONFIG_PCI_MSI
extern void mpic_msi_reserve_hwirq(struct mpic *mpic, irq_hw_number_t hwirq);
-extern int mpic_msi_init_allocator(struct mpic *mpic);
-extern int mpic_u3msi_init(struct mpic *mpic);
+int __init mpic_msi_init_allocator(struct mpic *mpic);
+int __init mpic_u3msi_init(struct mpic *mpic);
#else
static inline void mpic_msi_reserve_hwirq(struct mpic *mpic,
irq_hw_number_t hwirq)
@@ -24,7 +24,7 @@ static inline int mpic_u3msi_init(struct mpic *mpic)
#endif
#if defined(CONFIG_PCI_MSI) && defined(CONFIG_PPC_PASEMI)
-int mpic_pasemi_msi_init(struct mpic *mpic);
+int __init mpic_pasemi_msi_init(struct mpic *mpic);
#else
static inline int mpic_pasemi_msi_init(struct mpic *mpic) { return -1; }
#endif
@@ -37,8 +37,8 @@ extern void mpic_reset_core(int cpu);
#ifdef CONFIG_FSL_SOC
extern int mpic_map_error_int(struct mpic *mpic, unsigned int virq, irq_hw_number_t hw);
-extern void mpic_err_int_init(struct mpic *mpic, irq_hw_number_t irqnum);
-extern int mpic_setup_error_int(struct mpic *mpic, int intvec);
+void __init mpic_err_int_init(struct mpic *mpic, irq_hw_number_t irqnum);
+int __init mpic_setup_error_int(struct mpic *mpic, int intvec);
#else
static inline int mpic_map_error_int(struct mpic *mpic, unsigned int virq, irq_hw_number_t hw)
{
diff --git a/arch/powerpc/sysdev/mpic_msgr.c b/arch/powerpc/sysdev/mpic_msgr.c
index f6b253e2be40..a439e33eae06 100644
--- a/arch/powerpc/sysdev/mpic_msgr.c
+++ b/arch/powerpc/sysdev/mpic_msgr.c
@@ -7,12 +7,13 @@
*/
#include <linux/list.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/slab.h>
-#include <asm/prom.h>
#include <asm/hw_irq.h>
#include <asm/ppc-pci.h>
#include <asm/mpic_msgr.h>
@@ -99,7 +100,7 @@ void mpic_msgr_disable(struct mpic_msgr *msgr)
EXPORT_SYMBOL_GPL(mpic_msgr_disable);
/* The following three functions are used to compute the order and number of
- * the message register blocks. They are clearly very inefficent. However,
+ * the message register blocks. They are clearly very inefficient. However,
* they are called *only* a few times during device initialization.
*/
static unsigned int mpic_msgr_number_of_blocks(void)
@@ -120,6 +121,7 @@ static unsigned int mpic_msgr_number_of_blocks(void)
count += 1;
}
+ of_node_put(aliases);
}
return count;
@@ -143,12 +145,18 @@ static int mpic_msgr_block_number(struct device_node *node)
for (index = 0; index < number_of_blocks; ++index) {
struct property *prop;
+ struct device_node *tn;
snprintf(buf, sizeof(buf), "mpic-msgr-block%d", index);
prop = of_find_property(aliases, buf, NULL);
- if (node == of_find_node_by_path(prop->value))
+ tn = of_find_node_by_path(prop->value);
+ if (node == tn) {
+ of_node_put(tn);
break;
+ }
+ of_node_put(tn);
}
+ of_node_put(aliases);
return index == number_of_blocks ? -1 : index;
}
@@ -191,7 +199,7 @@ static int mpic_msgr_probe(struct platform_device *dev)
/* IO map the message register block. */
of_address_to_resource(np, 0, &rsrc);
- msgr_block_addr = ioremap(rsrc.start, resource_size(&rsrc));
+ msgr_block_addr = devm_ioremap(&dev->dev, rsrc.start, resource_size(&rsrc));
if (!msgr_block_addr) {
dev_err(&dev->dev, "Failed to iomap MPIC message registers");
return -EFAULT;
diff --git a/arch/powerpc/sysdev/mpic_msi.c b/arch/powerpc/sysdev/mpic_msi.c
index 4695c04320ae..34246c8e01c2 100644
--- a/arch/powerpc/sysdev/mpic_msi.c
+++ b/arch/powerpc/sysdev/mpic_msi.c
@@ -4,10 +4,11 @@
*/
#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/of_irq.h>
#include <linux/bitmap.h>
#include <linux/msi.h>
#include <asm/mpic.h>
-#include <asm/prom.h>
#include <asm/hw_irq.h>
#include <asm/ppc-pci.h>
#include <asm/msi_bitmap.h>
@@ -24,7 +25,7 @@ void mpic_msi_reserve_hwirq(struct mpic *mpic, irq_hw_number_t hwirq)
}
#ifdef CONFIG_MPIC_U3_HT_IRQS
-static int mpic_msi_reserve_u3_hwirqs(struct mpic *mpic)
+static int __init mpic_msi_reserve_u3_hwirqs(struct mpic *mpic)
{
irq_hw_number_t hwirq;
const struct irq_domain_ops *ops = mpic->irqhost->ops;
@@ -37,7 +38,7 @@ static int mpic_msi_reserve_u3_hwirqs(struct mpic *mpic)
/* Reserve source numbers we know are reserved in the HW.
*
* This is a bit of a mix of U3 and U4 reserves but that's going
- * to work fine, we have plenty enugh numbers left so let's just
+ * to work fine, we have plenty enough numbers left so let's just
* mark anything we don't like reserved.
*/
for (i = 0; i < 8; i++)
@@ -68,13 +69,13 @@ static int mpic_msi_reserve_u3_hwirqs(struct mpic *mpic)
return 0;
}
#else
-static int mpic_msi_reserve_u3_hwirqs(struct mpic *mpic)
+static int __init mpic_msi_reserve_u3_hwirqs(struct mpic *mpic)
{
return -1;
}
#endif
-int mpic_msi_init_allocator(struct mpic *mpic)
+int __init mpic_msi_init_allocator(struct mpic *mpic)
{
int rc;
diff --git a/arch/powerpc/sysdev/mpic_timer.c b/arch/powerpc/sysdev/mpic_timer.c
index a42a20280035..b2f0a73e8f93 100644
--- a/arch/powerpc/sysdev/mpic_timer.c
+++ b/arch/powerpc/sysdev/mpic_timer.c
@@ -255,7 +255,7 @@ EXPORT_SYMBOL(mpic_start_timer);
/**
* mpic_stop_timer - stop hardware timer
- * @handle: the timer to be stoped
+ * @handle: the timer to be stopped
*
* The timer periodically generates an interrupt. Unless user stops the timer.
*/
@@ -384,7 +384,7 @@ struct mpic_timer *mpic_request_timer(irq_handler_t fn, void *dev,
}
EXPORT_SYMBOL(mpic_request_timer);
-static int timer_group_get_freq(struct device_node *np,
+static int __init timer_group_get_freq(struct device_node *np,
struct timer_group_priv *priv)
{
u32 div;
@@ -411,7 +411,7 @@ static int timer_group_get_freq(struct device_node *np,
return 0;
}
-static int timer_group_get_irq(struct device_node *np,
+static int __init timer_group_get_irq(struct device_node *np,
struct timer_group_priv *priv)
{
const u32 all_timer[] = { 0, TIMERS_PER_GROUP };
@@ -459,7 +459,7 @@ static int timer_group_get_irq(struct device_node *np,
return 0;
}
-static void timer_group_init(struct device_node *np)
+static void __init timer_group_init(struct device_node *np)
{
struct timer_group_priv *priv;
unsigned int i = 0;
diff --git a/arch/powerpc/sysdev/mpic_u3msi.c b/arch/powerpc/sysdev/mpic_u3msi.c
index 3861023d378a..1d8cfdfdf115 100644
--- a/arch/powerpc/sysdev/mpic_u3msi.c
+++ b/arch/powerpc/sysdev/mpic_u3msi.c
@@ -5,9 +5,9 @@
*/
#include <linux/irq.h>
+#include <linux/irqdomain.h>
#include <linux/msi.h>
#include <asm/mpic.h>
-#include <asm/prom.h>
#include <asm/hw_irq.h>
#include <asm/ppc-pci.h>
#include <asm/msi_bitmap.h>
@@ -78,7 +78,7 @@ static u64 find_u4_magic_addr(struct pci_dev *pdev, unsigned int hwirq)
/* U4 PCIe MSIs need to write to the special register in
* the bridge that generates interrupts. There should be
- * theorically a register at 0xf8005000 where you just write
+ * theoretically a register at 0xf8005000 where you just write
* the MSI number and that triggers the right interrupt, but
* unfortunately, this is busted in HW, the bridge endian swaps
* the value and hits the wrong nibble in the register.
@@ -104,17 +104,12 @@ static void u3msi_teardown_msi_irqs(struct pci_dev *pdev)
struct msi_desc *entry;
irq_hw_number_t hwirq;
- for_each_pci_msi_entry(entry, pdev) {
- if (!entry->irq)
- continue;
-
+ msi_for_each_desc(entry, &pdev->dev, MSI_DESC_ASSOCIATED) {
hwirq = virq_to_hw(entry->irq);
irq_set_msi_desc(entry->irq, NULL);
irq_dispose_mapping(entry->irq);
msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap, hwirq, 1);
}
-
- return;
}
static int u3msi_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
@@ -136,7 +131,7 @@ static int u3msi_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
return -ENXIO;
}
- for_each_pci_msi_entry(entry, pdev) {
+ msi_for_each_desc(entry, &pdev->dev, MSI_DESC_NOTASSOCIATED) {
hwirq = msi_bitmap_alloc_hwirqs(&msi_mpic->msi_bitmap, 1);
if (hwirq < 0) {
pr_debug("u3msi: failed allocating hwirq\n");
@@ -174,7 +169,7 @@ static int u3msi_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
return 0;
}
-int mpic_u3msi_init(struct mpic *mpic)
+int __init mpic_u3msi_init(struct mpic *mpic)
{
int rc;
struct pci_controller *phb;
diff --git a/arch/powerpc/sysdev/msi_bitmap.c b/arch/powerpc/sysdev/msi_bitmap.c
index fdd3e17150fc..0b6e37f3ffb8 100644
--- a/arch/powerpc/sysdev/msi_bitmap.c
+++ b/arch/powerpc/sysdev/msi_bitmap.c
@@ -8,6 +8,7 @@
#include <linux/kmemleak.h>
#include <linux/bitmap.h>
#include <linux/memblock.h>
+#include <linux/of.h>
#include <asm/msi_bitmap.h>
#include <asm/setup.h>
diff --git a/arch/powerpc/sysdev/of_rtc.c b/arch/powerpc/sysdev/of_rtc.c
index 1f408d34a6a7..420f949b7485 100644
--- a/arch/powerpc/sysdev/of_rtc.c
+++ b/arch/powerpc/sysdev/of_rtc.c
@@ -11,6 +11,8 @@
#include <linux/of_platform.h>
#include <linux/slab.h>
+#include <asm/prom.h>
+
static __initdata struct {
const char *compatible;
char *plat_name;
diff --git a/arch/powerpc/sysdev/pmi.c b/arch/powerpc/sysdev/pmi.c
index 9c8744e09a9c..9dabb50c36eb 100644
--- a/arch/powerpc/sysdev/pmi.c
+++ b/arch/powerpc/sysdev/pmi.c
@@ -17,12 +17,13 @@
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/workqueue.h>
+#include <linux/of_address.h>
#include <linux/of_device.h>
+#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <asm/io.h>
#include <asm/pmi.h>
-#include <asm/prom.h>
struct pmi_data {
struct list_head handler;
diff --git a/arch/powerpc/sysdev/rtc_cmos_setup.c b/arch/powerpc/sysdev/rtc_cmos_setup.c
index af0f9beddca9..47cc87bd6a33 100644
--- a/arch/powerpc/sysdev/rtc_cmos_setup.c
+++ b/arch/powerpc/sysdev/rtc_cmos_setup.c
@@ -14,8 +14,8 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mc146818rtc.h>
+#include <linux/of_address.h>
-#include <asm/prom.h>
static int __init add_rtc(void)
{
diff --git a/arch/powerpc/sysdev/tsi108_dev.c b/arch/powerpc/sysdev/tsi108_dev.c
index 0baec82510b9..30051397292f 100644
--- a/arch/powerpc/sysdev/tsi108_dev.c
+++ b/arch/powerpc/sysdev/tsi108_dev.c
@@ -16,13 +16,14 @@
#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/of_net.h>
#include <asm/tsi108.h>
#include <linux/atomic.h>
#include <asm/io.h>
#include <asm/irq.h>
-#include <asm/prom.h>
#include <mm/mmu_decl.h>
#undef DEBUG
@@ -51,13 +52,12 @@ phys_addr_t get_csrbase(void)
}
return tsi108_csr_base;
}
+EXPORT_SYMBOL(get_csrbase);
u32 get_vir_csrbase(void)
{
return (u32) (ioremap(get_csrbase(), 0x10000));
}
-
-EXPORT_SYMBOL(get_csrbase);
EXPORT_SYMBOL(get_vir_csrbase);
static int __init tsi108_eth_of_init(void)
@@ -73,7 +73,6 @@ static int __init tsi108_eth_of_init(void)
struct device_node *phy, *mdio;
hw_info tsi_eth_data;
const unsigned int *phy_id;
- const void *mac_addr;
const phandle *ph;
memset(r, 0, sizeof(r));
@@ -101,9 +100,7 @@ static int __init tsi108_eth_of_init(void)
goto err;
}
- mac_addr = of_get_mac_address(np);
- if (!IS_ERR(mac_addr))
- ether_addr_copy(tsi_eth_data.mac_addr, mac_addr);
+ of_get_mac_address(np, tsi_eth_data.mac_addr);
ph = of_get_property(np, "mdio-handle", NULL);
mdio = of_find_node_by_phandle(*ph);
diff --git a/arch/powerpc/sysdev/tsi108_pci.c b/arch/powerpc/sysdev/tsi108_pci.c
index 49f9541954f8..5af4c35ff584 100644
--- a/arch/powerpc/sysdev/tsi108_pci.c
+++ b/arch/powerpc/sysdev/tsi108_pci.c
@@ -12,7 +12,9 @@
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/irq.h>
+#include <linux/irqdomain.h>
#include <linux/interrupt.h>
+#include <linux/of_address.h>
#include <asm/byteorder.h>
#include <asm/io.h>
@@ -23,7 +25,6 @@
#include <asm/tsi108.h>
#include <asm/tsi108_pci.h>
#include <asm/tsi108_irq.h>
-#include <asm/prom.h>
#undef DEBUG
#ifdef DEBUG
@@ -257,7 +258,7 @@ static void tsi108_pci_int_unmask(u_int irq)
mb();
}
-static void init_pci_source(void)
+static void __init init_pci_source(void)
{
tsi108_write_reg(TSI108_PCI_OFFSET + TSI108_PCI_IRP_CFG_CTL,
0x0000ff00);
@@ -404,7 +405,8 @@ void __init tsi108_pci_int_init(struct device_node *node)
{
DBG("Tsi108_pci_int_init: initializing PCI interrupts\n");
- pci_irq_host = irq_domain_add_legacy_isa(node, &pci_irq_domain_ops, NULL);
+ pci_irq_host = irq_domain_add_legacy(node, NR_IRQS_LEGACY, 0, 0,
+ &pci_irq_domain_ops, NULL);
if (pci_irq_host == NULL) {
printk(KERN_ERR "pci_irq_host: failed to allocate irq domain!\n");
return;
diff --git a/arch/powerpc/sysdev/udbg_memcons.c b/arch/powerpc/sysdev/udbg_memcons.c
index d38bbeed219b..5020044400dc 100644
--- a/arch/powerpc/sysdev/udbg_memcons.c
+++ b/arch/powerpc/sysdev/udbg_memcons.c
@@ -92,7 +92,7 @@ int memcons_getc(void)
return c;
}
-void udbg_init_memcons(void)
+void __init udbg_init_memcons(void)
{
udbg_putc = memcons_putc;
udbg_getc = memcons_getc;
diff --git a/arch/powerpc/sysdev/xics/Kconfig b/arch/powerpc/sysdev/xics/Kconfig
index 304614c920aa..063d9195891f 100644
--- a/arch/powerpc/sysdev/xics/Kconfig
+++ b/arch/powerpc/sysdev/xics/Kconfig
@@ -12,3 +12,6 @@ config PPC_ICP_HV
config PPC_ICS_RTAS
def_bool n
+
+config PPC_ICS_NATIVE
+ def_bool n
diff --git a/arch/powerpc/sysdev/xics/Makefile b/arch/powerpc/sysdev/xics/Makefile
index ba1e3117b1c0..747063927c6c 100644
--- a/arch/powerpc/sysdev/xics/Makefile
+++ b/arch/powerpc/sysdev/xics/Makefile
@@ -4,4 +4,5 @@ obj-y += xics-common.o
obj-$(CONFIG_PPC_ICP_NATIVE) += icp-native.o
obj-$(CONFIG_PPC_ICP_HV) += icp-hv.o
obj-$(CONFIG_PPC_ICS_RTAS) += ics-rtas.o
+obj-$(CONFIG_PPC_ICS_NATIVE) += ics-native.o
obj-$(CONFIG_PPC_POWERNV) += ics-opal.o icp-opal.o
diff --git a/arch/powerpc/sysdev/xics/icp-hv.c b/arch/powerpc/sysdev/xics/icp-hv.c
index ad8117148ea3..cf8db19a4f7d 100644
--- a/arch/powerpc/sysdev/xics/icp-hv.c
+++ b/arch/powerpc/sysdev/xics/icp-hv.c
@@ -7,6 +7,7 @@
#include <linux/irq.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
#include <linux/cpu.h>
#include <linux/of.h>
@@ -161,7 +162,7 @@ static const struct icp_ops icp_hv_ops = {
#endif
};
-int icp_hv_init(void)
+int __init icp_hv_init(void)
{
struct device_node *np;
@@ -174,6 +175,7 @@ int icp_hv_init(void)
icp_ops = &icp_hv_ops;
+ of_node_put(np);
return 0;
}
diff --git a/arch/powerpc/sysdev/xics/icp-native.c b/arch/powerpc/sysdev/xics/icp-native.c
index 7d13d2ef5a90..edc17b6b1cc2 100644
--- a/arch/powerpc/sysdev/xics/icp-native.c
+++ b/arch/powerpc/sysdev/xics/icp-native.c
@@ -6,15 +6,16 @@
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/irq.h>
+#include <linux/irqdomain.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/of.h>
+#include <linux/of_address.h>
#include <linux/spinlock.h>
#include <linux/module.h>
-#include <asm/prom.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/irq.h>
diff --git a/arch/powerpc/sysdev/xics/icp-opal.c b/arch/powerpc/sysdev/xics/icp-opal.c
index 68fd2540b093..4dae624b9f2f 100644
--- a/arch/powerpc/sysdev/xics/icp-opal.c
+++ b/arch/powerpc/sysdev/xics/icp-opal.c
@@ -7,6 +7,7 @@
#include <linux/irq.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
#include <linux/cpu.h>
#include <linux/of.h>
@@ -183,7 +184,7 @@ static const struct icp_ops icp_opal_ops = {
#endif
};
-int icp_opal_init(void)
+int __init icp_opal_init(void)
{
struct device_node *np;
@@ -195,6 +196,7 @@ int icp_opal_init(void)
printk("XICS: Using OPAL ICP fallbacks\n");
+ of_node_put(np);
return 0;
}
diff --git a/arch/powerpc/sysdev/xics/ics-native.c b/arch/powerpc/sysdev/xics/ics-native.c
new file mode 100644
index 000000000000..112c8a1e8159
--- /dev/null
+++ b/arch/powerpc/sysdev/xics/ics-native.c
@@ -0,0 +1,254 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * ICS backend for native XICS interrupt sources.
+ *
+ * Copyright 2011 IBM Corp.
+ */
+
+//#define DEBUG
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/irq.h>
+#include <linux/smp.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/cpu.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/spinlock.h>
+#include <linux/msi.h>
+#include <linux/list.h>
+
+#include <asm/smp.h>
+#include <asm/machdep.h>
+#include <asm/irq.h>
+#include <asm/errno.h>
+#include <asm/xics.h>
+#include <asm/opal.h>
+#include <asm/firmware.h>
+
+struct ics_native {
+ struct ics ics;
+ struct device_node *node;
+ void __iomem *base;
+ u32 ibase;
+ u32 icount;
+};
+#define to_ics_native(_ics) container_of(_ics, struct ics_native, ics)
+
+static void __iomem *ics_native_xive(struct ics_native *in, unsigned int vec)
+{
+ return in->base + 0x800 + ((vec - in->ibase) << 2);
+}
+
+static void ics_native_unmask_irq(struct irq_data *d)
+{
+ unsigned int vec = (unsigned int)irqd_to_hwirq(d);
+ struct ics *ics = irq_data_get_irq_chip_data(d);
+ struct ics_native *in = to_ics_native(ics);
+ unsigned int server;
+
+ pr_devel("ics-native: unmask virq %d [hw 0x%x]\n", d->irq, vec);
+
+ if (vec < in->ibase || vec >= (in->ibase + in->icount))
+ return;
+
+ server = xics_get_irq_server(d->irq, irq_data_get_affinity_mask(d), 0);
+ out_be32(ics_native_xive(in, vec), (server << 8) | DEFAULT_PRIORITY);
+}
+
+static unsigned int ics_native_startup(struct irq_data *d)
+{
+#ifdef CONFIG_PCI_MSI
+ /*
+ * The generic MSI code returns with the interrupt disabled on the
+ * card, using the MSI mask bits. Firmware doesn't appear to unmask
+ * at that level, so we do it here by hand.
+ */
+ if (irq_data_get_msi_desc(d))
+ pci_msi_unmask_irq(d);
+#endif
+
+ /* unmask it */
+ ics_native_unmask_irq(d);
+ return 0;
+}
+
+static void ics_native_do_mask(struct ics_native *in, unsigned int vec)
+{
+ out_be32(ics_native_xive(in, vec), 0xff);
+}
+
+static void ics_native_mask_irq(struct irq_data *d)
+{
+ unsigned int vec = (unsigned int)irqd_to_hwirq(d);
+ struct ics *ics = irq_data_get_irq_chip_data(d);
+ struct ics_native *in = to_ics_native(ics);
+
+ pr_devel("ics-native: mask virq %d [hw 0x%x]\n", d->irq, vec);
+
+ if (vec < in->ibase || vec >= (in->ibase + in->icount))
+ return;
+ ics_native_do_mask(in, vec);
+}
+
+static int ics_native_set_affinity(struct irq_data *d,
+ const struct cpumask *cpumask,
+ bool force)
+{
+ unsigned int vec = (unsigned int)irqd_to_hwirq(d);
+ struct ics *ics = irq_data_get_irq_chip_data(d);
+ struct ics_native *in = to_ics_native(ics);
+ int server;
+ u32 xive;
+
+ if (vec < in->ibase || vec >= (in->ibase + in->icount))
+ return -EINVAL;
+
+ server = xics_get_irq_server(d->irq, cpumask, 1);
+ if (server == -1) {
+ pr_warn("%s: No online cpus in the mask %*pb for irq %d\n",
+ __func__, cpumask_pr_args(cpumask), d->irq);
+ return -1;
+ }
+
+ xive = in_be32(ics_native_xive(in, vec));
+ xive = (xive & 0xff) | (server << 8);
+ out_be32(ics_native_xive(in, vec), xive);
+
+ return IRQ_SET_MASK_OK;
+}
+
+static struct irq_chip ics_native_irq_chip = {
+ .name = "ICS",
+ .irq_startup = ics_native_startup,
+ .irq_mask = ics_native_mask_irq,
+ .irq_unmask = ics_native_unmask_irq,
+ .irq_eoi = NULL, /* Patched at init time */
+ .irq_set_affinity = ics_native_set_affinity,
+ .irq_set_type = xics_set_irq_type,
+ .irq_retrigger = xics_retrigger,
+};
+
+static int ics_native_check(struct ics *ics, unsigned int hw_irq)
+{
+ struct ics_native *in = to_ics_native(ics);
+
+ pr_devel("%s: hw_irq=0x%x\n", __func__, hw_irq);
+
+ if (hw_irq < in->ibase || hw_irq >= (in->ibase + in->icount))
+ return -EINVAL;
+
+ return 0;
+}
+
+static void ics_native_mask_unknown(struct ics *ics, unsigned long vec)
+{
+ struct ics_native *in = to_ics_native(ics);
+
+ if (vec < in->ibase || vec >= (in->ibase + in->icount))
+ return;
+
+ ics_native_do_mask(in, vec);
+}
+
+static long ics_native_get_server(struct ics *ics, unsigned long vec)
+{
+ struct ics_native *in = to_ics_native(ics);
+ u32 xive;
+
+ if (vec < in->ibase || vec >= (in->ibase + in->icount))
+ return -EINVAL;
+
+ xive = in_be32(ics_native_xive(in, vec));
+ return (xive >> 8) & 0xfff;
+}
+
+static int ics_native_host_match(struct ics *ics, struct device_node *node)
+{
+ struct ics_native *in = to_ics_native(ics);
+
+ return in->node == node;
+}
+
+static struct ics ics_native_template = {
+ .check = ics_native_check,
+ .mask_unknown = ics_native_mask_unknown,
+ .get_server = ics_native_get_server,
+ .host_match = ics_native_host_match,
+ .chip = &ics_native_irq_chip,
+};
+
+static int __init ics_native_add_one(struct device_node *np)
+{
+ struct ics_native *ics;
+ u32 ranges[2];
+ int rc, count;
+
+ ics = kzalloc(sizeof(struct ics_native), GFP_KERNEL);
+ if (!ics)
+ return -ENOMEM;
+ ics->node = of_node_get(np);
+ memcpy(&ics->ics, &ics_native_template, sizeof(struct ics));
+
+ ics->base = of_iomap(np, 0);
+ if (!ics->base) {
+ pr_err("Failed to map %pOFP\n", np);
+ rc = -ENOMEM;
+ goto fail;
+ }
+
+ count = of_property_count_u32_elems(np, "interrupt-ranges");
+ if (count < 2 || count & 1) {
+ pr_err("Failed to read interrupt-ranges of %pOFP\n", np);
+ rc = -EINVAL;
+ goto fail;
+ }
+ if (count > 2) {
+ pr_warn("ICS %pOFP has %d ranges, only one supported\n",
+ np, count >> 1);
+ }
+ rc = of_property_read_u32_array(np, "interrupt-ranges",
+ ranges, 2);
+ if (rc) {
+ pr_err("Failed to read interrupt-ranges of %pOFP\n", np);
+ goto fail;
+ }
+ ics->ibase = ranges[0];
+ ics->icount = ranges[1];
+
+ pr_info("ICS native initialized for sources %d..%d\n",
+ ics->ibase, ics->ibase + ics->icount - 1);
+
+ /* Register ourselves */
+ xics_register_ics(&ics->ics);
+
+ return 0;
+fail:
+ of_node_put(ics->node);
+ kfree(ics);
+ return rc;
+}
+
+int __init ics_native_init(void)
+{
+ struct device_node *ics;
+ bool found_one = false;
+
+ /* We need to patch our irq chip's EOI to point to the
+ * right ICP
+ */
+ ics_native_irq_chip.irq_eoi = icp_ops->eoi;
+
+ /* Find native ICS in the device-tree */
+ for_each_compatible_node(ics, NULL, "openpower,xics-sources") {
+ if (ics_native_add_one(ics) == 0)
+ found_one = true;
+ }
+
+ if (found_one)
+ pr_info("ICS native backend registered\n");
+
+ return found_one ? 0 : -ENODEV;
+}
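
For readers of the new backend above: ics_native_xive() assumes one 32-bit XIVE register per source, starting at offset 0x800 from the mapped block and indexed by (vec - ibase). A small worked example with invented numbers:

#include <linux/types.h>

/* With ibase = 16 and a source vector of 21:
 *   offset = 0x800 + ((21 - 16) << 2) = 0x814
 * so the server/priority word for that source lives at base + 0x814.
 */
static inline unsigned long example_xive_reg_offset(u32 vec, u32 ibase)
{
	return 0x800 + ((vec - ibase) << 2);
}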
diff --git a/arch/powerpc/sysdev/xics/ics-opal.c b/arch/powerpc/sysdev/xics/ics-opal.c
index 823f6c9664cd..6cfbb4fac7fb 100644
--- a/arch/powerpc/sysdev/xics/ics-opal.c
+++ b/arch/powerpc/sysdev/xics/ics-opal.c
@@ -18,7 +18,6 @@
#include <linux/spinlock.h>
#include <linux/msi.h>
-#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/irq.h>
@@ -62,17 +61,6 @@ static void ics_opal_unmask_irq(struct irq_data *d)
static unsigned int ics_opal_startup(struct irq_data *d)
{
-#ifdef CONFIG_PCI_MSI
- /*
- * The generic MSI code returns with the interrupt disabled on the
- * card, using the MSI mask bits. Firmware doesn't appear to unmask
- * at that level, so we do it here by hand.
- */
- if (irq_data_get_msi_desc(d))
- pci_msi_unmask_irq(d);
-#endif
-
- /* unmask it */
ics_opal_unmask_irq(d);
return 0;
}
@@ -133,7 +121,7 @@ static int ics_opal_set_affinity(struct irq_data *d,
}
server = ics_opal_mangle_server(wanted_server);
- pr_devel("ics-hal: set-affinity irq %d [hw 0x%x] server: 0x%x/0x%x\n",
+ pr_debug("ics-hal: set-affinity irq %d [hw 0x%x] server: 0x%x/0x%x\n",
d->irq, hw_irq, wanted_server, server);
rc = opal_set_xive(hw_irq, server, priority);
@@ -157,26 +145,13 @@ static struct irq_chip ics_opal_irq_chip = {
.irq_retrigger = xics_retrigger,
};
-static int ics_opal_map(struct ics *ics, unsigned int virq);
-static void ics_opal_mask_unknown(struct ics *ics, unsigned long vec);
-static long ics_opal_get_server(struct ics *ics, unsigned long vec);
-
static int ics_opal_host_match(struct ics *ics, struct device_node *node)
{
return 1;
}
-/* Only one global & state struct ics */
-static struct ics ics_hal = {
- .map = ics_opal_map,
- .mask_unknown = ics_opal_mask_unknown,
- .get_server = ics_opal_get_server,
- .host_match = ics_opal_host_match,
-};
-
-static int ics_opal_map(struct ics *ics, unsigned int virq)
+static int ics_opal_check(struct ics *ics, unsigned int hw_irq)
{
- unsigned int hw_irq = (unsigned int)virq_to_hw(virq);
int64_t rc;
__be16 server;
int8_t priority;
@@ -189,9 +164,6 @@ static int ics_opal_map(struct ics *ics, unsigned int virq)
if (rc != OPAL_SUCCESS)
return -ENXIO;
- irq_set_chip_and_handler(virq, &ics_opal_irq_chip, handle_fasteoi_irq);
- irq_set_chip_data(virq, &ics_hal);
-
return 0;
}
@@ -222,6 +194,15 @@ static long ics_opal_get_server(struct ics *ics, unsigned long vec)
return ics_opal_unmangle_server(be16_to_cpu(server));
}
+/* Only one global & state struct ics */
+static struct ics ics_hal = {
+ .check = ics_opal_check,
+ .mask_unknown = ics_opal_mask_unknown,
+ .get_server = ics_opal_get_server,
+ .host_match = ics_opal_host_match,
+ .chip = &ics_opal_irq_chip,
+};
+
int __init ics_opal_init(void)
{
if (!firmware_has_feature(FW_FEATURE_OPAL))
diff --git a/arch/powerpc/sysdev/xics/ics-rtas.c b/arch/powerpc/sysdev/xics/ics-rtas.c
index 6aabc74688a6..f8320f8e5bc7 100644
--- a/arch/powerpc/sysdev/xics/ics-rtas.c
+++ b/arch/powerpc/sysdev/xics/ics-rtas.c
@@ -10,7 +10,6 @@
#include <linux/spinlock.h>
#include <linux/msi.h>
-#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/irq.h>
@@ -24,19 +23,6 @@ static int ibm_set_xive;
static int ibm_int_on;
static int ibm_int_off;
-static int ics_rtas_map(struct ics *ics, unsigned int virq);
-static void ics_rtas_mask_unknown(struct ics *ics, unsigned long vec);
-static long ics_rtas_get_server(struct ics *ics, unsigned long vec);
-static int ics_rtas_host_match(struct ics *ics, struct device_node *node);
-
-/* Only one global & state struct ics */
-static struct ics ics_rtas = {
- .map = ics_rtas_map,
- .mask_unknown = ics_rtas_mask_unknown,
- .get_server = ics_rtas_get_server,
- .host_match = ics_rtas_host_match,
-};
-
static void ics_rtas_unmask_irq(struct irq_data *d)
{
unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
@@ -70,15 +56,6 @@ static void ics_rtas_unmask_irq(struct irq_data *d)
static unsigned int ics_rtas_startup(struct irq_data *d)
{
-#ifdef CONFIG_PCI_MSI
- /*
- * The generic MSI code returns with the interrupt disabled on the
- * card, using the MSI mask bits. Firmware doesn't appear to unmask
- * at that level, so we do it here by hand.
- */
- if (irq_data_get_msi_desc(d))
- pci_msi_unmask_irq(d);
-#endif
/* unmask it */
ics_rtas_unmask_irq(d);
return 0;
@@ -146,6 +123,9 @@ static int ics_rtas_set_affinity(struct irq_data *d,
return -1;
}
+ pr_debug("%s: irq %d [hw 0x%x] server: 0x%x\n", __func__, d->irq,
+ hw_irq, irq_server);
+
status = rtas_call(ibm_set_xive, 3, 1, NULL,
hw_irq, irq_server, xics_status[1]);
@@ -169,9 +149,8 @@ static struct irq_chip ics_rtas_irq_chip = {
.irq_retrigger = xics_retrigger,
};
-static int ics_rtas_map(struct ics *ics, unsigned int virq)
+static int ics_rtas_check(struct ics *ics, unsigned int hw_irq)
{
- unsigned int hw_irq = (unsigned int)virq_to_hw(virq);
int status[2];
int rc;
@@ -183,9 +162,6 @@ static int ics_rtas_map(struct ics *ics, unsigned int virq)
if (rc)
return -ENXIO;
- irq_set_chip_and_handler(virq, &ics_rtas_irq_chip, handle_fasteoi_irq);
- irq_set_chip_data(virq, &ics_rtas);
-
return 0;
}
@@ -213,6 +189,15 @@ static int ics_rtas_host_match(struct ics *ics, struct device_node *node)
return !of_device_is_compatible(node, "chrp,iic");
}
+/* Only one global & state struct ics */
+static struct ics ics_rtas = {
+ .check = ics_rtas_check,
+ .mask_unknown = ics_rtas_mask_unknown,
+ .get_server = ics_rtas_get_server,
+ .host_match = ics_rtas_host_match,
+ .chip = &ics_rtas_irq_chip,
+};
+
__init int ics_rtas_init(void)
{
ibm_get_xive = rtas_token("ibm,get-xive");
diff --git a/arch/powerpc/sysdev/xics/xics-common.c b/arch/powerpc/sysdev/xics/xics-common.c
index 7e4305c01bac..d3a4156e8788 100644
--- a/arch/powerpc/sysdev/xics/xics-common.c
+++ b/arch/powerpc/sysdev/xics/xics-common.c
@@ -6,6 +6,7 @@
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/irq.h>
+#include <linux/irqdomain.h>
#include <linux/debugfs.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
@@ -17,7 +18,6 @@
#include <linux/spinlock.h>
#include <linux/delay.h>
-#include <asm/prom.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/machdep.h>
@@ -38,7 +38,7 @@ DEFINE_PER_CPU(struct xics_cppr, xics_cppr);
struct irq_domain *xics_host;
-static LIST_HEAD(ics_list);
+static struct ics *xics_ics;
void xics_update_irq_servers(void)
{
@@ -111,18 +111,17 @@ void xics_setup_cpu(void)
void xics_mask_unknown_vec(unsigned int vec)
{
- struct ics *ics;
-
pr_err("Interrupt 0x%x (real) is invalid, disabling it.\n", vec);
- list_for_each_entry(ics, &ics_list, link)
- ics->mask_unknown(ics, vec);
+ if (WARN_ON(!xics_ics))
+ return;
+ xics_ics->mask_unknown(xics_ics, vec);
}
#ifdef CONFIG_SMP
-static void xics_request_ipi(void)
+static void __init xics_request_ipi(void)
{
unsigned int ipi;
@@ -133,7 +132,7 @@ static void xics_request_ipi(void)
* IPIs are marked IRQF_PERCPU. The handler was set in map.
*/
BUG_ON(request_irq(ipi, icp_ops->ipi_action,
- IRQF_PERCPU | IRQF_NO_THREAD, "IPI", NULL));
+ IRQF_NO_DEBUG | IRQF_PERCPU | IRQF_NO_THREAD, "IPI", NULL));
}
void __init xics_smp_probe(void)
@@ -147,7 +146,7 @@ void __init xics_smp_probe(void)
#endif /* CONFIG_SMP */
-void xics_teardown_cpu(void)
+noinstr void xics_teardown_cpu(void)
{
struct xics_cppr *os_cppr = this_cpu_ptr(&xics_cppr);
@@ -160,7 +159,7 @@ void xics_teardown_cpu(void)
icp_ops->teardown_cpu();
}
-void xics_kexec_teardown_cpu(int secondary)
+noinstr void xics_kexec_teardown_cpu(int secondary)
{
xics_teardown_cpu();
@@ -184,6 +183,8 @@ void xics_migrate_irqs_away(void)
unsigned int irq, virq;
struct irq_desc *desc;
+ pr_debug("%s: CPU %u\n", __func__, cpu);
+
/* If we used to be the default server, move to the new "boot_cpuid" */
if (hw_cpu == xics_default_server)
xics_update_irq_servers();
@@ -198,17 +199,19 @@ void xics_migrate_irqs_away(void)
struct irq_chip *chip;
long server;
unsigned long flags;
- struct ics *ics;
+ struct irq_data *irqd;
/* We can't set affinity on ISA interrupts */
- if (virq < NUM_ISA_INTERRUPTS)
+ if (virq < NR_IRQS_LEGACY)
continue;
/* We only need to migrate enabled IRQS */
if (!desc->action)
continue;
- if (desc->irq_data.domain != xics_host)
+ /* We need a mapping in the XICS IRQ domain */
+ irqd = irq_domain_get_irq_data(xics_host, virq);
+ if (!irqd)
continue;
- irq = desc->irq_data.hwirq;
+ irq = irqd_to_hwirq(irqd);
/* We need to get IPIs still. */
if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS)
continue;
@@ -219,13 +222,10 @@ void xics_migrate_irqs_away(void)
raw_spin_lock_irqsave(&desc->lock, flags);
/* Locate interrupt server */
- server = -1;
- ics = irq_desc_get_chip_data(desc);
- if (ics)
- server = ics->get_server(ics, irq);
+ server = xics_ics->get_server(xics_ics, irq);
if (server < 0) {
- printk(KERN_ERR "%s: Can't find server for irq %d\n",
- __func__, irq);
+ pr_err("%s: Can't find server for irq %d/%x\n",
+ __func__, virq, irq);
goto unlock;
}
@@ -307,13 +307,9 @@ int xics_get_irq_server(unsigned int virq, const struct cpumask *cpumask,
static int xics_host_match(struct irq_domain *h, struct device_node *node,
enum irq_domain_bus_token bus_token)
{
- struct ics *ics;
-
- list_for_each_entry(ics, &ics_list, link)
- if (ics->host_match(ics, node))
- return 1;
-
- return 0;
+ if (WARN_ON(!xics_ics))
+ return 0;
+ return xics_ics->host_match(xics_ics, node) ? 1 : 0;
}
/* Dummies */
@@ -327,12 +323,10 @@ static struct irq_chip xics_ipi_chip = {
.irq_unmask = xics_ipi_unmask,
};
-static int xics_host_map(struct irq_domain *h, unsigned int virq,
- irq_hw_number_t hw)
+static int xics_host_map(struct irq_domain *domain, unsigned int virq,
+ irq_hw_number_t hwirq)
{
- struct ics *ics;
-
- pr_devel("xics: map virq %d, hwirq 0x%lx\n", virq, hw);
+ pr_devel("xics: map virq %d, hwirq 0x%lx\n", virq, hwirq);
/*
* Mark interrupts as edge sensitive by default so that resend
@@ -342,18 +336,23 @@ static int xics_host_map(struct irq_domain *h, unsigned int virq,
irq_clear_status_flags(virq, IRQ_LEVEL);
/* Don't call into ICS for IPIs */
- if (hw == XICS_IPI) {
+ if (hwirq == XICS_IPI) {
irq_set_chip_and_handler(virq, &xics_ipi_chip,
handle_percpu_irq);
return 0;
}
- /* Let the ICS setup the chip data */
- list_for_each_entry(ics, &ics_list, link)
- if (ics->map(ics, virq) == 0)
- return 0;
+ if (WARN_ON(!xics_ics))
+ return -EINVAL;
- return -EINVAL;
+ if (xics_ics->check(xics_ics, hwirq))
+ return -EINVAL;
+
+ /* Let the ICS be the chip data for the XICS domain. For ICS native */
+ irq_domain_set_info(domain, virq, hwirq, xics_ics->chip,
+ xics_ics, handle_fasteoi_irq, NULL, NULL);
+
+ return 0;
}
static int xics_host_xlate(struct irq_domain *h, struct device_node *ct,
@@ -412,22 +411,76 @@ int xics_retrigger(struct irq_data *data)
return 0;
}
+#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
+static int xics_host_domain_translate(struct irq_domain *d, struct irq_fwspec *fwspec,
+ unsigned long *hwirq, unsigned int *type)
+{
+ return xics_host_xlate(d, to_of_node(fwspec->fwnode), fwspec->param,
+ fwspec->param_count, hwirq, type);
+}
+
+static int xics_host_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *arg)
+{
+ struct irq_fwspec *fwspec = arg;
+ irq_hw_number_t hwirq;
+ unsigned int type = IRQ_TYPE_NONE;
+ int i, rc;
+
+ rc = xics_host_domain_translate(domain, fwspec, &hwirq, &type);
+ if (rc)
+ return rc;
+
+ pr_debug("%s %d/%lx #%d\n", __func__, virq, hwirq, nr_irqs);
+
+ for (i = 0; i < nr_irqs; i++)
+ irq_domain_set_info(domain, virq + i, hwirq + i, xics_ics->chip,
+ xics_ics, handle_fasteoi_irq, NULL, NULL);
+
+ return 0;
+}
+
+static void xics_host_domain_free(struct irq_domain *domain,
+ unsigned int virq, unsigned int nr_irqs)
+{
+ pr_debug("%s %d #%d\n", __func__, virq, nr_irqs);
+}
+#endif
+
static const struct irq_domain_ops xics_host_ops = {
+#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
+ .alloc = xics_host_domain_alloc,
+ .free = xics_host_domain_free,
+ .translate = xics_host_domain_translate,
+#endif
.match = xics_host_match,
.map = xics_host_map,
.xlate = xics_host_xlate,
};
-static void __init xics_init_host(void)
+static int __init xics_allocate_domain(void)
{
- xics_host = irq_domain_add_tree(NULL, &xics_host_ops, NULL);
- BUG_ON(xics_host == NULL);
+ struct fwnode_handle *fn;
+
+ fn = irq_domain_alloc_named_fwnode("XICS");
+ if (!fn)
+ return -ENOMEM;
+
+ xics_host = irq_domain_create_tree(fn, &xics_host_ops, NULL);
+ if (!xics_host) {
+ irq_domain_free_fwnode(fn);
+ return -ENOMEM;
+ }
+
irq_set_default_host(xics_host);
+ return 0;
}
void __init xics_register_ics(struct ics *ics)
{
- list_add(&ics->link, &ics_list);
+ if (WARN_ONCE(xics_ics, "XICS: Source Controller is already defined !"))
+ return;
+ xics_ics = ics;
}
static void __init xics_get_server_size(void)
@@ -477,11 +530,15 @@ void __init xics_init(void)
if (rc < 0)
rc = ics_opal_init();
if (rc < 0)
+ rc = ics_native_init();
+ if (rc < 0)
pr_warn("XICS: Cannot find a Source Controller !\n");
/* Initialize common bits */
xics_get_server_size();
xics_update_irq_servers();
- xics_init_host();
+ rc = xics_allocate_domain();
+ if (rc < 0)
+ pr_err("XICS: Failed to create IRQ domain");
xics_setup_cpu();
}
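
The xics-common.c rework above replaces the list of ICS backends with a single xics_ics pointer, filled once via xics_register_ics(); a backend now supplies its irq_chip and a check() hook instead of a map() hook. A condensed sketch of what a backend registers, matching the struct ics fields used by the patch (all example_ names are invented, and the stub bodies are placeholders):

#include <linux/init.h>
#include <linux/irq.h>
#include <asm/xics.h>

static struct irq_chip example_ics_chip = {
	.name	= "EXAMPLE-ICS",
	/* .irq_mask/.irq_unmask/.irq_eoi filled in by a real backend */
};

static int example_ics_check(struct ics *ics, unsigned int hw_irq)
{
	return 0;	/* accept every hw source in this sketch */
}

static void example_ics_mask_unknown(struct ics *ics, unsigned long vec) { }

static long example_ics_get_server(struct ics *ics, unsigned long vec)
{
	return 0;	/* report the boot cpu server in this sketch */
}

static int example_ics_host_match(struct ics *ics, struct device_node *node)
{
	return 1;
}

static struct ics example_ics = {
	.check		= example_ics_check,
	.mask_unknown	= example_ics_mask_unknown,
	.get_server	= example_ics_get_server,
	.host_match	= example_ics_host_match,
	.chip		= &example_ics_chip,
};

static int __init example_ics_init(void)
{
	xics_register_ics(&example_ics);	/* only one ICS may register */
	return 0;
}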
diff --git a/arch/powerpc/sysdev/xilinx_intc.c b/arch/powerpc/sysdev/xilinx_intc.c
deleted file mode 100644
index 4a86dcff3fcd..000000000000
--- a/arch/powerpc/sysdev/xilinx_intc.c
+++ /dev/null
@@ -1,88 +0,0 @@
-/*
- * Interrupt controller driver for Xilinx Virtex FPGAs
- *
- * Copyright (C) 2007 Secret Lab Technologies Ltd.
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
- *
- */
-
-/*
- * This is a driver for the interrupt controller typically found in
- * Xilinx Virtex FPGA designs.
- *
- * The interrupt sense levels are hard coded into the FPGA design with
- * typically a 1:1 relationship between irq lines and devices (no shared
- * irq lines). Therefore, this driver does not attempt to handle edge
- * and level interrupts differently.
- */
-#undef DEBUG
-
-#include <linux/kernel.h>
-#include <linux/irq.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_irq.h>
-#include <asm/io.h>
-#include <asm/processor.h>
-#include <asm/i8259.h>
-#include <asm/irq.h>
-#include <linux/irqchip.h>
-
-#if defined(CONFIG_PPC_I8259)
-/*
- * Support code for cascading to 8259 interrupt controllers
- */
-static void xilinx_i8259_cascade(struct irq_desc *desc)
-{
- struct irq_chip *chip = irq_desc_get_chip(desc);
- unsigned int cascade_irq = i8259_irq();
-
- if (cascade_irq)
- generic_handle_irq(cascade_irq);
-
- /* Let xilinx_intc end the interrupt */
- chip->irq_unmask(&desc->irq_data);
-}
-
-static void __init xilinx_i8259_setup_cascade(void)
-{
- struct device_node *cascade_node;
- int cascade_irq;
-
- /* Initialize i8259 controller */
- cascade_node = of_find_compatible_node(NULL, NULL, "chrp,iic");
- if (!cascade_node)
- return;
-
- cascade_irq = irq_of_parse_and_map(cascade_node, 0);
- if (!cascade_irq) {
- pr_err("virtex_ml510: Failed to map cascade interrupt\n");
- goto out;
- }
-
- i8259_init(cascade_node, 0);
- irq_set_chained_handler(cascade_irq, xilinx_i8259_cascade);
-
- /* Program irq 7 (usb/audio), 14/15 (ide) to level sensitive */
- /* This looks like a dirty hack to me --gcl */
- outb(0xc0, 0x4d0);
- outb(0xc0, 0x4d1);
-
- out:
- of_node_put(cascade_node);
-}
-#else
-static inline void xilinx_i8259_setup_cascade(void) { return; }
-#endif /* defined(CONFIG_PPC_I8259) */
-
-/*
- * Initialize master Xilinx interrupt controller
- */
-void __init xilinx_intc_init_tree(void)
-{
- irqchip_init();
- xilinx_i8259_setup_cascade();
-}
diff --git a/arch/powerpc/sysdev/xilinx_pci.c b/arch/powerpc/sysdev/xilinx_pci.c
deleted file mode 100644
index fea5667699ed..000000000000
--- a/arch/powerpc/sysdev/xilinx_pci.c
+++ /dev/null
@@ -1,132 +0,0 @@
-/*
- * PCI support for Xilinx plbv46_pci soft-core which can be used on
- * Xilinx Virtex ML410 / ML510 boards.
- *
- * Copyright 2009 Roderick Colenbrander
- * Copyright 2009 Secret Lab Technologies Ltd.
- *
- * The pci bridge fixup code was copied from ppc4xx_pci.c and was written
- * by Benjamin Herrenschmidt.
- * Copyright 2007 Ben. Herrenschmidt <benh@kernel.crashing.org>, IBM Corp.
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
- */
-
-#include <linux/ioport.h>
-#include <linux/of.h>
-#include <linux/pci.h>
-#include <mm/mmu_decl.h>
-#include <asm/io.h>
-#include <asm/xilinx_pci.h>
-
-#define XPLB_PCI_ADDR 0x10c
-#define XPLB_PCI_DATA 0x110
-#define XPLB_PCI_BUS 0x114
-
-#define PCI_HOST_ENABLE_CMD PCI_COMMAND_SERR | PCI_COMMAND_PARITY | PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY
-
-static const struct of_device_id xilinx_pci_match[] = {
- { .compatible = "xlnx,plbv46-pci-1.03.a", },
- {}
-};
-
-/**
- * xilinx_pci_fixup_bridge - Block Xilinx PHB configuration.
- */
-static void xilinx_pci_fixup_bridge(struct pci_dev *dev)
-{
- struct pci_controller *hose;
- int i;
-
- if (dev->devfn || dev->bus->self)
- return;
-
- hose = pci_bus_to_host(dev->bus);
- if (!hose)
- return;
-
- if (!of_match_node(xilinx_pci_match, hose->dn))
- return;
-
- /* Hide the PCI host BARs from the kernel as their content doesn't
- * fit well in the resource management
- */
- for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
- dev->resource[i].start = 0;
- dev->resource[i].end = 0;
- dev->resource[i].flags = 0;
- }
-
- dev_info(&dev->dev, "Hiding Xilinx plb-pci host bridge resources %s\n",
- pci_name(dev));
-}
-DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, xilinx_pci_fixup_bridge);
-
-/**
- * xilinx_pci_exclude_device - Don't do config access for non-root bus
- *
- * This is a hack. Config access to any bus other than bus 0 does not
- * currently work on the ML510 so we prevent it here.
- */
-static int
-xilinx_pci_exclude_device(struct pci_controller *hose, u_char bus, u8 devfn)
-{
- return (bus != 0);
-}
-
-/**
- * xilinx_pci_init - Find and register a Xilinx PCI host bridge
- */
-void __init xilinx_pci_init(void)
-{
- struct pci_controller *hose;
- struct resource r;
- void __iomem *pci_reg;
- struct device_node *pci_node;
-
- pci_node = of_find_matching_node(NULL, xilinx_pci_match);
- if(!pci_node)
- return;
-
- if (of_address_to_resource(pci_node, 0, &r)) {
- pr_err("xilinx-pci: cannot resolve base address\n");
- return;
- }
-
- hose = pcibios_alloc_controller(pci_node);
- if (!hose) {
- pr_err("xilinx-pci: pcibios_alloc_controller() failed\n");
- return;
- }
-
- /* Setup config space */
- setup_indirect_pci(hose, r.start + XPLB_PCI_ADDR,
- r.start + XPLB_PCI_DATA,
- PPC_INDIRECT_TYPE_SET_CFG_TYPE);
-
- /* According to the xilinx plbv46_pci documentation the soft-core starts
- * a self-init when the bus master enable bit is set. Without this bit
- * set the pci bus can't be scanned.
- */
- early_write_config_word(hose, 0, 0, PCI_COMMAND, PCI_HOST_ENABLE_CMD);
-
- /* Set the max latency timer to 255 */
- early_write_config_byte(hose, 0, 0, PCI_LATENCY_TIMER, 0xff);
-
- /* Set the max bus number to 255 */
- pci_reg = of_iomap(pci_node, 0);
- out_8(pci_reg + XPLB_PCI_BUS, 0xff);
- iounmap(pci_reg);
-
- /* Nothing past the root bridge is working right now. By default
- * exclude config access to anything except bus 0 */
- if (!ppc_md.pci_exclude_device)
- ppc_md.pci_exclude_device = xilinx_pci_exclude_device;
-
- /* Register the host bridge with the linux kernel! */
- pci_process_bridge_OF_ranges(hose, pci_node, 1);
-
- pr_info("xilinx-pci: Registered PCI host bridge\n");
-}
diff --git a/arch/powerpc/sysdev/xive/common.c b/arch/powerpc/sysdev/xive/common.c
index 9651ca061828..a289cb97c1d7 100644
--- a/arch/powerpc/sysdev/xive/common.c
+++ b/arch/powerpc/sysdev/xive/common.c
@@ -9,6 +9,7 @@
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/irq.h>
+#include <linux/irqdomain.h>
#include <linux/debugfs.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
@@ -19,8 +20,8 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/msi.h>
+#include <linux/vmalloc.h>
-#include <asm/prom.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/machdep.h>
@@ -61,22 +62,37 @@ static const struct xive_ops *xive_ops;
static struct irq_domain *xive_irq_domain;
#ifdef CONFIG_SMP
-/* The IPIs all use the same logical irq number */
-static u32 xive_ipi_irq;
+/* The IPIs use the same logical irq number when on the same chip */
+static struct xive_ipi_desc {
+ unsigned int irq;
+ char name[16];
+ atomic_t started;
+} *xive_ipis;
+
+/*
+ * Use early_cpu_to_node() for hot-plugged CPUs
+ */
+static unsigned int xive_ipi_cpu_to_irq(unsigned int cpu)
+{
+ return xive_ipis[early_cpu_to_node(cpu)].irq;
+}
#endif
/* Xive state for each CPU */
static DEFINE_PER_CPU(struct xive_cpu *, xive_cpu);
+/* An invalid CPU target */
+#define XIVE_INVALID_TARGET (-1)
+
/*
- * A "disabled" interrupt should never fire, to catch problems
- * we set its logical number to this
+ * Global toggle to switch on/off StoreEOI
*/
-#define XIVE_BAD_IRQ 0x7fffffff
-#define XIVE_MAX_IRQ (XIVE_BAD_IRQ - 1)
+static bool xive_store_eoi = true;
-/* An invalid CPU target */
-#define XIVE_INVALID_TARGET (-1)
+static bool xive_is_store_eoi(struct xive_irq_data *xd)
+{
+ return xd->flags & XIVE_IRQ_FLAG_STORE_EOI && xive_store_eoi;
+}
/*
* Read the next entry in a queue, return its content if it's valid
@@ -202,9 +218,8 @@ static notrace u8 xive_esb_read(struct xive_irq_data *xd, u32 offset)
{
u64 val;
- /* Handle HW errata */
- if (xd->flags & XIVE_IRQ_FLAG_SHIFT_BUG)
- offset |= offset << 4;
+ if (offset == XIVE_ESB_SET_PQ_10 && xive_is_store_eoi(xd))
+ offset |= XIVE_ESB_LD_ST_MO;
if ((xd->flags & XIVE_IRQ_FLAG_H_INT_ESB) && xive_ops->esb_rw)
val = xive_ops->esb_rw(xd->hw_irq, offset, 0, 0);
@@ -216,16 +231,27 @@ static notrace u8 xive_esb_read(struct xive_irq_data *xd, u32 offset)
static void xive_esb_write(struct xive_irq_data *xd, u32 offset, u64 data)
{
- /* Handle HW errata */
- if (xd->flags & XIVE_IRQ_FLAG_SHIFT_BUG)
- offset |= offset << 4;
-
if ((xd->flags & XIVE_IRQ_FLAG_H_INT_ESB) && xive_ops->esb_rw)
xive_ops->esb_rw(xd->hw_irq, offset, data, 1);
else
out_be64(xd->eoi_mmio + offset, data);
}
+#if defined(CONFIG_XMON) || defined(CONFIG_DEBUG_FS)
+static void xive_irq_data_dump(struct xive_irq_data *xd, char *buffer, size_t size)
+{
+ u64 val = xive_esb_read(xd, XIVE_ESB_GET);
+
+ snprintf(buffer, size, "flags=%c%c%c PQ=%c%c 0x%016llx 0x%016llx",
+ xive_is_store_eoi(xd) ? 'S' : ' ',
+ xd->flags & XIVE_IRQ_FLAG_LSI ? 'L' : ' ',
+ xd->flags & XIVE_IRQ_FLAG_H_INT_ESB ? 'H' : ' ',
+ val & XIVE_ESB_VAL_P ? 'P' : '-',
+ val & XIVE_ESB_VAL_Q ? 'Q' : '-',
+ xd->trig_page, xd->eoi_page);
+}
+#endif
+
#ifdef CONFIG_XMON
static notrace void xive_dump_eq(const char *name, struct xive_q *q)
{
@@ -251,11 +277,10 @@ notrace void xmon_xive_do_dump(int cpu)
#ifdef CONFIG_SMP
{
- u64 val = xive_esb_read(&xc->ipi_data, XIVE_ESB_GET);
+ char buffer[128];
- xmon_printf("IPI=0x%08x PQ=%c%c ", xc->hw_ipi,
- val & XIVE_ESB_VAL_P ? 'P' : '-',
- val & XIVE_ESB_VAL_Q ? 'Q' : '-');
+ xive_irq_data_dump(&xc->ipi_data, buffer, sizeof(buffer));
+ xmon_printf("IPI=0x%08x %s", xc->hw_ipi, buffer);
}
#endif
xive_dump_eq("EQ", &xc->queue[xive_irq_priority]);
@@ -263,6 +288,13 @@ notrace void xmon_xive_do_dump(int cpu)
xmon_printf("\n");
}
+static struct irq_data *xive_get_irq_data(u32 hw_irq)
+{
+ unsigned int irq = irq_find_mapping(xive_irq_domain, hw_irq);
+
+ return irq ? irq_get_irq_data(irq) : NULL;
+}
+
int xmon_xive_get_irq_config(u32 hw_irq, struct irq_data *d)
{
int rc;
@@ -279,19 +311,34 @@ int xmon_xive_get_irq_config(u32 hw_irq, struct irq_data *d)
xmon_printf("IRQ 0x%08x : target=0x%x prio=%02x lirq=0x%x ",
hw_irq, target, prio, lirq);
+ if (!d)
+ d = xive_get_irq_data(hw_irq);
+
if (d) {
- struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
- u64 val = xive_esb_read(xd, XIVE_ESB_GET);
+ char buffer[128];
- xmon_printf("PQ=%c%c",
- val & XIVE_ESB_VAL_P ? 'P' : '-',
- val & XIVE_ESB_VAL_Q ? 'Q' : '-');
+ xive_irq_data_dump(irq_data_get_irq_handler_data(d),
+ buffer, sizeof(buffer));
+ xmon_printf("%s", buffer);
}
xmon_printf("\n");
return 0;
}
+void xmon_xive_get_irq_all(void)
+{
+ unsigned int i;
+ struct irq_desc *desc;
+
+ for_each_irq_desc(i, desc) {
+ struct irq_data *d = irq_domain_get_irq_data(xive_irq_domain, i);
+
+ if (d)
+ xmon_xive_get_irq_config(irqd_to_hwirq(d), d);
+ }
+}
+
#endif /* CONFIG_XMON */
static unsigned int xive_get_irq(void)
@@ -351,50 +398,40 @@ static void xive_do_queue_eoi(struct xive_cpu *xc)
* EOI an interrupt at the source. There are several methods
* to do this depending on the HW version and source type
*/
-static void xive_do_source_eoi(u32 hw_irq, struct xive_irq_data *xd)
+static void xive_do_source_eoi(struct xive_irq_data *xd)
{
+ u8 eoi_val;
+
xd->stale_p = false;
+
/* If the XIVE supports the new "store EOI facility, use it */
- if (xd->flags & XIVE_IRQ_FLAG_STORE_EOI)
+ if (xive_is_store_eoi(xd)) {
xive_esb_write(xd, XIVE_ESB_STORE_EOI, 0);
- else if (hw_irq && xd->flags & XIVE_IRQ_FLAG_EOI_FW) {
- /*
- * The FW told us to call it. This happens for some
- * interrupt sources that need additional HW whacking
- * beyond the ESB manipulation. For example LPC interrupts
- * on P9 DD1.0 needed a latch to be clared in the LPC bridge
- * itself. The Firmware will take care of it.
- */
- if (WARN_ON_ONCE(!xive_ops->eoi))
- return;
- xive_ops->eoi(hw_irq);
- } else {
- u8 eoi_val;
+ return;
+ }
- /*
- * Otherwise for EOI, we use the special MMIO that does
- * a clear of both P and Q and returns the old Q,
- * except for LSIs where we use the "EOI cycle" special
- * load.
- *
- * This allows us to then do a re-trigger if Q was set
- * rather than synthesizing an interrupt in software
- *
- * For LSIs the HW EOI cycle is used rather than PQ bits,
- * as they are automatically re-triggred in HW when still
- * pending.
- */
- if (xd->flags & XIVE_IRQ_FLAG_LSI)
- xive_esb_read(xd, XIVE_ESB_LOAD_EOI);
- else {
- eoi_val = xive_esb_read(xd, XIVE_ESB_SET_PQ_00);
- DBG_VERBOSE("eoi_val=%x\n", eoi_val);
-
- /* Re-trigger if needed */
- if ((eoi_val & XIVE_ESB_VAL_Q) && xd->trig_mmio)
- out_be64(xd->trig_mmio, 0);
- }
+ /*
+ * For LSIs, we use the "EOI cycle" special load rather than
+ * PQ bits, as they are automatically re-triggered in HW when
+ * still pending.
+ */
+ if (xd->flags & XIVE_IRQ_FLAG_LSI) {
+ xive_esb_read(xd, XIVE_ESB_LOAD_EOI);
+ return;
}
+
+ /*
+ * Otherwise, we use the special MMIO that does a clear of
+ * both P and Q and returns the old Q. This allows us to then
+ * do a re-trigger if Q was set rather than synthesizing an
+ * interrupt in software
+ */
+ eoi_val = xive_esb_read(xd, XIVE_ESB_SET_PQ_00);
+ DBG_VERBOSE("eoi_val=%x\n", eoi_val);
+
+ /* Re-trigger if needed */
+ if ((eoi_val & XIVE_ESB_VAL_Q) && xd->trig_mmio)
+ out_be64(xd->trig_mmio, 0);
}
/* irq_chip eoi callback, called with irq descriptor lock held */
@@ -411,8 +448,8 @@ static void xive_irq_eoi(struct irq_data *d)
* been passed-through to a KVM guest
*/
if (!irqd_irq_disabled(d) && !irqd_is_forwarded_to_vcpu(d) &&
- !(xd->flags & XIVE_IRQ_NO_EOI))
- xive_do_source_eoi(irqd_to_hwirq(d), xd);
+ !(xd->flags & XIVE_IRQ_FLAG_NO_EOI))
+ xive_do_source_eoi(xd);
else
xd->stale_p = true;
@@ -427,15 +464,15 @@ static void xive_irq_eoi(struct irq_data *d)
}
/*
- * Helper used to mask and unmask an interrupt source. This
- * is only called for normal interrupts that do not require
- * masking/unmasking via firmware.
+ * Helper used to mask and unmask an interrupt source.
*/
static void xive_do_source_set_mask(struct xive_irq_data *xd,
bool mask)
{
u64 val;
+ pr_debug("%s: HW 0x%x %smask\n", __func__, xd->hw_irq, mask ? "" : "un");
+
/*
* If the interrupt had P set, it may be in a queue.
*
@@ -597,18 +634,8 @@ static unsigned int xive_irq_startup(struct irq_data *d)
xd->saved_p = false;
xd->stale_p = false;
- pr_devel("xive_irq_startup: irq %d [0x%x] data @%p\n",
- d->irq, hw_irq, d);
-#ifdef CONFIG_PCI_MSI
- /*
- * The generic MSI code returns with the interrupt disabled on the
- * card, using the MSI mask bits. Firmware doesn't appear to unmask
- * at that level, so we do it here by hand.
- */
- if (irq_data_get_msi_desc(d))
- pci_msi_unmask_irq(d);
-#endif
+ pr_debug("%s: irq %d [0x%x] data @%p\n", __func__, d->irq, hw_irq, d);
/* Pick a target */
target = xive_pick_irq_target(d, irq_data_get_affinity_mask(d));
@@ -649,8 +676,7 @@ static void xive_irq_shutdown(struct irq_data *d)
struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
- pr_devel("xive_irq_shutdown: irq %d [0x%x] data @%p\n",
- d->irq, hw_irq, d);
+ pr_debug("%s: irq %d [0x%x] data @%p\n", __func__, d->irq, hw_irq, d);
if (WARN_ON(xd->target == XIVE_INVALID_TARGET))
return;
@@ -674,21 +700,7 @@ static void xive_irq_unmask(struct irq_data *d)
{
struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
- pr_devel("xive_irq_unmask: irq %d data @%p\n", d->irq, xd);
-
- /*
- * This is a workaround for PCI LSI problems on P9, for
- * these, we call FW to set the mask. The problems might
- * be fixed by P9 DD2.0, if that is the case, firmware
- * will no longer set that flag.
- */
- if (xd->flags & XIVE_IRQ_FLAG_MASK_FW) {
- unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
- xive_ops->configure_irq(hw_irq,
- get_hard_smp_processor_id(xd->target),
- xive_irq_priority, d->irq);
- return;
- }
+ pr_debug("%s: irq %d data @%p\n", __func__, d->irq, xd);
xive_do_source_set_mask(xd, false);
}
@@ -697,21 +709,7 @@ static void xive_irq_mask(struct irq_data *d)
{
struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
- pr_devel("xive_irq_mask: irq %d data @%p\n", d->irq, xd);
-
- /*
- * This is a workaround for PCI LSI problems on P9, for
- * these, we call OPAL to set the mask. The problems might
- * be fixed by P9 DD2.0, if that is the case, firmware
- * will no longer set that flag.
- */
- if (xd->flags & XIVE_IRQ_FLAG_MASK_FW) {
- unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
- xive_ops->configure_irq(hw_irq,
- get_hard_smp_processor_id(xd->target),
- 0xff, d->irq);
- return;
- }
+ pr_debug("%s: irq %d data @%p\n", __func__, d->irq, xd);
xive_do_source_set_mask(xd, true);
}
@@ -725,16 +723,12 @@ static int xive_irq_set_affinity(struct irq_data *d,
u32 target, old_target;
int rc = 0;
- pr_devel("xive_irq_set_affinity: irq %d\n", d->irq);
+ pr_debug("%s: irq %d/0x%x\n", __func__, d->irq, hw_irq);
/* Is this valid ? */
if (cpumask_any_and(cpumask, cpu_online_mask) >= nr_cpu_ids)
return -EINVAL;
- /* Don't do anything if the interrupt isn't started */
- if (!irqd_is_started(d))
- return IRQ_SET_MASK_OK;
-
/*
* If existing target is already in the new mask, and is
* online then do nothing.
@@ -770,7 +764,7 @@ static int xive_irq_set_affinity(struct irq_data *d,
return rc;
}
- pr_devel(" target: 0x%x\n", target);
+ pr_debug(" target: 0x%x\n", target);
xd->target = target;
/* Give up previous target */
@@ -789,7 +783,7 @@ static int xive_irq_set_type(struct irq_data *d, unsigned int flow_type)
* the corresponding descriptor bits mind you but those will in turn
* affect the resend function when re-enabling an edge interrupt.
*
- * Set set the default to edge as explained in map().
+ * Set the default to edge as explained in map().
*/
if (flow_type == IRQ_TYPE_DEFAULT || flow_type == IRQ_TYPE_NONE)
flow_type = IRQ_TYPE_EDGE_RISING;
@@ -832,14 +826,7 @@ static int xive_irq_retrigger(struct irq_data *d)
* 11, then perform an EOI.
*/
xive_esb_read(xd, XIVE_ESB_SET_PQ_11);
-
- /*
- * Note: We pass "0" to the hw_irq argument in order to
- * avoid calling into the backend EOI code which we don't
- * want to do in the case of a re-trigger. Backends typically
- * only do EOI for LSIs anyway.
- */
- xive_do_source_eoi(0, xd);
+ xive_do_source_eoi(xd);
return 1;
}
@@ -856,13 +843,6 @@ static int xive_irq_set_vcpu_affinity(struct irq_data *d, void *state)
u8 pq;
/*
- * We only support this on interrupts that do not require
- * firmware calls for masking and unmasking
- */
- if (xd->flags & XIVE_IRQ_FLAG_MASK_FW)
- return -EIO;
-
- /*
* This is called by KVM with state non-NULL for enabling
* pass-through or NULL for disabling it
*/
@@ -961,7 +941,7 @@ static int xive_irq_set_vcpu_affinity(struct irq_data *d, void *state)
* while masked, the generic code will re-mask it anyway.
*/
if (!xd->saved_p)
- xive_do_source_eoi(hw_irq, xd);
+ xive_do_source_eoi(xd);
}
return 0;
@@ -986,7 +966,8 @@ static int xive_get_irqchip_state(struct irq_data *data,
* interrupt to be inactive in that case.
*/
*state = (pq != XIVE_ESB_INVALID) && !xd->stale_p &&
- (xd->saved_p || !!(pq & XIVE_ESB_VAL_P));
+ (xd->saved_p || (!!(pq & XIVE_ESB_VAL_P) &&
+ !irqd_irq_disabled(data)));
return 0;
default:
return -EINVAL;
@@ -1015,6 +996,8 @@ EXPORT_SYMBOL_GPL(is_xive_irq);
void xive_cleanup_irq_data(struct xive_irq_data *xd)
{
+ pr_debug("%s for HW 0x%x\n", __func__, xd->hw_irq);
+
if (xd->eoi_mmio) {
iounmap(xd->eoi_mmio);
if (xd->eoi_mmio == xd->trig_mmio)
@@ -1056,7 +1039,7 @@ static int xive_irq_alloc_data(unsigned int virq, irq_hw_number_t hw)
return 0;
}
-static void xive_irq_free_data(unsigned int virq)
+void xive_irq_free_data(unsigned int virq)
{
struct xive_irq_data *xd = irq_get_handler_data(virq);
@@ -1066,6 +1049,7 @@ static void xive_irq_free_data(unsigned int virq)
xive_cleanup_irq_data(xd);
kfree(xd);
}
+EXPORT_SYMBOL_GPL(xive_irq_free_data);
#ifdef CONFIG_SMP
@@ -1101,7 +1085,7 @@ static void xive_ipi_eoi(struct irq_data *d)
DBG_VERBOSE("IPI eoi: irq=%d [0x%lx] (HW IRQ 0x%x) pending=%02x\n",
d->irq, irqd_to_hwirq(d), xc->hw_ipi, xc->pending_prio);
- xive_do_source_eoi(xc->hw_ipi, &xc->ipi_data);
+ xive_do_source_eoi(&xc->ipi_data);
xive_do_queue_eoi(xc);
}
@@ -1120,28 +1104,100 @@ static struct irq_chip xive_ipi_chip = {
.irq_unmask = xive_ipi_do_nothing,
};
-static void __init xive_request_ipi(void)
+/*
+ * IPIs are marked per-cpu. We use separate HW interrupts under the
+ * hood but associated with the same "linux" interrupt
+ */
+struct xive_ipi_alloc_info {
+ irq_hw_number_t hwirq;
+};
+
+static int xive_ipi_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *arg)
{
- unsigned int virq;
+ struct xive_ipi_alloc_info *info = arg;
+ int i;
- /*
- * Initialization failed, move on, we might manage to
- * reach the point where we display our errors before
- * the system falls appart
- */
- if (!xive_irq_domain)
- return;
+ for (i = 0; i < nr_irqs; i++) {
+ irq_domain_set_info(domain, virq + i, info->hwirq + i, &xive_ipi_chip,
+ domain->host_data, handle_percpu_irq,
+ NULL, NULL);
+ }
+ return 0;
+}
- /* Initialize it */
- virq = irq_create_mapping(xive_irq_domain, 0);
- xive_ipi_irq = virq;
+static const struct irq_domain_ops xive_ipi_irq_domain_ops = {
+ .alloc = xive_ipi_irq_domain_alloc,
+};
- WARN_ON(request_irq(virq, xive_muxed_ipi_action,
- IRQF_PERCPU | IRQF_NO_THREAD, "IPI", NULL));
+static int __init xive_init_ipis(void)
+{
+ struct fwnode_handle *fwnode;
+ struct irq_domain *ipi_domain;
+ unsigned int node;
+ int ret = -ENOMEM;
+
+ fwnode = irq_domain_alloc_named_fwnode("XIVE-IPI");
+ if (!fwnode)
+ goto out;
+
+ ipi_domain = irq_domain_create_linear(fwnode, nr_node_ids,
+ &xive_ipi_irq_domain_ops, NULL);
+ if (!ipi_domain)
+ goto out_free_fwnode;
+
+ xive_ipis = kcalloc(nr_node_ids, sizeof(*xive_ipis), GFP_KERNEL | __GFP_NOFAIL);
+ if (!xive_ipis)
+ goto out_free_domain;
+
+ for_each_node(node) {
+ struct xive_ipi_desc *xid = &xive_ipis[node];
+ struct xive_ipi_alloc_info info = { node };
+
+ /*
+ * Map one IPI interrupt per node for all cpus of that node.
+ * Since the HW interrupt number doesn't have any meaning,
+ * simply use the node number.
+ */
+ ret = irq_domain_alloc_irqs(ipi_domain, 1, node, &info);
+ if (ret < 0)
+ goto out_free_xive_ipis;
+ xid->irq = ret;
+
+ snprintf(xid->name, sizeof(xid->name), "IPI-%d", node);
+ }
+
+ return ret;
+
+out_free_xive_ipis:
+ kfree(xive_ipis);
+out_free_domain:
+ irq_domain_remove(ipi_domain);
+out_free_fwnode:
+ irq_domain_free_fwnode(fwnode);
+out:
+ return ret;
+}
+
+static int xive_request_ipi(unsigned int cpu)
+{
+ struct xive_ipi_desc *xid = &xive_ipis[early_cpu_to_node(cpu)];
+ int ret;
+
+ if (atomic_inc_return(&xid->started) > 1)
+ return 0;
+
+ ret = request_irq(xid->irq, xive_muxed_ipi_action,
+ IRQF_NO_DEBUG | IRQF_PERCPU | IRQF_NO_THREAD,
+ xid->name, NULL);
+
+ WARN(ret < 0, "Failed to request IPI %d: %d\n", xid->irq, ret);
+ return ret;
}
static int xive_setup_cpu_ipi(unsigned int cpu)
{
+ unsigned int xive_ipi_irq = xive_ipi_cpu_to_irq(cpu);
struct xive_cpu *xc;
int rc;
@@ -1150,9 +1206,12 @@ static int xive_setup_cpu_ipi(unsigned int cpu)
xc = per_cpu(xive_cpu, cpu);
/* Check if we are already setup */
- if (xc->hw_ipi != 0)
+ if (xc->hw_ipi != XIVE_BAD_IRQ)
return 0;
+ /* Register the IPI */
+ xive_request_ipi(cpu);
+
/* Grab an IPI from the backend, this will populate xc->hw_ipi */
if (xive_ops->get_ipi(cpu, xc))
return -EIO;
@@ -1173,8 +1232,8 @@ static int xive_setup_cpu_ipi(unsigned int cpu)
pr_err("Failed to map IPI CPU %d\n", cpu);
return -EIO;
}
- pr_devel("CPU %d HW IPI %x, virq %d, trig_mmio=%p\n", cpu,
- xc->hw_ipi, xive_ipi_irq, xc->ipi_data.trig_mmio);
+ pr_debug("CPU %d HW IPI 0x%x, virq %d, trig_mmio=%p\n", cpu,
+ xc->hw_ipi, xive_ipi_irq, xc->ipi_data.trig_mmio);
/* Unmask it */
xive_do_source_set_mask(&xc->ipi_data, false);
@@ -1182,14 +1241,18 @@ static int xive_setup_cpu_ipi(unsigned int cpu)
return 0;
}
-static void xive_cleanup_cpu_ipi(unsigned int cpu, struct xive_cpu *xc)
+noinstr static void xive_cleanup_cpu_ipi(unsigned int cpu, struct xive_cpu *xc)
{
+ unsigned int xive_ipi_irq = xive_ipi_cpu_to_irq(cpu);
+
/* Disable the IPI and free the IRQ data */
/* Already cleaned up ? */
- if (xc->hw_ipi == 0)
+ if (xc->hw_ipi == XIVE_BAD_IRQ)
return;
+ /* TODO: clear IPI mapping */
+
/* Mask the IPI */
xive_do_source_set_mask(&xc->ipi_data, true);
@@ -1212,7 +1275,7 @@ void __init xive_smp_probe(void)
smp_ops->cause_ipi = xive_cause_ipi;
/* Register the IPI */
- xive_request_ipi();
+ xive_init_ipis();
/* Allocate and setup IPI for the boot CPU */
xive_setup_cpu_ipi(smp_processor_id());
@@ -1231,19 +1294,6 @@ static int xive_irq_domain_map(struct irq_domain *h, unsigned int virq,
*/
irq_clear_status_flags(virq, IRQ_LEVEL);
-#ifdef CONFIG_SMP
- /* IPIs are special and come up with HW number 0 */
- if (hw == 0) {
- /*
- * IPIs are marked per-cpu. We use separate HW interrupts under
- * the hood but associated with the same "linux" interrupt
- */
- irq_set_chip_and_handler(virq, &xive_ipi_chip,
- handle_percpu_irq);
- return 0;
- }
-#endif
-
rc = xive_irq_alloc_data(virq, hw);
if (rc)
return rc;
@@ -1255,15 +1305,7 @@ static int xive_irq_domain_map(struct irq_domain *h, unsigned int virq,
static void xive_irq_domain_unmap(struct irq_domain *d, unsigned int virq)
{
- struct irq_data *data = irq_get_irq_data(virq);
- unsigned int hw_irq;
-
- /* XXX Assign BAD number */
- if (!data)
- return;
- hw_irq = (unsigned int)irqd_to_hwirq(data);
- if (hw_irq)
- xive_irq_free_data(virq);
+ xive_irq_free_data(virq);
}
static int xive_irq_domain_xlate(struct irq_domain *h, struct device_node *ct,
@@ -1294,17 +1336,135 @@ static int xive_irq_domain_match(struct irq_domain *h, struct device_node *node,
return xive_ops->match(node);
}
+#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
+static const char * const esb_names[] = { "RESET", "OFF", "PENDING", "QUEUED" };
+
+static const struct {
+ u64 mask;
+ char *name;
+} xive_irq_flags[] = {
+ { XIVE_IRQ_FLAG_STORE_EOI, "STORE_EOI" },
+ { XIVE_IRQ_FLAG_LSI, "LSI" },
+ { XIVE_IRQ_FLAG_H_INT_ESB, "H_INT_ESB" },
+ { XIVE_IRQ_FLAG_NO_EOI, "NO_EOI" },
+};
+
+static void xive_irq_domain_debug_show(struct seq_file *m, struct irq_domain *d,
+ struct irq_data *irqd, int ind)
+{
+ struct xive_irq_data *xd;
+ u64 val;
+ int i;
+
+ /* No IRQ domain level information. To be done */
+ if (!irqd)
+ return;
+
+ if (!is_xive_irq(irq_data_get_irq_chip(irqd)))
+ return;
+
+ seq_printf(m, "%*sXIVE:\n", ind, "");
+ ind++;
+
+ xd = irq_data_get_irq_handler_data(irqd);
+ if (!xd) {
+ seq_printf(m, "%*snot assigned\n", ind, "");
+ return;
+ }
+
+ val = xive_esb_read(xd, XIVE_ESB_GET);
+ seq_printf(m, "%*sESB: %s\n", ind, "", esb_names[val & 0x3]);
+ seq_printf(m, "%*sPstate: %s %s\n", ind, "", xd->stale_p ? "stale" : "",
+ xd->saved_p ? "saved" : "");
+ seq_printf(m, "%*sTarget: %d\n", ind, "", xd->target);
+ seq_printf(m, "%*sChip: %d\n", ind, "", xd->src_chip);
+ seq_printf(m, "%*sTrigger: 0x%016llx\n", ind, "", xd->trig_page);
+ seq_printf(m, "%*sEOI: 0x%016llx\n", ind, "", xd->eoi_page);
+ seq_printf(m, "%*sFlags: 0x%llx\n", ind, "", xd->flags);
+ for (i = 0; i < ARRAY_SIZE(xive_irq_flags); i++) {
+ if (xd->flags & xive_irq_flags[i].mask)
+ seq_printf(m, "%*s%s\n", ind + 12, "", xive_irq_flags[i].name);
+ }
+}
+#endif
+
+#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
+static int xive_irq_domain_translate(struct irq_domain *d,
+ struct irq_fwspec *fwspec,
+ unsigned long *hwirq,
+ unsigned int *type)
+{
+ return xive_irq_domain_xlate(d, to_of_node(fwspec->fwnode),
+ fwspec->param, fwspec->param_count,
+ hwirq, type);
+}
+
+static int xive_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *arg)
+{
+ struct irq_fwspec *fwspec = arg;
+ irq_hw_number_t hwirq;
+ unsigned int type = IRQ_TYPE_NONE;
+ int i, rc;
+
+ rc = xive_irq_domain_translate(domain, fwspec, &hwirq, &type);
+ if (rc)
+ return rc;
+
+ pr_debug("%s %d/0x%lx #%d\n", __func__, virq, hwirq, nr_irqs);
+
+ for (i = 0; i < nr_irqs; i++) {
+ /* TODO: call xive_irq_domain_map() */
+
+ /*
+ * Mark interrupts as edge sensitive by default so that resend
+ * actually works. Will fix that up below if needed.
+ */
+ irq_clear_status_flags(virq, IRQ_LEVEL);
+
+ /* allocates and sets handler data */
+ rc = xive_irq_alloc_data(virq + i, hwirq + i);
+ if (rc)
+ return rc;
+
+ irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
+ &xive_irq_chip, domain->host_data);
+ irq_set_handler(virq + i, handle_fasteoi_irq);
+ }
+
+ return 0;
+}
+
+static void xive_irq_domain_free(struct irq_domain *domain,
+ unsigned int virq, unsigned int nr_irqs)
+{
+ int i;
+
+ pr_debug("%s %d #%d\n", __func__, virq, nr_irqs);
+
+ for (i = 0; i < nr_irqs; i++)
+ xive_irq_free_data(virq + i);
+}
+#endif
+
static const struct irq_domain_ops xive_irq_domain_ops = {
+#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
+ .alloc = xive_irq_domain_alloc,
+ .free = xive_irq_domain_free,
+ .translate = xive_irq_domain_translate,
+#endif
.match = xive_irq_domain_match,
.map = xive_irq_domain_map,
.unmap = xive_irq_domain_unmap,
.xlate = xive_irq_domain_xlate,
+#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
+ .debug_show = xive_irq_domain_debug_show,
+#endif
};
-static void __init xive_init_host(void)
+static void __init xive_init_host(struct device_node *np)
{
- xive_irq_domain = irq_domain_add_nomap(NULL, XIVE_MAX_IRQ,
- &xive_irq_domain_ops, NULL);
+ xive_irq_domain = irq_domain_add_tree(np, &xive_irq_domain_ops, NULL);
if (WARN_ON(xive_irq_domain == NULL))
return;
irq_set_default_host(xive_irq_domain);
@@ -1333,16 +1493,14 @@ static int xive_prepare_cpu(unsigned int cpu)
xc = per_cpu(xive_cpu, cpu);
if (!xc) {
- struct device_node *np;
-
xc = kzalloc_node(sizeof(struct xive_cpu),
GFP_KERNEL, cpu_to_node(cpu));
if (!xc)
return -ENOMEM;
- np = of_get_cpu_node(cpu, NULL);
- if (np)
- xc->chip_id = of_get_ibm_chip_id(np);
- of_node_put(np);
+ xc->hw_ipi = XIVE_BAD_IRQ;
+ xc->chip_id = XIVE_INVALID_CHIP_ID;
+ if (xive_ops->prepare_cpu)
+ xive_ops->prepare_cpu(cpu, xc);
per_cpu(xive_cpu, cpu) = xc;
}
@@ -1367,7 +1525,7 @@ static void xive_setup_cpu(void)
#ifdef CONFIG_SMP
void xive_smp_setup_cpu(void)
{
- pr_devel("SMP setup CPU %d\n", smp_processor_id());
+ pr_debug("SMP setup CPU %d\n", smp_processor_id());
/* This will have already been done on the boot CPU */
if (smp_processor_id() != boot_cpuid)
@@ -1405,13 +1563,12 @@ static void xive_flush_cpu_queue(unsigned int cpu, struct xive_cpu *xc)
struct irq_desc *desc = irq_to_desc(irq);
struct irq_data *d = irq_desc_get_irq_data(desc);
struct xive_irq_data *xd;
- unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
/*
* Ignore anything that isn't a XIVE irq and ignore
* IPIs, so can just be dropped.
*/
- if (d->domain != xive_irq_domain || hw_irq == 0)
+ if (d->domain != xive_irq_domain)
continue;
/*
@@ -1436,7 +1593,7 @@ static void xive_flush_cpu_queue(unsigned int cpu, struct xive_cpu *xc)
* still asserted. Otherwise do an MSI retrigger.
*/
if (xd->flags & XIVE_IRQ_FLAG_LSI)
- xive_do_source_eoi(irqd_to_hwirq(d), xd);
+ xive_do_source_eoi(xd);
else
xive_irq_retrigger(d);
@@ -1477,7 +1634,7 @@ void xive_flush_interrupt(void)
#endif /* CONFIG_SMP */
-void xive_teardown_cpu(void)
+noinstr void xive_teardown_cpu(void)
{
struct xive_cpu *xc = __this_cpu_read(xive_cpu);
unsigned int cpu = smp_processor_id();
@@ -1503,8 +1660,8 @@ void xive_shutdown(void)
xive_ops->shutdown();
}
-bool __init xive_core_init(const struct xive_ops *ops, void __iomem *area, u32 offset,
- u8 max_prio)
+bool __init xive_core_init(struct device_node *np, const struct xive_ops *ops,
+ void __iomem *area, u32 offset, u8 max_prio)
{
xive_tima = area;
xive_tima_offset = offset;
@@ -1514,10 +1671,10 @@ bool __init xive_core_init(const struct xive_ops *ops, void __iomem *area, u32 o
ppc_md.get_irq = xive_get_irq;
__xive_enabled = true;
- pr_devel("Initializing host..\n");
- xive_init_host();
+ pr_debug("Initializing host..\n");
+ xive_init_host(np);
- pr_devel("Initializing boot CPU..\n");
+ pr_debug("Initializing boot CPU..\n");
/* Allocate per-CPU data and queues */
xive_prepare_cpu(smp_processor_id());
@@ -1551,6 +1708,157 @@ __be32 *xive_queue_page_alloc(unsigned int cpu, u32 queue_shift)
static int __init xive_off(char *arg)
{
xive_cmdline_disabled = true;
- return 0;
+ return 1;
}
__setup("xive=off", xive_off);
+
+static int __init xive_store_eoi_cmdline(char *arg)
+{
+ if (!arg)
+ return 1;
+
+ if (strncmp(arg, "off", 3) == 0) {
+ pr_info("StoreEOI disabled on kernel command line\n");
+ xive_store_eoi = false;
+ }
+ return 1;
+}
+__setup("xive.store-eoi=", xive_store_eoi_cmdline);
+
+#ifdef CONFIG_DEBUG_FS
+static void xive_debug_show_ipi(struct seq_file *m, int cpu)
+{
+ struct xive_cpu *xc = per_cpu(xive_cpu, cpu);
+
+ seq_printf(m, "CPU %d: ", cpu);
+ if (xc) {
+ seq_printf(m, "pp=%02x CPPR=%02x ", xc->pending_prio, xc->cppr);
+
+#ifdef CONFIG_SMP
+ {
+ char buffer[128];
+
+ xive_irq_data_dump(&xc->ipi_data, buffer, sizeof(buffer));
+ seq_printf(m, "IPI=0x%08x %s", xc->hw_ipi, buffer);
+ }
+#endif
+ }
+ seq_puts(m, "\n");
+}
+
+static void xive_debug_show_irq(struct seq_file *m, struct irq_data *d)
+{
+ unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
+ int rc;
+ u32 target;
+ u8 prio;
+ u32 lirq;
+ char buffer[128];
+
+ rc = xive_ops->get_irq_config(hw_irq, &target, &prio, &lirq);
+ if (rc) {
+ seq_printf(m, "IRQ 0x%08x : no config rc=%d\n", hw_irq, rc);
+ return;
+ }
+
+ seq_printf(m, "IRQ 0x%08x : target=0x%x prio=%02x lirq=0x%x ",
+ hw_irq, target, prio, lirq);
+
+ xive_irq_data_dump(irq_data_get_irq_handler_data(d), buffer, sizeof(buffer));
+ seq_puts(m, buffer);
+ seq_puts(m, "\n");
+}
+
+static int xive_irq_debug_show(struct seq_file *m, void *private)
+{
+ unsigned int i;
+ struct irq_desc *desc;
+
+ for_each_irq_desc(i, desc) {
+ struct irq_data *d = irq_domain_get_irq_data(xive_irq_domain, i);
+
+ if (d)
+ xive_debug_show_irq(m, d);
+ }
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(xive_irq_debug);
+
+static int xive_ipi_debug_show(struct seq_file *m, void *private)
+{
+ int cpu;
+
+ if (xive_ops->debug_show)
+ xive_ops->debug_show(m, private);
+
+ for_each_online_cpu(cpu)
+ xive_debug_show_ipi(m, cpu);
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(xive_ipi_debug);
+
+static void xive_eq_debug_show_one(struct seq_file *m, struct xive_q *q, u8 prio)
+{
+ int i;
+
+ seq_printf(m, "EQ%d idx=%d T=%d\n", prio, q->idx, q->toggle);
+ if (q->qpage) {
+ for (i = 0; i < q->msk + 1; i++) {
+ if (!(i % 8))
+ seq_printf(m, "%05d ", i);
+ seq_printf(m, "%08x%s", be32_to_cpup(q->qpage + i),
+ (i + 1) % 8 ? " " : "\n");
+ }
+ }
+ seq_puts(m, "\n");
+}
+
+static int xive_eq_debug_show(struct seq_file *m, void *private)
+{
+ int cpu = (long)m->private;
+ struct xive_cpu *xc = per_cpu(xive_cpu, cpu);
+
+ if (xc)
+ xive_eq_debug_show_one(m, &xc->queue[xive_irq_priority],
+ xive_irq_priority);
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(xive_eq_debug);
+
+static void xive_core_debugfs_create(void)
+{
+ struct dentry *xive_dir;
+ struct dentry *xive_eq_dir;
+ long cpu;
+ char name[16];
+
+ xive_dir = debugfs_create_dir("xive", arch_debugfs_dir);
+ if (IS_ERR(xive_dir))
+ return;
+
+ debugfs_create_file("ipis", 0400, xive_dir,
+ NULL, &xive_ipi_debug_fops);
+ debugfs_create_file("interrupts", 0400, xive_dir,
+ NULL, &xive_irq_debug_fops);
+ xive_eq_dir = debugfs_create_dir("eqs", xive_dir);
+ for_each_possible_cpu(cpu) {
+ snprintf(name, sizeof(name), "cpu%ld", cpu);
+ debugfs_create_file(name, 0400, xive_eq_dir, (void *)cpu,
+ &xive_eq_debug_fops);
+ }
+ debugfs_create_bool("store-eoi", 0600, xive_dir, &xive_store_eoi);
+
+ if (xive_ops->debug_create)
+ xive_ops->debug_create(xive_dir);
+}
+#else
+static inline void xive_core_debugfs_create(void) { }
+#endif /* CONFIG_DEBUG_FS */
+
+int xive_core_debug_init(void)
+{
+ if (xive_enabled() && IS_ENABLED(CONFIG_DEBUG_FS))
+ xive_core_debugfs_create();
+
+ return 0;
+}
diff --git a/arch/powerpc/sysdev/xive/native.c b/arch/powerpc/sysdev/xive/native.c
index 0ff6b739052c..3925825954bc 100644
--- a/arch/powerpc/sysdev/xive/native.c
+++ b/arch/powerpc/sysdev/xive/native.c
@@ -13,13 +13,15 @@
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/of.h>
+#include <linux/of_address.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/cpumask.h>
#include <linux/mm.h>
+#include <linux/kmemleak.h>
-#include <asm/prom.h>
+#include <asm/machdep.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/irq.h>
@@ -39,6 +41,7 @@ static u32 xive_queue_shift;
static u32 xive_pool_vps = XIVE_INVALID_VP;
static struct kmem_cache *xive_provision_cache;
static bool xive_has_single_esc;
+bool xive_has_save_restore;
int xive_native_populate_irq_data(u32 hw_irq, struct xive_irq_data *data)
{
@@ -60,14 +63,10 @@ int xive_native_populate_irq_data(u32 hw_irq, struct xive_irq_data *data)
opal_flags = be64_to_cpu(flags);
if (opal_flags & OPAL_XIVE_IRQ_STORE_EOI)
data->flags |= XIVE_IRQ_FLAG_STORE_EOI;
+ if (opal_flags & OPAL_XIVE_IRQ_STORE_EOI2)
+ data->flags |= XIVE_IRQ_FLAG_STORE_EOI;
if (opal_flags & OPAL_XIVE_IRQ_LSI)
data->flags |= XIVE_IRQ_FLAG_LSI;
- if (opal_flags & OPAL_XIVE_IRQ_SHIFT_BUG)
- data->flags |= XIVE_IRQ_FLAG_SHIFT_BUG;
- if (opal_flags & OPAL_XIVE_IRQ_MASK_VIA_FW)
- data->flags |= XIVE_IRQ_FLAG_MASK_FW;
- if (opal_flags & OPAL_XIVE_IRQ_EOI_VIA_FW)
- data->flags |= XIVE_IRQ_FLAG_EOI_FW;
data->eoi_page = be64_to_cpu(eoi_page);
data->trig_page = be64_to_cpu(trig_page);
data->esb_shift = be32_to_cpu(esb_shift);
@@ -126,6 +125,8 @@ static int xive_native_get_irq_config(u32 hw_irq, u32 *target, u8 *prio,
return rc == 0 ? 0 : -ENXIO;
}
+#define vp_err(vp, fmt, ...) pr_err("VP[0x%x]: " fmt, vp, ##__VA_ARGS__)
+
/* This can be called multiple time to change a queue configuration */
int xive_native_configure_queue(u32 vp_id, struct xive_q *q, u8 prio,
__be32 *qpage, u32 order, bool can_escalate)
@@ -153,7 +154,7 @@ int xive_native_configure_queue(u32 vp_id, struct xive_q *q, u8 prio,
&esc_irq_be,
NULL);
if (rc) {
- pr_err("Error %lld getting queue info prio %d\n", rc, prio);
+ vp_err(vp_id, "Failed to get queue %d info : %lld\n", prio, rc);
rc = -EIO;
goto fail;
}
@@ -176,7 +177,7 @@ int xive_native_configure_queue(u32 vp_id, struct xive_q *q, u8 prio,
msleep(OPAL_BUSY_DELAY_MS);
}
if (rc) {
- pr_err("Error %lld setting queue for prio %d\n", rc, prio);
+ vp_err(vp_id, "Failed to set queue %d info: %lld\n", prio, rc);
rc = -EIO;
} else {
/*
@@ -203,7 +204,7 @@ static void __xive_native_disable_queue(u32 vp_id, struct xive_q *q, u8 prio)
msleep(OPAL_BUSY_DELAY_MS);
}
if (rc)
- pr_err("Error %lld disabling queue for prio %d\n", rc, prio);
+ vp_err(vp_id, "Failed to disable queue %d : %lld\n", prio, rc);
}
void xive_native_disable_queue(u32 vp_id, struct xive_q *q, u8 prio)
@@ -279,12 +280,12 @@ static int xive_native_get_ipi(unsigned int cpu, struct xive_cpu *xc)
}
#endif /* CONFIG_SMP */
-u32 xive_native_alloc_irq(void)
+u32 xive_native_alloc_irq_on_chip(u32 chip_id)
{
s64 rc;
for (;;) {
- rc = opal_xive_allocate_irq(OPAL_XIVE_ANY_CHIP);
+ rc = opal_xive_allocate_irq(chip_id);
if (rc != OPAL_BUSY)
break;
msleep(OPAL_BUSY_DELAY_MS);
@@ -293,7 +294,7 @@ u32 xive_native_alloc_irq(void)
return 0;
return rc;
}
-EXPORT_SYMBOL_GPL(xive_native_alloc_irq);
+EXPORT_SYMBOL_GPL(xive_native_alloc_irq_on_chip);
void xive_native_free_irq(u32 irq)
{
@@ -312,7 +313,7 @@ static void xive_native_put_ipi(unsigned int cpu, struct xive_cpu *xc)
s64 rc;
/* Free the IPI */
- if (!xc->hw_ipi)
+ if (xc->hw_ipi == XIVE_BAD_IRQ)
return;
for (;;) {
rc = opal_xive_free_irq(xc->hw_ipi);
@@ -320,7 +321,7 @@ static void xive_native_put_ipi(unsigned int cpu, struct xive_cpu *xc)
msleep(OPAL_BUSY_DELAY_MS);
continue;
}
- xc->hw_ipi = 0;
+ xc->hw_ipi = XIVE_BAD_IRQ;
break;
}
}
@@ -382,13 +383,9 @@ static void xive_native_update_pending(struct xive_cpu *xc)
}
}
-static void xive_native_eoi(u32 hw_irq)
+static void xive_native_prepare_cpu(unsigned int cpu, struct xive_cpu *xc)
{
- /*
- * Not normally used except if specific interrupts need
- * a workaround on EOI.
- */
- opal_int_eoi(hw_irq);
+ xc->chip_id = cpu_to_chip_id(cpu);
}
static void xive_native_setup_cpu(unsigned int cpu, struct xive_cpu *xc)
@@ -464,6 +461,14 @@ void xive_native_sync_queue(u32 hw_irq)
}
EXPORT_SYMBOL_GPL(xive_native_sync_queue);
+#ifdef CONFIG_DEBUG_FS
+static int xive_native_debug_create(struct dentry *xive_dir)
+{
+ debugfs_create_bool("save-restore", 0600, xive_dir, &xive_has_save_restore);
+ return 0;
+}
+#endif
+
static const struct xive_ops xive_native_ops = {
.populate_irq_data = xive_native_populate_irq_data,
.configure_irq = xive_native_configure_irq,
@@ -473,7 +478,7 @@ static const struct xive_ops xive_native_ops = {
.match = xive_native_match,
.shutdown = xive_native_shutdown,
.update_pending = xive_native_update_pending,
- .eoi = xive_native_eoi,
+ .prepare_cpu = xive_native_prepare_cpu,
.setup_cpu = xive_native_setup_cpu,
.teardown_cpu = xive_native_teardown_cpu,
.sync_source = xive_native_sync_source,
@@ -481,10 +486,13 @@ static const struct xive_ops xive_native_ops = {
.get_ipi = xive_native_get_ipi,
.put_ipi = xive_native_put_ipi,
#endif /* CONFIG_SMP */
+#ifdef CONFIG_DEBUG_FS
+ .debug_create = xive_native_debug_create,
+#endif /* CONFIG_DEBUG_FS */
.name = "native",
};
-static bool xive_parse_provisioning(struct device_node *np)
+static bool __init xive_parse_provisioning(struct device_node *np)
{
int rc;
@@ -524,7 +532,7 @@ static bool xive_parse_provisioning(struct device_node *np)
return true;
}
-static void xive_native_setup_pools(void)
+static void __init xive_native_setup_pools(void)
{
/* Allocate a pool big enough */
pr_debug("XIVE: Allocating VP block for pool size %u\n", nr_cpu_ids);
@@ -571,12 +579,12 @@ bool __init xive_native_init(void)
/* Resource 1 is HV window */
if (of_address_to_resource(np, 1, &r)) {
pr_err("Failed to get thread mgmnt area resource\n");
- return false;
+ goto err_put;
}
tima = ioremap(r.start, resource_size(&r));
if (!tima) {
pr_err("Failed to map thread mgmnt area\n");
- return false;
+ goto err_put;
}
/* Read number of priorities */
@@ -594,6 +602,9 @@ bool __init xive_native_init(void)
if (of_get_property(np, "single-escalation-support", NULL) != NULL)
xive_has_single_esc = true;
+ if (of_get_property(np, "vp-save-restore", NULL))
+ xive_has_save_restore = true;
+
/* Configure Thread Management areas for KVM */
for_each_possible_cpu(cpu)
kvmppc_set_xive_tima(cpu, r.start, tima);
@@ -601,32 +612,37 @@ bool __init xive_native_init(void)
/* Resource 2 is OS window */
if (of_address_to_resource(np, 2, &r)) {
pr_err("Failed to get thread mgmnt area resource\n");
- return false;
+ goto err_put;
}
xive_tima_os = r.start;
- /* Grab size of provisionning pages */
+ /* Grab size of provisioning pages */
xive_parse_provisioning(np);
/* Switch the XIVE to exploitation mode */
rc = opal_xive_reset(OPAL_XIVE_MODE_EXPL);
if (rc) {
pr_err("Switch to exploitation mode failed with error %lld\n", rc);
- return false;
+ goto err_put;
}
/* Setup some dummy HV pool VPs */
xive_native_setup_pools();
/* Initialize XIVE core with our backend */
- if (!xive_core_init(&xive_native_ops, tima, TM_QW3_HV_PHYS,
+ if (!xive_core_init(np, &xive_native_ops, tima, TM_QW3_HV_PHYS,
max_prio)) {
opal_xive_reset(OPAL_XIVE_MODE_EMU);
- return false;
+ goto err_put;
}
+ of_node_put(np);
pr_info("Using %dkB queues\n", 1 << (xive_queue_shift - 10));
return true;
+
+err_put:
+ of_node_put(np);
+ return false;
}
static bool xive_native_provision_pages(void)
@@ -646,6 +662,7 @@ static bool xive_native_provision_pages(void)
pr_err("Failed to allocate provisioning page\n");
return false;
}
+ kmemleak_ignore(p);
opal_xive_donate_page(chip, __pa(p));
}
return true;
@@ -711,6 +728,8 @@ int xive_native_enable_vp(u32 vp_id, bool single_escalation)
break;
msleep(OPAL_BUSY_DELAY_MS);
}
+ if (rc)
+ vp_err(vp_id, "Failed to enable VP : %lld\n", rc);
return rc ? -EIO : 0;
}
EXPORT_SYMBOL_GPL(xive_native_enable_vp);
@@ -725,6 +744,8 @@ int xive_native_disable_vp(u32 vp_id)
break;
msleep(OPAL_BUSY_DELAY_MS);
}
+ if (rc)
+ vp_err(vp_id, "Failed to disable VP : %lld\n", rc);
return rc ? -EIO : 0;
}
EXPORT_SYMBOL_GPL(xive_native_disable_vp);
@@ -736,8 +757,10 @@ int xive_native_get_vp_info(u32 vp_id, u32 *out_cam_id, u32 *out_chip_id)
s64 rc;
rc = opal_xive_get_vp_info(vp_id, NULL, &vp_cam_be, NULL, &vp_chip_id_be);
- if (rc)
+ if (rc) {
+ vp_err(vp_id, "Failed to get VP info : %lld\n", rc);
return -EIO;
+ }
*out_cam_id = be64_to_cpu(vp_cam_be) & 0xffffffffu;
*out_chip_id = be32_to_cpu(vp_chip_id_be);
@@ -751,6 +774,12 @@ bool xive_native_has_single_escalation(void)
}
EXPORT_SYMBOL_GPL(xive_native_has_single_escalation);
+bool xive_native_has_save_restore(void)
+{
+ return xive_has_save_restore;
+}
+EXPORT_SYMBOL_GPL(xive_native_has_save_restore);
+
int xive_native_get_queue_info(u32 vp_id, u32 prio,
u64 *out_qpage,
u64 *out_qsize,
@@ -768,8 +797,7 @@ int xive_native_get_queue_info(u32 vp_id, u32 prio,
rc = opal_xive_get_queue_info(vp_id, prio, &qpage, &qsize,
&qeoi_page, &escalate_irq, &qflags);
if (rc) {
- pr_err("OPAL failed to get queue info for VCPU %d/%d : %lld\n",
- vp_id, prio, rc);
+ vp_err(vp_id, "failed to get queue %d info : %lld\n", prio, rc);
return -EIO;
}
@@ -797,8 +825,7 @@ int xive_native_get_queue_state(u32 vp_id, u32 prio, u32 *qtoggle, u32 *qindex)
rc = opal_xive_get_queue_state(vp_id, prio, &opal_qtoggle,
&opal_qindex);
if (rc) {
- pr_err("OPAL failed to get queue state for VCPU %d/%d : %lld\n",
- vp_id, prio, rc);
+ vp_err(vp_id, "failed to get queue %d state : %lld\n", prio, rc);
return -EIO;
}
@@ -817,8 +844,7 @@ int xive_native_set_queue_state(u32 vp_id, u32 prio, u32 qtoggle, u32 qindex)
rc = opal_xive_set_queue_state(vp_id, prio, qtoggle, qindex);
if (rc) {
- pr_err("OPAL failed to set queue state for VCPU %d/%d : %lld\n",
- vp_id, prio, rc);
+ vp_err(vp_id, "failed to set queue %d state : %lld\n", prio, rc);
return -EIO;
}
@@ -840,8 +866,7 @@ int xive_native_get_vp_state(u32 vp_id, u64 *out_state)
rc = opal_xive_get_vp_state(vp_id, &state);
if (rc) {
- pr_err("OPAL failed to get vp state for VCPU %d : %lld\n",
- vp_id, rc);
+ vp_err(vp_id, "failed to get vp state : %lld\n", rc);
return -EIO;
}
@@ -850,3 +875,5 @@ int xive_native_get_vp_state(u32 vp_id, u64 *out_state)
return 0;
}
EXPORT_SYMBOL_GPL(xive_native_get_vp_state);
+
+machine_arch_initcall(powernv, xive_core_debug_init);
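
The hunk above renames xive_native_alloc_irq() to xive_native_alloc_irq_on_chip() and re-exports it under the new name. Existing callers can presumably keep using the old name through a thin wrapper that defaults to "any chip"; a sketch, assuming such a wrapper is provided in asm/xive.h outside this excerpt:

/* Sketch only -- assumed compatibility wrapper, not shown in the hunks. */
static inline u32 xive_native_alloc_irq(void)
{
	return xive_native_alloc_irq_on_chip(OPAL_XIVE_ANY_CHIP);
}
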
diff --git a/arch/powerpc/sysdev/xive/spapr.c b/arch/powerpc/sysdev/xive/spapr.c
index 55dc61cb4867..e2c8f93b535b 100644
--- a/arch/powerpc/sysdev/xive/spapr.c
+++ b/arch/powerpc/sysdev/xive/spapr.c
@@ -11,13 +11,17 @@
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_fdt.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <linux/bitmap.h>
#include <linux/cpumask.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/libfdt.h>
+#include <asm/machdep.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/smp.h>
@@ -26,6 +30,8 @@
#include <asm/xive.h>
#include <asm/xive-regs.h>
#include <asm/hvcall.h>
+#include <asm/svm.h>
+#include <asm/ultravisor.h>
#include "xive-internal.h"
@@ -41,7 +47,7 @@ struct xive_irq_bitmap {
static LIST_HEAD(xive_irq_bitmaps);
-static int xive_irq_bitmap_add(int base, int count)
+static int __init xive_irq_bitmap_add(int base, int count)
{
struct xive_irq_bitmap *xibm;
@@ -52,7 +58,7 @@ static int xive_irq_bitmap_add(int base, int count)
spin_lock_init(&xibm->lock);
xibm->base = base;
xibm->count = count;
- xibm->bitmap = kzalloc(xibm->count, GFP_KERNEL);
+ xibm->bitmap = bitmap_zalloc(xibm->count, GFP_KERNEL);
if (!xibm->bitmap) {
kfree(xibm);
return -ENOMEM;
@@ -64,6 +70,17 @@ static int xive_irq_bitmap_add(int base, int count)
return 0;
}
+static void xive_irq_bitmap_remove_all(void)
+{
+ struct xive_irq_bitmap *xibm, *tmp;
+
+ list_for_each_entry_safe(xibm, tmp, &xive_irq_bitmaps, list) {
+ list_del(&xibm->list);
+ bitmap_free(xibm->bitmap);
+ kfree(xibm);
+ }
+}
+
static int __xive_irq_bitmap_alloc(struct xive_irq_bitmap *xibm)
{
int irq;
@@ -170,7 +187,7 @@ static long plpar_int_get_source_info(unsigned long flags,
} while (plpar_busy_delay(rc));
if (rc) {
- pr_err("H_INT_GET_SOURCE_INFO lisn=%ld failed %ld\n", lisn, rc);
+ pr_err("H_INT_GET_SOURCE_INFO lisn=0x%lx failed %ld\n", lisn, rc);
return rc;
}
@@ -179,8 +196,8 @@ static long plpar_int_get_source_info(unsigned long flags,
*trig_page = retbuf[2];
*esb_shift = retbuf[3];
- pr_devel("H_INT_GET_SOURCE_INFO flags=%lx eoi=%lx trig=%lx shift=%lx\n",
- retbuf[0], retbuf[1], retbuf[2], retbuf[3]);
+ pr_debug("H_INT_GET_SOURCE_INFO lisn=0x%lx flags=0x%lx eoi=0x%lx trig=0x%lx shift=0x%lx\n",
+ lisn, retbuf[0], retbuf[1], retbuf[2], retbuf[3]);
return 0;
}
@@ -197,8 +214,8 @@ static long plpar_int_set_source_config(unsigned long flags,
long rc;
- pr_devel("H_INT_SET_SOURCE_CONFIG flags=%lx lisn=%lx target=%lx prio=%lx sw_irq=%lx\n",
- flags, lisn, target, prio, sw_irq);
+ pr_debug("H_INT_SET_SOURCE_CONFIG flags=0x%lx lisn=0x%lx target=%ld prio=%ld sw_irq=%ld\n",
+ flags, lisn, target, prio, sw_irq);
do {
@@ -207,7 +224,7 @@ static long plpar_int_set_source_config(unsigned long flags,
} while (plpar_busy_delay(rc));
if (rc) {
- pr_err("H_INT_SET_SOURCE_CONFIG lisn=%ld target=%lx prio=%lx failed %ld\n",
+ pr_err("H_INT_SET_SOURCE_CONFIG lisn=0x%lx target=%ld prio=%ld failed %ld\n",
lisn, target, prio, rc);
return rc;
}
@@ -224,7 +241,7 @@ static long plpar_int_get_source_config(unsigned long flags,
unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
long rc;
- pr_devel("H_INT_GET_SOURCE_CONFIG flags=%lx lisn=%lx\n", flags, lisn);
+ pr_debug("H_INT_GET_SOURCE_CONFIG flags=0x%lx lisn=0x%lx\n", flags, lisn);
do {
rc = plpar_hcall(H_INT_GET_SOURCE_CONFIG, retbuf, flags, lisn,
@@ -232,7 +249,7 @@ static long plpar_int_get_source_config(unsigned long flags,
} while (plpar_busy_delay(rc));
if (rc) {
- pr_err("H_INT_GET_SOURCE_CONFIG lisn=%ld failed %ld\n",
+ pr_err("H_INT_GET_SOURCE_CONFIG lisn=0x%lx failed %ld\n",
lisn, rc);
return rc;
}
@@ -241,8 +258,8 @@ static long plpar_int_get_source_config(unsigned long flags,
*prio = retbuf[1];
*sw_irq = retbuf[2];
- pr_devel("H_INT_GET_SOURCE_CONFIG target=%lx prio=%lx sw_irq=%lx\n",
- retbuf[0], retbuf[1], retbuf[2]);
+ pr_debug("H_INT_GET_SOURCE_CONFIG target=%ld prio=%ld sw_irq=%ld\n",
+ retbuf[0], retbuf[1], retbuf[2]);
return 0;
}
@@ -270,8 +287,8 @@ static long plpar_int_get_queue_info(unsigned long flags,
*esn_page = retbuf[0];
*esn_size = retbuf[1];
- pr_devel("H_INT_GET_QUEUE_INFO page=%lx size=%lx\n",
- retbuf[0], retbuf[1]);
+ pr_debug("H_INT_GET_QUEUE_INFO cpu=%ld prio=%ld page=0x%lx size=0x%lx\n",
+ target, priority, retbuf[0], retbuf[1]);
return 0;
}
@@ -286,8 +303,8 @@ static long plpar_int_set_queue_config(unsigned long flags,
{
long rc;
- pr_devel("H_INT_SET_QUEUE_CONFIG flags=%lx target=%lx priority=%lx qpage=%lx qsize=%lx\n",
- flags, target, priority, qpage, qsize);
+ pr_debug("H_INT_SET_QUEUE_CONFIG flags=0x%lx target=%ld priority=0x%lx qpage=0x%lx qsize=0x%lx\n",
+ flags, target, priority, qpage, qsize);
do {
rc = plpar_hcall_norets(H_INT_SET_QUEUE_CONFIG, flags, target,
@@ -295,7 +312,7 @@ static long plpar_int_set_queue_config(unsigned long flags,
} while (plpar_busy_delay(rc));
if (rc) {
- pr_err("H_INT_SET_QUEUE_CONFIG cpu=%ld prio=%ld qpage=%lx returned %ld\n",
+ pr_err("H_INT_SET_QUEUE_CONFIG cpu=%ld prio=%ld qpage=0x%lx returned %ld\n",
target, priority, qpage, rc);
return rc;
}
@@ -312,7 +329,7 @@ static long plpar_int_sync(unsigned long flags, unsigned long lisn)
} while (plpar_busy_delay(rc));
if (rc) {
- pr_err("H_INT_SYNC lisn=%ld returned %ld\n", lisn, rc);
+ pr_err("H_INT_SYNC lisn=0x%lx returned %ld\n", lisn, rc);
return rc;
}
@@ -330,8 +347,8 @@ static long plpar_int_esb(unsigned long flags,
unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
long rc;
- pr_devel("H_INT_ESB flags=%lx lisn=%lx offset=%lx in=%lx\n",
- flags, lisn, offset, in_data);
+ pr_debug("H_INT_ESB flags=0x%lx lisn=0x%lx offset=0x%lx in=0x%lx\n",
+ flags, lisn, offset, in_data);
do {
rc = plpar_hcall(H_INT_ESB, retbuf, flags, lisn, offset,
@@ -339,7 +356,7 @@ static long plpar_int_esb(unsigned long flags,
} while (plpar_busy_delay(rc));
if (rc) {
- pr_err("H_INT_ESB lisn=%ld offset=%ld returned %ld\n",
+ pr_err("H_INT_ESB lisn=0x%lx offset=0x%lx returned %ld\n",
lisn, offset, rc);
return rc;
}
@@ -501,6 +518,9 @@ static int xive_spapr_configure_queue(u32 target, struct xive_q *q, u8 prio,
rc = -EIO;
} else {
q->qpage = qpage;
+ if (is_secure_guest())
+ uv_share_page(PHYS_PFN(qpage_phys),
+ 1 << xive_alloc_order(order));
}
fail:
return rc;
@@ -534,6 +554,8 @@ static void xive_spapr_cleanup_queue(unsigned int cpu, struct xive_cpu *xc,
hw_cpu, prio);
alloc_order = xive_alloc_order(xive_queue_shift);
+ if (is_secure_guest())
+ uv_unshare_page(PHYS_PFN(__pa(q->qpage)), 1 << alloc_order);
free_pages((unsigned long)q->qpage, alloc_order);
q->qpage = NULL;
}
@@ -541,7 +563,7 @@ static void xive_spapr_cleanup_queue(unsigned int cpu, struct xive_cpu *xc,
static bool xive_spapr_match(struct device_node *node)
{
/* Ignore cascaded controllers for the moment */
- return 1;
+ return true;
}
#ifdef CONFIG_SMP
@@ -560,11 +582,11 @@ static int xive_spapr_get_ipi(unsigned int cpu, struct xive_cpu *xc)
static void xive_spapr_put_ipi(unsigned int cpu, struct xive_cpu *xc)
{
- if (!xc->hw_ipi)
+ if (xc->hw_ipi == XIVE_BAD_IRQ)
return;
xive_irq_bitmap_free(xc->hw_ipi);
- xc->hw_ipi = 0;
+ xc->hw_ipi = XIVE_BAD_IRQ;
}
#endif /* CONFIG_SMP */
@@ -620,11 +642,6 @@ static void xive_spapr_update_pending(struct xive_cpu *xc)
}
}
-static void xive_spapr_eoi(u32 hw_irq)
-{
- /* Not used */;
-}
-
static void xive_spapr_setup_cpu(unsigned int cpu, struct xive_cpu *xc)
{
/* Only some debug on the TIMA settings */
@@ -645,6 +662,24 @@ static void xive_spapr_sync_source(u32 hw_irq)
plpar_int_sync(0, hw_irq);
}
+static int xive_spapr_debug_show(struct seq_file *m, void *private)
+{
+ struct xive_irq_bitmap *xibm;
+ char *buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+
+ if (!buf)
+ return -ENOMEM;
+
+ list_for_each_entry(xibm, &xive_irq_bitmaps, list) {
+ memset(buf, 0, PAGE_SIZE);
+ bitmap_print_to_pagebuf(true, buf, xibm->bitmap, xibm->count);
+ seq_printf(m, "bitmap #%d: %s", xibm->count, buf);
+ }
+ kfree(buf);
+
+ return 0;
+}
+
static const struct xive_ops xive_spapr_ops = {
.populate_irq_data = xive_spapr_populate_irq_data,
.configure_irq = xive_spapr_configure_irq,
@@ -654,7 +689,6 @@ static const struct xive_ops xive_spapr_ops = {
.match = xive_spapr_match,
.shutdown = xive_spapr_shutdown,
.update_pending = xive_spapr_update_pending,
- .eoi = xive_spapr_eoi,
.setup_cpu = xive_spapr_setup_cpu,
.teardown_cpu = xive_spapr_teardown_cpu,
.sync_source = xive_spapr_sync_source,
@@ -662,6 +696,7 @@ static const struct xive_ops xive_spapr_ops = {
#ifdef CONFIG_SMP
.get_ipi = xive_spapr_get_ipi,
.put_ipi = xive_spapr_put_ipi,
+ .debug_show = xive_spapr_debug_show,
#endif /* CONFIG_SMP */
.name = "spapr",
};
@@ -669,7 +704,7 @@ static const struct xive_ops xive_spapr_ops = {
/*
* get max priority from "/ibm,plat-res-int-priorities"
*/
-static bool xive_get_max_prio(u8 *max_prio)
+static bool __init xive_get_max_prio(u8 *max_prio)
{
struct device_node *rootdn;
const __be32 *reg;
@@ -683,6 +718,7 @@ static bool xive_get_max_prio(u8 *max_prio)
}
reg = of_get_property(rootdn, "ibm,plat-res-int-priorities", &len);
+ of_node_put(rootdn);
if (!reg) {
pr_err("Failed to read 'ibm,plat-res-int-priorities' property\n");
return false;
@@ -723,7 +759,7 @@ static bool xive_get_max_prio(u8 *max_prio)
return true;
}
-static const u8 *get_vec5_feature(unsigned int index)
+static const u8 *__init get_vec5_feature(unsigned int index)
{
unsigned long root, chosen;
int size;
@@ -744,7 +780,7 @@ static const u8 *get_vec5_feature(unsigned int index)
return vec5 + index;
}
-static bool xive_spapr_disabled(void)
+static bool __init xive_spapr_disabled(void)
{
const u8 *vec5_xive;
@@ -782,7 +818,7 @@ bool __init xive_spapr_init(void)
u32 val;
u32 len;
const __be32 *reg;
- int i;
+ int i, err;
if (xive_spapr_disabled())
return false;
@@ -798,32 +834,35 @@ bool __init xive_spapr_init(void)
/* Resource 1 is the OS ring TIMA */
if (of_address_to_resource(np, 1, &r)) {
pr_err("Failed to get thread mgmnt area resource\n");
- return false;
+ goto err_put;
}
tima = ioremap(r.start, resource_size(&r));
if (!tima) {
pr_err("Failed to map thread mgmnt area\n");
- return false;
+ goto err_put;
}
if (!xive_get_max_prio(&max_prio))
- return false;
+ goto err_unmap;
/* Feed the IRQ number allocator with the ranges given in the DT */
reg = of_get_property(np, "ibm,xive-lisn-ranges", &len);
if (!reg) {
pr_err("Failed to read 'ibm,xive-lisn-ranges' property\n");
- return false;
+ goto err_unmap;
}
if (len % (2 * sizeof(u32)) != 0) {
pr_err("invalid 'ibm,xive-lisn-ranges' property\n");
- return false;
+ goto err_unmap;
}
- for (i = 0; i < len / (2 * sizeof(u32)); i++, reg += 2)
- xive_irq_bitmap_add(be32_to_cpu(reg[0]),
- be32_to_cpu(reg[1]));
+ for (i = 0; i < len / (2 * sizeof(u32)); i++, reg += 2) {
+ err = xive_irq_bitmap_add(be32_to_cpu(reg[0]),
+ be32_to_cpu(reg[1]));
+ if (err < 0)
+ goto err_mem_free;
+ }
/* Iterate the EQ sizes and pick one */
of_property_for_each_u32(np, "ibm,xive-eq-sizes", prop, reg, val) {
@@ -833,9 +872,20 @@ bool __init xive_spapr_init(void)
}
/* Initialize XIVE core with our backend */
- if (!xive_core_init(&xive_spapr_ops, tima, TM_QW1_OS, max_prio))
- return false;
+ if (!xive_core_init(np, &xive_spapr_ops, tima, TM_QW1_OS, max_prio))
+ goto err_mem_free;
+ of_node_put(np);
pr_info("Using %dkB queues\n", 1 << (xive_queue_shift - 10));
return true;
+
+err_mem_free:
+ xive_irq_bitmap_remove_all();
+err_unmap:
+ iounmap(tima);
+err_put:
+ of_node_put(np);
+ return false;
}
+
+machine_arch_initcall(pseries, xive_core_debug_init);
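
The queue setup and cleanup hunks above bracket the event queue pages with uv_share_page()/uv_unshare_page() when running as a secure guest, since pages handed to the hypervisor via H_INT_SET_QUEUE_CONFIG must be explicitly shared with it and unshared again before being freed. The gate is the MSR "S" bit; a minimal sketch of the check, assuming the usual asm/svm.h definition (not shown in this excerpt):

/* Sketch only -- assumed definition of the secure-guest test. */
#ifdef CONFIG_PPC_SVM
static inline bool is_secure_guest(void)
{
	return mfmsr() & MSR_S;
}
#else
static inline bool is_secure_guest(void)
{
	return false;
}
#endif
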
diff --git a/arch/powerpc/sysdev/xive/xive-internal.h b/arch/powerpc/sysdev/xive/xive-internal.h
index 59cd366e7933..fe6d95d54af9 100644
--- a/arch/powerpc/sysdev/xive/xive-internal.h
+++ b/arch/powerpc/sysdev/xive/xive-internal.h
@@ -5,6 +5,13 @@
#ifndef __XIVE_INTERNAL_H
#define __XIVE_INTERNAL_H
+/*
+ * A "disabled" interrupt should never fire, to catch problems
+ * we set its logical number to this
+ */
+#define XIVE_BAD_IRQ 0x7fffffff
+#define XIVE_MAX_IRQ (XIVE_BAD_IRQ - 1)
+
/* Each CPU carry one of these with various per-CPU state */
struct xive_cpu {
#ifdef CONFIG_SMP
@@ -37,25 +44,28 @@ struct xive_ops {
u32 *sw_irq);
int (*setup_queue)(unsigned int cpu, struct xive_cpu *xc, u8 prio);
void (*cleanup_queue)(unsigned int cpu, struct xive_cpu *xc, u8 prio);
+ void (*prepare_cpu)(unsigned int cpu, struct xive_cpu *xc);
void (*setup_cpu)(unsigned int cpu, struct xive_cpu *xc);
void (*teardown_cpu)(unsigned int cpu, struct xive_cpu *xc);
bool (*match)(struct device_node *np);
void (*shutdown)(void);
void (*update_pending)(struct xive_cpu *xc);
- void (*eoi)(u32 hw_irq);
void (*sync_source)(u32 hw_irq);
u64 (*esb_rw)(u32 hw_irq, u32 offset, u64 data, bool write);
#ifdef CONFIG_SMP
int (*get_ipi)(unsigned int cpu, struct xive_cpu *xc);
void (*put_ipi)(unsigned int cpu, struct xive_cpu *xc);
#endif
+ int (*debug_show)(struct seq_file *m, void *private);
+ int (*debug_create)(struct dentry *xive_dir);
const char *name;
};
-bool xive_core_init(const struct xive_ops *ops, void __iomem *area, u32 offset,
- u8 max_prio);
+bool xive_core_init(struct device_node *np, const struct xive_ops *ops,
+ void __iomem *area, u32 offset, u8 max_prio);
__be32 *xive_queue_page_alloc(unsigned int cpu, u32 queue_shift);
+int xive_core_debug_init(void);
static inline u32 xive_alloc_order(u32 queue_shift)
{
@@ -63,5 +73,6 @@ static inline u32 xive_alloc_order(u32 queue_shift)
}
extern bool xive_cmdline_disabled;
+extern bool xive_has_save_restore;
#endif /* __XIVE_INTERNAL_H */
diff --git a/arch/powerpc/tools/checkpatch.sh b/arch/powerpc/tools/checkpatch.sh
index 3ce5c093b19d..91c04802ec31 100755
--- a/arch/powerpc/tools/checkpatch.sh
+++ b/arch/powerpc/tools/checkpatch.sh
@@ -9,7 +9,6 @@ script_base=$(realpath $(dirname $0))
exec $script_base/../../../scripts/checkpatch.pl \
--subjective \
--no-summary \
- --max-line-length=90 \
--show-types \
--ignore ARCH_INCLUDE_LINUX \
--ignore BIT_MACRO \
diff --git a/arch/powerpc/tools/head_check.sh b/arch/powerpc/tools/head_check.sh
index ad9e57209aa4..689907cda996 100644
--- a/arch/powerpc/tools/head_check.sh
+++ b/arch/powerpc/tools/head_check.sh
@@ -31,8 +31,10 @@
# level entry code (boot, interrupt vectors, etc) until r2 is set up. This
# could cause the kernel to die in early boot.
-# Turn this on if you want more debug output:
-# set -x
+# Allow for verbose output
+if [ "$V" = "1" ]; then
+ set -x
+fi
if [ $# -lt 2 ]; then
echo "$0 [path to nm] [path to vmlinux]" 1>&2
@@ -44,33 +46,33 @@ nm="$1"
vmlinux="$2"
# gcc-4.6-era toolchain make _stext an A (absolute) symbol rather than T
-$nm "$vmlinux" | grep -e " [TA] _stext$" -e " t start_first_256B$" -e " a text_start$" -e " t start_text$" -m4 > .tmp_symbols.txt
+$nm "$vmlinux" | grep -e " [TA] _stext$" -e " t start_first_256B$" -e " a text_start$" -e " t start_text$" > .tmp_symbols.txt
-vma=$(cat .tmp_symbols.txt | grep -e " [TA] _stext$" | cut -d' ' -f1)
+vma=$(grep -e " [TA] _stext$" .tmp_symbols.txt | cut -d' ' -f1)
-expected_start_head_addr=$vma
+expected_start_head_addr="$vma"
-start_head_addr=$(cat .tmp_symbols.txt | grep " t start_first_256B$" | cut -d' ' -f1)
+start_head_addr=$(grep " t start_first_256B$" .tmp_symbols.txt | cut -d' ' -f1)
if [ "$start_head_addr" != "$expected_start_head_addr" ]; then
- echo "ERROR: head code starts at $start_head_addr, should be $expected_start_head_addr"
- echo "ERROR: try to enable LD_HEAD_STUB_CATCH config option"
- echo "ERROR: see comments in arch/powerpc/tools/head_check.sh"
+ echo "ERROR: head code starts at $start_head_addr, should be $expected_start_head_addr" 1>&2
+ echo "ERROR: try to enable LD_HEAD_STUB_CATCH config option" 1>&2
+ echo "ERROR: see comments in arch/powerpc/tools/head_check.sh" 1>&2
exit 1
fi
-top_vma=$(echo $vma | cut -d'0' -f1)
+top_vma=$(echo "$vma" | cut -d'0' -f1)
-expected_start_text_addr=$(cat .tmp_symbols.txt | grep " a text_start$" | cut -d' ' -f1 | sed "s/^0/$top_vma/")
+expected_start_text_addr=$(grep " a text_start$" .tmp_symbols.txt | cut -d' ' -f1 | sed "s/^0/$top_vma/")
-start_text_addr=$(cat .tmp_symbols.txt | grep " t start_text$" | cut -d' ' -f1)
+start_text_addr=$(grep " t start_text$" .tmp_symbols.txt | cut -d' ' -f1)
if [ "$start_text_addr" != "$expected_start_text_addr" ]; then
- echo "ERROR: start_text address is $start_text_addr, should be $expected_start_text_addr"
- echo "ERROR: try to enable LD_HEAD_STUB_CATCH config option"
- echo "ERROR: see comments in arch/powerpc/tools/head_check.sh"
+ echo "ERROR: start_text address is $start_text_addr, should be $expected_start_text_addr" 1>&2
+ echo "ERROR: try to enable LD_HEAD_STUB_CATCH config option" 1>&2
+ echo "ERROR: see comments in arch/powerpc/tools/head_check.sh" 1>&2
exit 1
fi
diff --git a/arch/powerpc/tools/relocs_check.sh b/arch/powerpc/tools/relocs_check.sh
index 014e00e74d2b..63792af00417 100755
--- a/arch/powerpc/tools/relocs_check.sh
+++ b/arch/powerpc/tools/relocs_check.sh
@@ -39,6 +39,7 @@ $objdump -R "$vmlinux" |
# R_PPC_NONE
grep -F -w -v 'R_PPC64_RELATIVE
R_PPC64_NONE
+R_PPC64_UADDR64
R_PPC_ADDR16_LO
R_PPC_ADDR16_HI
R_PPC_ADDR16_HA
@@ -54,9 +55,3 @@ fi
num_bad=$(echo "$bad_relocs" | wc -l)
echo "WARNING: $num_bad bad relocations"
echo "$bad_relocs"
-
-# If we see this type of relocation it's an idication that
-# we /may/ be using an old version of binutils.
-if echo "$bad_relocs" | grep -q -F -w R_PPC64_UADDR64; then
- echo "WARNING: You need at least binutils >= 2.19 to build a CONFIG_RELOCATABLE kernel"
-fi
diff --git a/arch/powerpc/tools/unrel_branch_check.sh b/arch/powerpc/tools/unrel_branch_check.sh
index 77114755dc6f..8301efee1e6c 100755
--- a/arch/powerpc/tools/unrel_branch_check.sh
+++ b/arch/powerpc/tools/unrel_branch_check.sh
@@ -1,57 +1,79 @@
-# Copyright © 2016 IBM Corporation
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright © 2016,2020 IBM Corporation
#
-# This program is free software; you can redistribute it and/or
-# modify it under the terms of the GNU General Public License
-# as published by the Free Software Foundation; either version
-# 2 of the License, or (at your option) any later version.
-#
-# This script checks the relocations of a vmlinux for "suspicious"
-# branches from unrelocated code (head_64.S code).
-
-# Turn this on if you want more debug output:
-# set -x
+# This script checks the unrelocated code of a vmlinux (head_64.S code)
+# for "suspicious" branches into relocated code.
-# Have Kbuild supply the path to objdump so we handle cross compilation.
+# Have Kbuild supply the path to objdump and nm so we handle cross compilation.
objdump="$1"
-vmlinux="$2"
-
-#__end_interrupts should be located within the first 64K
-
-end_intr=0x$(
-$objdump -R "$vmlinux" -d --start-address=0xc000000000000000 \
- --stop-address=0xc000000000010000 |
-grep '\<__end_interrupts>:' |
-awk '{print $1}'
-)
-
-BRANCHES=$(
-$objdump -R "$vmlinux" -D --start-address=0xc000000000000000 \
- --stop-address=${end_intr} |
-grep -e "^c[0-9a-f]*:[[:space:]]*\([0-9a-f][0-9a-f][[:space:]]\)\{4\}[[:space:]]*b" |
-grep -v '\<__start_initialization_multiplatform>' |
-grep -v -e 'b.\?.\?ctr' |
-grep -v -e 'b.\?.\?lr' |
-sed 's/://' |
-awk '{ print $1 ":" $6 ":0x" $7 ":" $8 " "}'
-)
-
-for tuple in $BRANCHES
-do
- from=`echo $tuple | cut -d':' -f1`
- branch=`echo $tuple | cut -d':' -f2`
- to=`echo $tuple | cut -d':' -f3 | sed 's/cr[0-7],//'`
- sym=`echo $tuple | cut -d':' -f4`
-
- if (( $to > $end_intr ))
- then
- if [ -z "$bad_branches" ]; then
- echo "WARNING: Unrelocated relative branches"
- bad_branches="yes"
+nm="$2"
+vmlinux="$3"
+
+kstart=0xc000000000000000
+
+end_intr=0x$($nm -p "$vmlinux" |
+ sed -E -n '/\s+[[:alpha:]]\s+__end_interrupts\s*$/{s///p;q}')
+if [ "$end_intr" = "0x" ]; then
+ exit 0
+fi
+
+# We know that there is a correct branch to
+# __start_initialization_multiplatform, so find its address
+# in order to exclude it.
+sim=0x$($nm -p "$vmlinux" |
+ sed -E -n '/\s+[[:alpha:]]\s+__start_initialization_multiplatform\s*$/{s///p;q}')
+
+$objdump -D --no-show-raw-insn --start-address="$kstart" --stop-address="$end_intr" "$vmlinux" |
+sed -E -n '
+# match lines that start with a kernel address
+/^c[0-9a-f]*:\s*b/ {
+ # drop branches via ctr or lr
+ /\<b.?.?(ct|l)r/d
+ # cope with some differences between Clang and GNU objdumps
+ s/\<bt.?\s*[[:digit:]]+,/beq/
+ s/\<bf.?\s*[[:digit:]]+,/bne/
+ # tidy up
+ s/\s0x/ /
+ s/://
+ # format for the loop below
+ s/^(\S+)\s+(\S+)\s+(\S+)\s*(\S*).*$/\1:\2:\3:\4/
+ # strip out condition registers
+ s/:cr[0-7],/:/
+ p
+}' | {
+
+all_good=true
+while IFS=: read -r from branch to sym; do
+ case "$to" in
+ c*) to="0x$to"
+ ;;
+ .+*)
+ to=${to#.+}
+ if [ "$branch" = 'b' ]; then
+ if (( to >= 0x2000000 )); then
+ to=$(( to - 0x4000000 ))
+ fi
+ elif (( to >= 0x8000 )); then
+ to=$(( to - 0x10000 ))
+ fi
+ printf -v to '0x%x' $(( "0x$from" + to ))
+ ;;
+ *) printf 'Unknown branch format\n'
+ ;;
+ esac
+ if [ "$to" = "$sim" ]; then
+ continue
+ fi
+ if (( to > end_intr )); then
+ if $all_good; then
+ printf '%s\n' 'WARNING: Unrelocated relative branches'
+ all_good=false
fi
- echo "$from $branch-> $to $sym"
+ printf '%s %s-> %s %s\n' "$from" "$branch" "$to" "$sym"
fi
done
-if [ -z "$bad_branches" ]; then
- exit 0
-fi
+$all_good
+
+}
diff --git a/arch/powerpc/xmon/Makefile b/arch/powerpc/xmon/Makefile
index c3842dbeb1b7..eb25d7554ffd 100644
--- a/arch/powerpc/xmon/Makefile
+++ b/arch/powerpc/xmon/Makefile
@@ -1,17 +1,13 @@
# SPDX-License-Identifier: GPL-2.0
# Makefile for xmon
-# Avoid clang warnings around longjmp/setjmp declarations
-subdir-ccflags-y := -ffreestanding
-
GCOV_PROFILE := n
KCOV_INSTRUMENT := n
UBSAN_SANITIZE := n
KASAN_SANITIZE := n
# Disable ftrace for the entire directory
-ORIG_CFLAGS := $(KBUILD_CFLAGS)
-KBUILD_CFLAGS = $(subst $(CC_FLAGS_FTRACE),,$(ORIG_CFLAGS))
+ccflags-remove-$(CONFIG_FUNCTION_TRACER) += $(CC_FLAGS_FTRACE)
ifdef CONFIG_CC_IS_CLANG
# clang stores addresses on the stack causing the frame size to blow
@@ -21,7 +17,7 @@ endif
ccflags-$(CONFIG_PPC64) := $(NO_MINIMAL_TOC)
-obj-y += xmon.o nonstdio.o spr_access.o
+obj-y += xmon.o nonstdio.o spr_access.o xmon_bpts.o
ifdef CONFIG_XMON_DISASSEMBLY
obj-y += ppc-dis.o ppc-opc.o
diff --git a/arch/powerpc/xmon/nonstdio.c b/arch/powerpc/xmon/nonstdio.c
index 5c1a50912229..9b0d85bff021 100644
--- a/arch/powerpc/xmon/nonstdio.c
+++ b/arch/powerpc/xmon/nonstdio.c
@@ -178,7 +178,7 @@ void xmon_printf(const char *format, ...)
if (n && rc == 0) {
/* No udbg hooks, fallback to printk() - dangerous */
- printk("%s", xmon_outbuf);
+ pr_cont("%s", xmon_outbuf);
}
}
diff --git a/arch/powerpc/xmon/ppc-opc.c b/arch/powerpc/xmon/ppc-opc.c
index dfb80810b16c..0774d711453e 100644
--- a/arch/powerpc/xmon/ppc-opc.c
+++ b/arch/powerpc/xmon/ppc-opc.c
@@ -408,7 +408,7 @@ const struct powerpc_operand powerpc_operands[] =
#define FXM4 FXM + 1
{ 0xff, 12, insert_fxm, extract_fxm,
PPC_OPERAND_OPTIONAL | PPC_OPERAND_OPTIONAL_VALUE},
- /* If the FXM4 operand is ommitted, use the sentinel value -1. */
+ /* If the FXM4 operand is omitted, use the sentinel value -1. */
{ -1, -1, NULL, NULL, 0},
/* The IMM20 field in an LI instruction. */
diff --git a/arch/powerpc/xmon/ppc.h b/arch/powerpc/xmon/ppc.h
index d00f33dcf192..1d98b8dd134e 100644
--- a/arch/powerpc/xmon/ppc.h
+++ b/arch/powerpc/xmon/ppc.h
@@ -435,8 +435,6 @@ struct powerpc_macro
extern const struct powerpc_macro powerpc_macros[];
extern const int powerpc_num_macros;
-extern ppc_cpu_t ppc_parse_cpu (ppc_cpu_t, ppc_cpu_t *, const char *);
-
static inline long
ppc_optional_operand_value (const struct powerpc_operand *operand)
{
diff --git a/arch/powerpc/xmon/spr_access.S b/arch/powerpc/xmon/spr_access.S
index 720a52afdd58..c308ddf268fb 100644
--- a/arch/powerpc/xmon/spr_access.S
+++ b/arch/powerpc/xmon/spr_access.S
@@ -4,12 +4,12 @@
/* unsigned long xmon_mfspr(sprn, default_value) */
_GLOBAL(xmon_mfspr)
- PPC_LL r5, .Lmfspr_table@got(r2)
+ LOAD_REG_ADDR(r5, .Lmfspr_table)
b xmon_mxspr
/* void xmon_mtspr(sprn, new_value) */
_GLOBAL(xmon_mtspr)
- PPC_LL r5, .Lmtspr_table@got(r2)
+ LOAD_REG_ADDR(r5, .Lmtspr_table)
b xmon_mxspr
/*
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index 0ec9640335bb..f51c882bf902 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -26,16 +26,14 @@
#include <linux/ctype.h>
#include <linux/highmem.h>
#include <linux/security.h>
+#include <linux/debugfs.h>
-#include <asm/debugfs.h>
#include <asm/ptrace.h>
#include <asm/smp.h>
#include <asm/string.h>
-#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/xmon.h>
#include <asm/processor.h>
-#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/plpar_wrappers.h>
@@ -54,6 +52,8 @@
#include <asm/firmware.h>
#include <asm/code-patching.h>
#include <asm/sections.h>
+#include <asm/inst.h>
+#include <asm/interrupt.h>
#ifdef CONFIG_PPC64
#include <asm/hvcall.h>
@@ -62,12 +62,16 @@
#include "nonstdio.h"
#include "dis-asm.h"
+#include "xmon_bpts.h"
#ifdef CONFIG_SMP
static cpumask_t cpus_in_xmon = CPU_MASK_NONE;
static unsigned long xmon_taken = 1;
static int xmon_owner;
static int xmon_gate;
+static int xmon_batch;
+static unsigned long xmon_batch_start_cpu;
+static cpumask_t xmon_batch_cpus = CPU_MASK_NONE;
#else
#define xmon_owner 0
#endif /* CONFIG_SMP */
@@ -81,8 +85,9 @@ static bool xmon_is_ro = IS_ENABLED(CONFIG_XMON_DEFAULT_RO_MODE);
static unsigned long adrs;
static int size = 1;
-#define MAX_DUMP (128 * 1024)
+#define MAX_DUMP (64 * 1024)
static unsigned long ndump = 64;
+#define MAX_IDUMP (MAX_DUMP >> 2)
static unsigned long nidump = 16;
static unsigned long ncsum = 4096;
static int termch;
@@ -97,7 +102,7 @@ static long *xmon_fault_jmp[NR_CPUS];
/* Breakpoint stuff */
struct bpt {
unsigned long address;
- unsigned int instr[2];
+ u32 *instr;
atomic_t ref_count;
int enabled;
unsigned long pad;
@@ -108,11 +113,10 @@ struct bpt {
#define BP_TRAP 2
#define BP_DABR 4
-#define NBPTS 256
static struct bpt bpts[NBPTS];
-static struct bpt dabr;
+static struct bpt dabr[HBP_NUM_MAX];
static struct bpt *iabr;
-static unsigned bpinstr = 0x7fe00008; /* trap */
+static unsigned int bpinstr = PPC_RAW_TRAP();
#define BP_NUM(bp) ((bp) - bpts + 1)
@@ -120,6 +124,7 @@ static unsigned bpinstr = 0x7fe00008; /* trap */
static int cmds(struct pt_regs *);
static int mread(unsigned long, void *, int);
static int mwrite(unsigned long, void *, int);
+static int mread_instr(unsigned long, ppc_inst_t *);
static int handle_fault(struct pt_regs *);
static void byterev(unsigned char *, int);
static void memex(void);
@@ -130,6 +135,12 @@ static void prdump(unsigned long, long);
static int ppc_inst_dump(unsigned long, long, int);
static void dump_log_buf(void);
+#ifdef CONFIG_SMP
+static int xmon_switch_cpu(unsigned long);
+static int xmon_batch_next_cpu(void);
+static int batch_cmds(struct pt_regs *);
+#endif
+
#ifdef CONFIG_PPC_POWERNV
static void dump_opal_msglog(void);
#else
@@ -184,7 +195,7 @@ static int do_spu_cmd(void);
#ifdef CONFIG_44x
static void dump_tlb_44x(void);
#endif
-#ifdef CONFIG_PPC_BOOK3E
+#ifdef CONFIG_PPC_BOOK3E_64
static void dump_tlb_book3e(void);
#endif
@@ -213,7 +224,8 @@ Commands:\n\
#ifdef CONFIG_SMP
"\
c print cpus stopped in xmon\n\
- c# try to switch to cpu number h (in hex)\n"
+ c# try to switch to cpu number h (in hex)\n\
+ c# $ run command '$' (one of 'r','S' or 't') on all cpus in xmon\n"
#endif
"\
C checksum\n\
@@ -276,11 +288,11 @@ Commands:\n\
t print backtrace\n\
x exit monitor and recover\n\
X exit monitor and don't recover\n"
-#if defined(CONFIG_PPC64) && !defined(CONFIG_PPC_BOOK3E)
+#if defined(CONFIG_PPC_BOOK3S_64)
" u dump segment table or SLB\n"
#elif defined(CONFIG_PPC_BOOK3S_32)
" u dump segment registers\n"
-#elif defined(CONFIG_44x) || defined(CONFIG_PPC_BOOK3E)
+#elif defined(CONFIG_44x) || defined(CONFIG_PPC_BOOK3E_64)
" u dump TLB\n"
#endif
" U show uptime information\n"
@@ -325,11 +337,6 @@ static inline void sync(void)
asm volatile("sync; isync");
}
-static inline void store_inst(void *p)
-{
- asm volatile ("dcbst 0,%0; sync; icbi 0,%0; isync" : : "r" (p));
-}
-
static inline void cflush(void *p)
{
asm volatile ("dcbf 0,%0; icbi 0,%0" : : "r" (p));
@@ -365,7 +372,7 @@ static void write_ciabr(unsigned long ciabr)
* set_ciabr() - set the CIABR
* @addr: The value to set.
*
- * This function sets the correct privilege value into the the HW
+ * This function sets the correct privilege value into the HW
* breakpoint address before writing it up in the CIABR register.
*/
static void set_ciabr(unsigned long addr)
@@ -474,20 +481,17 @@ static inline void get_output_lock(void) {}
static inline void release_output_lock(void) {}
#endif
-static inline int unrecoverable_excp(struct pt_regs *regs)
+static void xmon_touch_watchdogs(void)
{
-#if defined(CONFIG_4xx) || defined(CONFIG_PPC_BOOK3E)
- /* We have no MSR_RI bit on 4xx or Book3e, so we simply return false */
- return 0;
-#else
- return ((regs->msr & MSR_RI) == 0);
-#endif
+ touch_softlockup_watchdog_sync();
+ rcu_cpu_stall_reset();
+ touch_nmi_watchdog();
}
-static int xmon_core(struct pt_regs *regs, int fromipi)
+static int xmon_core(struct pt_regs *regs, volatile int fromipi)
{
- int cmd = 0;
- struct bpt *bp;
+ volatile int cmd = 0;
+ struct bpt *volatile bp;
long recurse_jmp[JMP_BUF_LEN];
bool locked_down;
unsigned long offset;
@@ -509,7 +513,7 @@ static int xmon_core(struct pt_regs *regs, int fromipi)
bp = in_breakpoint_table(regs->nip, &offset);
if (bp != NULL) {
- regs->nip = bp->address + offset;
+ regs_set_return_ip(regs, bp->address + offset);
atomic_dec(&bp->ref_count);
}
@@ -550,7 +554,7 @@ static int xmon_core(struct pt_regs *regs, int fromipi)
bp = NULL;
if ((regs->msr & (MSR_IR|MSR_PR|MSR_64BIT)) == (MSR_IR|MSR_64BIT))
bp = at_breakpoint(regs->nip);
- if (bp || unrecoverable_excp(regs))
+ if (bp || regs_is_unrecoverable(regs))
fromipi = 0;
if (!fromipi) {
@@ -562,7 +566,7 @@ static int xmon_core(struct pt_regs *regs, int fromipi)
cpu, BP_NUM(bp));
xmon_print_symbol(regs->nip, " ", ")\n");
}
- if (unrecoverable_excp(regs))
+ if (regs_is_unrecoverable(regs))
printf("WARNING: exception is not recoverable, "
"can't continue\n");
release_output_lock();
@@ -601,7 +605,7 @@ static int xmon_core(struct pt_regs *regs, int fromipi)
* debugger break (IPI). This is similar to
* crash_kexec_secondary().
*/
- if (TRAP(regs) != 0x100 || !wait_for_other_cpus(ncpus))
+ if (TRAP(regs) != INTERRUPT_SYSTEM_RESET || !wait_for_other_cpus(ncpus))
smp_send_debugger_break();
wait_for_other_cpus(ncpus);
@@ -611,7 +615,7 @@ static int xmon_core(struct pt_regs *regs, int fromipi)
if (!locked_down) {
/* for breakpoint or single step, print curr insn */
- if (bp || TRAP(regs) == 0xd00)
+ if (bp || TRAP(regs) == INTERRUPT_TRACE)
ppc_inst_dump(regs->nip, 1, 0);
printf("enter ? for help\n");
}
@@ -639,7 +643,12 @@ static int xmon_core(struct pt_regs *regs, int fromipi)
spin_cpu_relax();
touch_nmi_watchdog();
} else {
- if (!locked_down)
+ cmd = 1;
+#ifdef CONFIG_SMP
+ if (xmon_batch)
+ cmd = batch_cmds(regs);
+#endif
+ if (!locked_down && cmd)
cmd = cmds(regs);
if (locked_down || cmd != 0) {
/* exiting xmon */
@@ -673,14 +682,14 @@ static int xmon_core(struct pt_regs *regs, int fromipi)
printf("Stopped at breakpoint %tx (", BP_NUM(bp));
xmon_print_symbol(regs->nip, " ", ")\n");
}
- if (unrecoverable_excp(regs))
+ if (regs_is_unrecoverable(regs))
printf("WARNING: exception is not recoverable, "
"can't continue\n");
remove_bpts();
disable_surveillance();
if (!locked_down) {
/* for breakpoint or single step, print current insn */
- if (bp || TRAP(regs) == 0xd00)
+ if (bp || TRAP(regs) == INTERRUPT_TRACE)
ppc_inst_dump(regs->nip, 1, 0);
printf("enter ? for help\n");
}
@@ -697,7 +706,7 @@ static int xmon_core(struct pt_regs *regs, int fromipi)
if (regs->msr & MSR_DE) {
bp = at_breakpoint(regs->nip);
if (bp != NULL) {
- regs->nip = (unsigned long) &bp->instr[0];
+ regs_set_return_ip(regs, (unsigned long) &bp->instr[0]);
atomic_inc(&bp->ref_count);
}
}
@@ -705,13 +714,13 @@ static int xmon_core(struct pt_regs *regs, int fromipi)
if ((regs->msr & (MSR_IR|MSR_PR|MSR_64BIT)) == (MSR_IR|MSR_64BIT)) {
bp = at_breakpoint(regs->nip);
if (bp != NULL) {
- int stepped = emulate_step(regs, bp->instr[0]);
+ int stepped = emulate_step(regs, ppc_inst_read(bp->instr));
if (stepped == 0) {
- regs->nip = (unsigned long) &bp->instr[0];
+ regs_set_return_ip(regs, (unsigned long) &bp->instr[0]);
atomic_inc(&bp->ref_count);
} else if (stepped < 0) {
printf("Couldn't single-step %s instruction\n",
- (IS_RFID(bp->instr[0])? "rfid": "mtmsrd"));
+ IS_RFID(ppc_inst_read(bp->instr))? "rfid": "mtmsrd");
}
}
}
@@ -721,7 +730,7 @@ static int xmon_core(struct pt_regs *regs, int fromipi)
else
insert_cpu_bpts();
- touch_nmi_watchdog();
+ xmon_touch_watchdogs();
local_irq_restore(flags);
return cmd != 'X' && cmd != EOF;
@@ -760,8 +769,8 @@ static int xmon_bpt(struct pt_regs *regs)
/* Are we at the trap at bp->instr[1] for some bp? */
bp = in_breakpoint_table(regs->nip, &offset);
- if (bp != NULL && offset == 4) {
- regs->nip = bp->address + 4;
+ if (bp != NULL && (offset == 4 || offset == 8)) {
+ regs_set_return_ip(regs, bp->address + offset);
atomic_dec(&bp->ref_count);
return 1;
}
@@ -786,10 +795,17 @@ static int xmon_sstep(struct pt_regs *regs)
static int xmon_break_match(struct pt_regs *regs)
{
+ int i;
+
if ((regs->msr & (MSR_IR|MSR_PR|MSR_64BIT)) != (MSR_IR|MSR_64BIT))
return 0;
- if (dabr.enabled == 0)
- return 0;
+ for (i = 0; i < nr_wp_slots(); i++) {
+ if (dabr[i].enabled)
+ goto found;
+ }
+ return 0;
+
+found:
xmon_core(regs, 0);
return 1;
}
@@ -824,7 +840,7 @@ static int xmon_fault_handler(struct pt_regs *regs)
if ((regs->msr & (MSR_IR|MSR_PR|MSR_64BIT)) == (MSR_IR|MSR_64BIT)) {
bp = in_breakpoint_table(regs->nip, &offset);
if (bp != NULL) {
- regs->nip = bp->address + offset;
+ regs_set_return_ip(regs, bp->address + offset);
atomic_dec(&bp->ref_count);
}
}
@@ -845,7 +861,7 @@ static inline void force_enable_xmon(void)
static struct bpt *at_breakpoint(unsigned long pc)
{
int i;
- struct bpt *bp;
+ struct bpt *volatile bp;
bp = bpts;
for (i = 0; i < NBPTS; ++i, ++bp)
@@ -858,15 +874,13 @@ static struct bpt *in_breakpoint_table(unsigned long nip, unsigned long *offp)
{
unsigned long off;
- off = nip - (unsigned long) bpts;
- if (off >= sizeof(bpts))
+ off = nip - (unsigned long)bpt_table;
+ if (off >= sizeof(bpt_table))
return NULL;
- off %= sizeof(struct bpt);
- if (off != offsetof(struct bpt, instr[0])
- && off != offsetof(struct bpt, instr[1]))
+ *offp = off & (BPT_SIZE - 1);
+ if (off & 3)
return NULL;
- *offp = off - offsetof(struct bpt, instr[0]);
- return (struct bpt *) (nip - off);
+ return bpts + (off / BPT_SIZE);
}
static struct bpt *new_breakpoint(unsigned long a)
@@ -881,8 +895,7 @@ static struct bpt *new_breakpoint(unsigned long a)
for (bp = bpts; bp < &bpts[NBPTS]; ++bp) {
if (!bp->enabled && atomic_read(&bp->ref_count) == 0) {
bp->address = a;
- bp->instr[1] = bpinstr;
- store_inst(&bp->instr[1]);
+ bp->instr = (void *)(bpt_table + ((bp - bpts) * BPT_WORDS));
return bp;
}
}
@@ -894,47 +907,76 @@ static struct bpt *new_breakpoint(unsigned long a)
static void insert_bpts(void)
{
int i;
- struct bpt *bp;
+ ppc_inst_t instr, instr2;
+ struct bpt *bp, *bp2;
bp = bpts;
for (i = 0; i < NBPTS; ++i, ++bp) {
if ((bp->enabled & (BP_TRAP|BP_CIABR)) == 0)
continue;
- if (mread(bp->address, &bp->instr[0], 4) != 4) {
+ if (!mread_instr(bp->address, &instr)) {
printf("Couldn't read instruction at %lx, "
"disabling breakpoint there\n", bp->address);
bp->enabled = 0;
continue;
}
- if (IS_MTMSRD(bp->instr[0]) || IS_RFID(bp->instr[0])) {
- printf("Breakpoint at %lx is on an mtmsrd or rfid "
- "instruction, disabling it\n", bp->address);
+ if (!can_single_step(ppc_inst_val(instr))) {
+ printf("Breakpoint at %lx is on an instruction that can't be single stepped, disabling it\n",
+ bp->address);
+ bp->enabled = 0;
+ continue;
+ }
+ /*
+ * Check the address is not a suffix by looking for a prefix in
+ * front of it.
+ */
+ if (mread_instr(bp->address - 4, &instr2) == 8) {
+ printf("Breakpoint at %lx is on the second word of a prefixed instruction, disabling it\n",
+ bp->address);
+ bp->enabled = 0;
+ continue;
+ }
+ /*
+ * We might still be a suffix - if the prefix has already been
+ * replaced by a breakpoint we won't catch it with the above
+ * test.
+ */
+ bp2 = at_breakpoint(bp->address - 4);
+ if (bp2 && ppc_inst_prefixed(ppc_inst_read(bp2->instr))) {
+ printf("Breakpoint at %lx is on the second word of a prefixed instruction, disabling it\n",
+ bp->address);
bp->enabled = 0;
continue;
}
- store_inst(&bp->instr[0]);
+
+ patch_instruction(bp->instr, instr);
+ patch_instruction(ppc_inst_next(bp->instr, bp->instr),
+ ppc_inst(bpinstr));
if (bp->enabled & BP_CIABR)
continue;
- if (patch_instruction((unsigned int *)bp->address,
- bpinstr) != 0) {
+ if (patch_instruction((u32 *)bp->address,
+ ppc_inst(bpinstr)) != 0) {
printf("Couldn't write instruction at %lx, "
"disabling breakpoint there\n", bp->address);
bp->enabled &= ~BP_TRAP;
continue;
}
- store_inst((void *)bp->address);
}
}
static void insert_cpu_bpts(void)
{
+ int i;
struct arch_hw_breakpoint brk;
- if (dabr.enabled) {
- brk.address = dabr.address;
- brk.type = (dabr.enabled & HW_BRK_TYPE_DABR) | HW_BRK_TYPE_PRIV_ALL;
- brk.len = DABR_MAX_LEN;
- __set_breakpoint(&brk);
+ for (i = 0; i < nr_wp_slots(); i++) {
+ if (dabr[i].enabled) {
+ brk.address = dabr[i].address;
+ brk.type = (dabr[i].enabled & HW_BRK_TYPE_DABR) | HW_BRK_TYPE_PRIV_ALL;
+ brk.len = 8;
+ brk.hw_len = 8;
+ __set_breakpoint(i, &brk);
+ }
}
if (iabr)
@@ -945,20 +987,18 @@ static void remove_bpts(void)
{
int i;
struct bpt *bp;
- unsigned instr;
+ ppc_inst_t instr;
bp = bpts;
for (i = 0; i < NBPTS; ++i, ++bp) {
if ((bp->enabled & (BP_TRAP|BP_CIABR)) != BP_TRAP)
continue;
- if (mread(bp->address, &instr, 4) == 4
- && instr == bpinstr
+ if (mread_instr(bp->address, &instr)
+ && ppc_inst_equal(instr, ppc_inst(bpinstr))
&& patch_instruction(
- (unsigned int *)bp->address, bp->instr[0]) != 0)
+ (u32 *)bp->address, ppc_inst_read(bp->instr)) != 0)
printf("Couldn't remove breakpoint at %lx\n",
bp->address);
- else
- store_inst((void *)bp->address);
}
}
@@ -1118,7 +1158,7 @@ cmds(struct pt_regs *excp)
case 'P':
show_tasks();
break;
-#ifdef CONFIG_PPC_BOOK3S
+#if defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_PPC_64S_HASH_MMU)
case 'u':
dump_segments();
break;
@@ -1126,7 +1166,7 @@ cmds(struct pt_regs *excp)
case 'u':
dump_tlb_44x();
break;
-#elif defined(CONFIG_PPC_BOOK3E)
+#elif defined(CONFIG_PPC_BOOK3E_64)
case 'u':
dump_tlb_book3e();
break;
@@ -1152,7 +1192,7 @@ cmds(struct pt_regs *excp)
#ifdef CONFIG_BOOKE
static int do_step(struct pt_regs *regs)
{
- regs->msr |= MSR_DE;
+ regs_set_return_msr(regs, regs->msr | MSR_DE);
mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) | DBCR0_IC | DBCR0_IDM);
return 1;
}
@@ -1163,13 +1203,13 @@ static int do_step(struct pt_regs *regs)
*/
static int do_step(struct pt_regs *regs)
{
- unsigned int instr;
+ ppc_inst_t instr;
int stepped;
force_enable_xmon();
/* check we are in 64-bit kernel mode, translation enabled */
if ((regs->msr & (MSR_64BIT|MSR_PR|MSR_IR)) == (MSR_64BIT|MSR_IR)) {
- if (mread(regs->nip, &instr, 4) == 4) {
+ if (mread_instr(regs->nip, &instr)) {
stepped = emulate_step(regs, instr);
if (stepped < 0) {
printf("Couldn't single-step %s instruction\n",
@@ -1177,7 +1217,7 @@ static int do_step(struct pt_regs *regs)
return 0;
}
if (stepped > 0) {
- regs->trap = 0xd00 | (regs->trap & 1);
+ set_trap(regs, 0xd00);
printf("stepped to ");
xmon_print_symbol(regs->nip, " ", "\n");
ppc_inst_dump(regs->nip, 1, 0);
@@ -1185,7 +1225,7 @@ static int do_step(struct pt_regs *regs)
}
}
}
- regs->msr |= MSR_SE;
+ regs_set_return_msr(regs, regs->msr | MSR_SE);
return 1;
}
#endif
@@ -1202,16 +1242,116 @@ static void bootcmds(void)
} else if (cmd == 'h') {
ppc_md.halt();
} else if (cmd == 'p') {
- if (pm_power_off)
- pm_power_off();
+ do_kernel_power_off();
}
}
+#ifdef CONFIG_SMP
+static int xmon_switch_cpu(unsigned long cpu)
+{
+ int timeout;
+
+ xmon_taken = 0;
+ mb();
+ xmon_owner = cpu;
+ timeout = 10000000;
+ while (!xmon_taken) {
+ if (--timeout == 0) {
+ if (test_and_set_bit(0, &xmon_taken))
+ break;
+ /* take control back */
+ mb();
+ xmon_owner = smp_processor_id();
+ printf("cpu 0x%lx didn't take control\n", cpu);
+ return 0;
+ }
+ barrier();
+ }
+ return 1;
+}
+
+static int xmon_batch_next_cpu(void)
+{
+ unsigned long cpu;
+
+ while (!cpumask_empty(&xmon_batch_cpus)) {
+ cpu = cpumask_next_wrap(smp_processor_id(), &xmon_batch_cpus,
+ xmon_batch_start_cpu, true);
+ if (cpu == nr_cpumask_bits)
+ break;
+ if (xmon_batch_start_cpu == -1)
+ xmon_batch_start_cpu = cpu;
+ if (xmon_switch_cpu(cpu))
+ return 0;
+ cpumask_clear_cpu(cpu, &xmon_batch_cpus);
+ }
+
+ xmon_batch = 0;
+ printf("%x:mon> \n", smp_processor_id());
+ return 1;
+}
+
+static int batch_cmds(struct pt_regs *excp)
+{
+ int cmd;
+
+ /* simulate command entry */
+ cmd = xmon_batch;
+ termch = '\n';
+
+ last_cmd = NULL;
+ xmon_regs = excp;
+
+ printf("%x:", smp_processor_id());
+ printf("mon> ");
+ printf("%c\n", (char)cmd);
+
+ switch (cmd) {
+ case 'r':
+ prregs(excp); /* print regs */
+ break;
+ case 'S':
+ super_regs();
+ break;
+ case 't':
+ backtrace(excp);
+ break;
+ }
+
+ cpumask_clear_cpu(smp_processor_id(), &xmon_batch_cpus);
+
+ return xmon_batch_next_cpu();
+}
+
static int cpu_cmd(void)
{
-#ifdef CONFIG_SMP
unsigned long cpu, first_cpu, last_cpu;
- int timeout;
+
+ cpu = skipbl();
+ if (cpu == '#') {
+ xmon_batch = skipbl();
+ if (xmon_batch) {
+ switch (xmon_batch) {
+ case 'r':
+ case 'S':
+ case 't':
+ cpumask_copy(&xmon_batch_cpus, &cpus_in_xmon);
+ if (cpumask_weight(&xmon_batch_cpus) <= 1) {
+ printf("There are no other cpus in xmon\n");
+ break;
+ }
+ xmon_batch_start_cpu = -1;
+ if (!xmon_batch_next_cpu())
+ return 1;
+ break;
+ default:
+ printf("c# only supports 'r', 'S' and 't' commands\n");
+ }
+ xmon_batch = 0;
+ return 0;
+ }
+ }
+ termch = cpu;
if (!scanhex(&cpu)) {
/* print cpus waiting or in xmon */
@@ -1243,27 +1383,15 @@ static int cpu_cmd(void)
#endif
return 0;
}
- xmon_taken = 0;
- mb();
- xmon_owner = cpu;
- timeout = 10000000;
- while (!xmon_taken) {
- if (--timeout == 0) {
- if (test_and_set_bit(0, &xmon_taken))
- break;
- /* take control back */
- mb();
- xmon_owner = smp_processor_id();
- printf("cpu 0x%lx didn't take control\n", cpu);
- return 0;
- }
- barrier();
- }
- return 1;
+
+ return xmon_switch_cpu(cpu);
+}
#else
+static int cpu_cmd(void)
+{
return 0;
-#endif /* CONFIG_SMP */
}
+#endif /* CONFIG_SMP */
static unsigned short fcstab[256] = {
0x0000, 0x1189, 0x2312, 0x329b, 0x4624, 0x57ad, 0x6536, 0x74bf,
@@ -1329,25 +1457,53 @@ csum(void)
*/
static long check_bp_loc(unsigned long addr)
{
- unsigned int instr;
+ ppc_inst_t instr;
addr &= ~3;
if (!is_kernel_addr(addr)) {
printf("Breakpoints may only be placed at kernel addresses\n");
return 0;
}
- if (!mread(addr, &instr, sizeof(instr))) {
+ if (!mread_instr(addr, &instr)) {
printf("Can't read instruction at address %lx\n", addr);
return 0;
}
- if (IS_MTMSRD(instr) || IS_RFID(instr)) {
- printf("Breakpoints may not be placed on mtmsrd or rfid "
- "instructions\n");
+ if (!can_single_step(ppc_inst_val(instr))) {
+ printf("Breakpoints may not be placed on instructions that can't be single stepped\n");
return 0;
}
return 1;
}
+static int find_free_data_bpt(void)
+{
+ int i;
+
+ for (i = 0; i < nr_wp_slots(); i++) {
+ if (!dabr[i].enabled)
+ return i;
+ }
+ printf("Couldn't find free breakpoint register\n");
+ return -1;
+}
+
+static void print_data_bpts(void)
+{
+ int i;
+
+ for (i = 0; i < nr_wp_slots(); i++) {
+ if (!dabr[i].enabled)
+ continue;
+
+ printf(" data "REG" [", dabr[i].address);
+ if (dabr[i].enabled & 1)
+ printf("r");
+ if (dabr[i].enabled & 2)
+ printf("w");
+ printf("]\n");
+ }
+}
+
static char *breakpoint_help_string =
"Breakpoint command usage:\n"
"b show breakpoints\n"
@@ -1369,7 +1525,6 @@ bpt_cmds(void)
cmd = inchar();
switch (cmd) {
-#ifndef CONFIG_PPC_8xx
static const char badaddr[] = "Only kernel addresses are permitted for breakpoints\n";
int mode;
case 'd': /* bd - hardware data breakpoint */
@@ -1381,6 +1536,9 @@ bpt_cmds(void)
printf("Hardware data breakpoint not supported on this cpu\n");
break;
}
+ i = find_free_data_bpt();
+ if (i < 0)
+ break;
mode = 7;
cmd = inchar();
if (cmd == 'r')
@@ -1389,15 +1547,15 @@ bpt_cmds(void)
mode = 6;
else
termch = cmd;
- dabr.address = 0;
- dabr.enabled = 0;
- if (scanhex(&dabr.address)) {
- if (!is_kernel_addr(dabr.address)) {
+ dabr[i].address = 0;
+ dabr[i].enabled = 0;
+ if (scanhex(&dabr[i].address)) {
+ if (!is_kernel_addr(dabr[i].address)) {
printf(badaddr);
break;
}
- dabr.address &= ~HW_BRK_TYPE_DABR;
- dabr.enabled = mode | BP_DABR;
+ dabr[i].address &= ~HW_BRK_TYPE_DABR;
+ dabr[i].enabled = mode | BP_DABR;
}
force_enable_xmon();
@@ -1428,7 +1586,6 @@ bpt_cmds(void)
force_enable_xmon();
}
break;
-#endif
case 'c':
if (!scanhex(&a)) {
@@ -1436,7 +1593,9 @@ bpt_cmds(void)
for (i = 0; i < NBPTS; ++i)
bpts[i].enabled = 0;
iabr = NULL;
- dabr.enabled = 0;
+ for (i = 0; i < nr_wp_slots(); i++)
+ dabr[i].enabled = 0;
+
printf("All breakpoints cleared\n");
break;
}
@@ -1470,14 +1629,7 @@ bpt_cmds(void)
if (xmon_is_ro || !scanhex(&a)) {
/* print all breakpoints */
printf(" type address\n");
- if (dabr.enabled) {
- printf(" data "REG" [", dabr.address);
- if (dabr.enabled & 1)
- printf("r");
- if (dabr.enabled & 2)
- printf("w");
- printf("]\n");
- }
+ print_data_bpts();
for (bp = bpts; bp < &bpts[NBPTS]; ++bp) {
if (!bp->enabled)
continue;
@@ -1539,6 +1691,7 @@ const char *getvecname(unsigned long vec)
case 0x1300: ret = "(Instruction Breakpoint)"; break;
case 0x1500: ret = "(Denormalisation)"; break;
case 0x1700: ret = "(Altivec Assist)"; break;
+ case 0x3000: ret = "(System Call Vectored)"; break;
default: ret = "";
}
return ret;
@@ -1682,9 +1835,9 @@ static void print_bug_trap(struct pt_regs *regs)
#ifdef CONFIG_DEBUG_BUGVERBOSE
printf("kernel BUG at %s:%u!\n",
- bug->file, bug->line);
+ (char *)bug + bug->file_disp, bug->line);
#else
- printf("kernel BUG at %px!\n", (void *)bug->bug_addr);
+ printf("kernel BUG at %px!\n", (void *)bug + bug->bug_addr_disp);
#endif
#endif /* CONFIG_BUG */
}
@@ -1708,9 +1861,12 @@ static void excprint(struct pt_regs *fp)
printf(" sp: %lx\n", fp->gpr[1]);
printf(" msr: %lx\n", fp->msr);
- if (trap == 0x300 || trap == 0x380 || trap == 0x600 || trap == 0x200) {
+ if (trap == INTERRUPT_DATA_STORAGE ||
+ trap == INTERRUPT_DATA_SEGMENT ||
+ trap == INTERRUPT_ALIGNMENT ||
+ trap == INTERRUPT_MACHINE_CHECK) {
printf(" dar: %lx\n", fp->dar);
- if (trap != 0x380)
+ if (trap != INTERRUPT_DATA_SEGMENT)
printf(" dsisr: %lx\n", fp->dsisr);
}
@@ -1724,7 +1880,7 @@ static void excprint(struct pt_regs *fp)
current->pid, current->comm);
}
- if (trap == 0x700)
+ if (trap == INTERRUPT_PROGRAM)
print_bug_trap(fp);
printf(linux_banner);
@@ -1754,28 +1910,19 @@ static void prregs(struct pt_regs *fp)
}
#ifdef CONFIG_PPC64
- if (FULL_REGS(fp)) {
- for (n = 0; n < 16; ++n)
- printf("R%.2d = "REG" R%.2d = "REG"\n",
- n, fp->gpr[n], n+16, fp->gpr[n+16]);
- } else {
- for (n = 0; n < 7; ++n)
- printf("R%.2d = "REG" R%.2d = "REG"\n",
- n, fp->gpr[n], n+7, fp->gpr[n+7]);
- }
+#define R_PER_LINE 2
#else
+#define R_PER_LINE 4
+#endif
+
for (n = 0; n < 32; ++n) {
- printf("R%.2d = %.8lx%s", n, fp->gpr[n],
- (n & 3) == 3? "\n": " ");
- if (n == 12 && !FULL_REGS(fp)) {
- printf("\n");
- break;
- }
+ printf("R%.2d = "REG"%s", n, fp->gpr[n],
+ (n % R_PER_LINE) == R_PER_LINE - 1 ? "\n" : " ");
}
-#endif
+
printf("pc = ");
xmon_print_symbol(fp->nip, " ", "\n");
- if (TRAP(fp) != 0xc00 && cpu_has_feature(CPU_FTR_CFAR)) {
+ if (!trap_is_syscall(fp) && cpu_has_feature(CPU_FTR_CFAR)) {
printf("cfar= ");
xmon_print_symbol(fp->orig_gpr3, " ", "\n");
}
@@ -1785,7 +1932,9 @@ static void prregs(struct pt_regs *fp)
printf("ctr = "REG" xer = "REG" trap = %4lx\n",
fp->ctr, fp->xer, fp->trap);
trap = TRAP(fp);
- if (trap == 0x300 || trap == 0x380 || trap == 0x600)
+ if (trap == INTERRUPT_DATA_STORAGE ||
+ trap == INTERRUPT_DATA_SEGMENT ||
+ trap == INTERRUPT_ALIGNMENT)
printf("dar = "REG" dsisr = %.8lx\n", fp->dar, fp->dsisr);
}
@@ -1807,7 +1956,7 @@ static void cacheflush(void)
catch_memory_errors = 1;
sync();
- if (cmd != 'i') {
+ if (cmd != 'i' || IS_ENABLED(CONFIG_PPC_BOOK3S_64)) {
for (; nflush > 0; --nflush, adrs += L1_CACHE_BYTES)
cflush((void *) adrs);
} else {
@@ -1872,7 +2021,7 @@ static void dump_206_sprs(void)
if (!cpu_has_feature(CPU_FTR_ARCH_206))
return;
- /* Actually some of these pre-date 2.06, but whatevs */
+ /* Actually some of these pre-date 2.06, but whatever */
printf("srr0 = %.16lx srr1 = %.16lx dsisr = %.8lx\n",
mfspr(SPRN_SRR0), mfspr(SPRN_SRR1), mfspr(SPRN_DSISR));
@@ -1937,8 +2086,13 @@ static void dump_207_sprs(void)
printf("hfscr = %.16lx dhdes = %.16lx rpr = %.16lx\n",
mfspr(SPRN_HFSCR), mfspr(SPRN_DHDES), mfspr(SPRN_RPR));
- printf("dawr = %.16lx dawrx = %.16lx ciabr = %.16lx\n",
- mfspr(SPRN_DAWR), mfspr(SPRN_DAWRX), mfspr(SPRN_CIABR));
+ printf("dawr0 = %.16lx dawrx0 = %.16lx\n",
+ mfspr(SPRN_DAWR0), mfspr(SPRN_DAWRX0));
+ if (nr_wp_slots() > 1) {
+ printf("dawr1 = %.16lx dawrx1 = %.16lx\n",
+ mfspr(SPRN_DAWR1), mfspr(SPRN_DAWRX1));
+ }
+ printf("ciabr = %.16lx\n", mfspr(SPRN_CIABR));
#endif
}
@@ -1950,8 +2104,14 @@ static void dump_300_sprs(void)
if (!cpu_has_feature(CPU_FTR_ARCH_300))
return;
- printf("pidr = %.16lx tidr = %.16lx\n",
- mfspr(SPRN_PID), mfspr(SPRN_TIDR));
+ if (cpu_has_feature(CPU_FTR_P9_TIDR)) {
+ printf("pidr = %.16lx tidr = %.16lx\n",
+ mfspr(SPRN_PID), mfspr(SPRN_TIDR));
+ } else {
+ printf("pidr = %.16lx\n",
+ mfspr(SPRN_PID));
+ }
+
printf("psscr = %.16lx\n",
hv ? mfspr(SPRN_PSSCR) : mfspr(SPRN_PSSCR_PR));
@@ -1963,6 +2123,18 @@ static void dump_300_sprs(void)
#endif
}
+static void dump_310_sprs(void)
+{
+#ifdef CONFIG_PPC64
+ if (!cpu_has_feature(CPU_FTR_ARCH_31))
+ return;
+
+ printf("mmcr3 = %.16lx, sier2 = %.16lx, sier3 = %.16lx\n",
+ mfspr(SPRN_MMCR3), mfspr(SPRN_SIER2), mfspr(SPRN_SIER3));
+
+#endif
+}
+
static void dump_one_spr(int spr, bool show_unimplemented)
{
unsigned long val;
@@ -2017,6 +2189,7 @@ static void super_regs(void)
dump_206_sprs();
dump_207_sprs();
dump_300_sprs();
+ dump_310_sprs();
return;
}
@@ -2129,6 +2302,25 @@ mwrite(unsigned long adrs, void *buf, int size)
return n;
}
+static int
+mread_instr(unsigned long adrs, ppc_inst_t *instr)
+{
+ volatile int n;
+
+ n = 0;
+ if (setjmp(bus_error_jmp) == 0) {
+ catch_memory_errors = 1;
+ sync();
+ *instr = ppc_inst_read((u32 *)adrs);
+ sync();
+ /* wait a little while to see if we get a machine check */
+ __delay(200);
+ n = ppc_inst_len(*instr);
+ }
+ catch_memory_errors = 0;
+ return n;
+}
+
static int fault_type;
static int fault_except;
static char *fault_chars[] = { "--", "**", "##" };
@@ -2419,7 +2611,7 @@ static void dump_tracing(void)
static void dump_one_paca(int cpu)
{
struct paca_struct *p;
-#ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_PPC_64S_HASH_MMU
int i = 0;
#endif
@@ -2461,6 +2653,7 @@ static void dump_one_paca(int cpu)
DUMP(p, cpu_start, "%#-*x");
DUMP(p, kexec_state, "%#-*x");
#ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_PPC_64S_HASH_MMU
if (!early_radix_enabled()) {
for (i = 0; i < SLB_NUM_BOLTED; i++) {
u64 esid, vsid;
@@ -2488,11 +2681,12 @@ static void dump_one_paca(int cpu)
22, "slb_cache", i, p->slb_cache[i]);
}
}
+#endif
DUMP(p, rfi_flush_fallback_area, "%-*px");
#endif
DUMP(p, dscr_default, "%#-*llx");
-#ifdef CONFIG_PPC_BOOK3E
+#ifdef CONFIG_PPC_BOOK3E_64
DUMP(p, pgd, "%-*px");
DUMP(p, kernel_pgd, "%-*px");
DUMP(p, tcd_ptr, "%-*px");
@@ -2507,7 +2701,7 @@ static void dump_one_paca(int cpu)
DUMP(p, canary, "%#-*lx");
#endif
DUMP(p, saved_r1, "%#-*llx");
-#ifdef CONFIG_PPC_BOOK3E
+#ifdef CONFIG_PPC_BOOK3E_64
DUMP(p, trap_save, "%#-*x");
#endif
DUMP(p, irq_soft_mask, "%#-*x");
@@ -2620,39 +2814,15 @@ static void dump_all_xives(void)
{
int cpu;
- if (num_possible_cpus() == 0) {
+ if (num_online_cpus() == 0) {
printf("No possible cpus, use 'dx #' to dump individual cpus\n");
return;
}
- for_each_possible_cpu(cpu)
+ for_each_online_cpu(cpu)
dump_one_xive(cpu);
}
-static void dump_one_xive_irq(u32 num, struct irq_data *d)
-{
- xmon_xive_get_irq_config(num, d);
-}
-
-static void dump_all_xive_irq(void)
-{
- unsigned int i;
- struct irq_desc *desc;
-
- for_each_irq_desc(i, desc) {
- struct irq_data *d = irq_desc_get_irq_data(desc);
- unsigned int hwirq;
-
- if (!d)
- continue;
-
- hwirq = (unsigned int)irqd_to_hwirq(d);
- /* IPIs are special (HW number 0) */
- if (hwirq)
- dump_one_xive_irq(hwirq, d);
- }
-}
-
static void dump_xives(void)
{
unsigned long num;
@@ -2669,9 +2839,9 @@ static void dump_xives(void)
return;
} else if (c == 'i') {
if (scanhex(&num))
- dump_one_xive_irq(num, NULL);
+ xmon_xive_get_irq_config(num, NULL);
else
- dump_all_xive_irq();
+ xmon_xive_get_irq_all();
return;
}
@@ -2712,7 +2882,12 @@ static void dump_by_size(unsigned long addr, long count, int size)
printf("%0*llx", size * 2, val);
}
- printf("\n");
+ printf(" |");
+ for (j = 0; j < 16; ++j) {
+ val = temp[j];
+ putchar(' ' <= val && val <= '~' ? val : '.');
+ }
+ printf("|\n");
}
}
@@ -2756,8 +2931,8 @@ dump(void)
scanhex(&nidump);
if (nidump == 0)
nidump = 16;
- else if (nidump > MAX_DUMP)
- nidump = MAX_DUMP;
+ else if (nidump > MAX_IDUMP)
+ nidump = MAX_IDUMP;
adrs += ppc_inst_dump(adrs, nidump, 1);
last_cmd = "di\n";
} else if (c == 'l') {
@@ -2850,12 +3025,11 @@ generic_inst_dump(unsigned long adr, long count, int praddr,
{
int nr, dotted;
unsigned long first_adr;
- unsigned int inst, last_inst = 0;
- unsigned char val[4];
+ ppc_inst_t inst, last_inst = ppc_inst(0);
dotted = 0;
- for (first_adr = adr; count > 0; --count, adr += 4) {
- nr = mread(adr, val, 4);
+ for (first_adr = adr; count > 0; --count, adr += ppc_inst_len(inst)) {
+ nr = mread_instr(adr, &inst);
if (nr == 0) {
if (praddr) {
const char *x = fault_chars[fault_type];
@@ -2863,8 +3037,7 @@ generic_inst_dump(unsigned long adr, long count, int praddr,
}
break;
}
- inst = GETWORD(val);
- if (adr > first_adr && inst == last_inst) {
+ if (adr > first_adr && ppc_inst_equal(inst, last_inst)) {
if (!dotted) {
printf(" ...\n");
dotted = 1;
@@ -2874,9 +3047,12 @@ generic_inst_dump(unsigned long adr, long count, int praddr,
dotted = 0;
last_inst = inst;
if (praddr)
- printf(REG" %.8x", adr, inst);
+ printf(REG" %08lx", adr, ppc_inst_as_ulong(inst));
printf("\t");
- dump_func(inst, adr);
+ if (!ppc_inst_prefixed(inst))
+ dump_func(ppc_inst_val(inst), adr);
+ else
+ dump_func(ppc_inst_as_ulong(inst), adr);
printf("\n");
}
return adr - first_adr;
@@ -2897,8 +3073,8 @@ print_address(unsigned long addr)
static void
dump_log_buf(void)
{
- struct kmsg_dumper dumper = { .active = 1 };
- unsigned char buf[128];
+ struct kmsg_dump_iter iter;
+ static unsigned char buf[1024];
size_t len;
if (setjmp(bus_error_jmp) != 0) {
@@ -2909,9 +3085,9 @@ dump_log_buf(void)
catch_memory_errors = 1;
sync();
- kmsg_dump_rewind_nolock(&dumper);
+ kmsg_dump_rewind(&iter);
xmon_start_pagination();
- while (kmsg_dump_get_line_nolock(&dumper, false, buf, sizeof(buf), &len)) {
+ while (kmsg_dump_get_line(&iter, false, buf, sizeof(buf), &len)) {
buf[len] = '\0';
printf("%s", buf);
}
@@ -2928,7 +3104,7 @@ static void dump_opal_msglog(void)
{
unsigned char buf[128];
ssize_t res;
- loff_t pos = 0;
+ volatile loff_t pos = 0;
if (!firmware_has_feature(FW_FEATURE_OPAL)) {
printf("Machine is not running OPAL firmware.\n");
@@ -3083,8 +3259,9 @@ memzcan(void)
printf("%.8lx\n", a - mskip);
}
-static void show_task(struct task_struct *tsk)
+static void show_task(struct task_struct *volatile tsk)
{
+ unsigned int p_state = READ_ONCE(tsk->__state);
char state;
/*
@@ -3092,17 +3269,16 @@ static void show_task(struct task_struct *tsk)
* appropriate for calling from xmon. This could be moved
* to a common, generic, routine used by both.
*/
- state = (tsk->state == 0) ? 'R' :
- (tsk->state < 0) ? 'U' :
- (tsk->state & TASK_UNINTERRUPTIBLE) ? 'D' :
- (tsk->state & TASK_STOPPED) ? 'T' :
- (tsk->state & TASK_TRACED) ? 'C' :
+ state = (p_state == TASK_RUNNING) ? 'R' :
+ (p_state & TASK_UNINTERRUPTIBLE) ? 'D' :
+ (p_state & TASK_STOPPED) ? 'T' :
+ (p_state & TASK_TRACED) ? 'C' :
(tsk->exit_state & EXIT_ZOMBIE) ? 'Z' :
(tsk->exit_state & EXIT_DEAD) ? 'E' :
- (tsk->state & TASK_INTERRUPTIBLE) ? 'S' : '?';
+ (p_state & TASK_INTERRUPTIBLE) ? 'S' : '?';
- printf("%px %016lx %6d %6d %c %2d %s\n", tsk,
- tsk->thread.ksp,
+ printf("%16px %16lx %16px %6d %6d %c %2d %s\n", tsk,
+ tsk->thread.ksp, tsk->thread.regs,
tsk->pid, rcu_dereference(tsk->parent)->pid,
state, task_cpu(tsk),
tsk->comm);
@@ -3127,9 +3303,10 @@ static void format_pte(void *ptep, unsigned long pte)
static void show_pte(unsigned long addr)
{
unsigned long tskv = 0;
- struct task_struct *tsk = NULL;
+ struct task_struct *volatile tsk = NULL;
struct mm_struct *mm;
- pgd_t *pgdp, *pgdir;
+ pgd_t *pgdp;
+ p4d_t *p4dp;
pud_t *pudp;
pmd_t *pmdp;
pte_t *ptep;
@@ -3153,28 +3330,26 @@ static void show_pte(unsigned long addr)
catch_memory_errors = 1;
sync();
- if (mm == &init_mm) {
+ if (mm == &init_mm)
pgdp = pgd_offset_k(addr);
- pgdir = pgd_offset_k(0);
- } else {
+ else
pgdp = pgd_offset(mm, addr);
- pgdir = pgd_offset(mm, 0);
- }
- if (pgd_none(*pgdp)) {
- printf("no linux page table for address\n");
+ p4dp = p4d_offset(pgdp, addr);
+
+ if (p4d_none(*p4dp)) {
+ printf("No valid P4D\n");
return;
}
- printf("pgd @ 0x%px\n", pgdir);
-
- if (pgd_is_leaf(*pgdp)) {
- format_pte(pgdp, pgd_val(*pgdp));
+ if (p4d_is_leaf(*p4dp)) {
+ format_pte(p4dp, p4d_val(*p4dp));
return;
}
- printf("pgdp @ 0x%px = 0x%016lx\n", pgdp, pgd_val(*pgdp));
- pudp = pud_offset(pgdp, addr);
+ printf("p4dp @ 0x%px = 0x%016lx\n", p4dp, p4d_val(*p4dp));
+
+ pudp = pud_offset(p4dp, addr);
if (pud_none(*pudp)) {
printf("No valid PUD\n");
@@ -3223,9 +3398,9 @@ static void show_pte(unsigned long addr)
static void show_tasks(void)
{
unsigned long tskv;
- struct task_struct *tsk = NULL;
+ struct task_struct *volatile tsk = NULL;
- printf(" task_struct ->thread.ksp PID PPID S P CMD\n");
+ printf(" task_struct ->thread.ksp ->thread.regs PID PPID S P CMD\n");
if (scanhex(&tskv))
tsk = (struct task_struct *)tskv;
@@ -3546,7 +3721,7 @@ static void xmon_print_symbol(unsigned long address, const char *mid,
const char *after)
{
char *modname;
- const char *name = NULL;
+ const char *volatile name = NULL;
unsigned long offset, size;
printf(REG, address);
@@ -3570,7 +3745,7 @@ static void xmon_print_symbol(unsigned long address, const char *mid,
printf("%s", after);
}
-#ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_PPC_64S_HASH_MMU
void dump_segments(void)
{
int i;
@@ -3616,7 +3791,7 @@ void dump_segments(void)
printf("sr0-15 =");
for (i = 0; i < 16; ++i)
- printf(" %x", mfsrin(i << 28));
+ printf(" %x", mfsr(i << 28));
printf("\n");
}
#endif
@@ -3648,7 +3823,7 @@ static void dump_tlb_44x(void)
}
#endif /* CONFIG_44x */
-#ifdef CONFIG_PPC_BOOK3E
+#ifdef CONFIG_PPC_BOOK3E_64
static void dump_tlb_book3e(void)
{
u32 mmucfg, pidmask, lpidmask;
@@ -3790,7 +3965,7 @@ static void dump_tlb_book3e(void)
}
}
}
-#endif /* CONFIG_PPC_BOOK3E */
+#endif /* CONFIG_PPC_BOOK3E_64 */
static void xmon_init(int enable)
{
@@ -3836,7 +4011,7 @@ static void sysrq_handle_xmon(int key)
xmon_init(0);
}
-static struct sysrq_key_op sysrq_xmon_op = {
+static const struct sysrq_key_op sysrq_xmon_op = {
.handler = sysrq_handle_xmon,
.help_msg = "xmon(x)",
.action_msg = "Entering xmon",
@@ -3863,10 +4038,9 @@ static void clear_all_bpt(void)
bpts[i].enabled = 0;
/* Clear any data or iabr breakpoints */
- if (iabr || dabr.enabled) {
- iabr = NULL;
- dabr.enabled = 0;
- }
+ iabr = NULL;
+ for (i = 0; i < nr_wp_slots(); i++)
+ dabr[i].enabled = 0;
}
#ifdef CONFIG_DEBUG_FS
@@ -3897,8 +4071,8 @@ DEFINE_SIMPLE_ATTRIBUTE(xmon_dbgfs_ops, xmon_dbgfs_get,
static int __init setup_xmon_dbgfs(void)
{
- debugfs_create_file("xmon", 0600, powerpc_debugfs_root, NULL,
- &xmon_dbgfs_ops);
+ debugfs_create_file("xmon", 0600, arch_debugfs_dir, NULL,
+ &xmon_dbgfs_ops);
return 0;
}
device_initcall(setup_xmon_dbgfs);
@@ -3959,7 +4133,7 @@ struct spu_info {
static struct spu_info spu_info[XMON_NUM_SPUS];
-void xmon_register_spus(struct list_head *list)
+void __init xmon_register_spus(struct list_head *list)
{
struct spu *spu;
@@ -3979,7 +4153,7 @@ void xmon_register_spus(struct list_head *list)
static void stop_spus(void)
{
struct spu *spu;
- int i;
+ volatile int i;
u64 tmp;
for (i = 0; i < XMON_NUM_SPUS; i++) {
@@ -4020,7 +4194,7 @@ static void stop_spus(void)
static void restart_spus(void)
{
struct spu *spu;
- int i;
+ volatile int i;
for (i = 0; i < XMON_NUM_SPUS; i++) {
if (!spu_info[i].spu)
@@ -4110,8 +4284,7 @@ static void dump_spu_fields(struct spu *spu)
DUMP_FIELD(spu, "0x%p", pdata);
}
-int
-spu_inst_dump(unsigned long adr, long count, int praddr)
+static int spu_inst_dump(unsigned long adr, long count, int praddr)
{
return generic_inst_dump(adr, count, praddr, print_insn_spu);
}
@@ -4175,7 +4348,7 @@ static int do_spu_cmd(void)
subcmd = inchar();
if (isxdigit(subcmd) || subcmd == '\n')
termch = subcmd;
- /* fall through */
+ fallthrough;
case 'f':
scanhex(&num);
if (num >= XMON_NUM_SPUS || !spu_info[num].spu) {
diff --git a/arch/powerpc/xmon/xmon_bpts.S b/arch/powerpc/xmon/xmon_bpts.S
new file mode 100644
index 000000000000..69726814cd27
--- /dev/null
+++ b/arch/powerpc/xmon/xmon_bpts.S
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <asm/ppc_asm.h>
+#include <asm/asm-compat.h>
+#include <asm/asm-offsets.h>
+#include "xmon_bpts.h"
+
+/* Prefixed instructions cannot cross 64-byte boundaries */
+.align 6
+.global bpt_table
+bpt_table:
+ .space NBPTS * BPT_SIZE
diff --git a/arch/powerpc/xmon/xmon_bpts.h b/arch/powerpc/xmon/xmon_bpts.h
new file mode 100644
index 000000000000..377068f52edb
--- /dev/null
+++ b/arch/powerpc/xmon/xmon_bpts.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef XMON_BPTS_H
+#define XMON_BPTS_H
+
+#define NBPTS 256
+#ifndef __ASSEMBLY__
+#include <asm/inst.h>
+#define BPT_SIZE (sizeof(ppc_inst_t) * 2)
+#define BPT_WORDS (BPT_SIZE / sizeof(ppc_inst_t))
+
+extern unsigned int bpt_table[NBPTS * BPT_WORDS];
+#endif /* __ASSEMBLY__ */
+
+#endif /* XMON_BPTS_H */